filename (string) | content (string) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string) | constargjson (string) | lang (string) | constargcount (float64) | variableargcount (float64) | sentence (string)
---|---|---|---|---|---|---|---|---|---|---|
cmd/main.go | package main
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"path"
"path/filepath"
"github.com/anuvu/stacker"
stackerlog "github.com/anuvu/stacker/log"
"github.com/apex/log"
"github.com/pkg/errors"
"github.com/urfave/cli"
"golang.org/x/crypto/ssh/terminal"
"gopkg.in/yaml.v2"
)
var (
config stacker.StackerConfig
version = ""
)
func shouldShowProgress(ctx *cli.Context) bool {
/* if the user provided explicit recommendations, follow those */
if ctx.GlobalBool("no-progress") {
return false
}
if ctx.GlobalBool("progress") {
return true
}
/* otherwise, show it when we're attached to a terminal */
return terminal.IsTerminal(int(os.Stdout.Fd()))
}
func main() {
user, err := user.Current()
if err != nil {
fmt.Fprintf(os.Stderr, "couldn't get current user: %s", err)
os.Exit(1)
}
app := cli.NewApp()
app.Name = "stacker"
app.Usage = "stacker builds OCI images"
app.Version = version
app.Commands = []cli.Command{
buildCmd,
recursiveBuildCmd,
publishCmd,
chrootCmd,
cleanCmd,
inspectCmd,
grabCmd,
umociCmd,
squashfsCmd,
unprivSetupCmd,
gcCmd,
containerSetupCmd,
}
configDir := os.Getenv("XDG_CONFIG_HOME")
if configDir == "" {
configDir = path.Join(user.HomeDir, ".config", app.Name)
}
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "stacker-dir",
Usage: "set the directory for stacker's cache",
Value: ".stacker",
},
cli.StringFlag{
Name: "oci-dir",
Usage: "set the directory for OCI output",
Value: "oci",
},
cli.StringFlag{
Name: "roots-dir",
Usage: "set the directory for the rootfs output",
Value: "roots",
},
cli.StringFlag{
Name: "config",
Usage: "stacker config file with defaults",
Value: path.Join(configDir, "conf.yaml"),
},
cli.BoolFlag{
Name: "debug",
Usage: "enable stacker debug mode",
},
cli.BoolFlag{
Name: "q, quiet",
Usage: "silence all logs",
},
cli.StringFlag{
Name: "log-file",
Usage: "log to a file instead of stderr",
},
}
/*
* Here's a barrel of suck: urfave/cli v1 doesn't allow for default
* values of boolean flags. So what we want is to append either
* --progress if this is not a tty, or --no-progress if it is a tty, so
* that we can allow for the right disabling of the thing in the right
* case.
*
* We don't want to convert to v2, because among other things it
* restricts *even more* the order of arguments and flags.
*
* see shouldShowProgress() for how we resolve whether or not to
* actually show it.
*/
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
if isTerminal {
app.Flags = append(app.Flags, cli.BoolFlag{
Name: "no-progress",
Usage: "disable progress when downloading container images or files",
})
} else {
app.Flags = append(app.Flags, cli.BoolFlag{
Name: "progress",
Usage: "enable progress when downloading container images or files",
})
}
var logFile *os.File
// close the log file if we happen to open it
defer func() {
if logFile != nil {
logFile.Close()
}
}()
debug := false
app.Before = func(ctx *cli.Context) error {
logLevel := log.InfoLevel
if ctx.Bool("debug") {
debug = true
logLevel = log.DebugLevel
if ctx.Bool("quiet") {
return errors.Errorf("debug and quiet don't make sense together")
}
} else if ctx.Bool("quiet") {
logLevel = log.FatalLevel
}
var err error
content, err := ioutil.ReadFile(ctx.String("config"))
if err == nil {
err = yaml.Unmarshal(content, &config)
if err != nil {
return err
}
}
if config.StackerDir == "" || ctx.IsSet("stacker-dir") {
config.StackerDir = ctx.String("stacker-dir")
}
if config.OCIDir == "" || ctx.IsSet("oci-dir") {
config.OCIDir = ctx.String("oci-dir")
}
if config.RootFSDir == "" || ctx.IsSet("roots-dir") {
config.RootFSDir = ctx.String("roots-dir")
}
config.StackerDir, err = filepath.Abs(config.StackerDir)
if err != nil {
return err
}
config.OCIDir, err = filepath.Abs(config.OCIDir)
if err != nil {
return err
}
config.RootFSDir, err = filepath.Abs(config.RootFSDir)
if err != nil {
return err
}
var handler log.Handler
handler = stackerlog.NewTextHandler(os.Stderr)
if ctx.String("log-file") != "" {
logFile, err = os.Create(ctx.String("log-file"))
if err != nil {
return errors.Wrapf(err, "failed to access %v", logFile)
}
handler = stackerlog.NewTextHandler(logFile)
}
stackerlog.FilterNonStackerLogs(handler, logLevel)
stackerlog.Debugf("stacker version %s", version)
return nil
}
if err := app.Run(os.Args); err != nil {
format := "error: %v\n"
if debug {
format = "error: %+v\n"
}
fmt.Fprintf(os.Stderr, format, err)
os.Exit(1)
}
}
| ["\"XDG_CONFIG_HOME\""] | [] | ["XDG_CONFIG_HOME"] | [] | ["XDG_CONFIG_HOME"] | go | 1 | 0 | |
floworch/main.go | package main
import (
"log"
"net/http"
"os"
"strconv"
"github.com/gorilla/mux"
"github.com/myntra/shuttle-engine/config"
"github.com/myntra/shuttle-engine/types"
)
var (
// MapOfDeleteChannelDetails ...
MapOfDeleteChannelDetails = make(map[string]*types.DeleteChannelDetails)
// EnableMetrics holds the value of the "ENABLE_METRICS" environment variable; metrics are enabled when it parses as true via strconv.ParseBool (e.g. "true" or "1")
EnableMetrics bool
err error
)
func main() {
router := mux.NewRouter()
if err := config.ReadConfig(); err != nil {
log.Println(err)
return
}
if err := config.InitDatabaseSession(); err != nil {
log.Println(err)
return
}
EnableMetrics, err = strconv.ParseBool(os.Getenv("ENABLE_METRICS"))
if err != nil {
log.Println(err)
EnableMetrics = false
}
if EnableMetrics {
HealthCheckTelegraf()
}
router.HandleFunc("/execute", executeHandler).Methods("Post")
router.HandleFunc("/callback", callbackHandler).Methods("Post")
router.HandleFunc("/healthcheck", HealthCheckHandler).Methods("Get")
router.HandleFunc("/runs/{id}/abort", AbortRunHandler).Methods("Post")
router.HandleFunc("/queue", QueueStatusHandler).Methods("Get")
port := 5500
log.Printf("Starting server on :%d", port)
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(port), router))
}
| ["\"ENABLE_METRICS\""] | [] | ["ENABLE_METRICS"] | [] | ["ENABLE_METRICS"] | go | 1 | 0 | |
src/wildfires/data/datasets.py | # -*- coding: utf-8 -*-
"""Module that simplifies use of various datasets.
TODO:
Figure out how to handle the fact that saving a cube for certain kinds of
longitude coordinates adds the attribute `circular=True`.
Since longitudes registered in the [-180, 180] system fail to register as circular
using `iris.util._is_circular` but the same coordinates + 180° do register
correctly (if they actually cover all of [0, 360], as expected), does regridding
using longitudes in [-180, 180] actually consider the coordinate's circular nature
correctly?
See also NOTE in `lat_lon_dimcoords`.
Before caching, regrid() always needs to be called since this also carries out
crucial coordinate attribute regularisation, which is essential for consistent
caching behaviour! Make this intrinsic to __init__?
"""
import concurrent.futures
import glob
import logging
import multiprocessing
import operator
import os
import re
import shutil
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from copy import copy, deepcopy
from datetime import datetime, timedelta
from functools import reduce
import cf_units
import dask.array as da
import h5py
import iris
import iris.coord_categorisation
import iris.quickplot as qplt
import matplotlib.pyplot as plt
import netCDF4
import numpy as np
import pandas as pd
import rasterio
import scipy.ndimage
from dateutil.relativedelta import relativedelta
from era5analysis import MonthlyMeanMinMaxDTRWorker, retrieval_processing, retrieve
from git import InvalidGitRepositoryError, Repo
from iris.time import PartialDateTime
from joblib import Parallel, delayed
from numpy.testing import assert_allclose
from pyhdf.SD import SD, SDC
from scipy.optimize import minimize
from tqdm import tqdm
from .. import __version__
from ..cache import IrisMemory
from ..cache.hashing import _dataset_hasher
from ..chunked_regrid import spatial_chunked_regrid
from ..configuration import DATA_DIR, M_PER_HR_THRES, MM_PER_HR_THRES
from ..qstat import get_ncpus
from ..utils import (
box_mask,
ensure_datetime,
get_batches,
get_centres,
get_land_mask,
in_360_longitude_system,
match_shape,
reorder_cube_coord,
strip_multiline,
translate_longitude_system,
)
from .landcover import conversion as lc_to_pft_map
from .landcover import convert_to_pfts, get_mapping_pfts
__all__ = (
"CommitMatchError",
"Dataset",
"DatasetNotFoundError",
"Error",
"IGNORED_DATASETS",
"MM_PER_HR_THRES",
"M_PER_HR_THRES",
"MonthlyDataset",
"NonUniformCoordError",
"ObservedAreaError",
"VariableNotFoundError",
"cube_contains_coords",
"data_map_plot",
"dataset_preprocessing",
"dataset_times",
"dummy_lat_lon_cube",
"fill_cube",
"fill_dataset",
"get_climatology",
"get_dataset_climatology_cubes",
"get_dataset_mean_cubes",
"get_dataset_monthly_cubes",
"get_mean",
"get_monthly",
"get_monthly_mean_climatology",
"homogenise_cube_attributes",
"homogenise_cube_mask",
"homogenise_time_coordinate",
"join_adjacent_intervals",
"lat_lon_dimcoords",
"lat_lon_match",
"load_cubes",
"monthly_constraint",
"regions_GFED",
"regrid",
"regrid_dataset",
)
IGNORED_DATASETS = [
"AvitabileAGB",
"CRU",
"ESA_CCI_Fire",
"ESA_CCI_Landcover",
"ESA_CCI_Soilmoisture",
"ESA_CCI_Soilmoisture_Daily",
"GPW_v4_pop_dens",
"LIS_OTD_lightning_time_series",
"Simard_canopyheight",
"Thurner_AGB",
]
logger = logging.getLogger(__name__)
repo_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)
try:
repo = Repo(repo_dir)
except InvalidGitRepositoryError:
repo = None
iris_cache = IrisMemory("datasets", verbose=0).cache
class Error(Exception):
"""Base class for exceptions in the datasets module."""
class ObservedAreaError(Error):
"""Raised when a Dataset does not satisfy observed area calculation requirements."""
class NonUniformCoordError(Error):
"""Raised when a coordinate is neither monotonically increasing or decreasing."""
class CommitMatchError(Error):
"""Raised when commit hashes of loaded cubes do not match."""
class VariableNotFoundError(ValueError, Error):
"""Raised when a requested variable could not be found."""
class DatasetNotFoundError(ValueError, Error):
"""Raised when a requested dataset could not be found."""
class UnexpectedCoordinateError(ValueError, Error):
"""Raised when an unexpected coordinate is encountered."""
def fill_cube(cube, mask):
"""Process cube in-place by filling gaps using NN interpolation and also filtering.
The idea is to respect the masking of one variable (usually burned area) alone.
For all others, replace masked data using nearest-neighbour interpolation.
Thereafter, apply the aggregated mask `mask`, so that eg. only data over land and
within the latitude limits is considered. Latitude limits might be due to
limitations of GSMaP precipitation data, as well as limitations of the lightning
LIS/OTD dataset, for example.
Args:
cube (iris.cube.Cube): Cube to be filled.
mask (numpy.ndarray): Boolean mask typically composed of the land mask,
latitude mask and burned area mask like:
`mask=land_mask | lat_mask | burned_area_mask`.
This controls which data points remain after the processing, while the
mask internal to the cube's data controls where interpolation will take
place.
"""
assert isinstance(cube.data, np.ma.core.MaskedArray) and isinstance(
cube.data.mask, np.ndarray
), "Cube needs to have a full (non-sparse) data mask."
# Here, data gaps are filled, so that the maximum possible area of data
# (limited by where burned area data is available) is used for the analysis.
# Choose to fill the gaps using nearest-neighbour interpolation. To do this,
# define a mask which will tell the algorithm where to replace data.
logger.info("Filling: '{}'.".format(cube.name()))
# Interpolate data where (and if) it is masked.
fill_mask = cube.data.mask
if np.sum(fill_mask[~mask]):
orig_data = cube.data.data.copy()
logger.info(
"Filling {:} elements ({:} after final masking).".format(
np.sum(fill_mask), np.sum(fill_mask[~mask])
)
)
filled_data = cube.data.data[
tuple(
scipy.ndimage.distance_transform_edt(
fill_mask, return_distances=False, return_indices=True
)
)
]
assert np.all(np.isclose(cube.data.data[~fill_mask], orig_data[~fill_mask]))
selected_unfilled_data = orig_data[~mask]
selected_filled_data = filled_data[~mask]
logger.info(
"Min {:0.1e}/{:0.1e}, max {:0.1e}/{:0.1e} before/after "
"filling (for relevant regions)".format(
np.min(selected_unfilled_data),
np.min(selected_filled_data),
np.max(selected_unfilled_data),
np.max(selected_filled_data),
)
)
else:
# Prevent overwriting with previous loop's filled data if there is nothing
# to fill.
filled_data = cube.data.data
# Always apply global combined mask.
cube.data = np.ma.MaskedArray(filled_data, mask=mask)
# Check that there aren't any inf's or nan's in the data.
assert not np.any(np.isinf(cube.data.data[~cube.data.mask]))
assert not np.any(np.isnan(cube.data.data[~cube.data.mask]))
return cube
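# Illustrative usage sketch (not part of the original module), assuming the three
# masks below are boolean arrays matching `cube.shape` and that `cube.data` already
# carries a full (non-sparse) mask, as `fill_cube` requires:
#
#     combined_mask = land_mask | lat_mask | burned_area_mask
#     filled = fill_cube(cube, combined_mask)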
def cube_contains_coords(cube, *coords):
"""Check whether the given cube contains all the requested coordinates."""
for coord in coords:
if not cube.coords(coord):
return False
return True
def homogenise_time_coordinate(cubes):
"""Ensure all given cubes have the same time coordinate.
Args:
cubes (iris.cube.CubeList): List of cubes to process.
Returns:
iris.cube.CubeList: List of processed cubes.
Raises:
ValueError: If any cube does not have a "time" coordinate.
ValueError: If `cubes` have differing calendars.
"""
# Check that all cubes have a "time" coordinate.
for cube in cubes:
if not cube.coords("time"):
raise ValueError(f"Cube '{cube}' did not have a 'time' coordinate.")
# Check that all cubes have the same calendar.
calendars = {cube.coord("time").units.calendar for cube in cubes}
if len(calendars) > 1:
raise ValueError(f"Cubes had differing calendars: '{calendars}'.")
# Define the common calendar to use.
calendar = list(calendars)[0]
ref_time = datetime(1970, 1, 1)
ref_unit = cf_units.Unit(
f"days since {ref_time.strftime('%Y-%m-%d %H:%M:%S')}", calendar=calendar
)
# Convert all times (and bounds).
for cube in cubes:
cube.coord("time").convert_units(ref_unit)
return cubes
def homogenise_cube_attributes(cubes, adjust_time=False):
"""Ensure all given cubes have compatible attributes in-place.
Args:
cubes (iris.cube.CubeList): List of cubes to process.
adjust_time (bool): If True, modify the temporal coordinates (with name
"time") to have the same units.
Returns:
iris.cube.CubeList: List of processed cubes.
Raises:
ValueError: If `adjust_time` is True and any cube does not have a "time"
coordinate.
ValueError: If `adjust_time` is True and `cubes` have differing calendars.
"""
attribute_list = [cube.attributes for cube in cubes]
common_values = attribute_list[0].copy()
for attributes in attribute_list[1:]:
shared_keys = set(common_values).intersection(set(attributes))
common_values = dict(
(key, common_values[key])
for key in shared_keys
if common_values[key] == attributes[key]
)
for cube in cubes:
cube.attributes = common_values
if adjust_time:
homogenise_time_coordinate(cubes)
return cubes
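# Illustrative usage sketch (not part of the original module): aligning the
# attributes and time units of cubes loaded from a hypothetical list of files so
# they can be processed together. Only attributes shared (and equal) across all
# cubes are retained.
#
#     cubes = iris.cube.CubeList(iris.load_cube(f) for f in files)
#     homogenise_cube_attributes(cubes, adjust_time=True)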
def homogenise_cube_mask(cube):
"""Ensure cube.data is a masked array with a full mask (in-place).
Note:
This function realises lazy data.
"""
array = cube.data
if isinstance(array, np.ma.core.MaskedArray):
if isinstance(array.mask, np.ndarray):
return cube
else:
if array.mask:
raise ValueError(
"The only mask entry is True, meaning all entries are masked!"
)
cube.data = np.ma.MaskedArray(array, mask=np.zeros_like(array, dtype=np.bool_))
return cube
def dataset_preprocessing(dataset, min_time, max_time):
"""Process `Dataset` `dataset` in-place to enforce uniformity."""
# This step does take on the order of seconds, but re-caching here would be a
# waste of space.
# Limit the amount of data that has to be processed.
dataset.limit_months(min_time, max_time)
# Regrid cubes to the same lat-lon grid.
# TODO: change lat and lon limits and also the number of points!!
# Always work in 0.25 degree steps? From the same starting point?
dataset.regrid()
def get_monthly_mean_climatology(dataset, min_time, max_time, *args, **kwargs):
"""Modifies `dataset` in-place, also generating temporal averages.
TODO:
Currently `dataset` is modified by regridding, temporal selection and
`get_monthly_data`. Should this be circumvented by copying it?
Returns:
tuple of `Dataset`
"""
dataset_preprocessing(dataset, min_time, max_time)
# Generate overall temporal mean. Do this before monthly data is created for all
# datasets, since this will only increase the computational burden and skew the
# mean towards newly synthesised months (created using `get_monthly_data`,
# for static, yearly, or climatological datasets).
mean_dataset = dataset.get_mean_dataset()
climatology_dataset = dataset.get_climatology_dataset(min_time, max_time)
# Get monthly data over the chosen interval for the dataset.
# TODO: Inplace argument for get_monthly_data methods?
monthly_dataset = dataset.get_monthly_dataset(min_time, max_time)
# TODO: See note in `get_mean_climatology_monthly_dataset`.
# mean_dataset, climatology_dataset, monthly_dataset = monthly_dataset.get_mean_climatology_monthly_dataset(
# min_time, max_time
# )
return monthly_dataset, mean_dataset, climatology_dataset
def get_monthly(dataset, min_time, max_time):
dataset_preprocessing(dataset, min_time, max_time)
return (dataset.get_monthly_dataset(min_time, max_time),)
def get_climatology(dataset, min_time, max_time):
dataset_preprocessing(dataset, min_time, max_time)
return (dataset.get_climatology_dataset(min_time, max_time),)
def get_mean(dataset, min_time, max_time):
dataset_preprocessing(dataset, min_time, max_time)
return (dataset.get_mean_dataset(),)
@iris_cache
def fill_dataset(dataset, mask):
"""Perform processing on all cubes."""
logger.debug(f"Filling '{dataset}' with {len(dataset)} variable(s).")
return iris.cube.CubeList([fill_cube(cube, mask) for cube in dataset])
@iris_cache
def get_dataset_mean_cubes(dataset):
"""Return mean cubes."""
logger.debug(f"Calculating mean for '{dataset}' with {len(dataset)} variable(s).")
# TODO: Should we copy the cube if it does not require averaging?
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=r".*Collapsing a non-contiguous coordinate.*"
)
mean_cubes = iris.cube.CubeList(
cube.collapsed("time", iris.analysis.MEAN) if cube.coords("time") else cube
for cube in dataset
)
for cube in mean_cubes:
# This function has the wanted side-effect of realising the data.
# Without this, calculations of things like the total temporal mean are
# delayed until the cube is needed, which we do not want here as we are
# caching the results.
homogenise_cube_mask(cube)
# This return value will be cached by writing it to disk as NetCDF files.
return mean_cubes
@iris_cache
def get_dataset_monthly_cubes(dataset, start, end):
"""Return monthly cubes between two dates."""
logger.debug(
f"Calculating monthly cubes for '{dataset}' with {len(dataset)} variable(s)."
)
monthly_cubes = dataset.get_monthly_data(start, end)
for cube in monthly_cubes:
# This function has the wanted side-effect of realising the data.
# Without this, calculations of things like the total temporal mean are
# delayed until the cube is needed, which we do not want here as we are
# caching the results.
homogenise_cube_mask(cube)
# This return value will be cached by writing it to disk as NetCDF files.
return monthly_cubes
@iris_cache
def get_dataset_climatology_cubes(dataset, start, end):
logger.debug(
f"Calculating climatology for '{dataset}' with {len(dataset)} variable(s)."
)
# NOTE: Calling get_dataset_monthly_cubes using the slices is important, as this is
# how it is called originally, and therefore how it is represented in the cache!!
monthly_cubes = iris.cube.CubeList(
get_dataset_monthly_cubes(dataset[cube_slice], start, end)[0]
for cube_slice in dataset.single_cube_slices()
)
climatology_cubes = iris.cube.CubeList()
# Calculate monthly climatology.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=r".*Collapsing a non-contiguous coordinate.*"
)
for cube in monthly_cubes:
if not cube.coords("month_number"):
iris.coord_categorisation.add_month_number(cube, "time")
climatology_cubes.append(
cube.aggregated_by("month_number", iris.analysis.MEAN)
)
for i in range(len(climatology_cubes)):
# This function has the wanted side-effect of realising the data.
# Without this, calculations of things like the total temporal mean are
# delayed until the cube is needed, which we do not want here as we are
# caching the results.
homogenise_cube_mask(climatology_cubes[i])
sort_indices = np.argsort(climatology_cubes[i].coord("month_number").points)
if not np.all(sort_indices == np.arange(len(sort_indices))):
# Reorder cubes to let month numbers increase monotonically if needed.
climatology_cubes[i] = reorder_cube_coord(
climatology_cubes[i], sort_indices, name="month_number", promote=False
)
# Promote the month_number coordinate to being the leading coordinate.
iris.util.promote_aux_coord_to_dim_coord(climatology_cubes[i], "month_number")
# This return value will be cached by writing it to disk as NetCDF files.
return climatology_cubes
def join_adjacent_intervals(intervals):
"""Join adjacent or overlapping intervals into contiguous intervals.
Args:
intervals (list of 2-element iterables): A list of iterables with 2
elements where each such iterable (eg. the tuple (start, end))
defines the start and end of the interval.
Returns:
list of list: Contiguous intervals.
Examples:
>>> join_adjacent_intervals([[1, 2], [2, 3], [-1, 1]])
[[-1, 3]]
>>> from datetime import datetime
>>> contiguous = join_adjacent_intervals([
... (datetime(2000, 1, 1), datetime(2000, 2, 1)),
... (datetime(1999, 1, 1), datetime(2000, 1, 1)),
... (datetime(1995, 1, 1), datetime(1995, 2, 1))
... ])
>>> contiguous == [
... [datetime(1995, 1, 1), datetime(1995, 2, 1)],
... [datetime(1999, 1, 1), datetime(2000, 2, 1)],
... ]
True
>>> overlapping_contiguous = join_adjacent_intervals([
... (datetime(1999, 1, 1), datetime(2000, 2, 1)),
... (datetime(2000, 1, 1), datetime(2000, 2, 1)),
... (datetime(1995, 1, 1), datetime(1995, 3, 1)),
... (datetime(1995, 2, 1), datetime(1995, 4, 1)),
... (datetime(1995, 4, 1), datetime(1995, 5, 1)),
... ])
>>> overlapping_contiguous == [
... [datetime(1995, 1, 1), datetime(1995, 5, 1)],
... [datetime(1999, 1, 1), datetime(2000, 2, 1)],
... ]
True
>>> join_adjacent_intervals([]) == []
True
"""
if not intervals:
return []
intervals = list(map(list, intervals))
sorted_intervals = sorted(intervals, key=lambda x: x[0])
contiguous_intervals = [sorted_intervals.pop(0)]
while sorted_intervals:
if sorted_intervals[0][0] <= contiguous_intervals[-1][1]:
contiguous_intervals[-1][1] = max(
[sorted_intervals.pop(0)[1], contiguous_intervals[-1][1]]
)
else:
contiguous_intervals.append(sorted_intervals.pop(0))
return contiguous_intervals
def dummy_lat_lon_cube(
data, lat_lims=(-90, 90), lon_lims=(-180, 180), monthly=False, **kwargs
):
"""Construct a cube from data given certain assumptions and optional arguments.
Args:
lat_lims (2-tuple):
lon_lims (2-tuple):
monthly (bool): If True, the dummy temporal coordinate will have monthly
instead of daily increments. Note that this is only used if the
`dim_coords_and_dims` keyword argument is not given.
kwargs:
Of note are:
- dim_coords_and_dims: If supplied, will be used to initialise
coordinates instead of `lat_lims`, `lon_lims` and a simple
numerical time coordinate.
"""
n_dims = len(data.shape)
assert n_dims in {2, 3}
new_latitudes = get_centres(np.linspace(*lat_lims, data.shape[0 + n_dims % 2] + 1))
new_longitudes = get_centres(np.linspace(*lon_lims, data.shape[1 + n_dims % 2] + 1))
new_lat_coord = iris.coords.DimCoord(
new_latitudes, standard_name="latitude", units="degrees"
)
new_lon_coord = iris.coords.DimCoord(
new_longitudes,
standard_name="longitude",
units="degrees",
circular=True,
)
if n_dims == 2:
grid_coords = [(new_lat_coord, 0), (new_lon_coord, 1)]
else:
# Define a time coordinate as well.
units = cf_units.Unit("days since 1970-01-01", calendar="gregorian")
if monthly:
num_dates = units.date2num(
[
datetime(1970, 1, 1) + relativedelta(months=months)
for months in range(data.shape[0])
]
)
else:
num_dates = list(range(data.shape[0]))
time_coord = iris.coords.DimCoord(
num_dates, standard_name="time", var_name="time", units=units
)
grid_coords = [(time_coord, 0), (new_lat_coord, 1), (new_lon_coord, 2)]
kwargs_mod = kwargs.copy()
if "dim_coords_and_dims" in kwargs_mod:
del kwargs_mod["dim_coords_and_dims"]
new_lat_coord.guess_bounds()
new_lon_coord.guess_bounds()
return iris.cube.Cube(
data,
dim_coords_and_dims=kwargs.get("dim_coords_and_dims", grid_coords),
**kwargs_mod,
)
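# Illustrative usage sketch (not part of the original module): a global monthly
# dummy cube built from a bare 3D array using only the documented defaults. The
# first axis is treated as time, the other two as latitude/longitude spanning
# (-90, 90) and (-180, 180).
#
#     data = np.random.random((12, 180, 360))
#     cube = dummy_lat_lon_cube(data, monthly=True)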
def data_map_plot(
data, lat_lims=(-90, 90), lon_lims=(-180, 180), filename=None, log=False, **kwargs
):
"""Used to plot data or an iris.cube.Cube on a map with coastlines."""
if isinstance(data, iris.cube.Cube):
cube = data
else:
cube = dummy_lat_lon_cube(data, lat_lims, lon_lims)
cube = cube.copy()
if "name" in kwargs:
cube.long_name = kwargs["name"]
else:
cube.long_name = cube.name()
if log:
future_name = "log " + cube.long_name
cube = iris.analysis.maths.log(cube)
cube.long_name = future_name
fig = plt.figure()
qplt.contourf(cube)
plt.gca().coastlines()
if filename is not None:
plt.savefig(filename)
return fig
def load_cubes(files, n=None):
"""Similar to iris.load(), but seems to scale much better.
The better scaling is partly due to the fact that this function does not try to
merge any cubes.
This function also solves the problem that the order in which iris.load() loads
cubes into a CubeList is not constant, varying from one run to the next
(presumably due to some random seed).
"""
# Make sure files are sorted so that times increase.
files.sort()
cube_list = iris.cube.CubeList()
logger.info("Loading files.")
for f in tqdm(files[slice(0, n)]):
cube_list.extend(iris.load(f))
return cube_list
def lat_lon_dimcoords(latitudes, longitudes):
"""Make sure latitudes and longitudes are iris DimCoords."""
if not isinstance(latitudes, iris.coords.DimCoord):
latitudes = iris.coords.DimCoord(
latitudes, standard_name="latitude", units="degrees", var_name="latitude"
)
if not isinstance(longitudes, iris.coords.DimCoord):
longitudes = iris.coords.DimCoord(
longitudes, standard_name="longitude", units="degrees", var_name="longitude"
)
assert_allclose(longitudes.units.modulus, 360)
longitudes.circular = True
return latitudes, longitudes
def lat_lon_match(
cube,
new_latitudes=get_centres(np.linspace(-90, 90, 721)),
new_longitudes=get_centres(np.linspace(-180, 180, 1441)),
):
"""Test whether regridding is necessary."""
assert cube_contains_coords(
cube, "latitude", "longitude"
), "Need [[time,] lat, lon] dimensions."
new_latitudes, new_longitudes = lat_lon_dimcoords(new_latitudes, new_longitudes)
for (coord_old, coord_new) in (
(cube.coord("latitude"), new_latitudes),
(cube.coord("longitude"), new_longitudes),
):
if tuple(coord_old.points) != tuple(coord_new.points):
break
else:
return True
return False
def regrid(
cube,
area_weighted=None,
new_latitudes=get_centres(np.linspace(-90, 90, 721)),
new_longitudes=get_centres(np.linspace(-180, 180, 1441)),
scheme=None,
regridder=None,
return_regridder=False,
verbose=False,
):
"""Regrid latitudes and longitudes.
Expects at least latitude and longitude coordinates.
For some input data, the `iris.analysis.Linear()` regridder yields very large
values, which seems to be related to interpolation of masked data (at the edges of
the domain). To combat this, the output array mask will be supplanted by one which
masks any points outside of the original extrema.
Args:
cube (iris.cube.Cube): Cube to regrid.
area_weighted (bool): If True, perform first order conservative area weighted
regridding. If False, perform bilinear regridding. For more control, see
`scheme` and `regridder`. If none of `area_weighted`, `scheme`, or
`regridder` are given, nearest-neighbour regridding will be used.
new_latitudes (array-like): New grid latitudes.
new_longitudes (array-like): New grid longitudes.
scheme (object with a `regridder()` method): This object should define a
`regridder` method with interface `scheme.regridder(src_cube, target_cube)`,
eg. `iris.analysis.Linear()`. Takes precedence over `area_weighted`.
regridder (callable): Regridder to use. Analogous to `scheme.regridder()`,
eg. `iris.analysis.Linear().regridder(src_cube, target_cube)`. If given,
`regridder` needs to take an `iris.cube.Cube` as an argument and return an
`iris.cube.Cube` on the target grid. Takes precedence over both
`area_weighted` and `scheme`.
return_regridder (bool): If True, return the regridder which contains the
interpolation weights. This can be re-used for the same type of regridding
operation between the same lat-lon grids.
verbose (bool): Show a progress meter showing the remaining slices. Applies
only if `cube` also has coordinates other than latitude and longitude.
Returns:
iris.cube.Cube: The interpolated cube. This will always contain a masked
array even if the input data did not have a mask.
iris Regridder: If `return_regridder` is True, the interpolated cube and
associated regridder are returned.
Raises:
ValueError: If a coordinate system other than `None` or `WGS84` is
encountered.
Note:
Do time coordinate differences disrupt the usage of cached regridders?
"""
assert cube_contains_coords(
cube, "latitude", "longitude"
), "Need at least latitude and longitude coordinates."
# TODO: Check that coordinate system discrepancies are picked up by
# this check!!
if lat_lon_match(cube, new_latitudes, new_longitudes):
logger.info("No regridding needed for '{}'.".format(cube.name()))
return cube
# Use nearest-neighbour by default.
if area_weighted is None and scheme is None and regridder is None:
scheme = iris.analysis.Nearest()
logger.debug("Regridding '{}'.".format(cube.name()))
new_latitudes, new_longitudes = lat_lon_dimcoords(new_latitudes, new_longitudes)
if len(cube.shape) > 2:
# Call the regridding function recursively with slices of the
# data, in order to try to prevent occasional Segmentation Faults
# that occur when trying to regrid a large chunk of data in > 2
# dimensions.
# Make sure that the latitude and longitude coordinates are placed after the
# initial coordinates to ensure proper indexing below. Note that additional
# coordinates (or fewer) may exist which are not reflected in the data's shape
# - thus the use of both `len(cube.shape) - 1` as opposed to simply `-1` and
# cube.coord(dimensions=...) instead of simply cube.coords().
assert (
set(
(
coord.name()
for coord in (
cube.coord(dimensions=len(cube.shape) - 2),
cube.coord(dimensions=len(cube.shape) - 1),
)
)
)
== set(("latitude", "longitude"))
)
# Ensure all dim coords are associated with a single dimension only.
for coord, dims in cube._dim_coords_and_dims:
assert isinstance(dims, (np.integer, int)) or len(dims) == 1
# Iterate over all dimensions but (guaranteed to be preceding) latitude and
# longitude.
indices_lists = [
ind_arr.flatten()
for ind_arr in np.indices(cube.shape[: len(cube.shape) - 2])
]
new_shape = [*cube.shape[: len(cube.shape) - 2], None, None]
lat_coord_dims = cube.coord_dims("latitude")
assert len(lat_coord_dims) == 1
lon_coord_dims = cube.coord_dims("longitude")
assert len(lon_coord_dims) == 1
new_shape[lat_coord_dims[0]] = new_latitudes.shape[0]
new_shape[lon_coord_dims[0]] = new_longitudes.shape[0]
regridded_data = np.ma.MaskedArray(
np.zeros(tuple(new_shape)),
mask=True,
)
for indices in tqdm(
zip(*indices_lists),
total=len(indices_lists[0]),
desc="Regridding spatial slices",
disable=not verbose,
):
# Reuse the regridder between subsequent regridding operations.
regridded_cube, regridder = regrid(
cube[indices],
area_weighted=area_weighted,
new_latitudes=new_latitudes,
new_longitudes=new_longitudes,
scheme=scheme,
regridder=regridder,
return_regridder=True,
)
regridded_data[indices] = regridded_cube.data
new_lat_lon_coords = [None, None]
new_lat_lon_coords[lat_coord_dims[0] - (len(cube.shape) - 2)] = (
new_latitudes,
lat_coord_dims[0],
)
new_lat_lon_coords[lon_coord_dims[0] - (len(cube.shape) - 2)] = (
new_longitudes,
lon_coord_dims[0],
)
def get_single_dim(dim):
if isinstance(dim, tuple):
assert len(dim) == 1
return dim[0]
return dim
regridded_cube = iris.cube.Cube(
regridded_data,
dim_coords_and_dims=(
# Account for potentially unordered dim_coords_and_dims, e.g. with
# ('time', 0) being the last element in the list.
[
(dim_coord, dim)
for dim_coord, dim in cube._dim_coords_and_dims
if get_single_dim(dim) in range(len(cube.shape) - 2)
]
+ new_lat_lon_coords
),
aux_coords_and_dims=cube._aux_coords_and_dims,
aux_factories=cube.aux_factories,
)
regridded_cube.metadata = cube.metadata
if return_regridder:
return regridded_cube, regridder
return regridded_cube
# This is where the core 2D regridding takes place.
assert cube_contains_coords(
cube, "latitude", "longitude"
), "Need [lat, lon] dimensions for core algorithm."
WGS84 = iris.coord_systems.GeogCS(
semi_major_axis=6378137.0, semi_minor_axis=6356752.314245179
)
# Make sure coordinate systems are uniform.
systems = [
cube.coord(coord_name).coord_system for coord_name in ["latitude", "longitude"]
]
assert systems[0] == systems[1]
if systems[0] is None:
coord_sys = None
elif (systems[0].semi_major_axis == WGS84.semi_major_axis) and (
systems[0].semi_minor_axis == WGS84.semi_minor_axis
):
logger.debug("Using WGS84 coordinate system for regridding.")
coord_sys = WGS84
# Fix floating point 'bug' where the inverse flattening of the
# coord system that comes with the dataset does not match the
# inverse flattening that is calculated by iris upon giving the two
# axis parameters above (which do match between the two coordinate
# systems). Inverse flattening calculated by iris:
# 298.2572235629972, vs that in the Copernicus_SWI dataset:
# 298.257223563, which seems like it is simply truncated.
for coord_name in ["latitude", "longitude"]:
cube.coord(coord_name).coord_system = WGS84
else:
raise ValueError("Unknown coord_system:{:}".format(systems[0]))
for coord in [c for c in cube.coords() if c.name() in ["latitude", "longitude"]]:
if not coord.has_bounds():
coord.guess_bounds()
if regridder is None:
logger.debug("Constructing regridder.")
for coord in [new_latitudes, new_longitudes]:
coord.coord_system = coord_sys
grid_coords = [(new_latitudes, 0), (new_longitudes, 1)]
new_grid = iris.cube.Cube(
np.zeros([coord[0].points.size for coord in grid_coords]),
dim_coords_and_dims=grid_coords,
)
for coord in new_grid.coords():
if not coord.has_bounds():
coord.guess_bounds()
if scheme is None:
if area_weighted:
regridder = iris.analysis.AreaWeighted(mdtol=1).regridder(
cube, new_grid
)
else:
regridder = iris.analysis.Linear().regridder(cube, new_grid)
else:
regridder = scheme.regridder(cube, new_grid)
else:
logger.debug("Using given regridder.")
logger.debug("Cube has lazy data: {}.".format(cube.has_lazy_data()))
interpolated_cube = homogenise_cube_mask(regridder(cube))
if isinstance(regridder, iris.analysis._regrid.RectilinearRegridder):
# Fix the extreme values that can occur with the
# `iris.analysis.Linear()` regridding scheme.
omax = float(cube.collapsed(cube.dim_coords, iris.analysis.MAX).data)
omin = float(cube.collapsed(cube.dim_coords, iris.analysis.MIN).data)
range_thres = omax - omin
# Heuristic approach to filtering extreme values.
extr_mask = (interpolated_cube.data > (omax + range_thres)) | (
interpolated_cube.data < (omin - range_thres)
)
if np.any(extr_mask):
logger.warning(
f"Masking {np.sum(extr_mask)} regridded values that exceeded the "
f"threshold range for cube {cube.name()}."
)
interpolated_cube.data.mask |= extr_mask
if return_regridder:
return interpolated_cube, regridder
return interpolated_cube
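# Illustrative usage sketch (not part of the original module): regridding two cubes
# that share a source grid while re-using the interpolation weights, as described
# for `return_regridder` above. `cube_a` and `cube_b` are hypothetical input cubes.
#
#     regridded_a, regridder = regrid(cube_a, area_weighted=True, return_regridder=True)
#     regridded_b = regrid(cube_b, area_weighted=True, regridder=regridder)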
@iris_cache(ignore=["verbose"])
def regrid_dataset(
dataset,
new_latitudes=get_centres(np.linspace(-90, 90, 721)),
new_longitudes=get_centres(np.linspace(-180, 180, 1441)),
verbose=False,
**kwargs,
):
logger.debug(f"Regridding '{dataset}' with {len(dataset)} variable(s).")
regridded_cubes = iris.cube.CubeList(
[
regrid(
cube,
new_latitudes=new_latitudes,
new_longitudes=new_longitudes,
verbose=verbose,
**kwargs,
)
for cube in dataset.cubes
]
)
return regridded_cubes
def _temporal_nn(source_data, target_index, interpolate_mask, n_months, verbose=False):
"""Temporal NN interpolation of a single month.
Args:
source_data ((s, m, n) array of float): Source data to use for the interpolation.
target_index (int): Temporal index (axis=0) corresponding to the timestep to
fill using interpolation.
interpolate_mask ((m, n) array of bool): Boolean mask that is true where
interpolation should take place.
n_months (int): Number of months to look forwards and backwards to find valid
data. The total number of months selected for each interpolation step will
be `2 * n_months + 1` (including the current timestep).
verbose (bool): If True, show progress bar.
Returns:
numpy masked array: (N,) masked array containing the interpolated values for
each of the `N` True elements in `interpolate_mask`.
"""
n_interp = interpolate_mask.sum()
monthly_target_data = np.ma.MaskedArray(np.empty(n_interp), mask=True)
for i, indices in enumerate(
tqdm(
zip(*np.where(interpolate_mask)),
total=n_interp,
leave=False,
disable=not verbose,
)
):
adjacent_data = source_data[
(slice(target_index, target_index + 2 * n_months + 1), *indices)
]
assert adjacent_data.mask[n_months], "Data to interpolate must be masked."
# Try to find at least one match in the fewest months possible.
for d in range(1, n_months + 1):
selection_mask = (
adjacent_data.mask[n_months - d],
adjacent_data.mask[n_months + d],
)
if all(selection_mask):
# All data is masked, so there is no valid data to choose from.
continue
selection = np.ma.MaskedArray(
[adjacent_data[n_months - d], adjacent_data[n_months + d]]
)
# Fill in the missing element.
monthly_target_data[i] = np.mean(selection)
# Stop looking for matches.
break
return monthly_target_data
@iris_cache(ignore=["verbose"])
def temporal_nn(
dataset, target_timespan, n_months, mask=None, threshold=0, verbose=False
):
"""Temporal NN interpolation of missing data.
Args:
dataset (wildfires.data.Dataset): Dataset containing a single cube to
interpolate.
target_timespan (tuple of datetime): Start and end datetimes between which to
interpolate.
n_months (int): Number of months to look forwards and backwards to find valid
data. The total number of months selected for each interpolation step will
be `2 * n_months + 1` (including the current timestep).
mask ((m, n) array of bool): Mask that is True where interpolation should occur.
If None, interpolation is not carried out over water or south of 60° S.
threshold (float): Threshold in [0, 1] denoting the minimum fraction (not
inclusive) of missing data in `source_masks` for which to carry out
interpolation. Larger thresholds can be used to restrict interpolation
geographically to regions with poor data availability.
verbose (bool or int): If True, show progress bars. Giving 0 is equivalent to
giving False (i.e. no progress bars), giving 1 shows a progress bar for
the individual months, while giving 2 shows an additional progress bar for
the individual samples within each month.
Returns:
iris CubeList: CubeList containing the interpolated data for the time period
`target_timespan` in a single iris Cube.
Raises:
ValueError: If `dataset` does not contain exactly 1 cube.
ValueError: If `n_months` is not an integer.
RuntimeError: If there was insufficient data to satisfy either the target or
the source time period.
"""
if len(dataset.cubes) != 1:
raise ValueError(f"Expected 1 cube, got {len(dataset.cubes)}.")
if not isinstance(n_months, (int, np.integer)):
raise ValueError(
f"`n_months` should be an integer. Got type '{type(n_months)}'."
)
if isinstance(verbose, int):
m_verbose = False # Month verbosity.
s_verbose = False # Sample verbosity.
if verbose > 0:
m_verbose = True
if verbose > 1:
s_verbose = True
elif verbose:
m_verbose = True
s_verbose = True
# Set up data.
source = dataset.copy(deep=False)
target = dataset.copy(deep=True)
# Discard unneeded months.
source_timespan = (
target_timespan[0] - relativedelta(months=n_months),
target_timespan[1] + relativedelta(months=n_months),
)
source.limit_months(*source_timespan)
target.limit_months(*target_timespan)
target_months = (
(target_timespan[1].year - target_timespan[0].year) * 12
+ target_timespan[1].month
- target_timespan[0].month
+ 1
)
source_months = target_months + n_months * 2
# Sanity check.
assert source_months == (
(source_timespan[1].year - source_timespan[0].year) * 12
+ source_timespan[1].month
- source_timespan[0].month
+ 1
)
# Check that a sufficient number of months were present.
if not target.cube.shape[0] == target_months:
raise RuntimeError("Missing data for the target time period.")
if not source.cube.shape[0] == source_months:
raise RuntimeError("Missing data for the source time period.")
if not target.cube.coords("month_number"):
iris.coord_categorisation.add_month_number(target.cube, "time")
target.homogenise_masks()
# Set up the source mask (where to interpolate).
if mask is None:
# Add the land mask.
mask = match_shape(
get_land_mask(n_lon=dataset.cube.shape[-1]), dataset.cube.shape[1:]
)
# Ignore regions south of -60° S.
mask &= match_shape(
box_mask(lats=(-60, 90), lons=(-180, 180), n_lon=dataset.cube.shape[-1]),
mask.shape,
)
interpolate_masks = {}
for month_number in range(1, 13):
single_months = target.cube.extract(iris.Constraint(month_number=month_number))
missing_fraction = np.mean(single_months.data.mask & mask, axis=0)
interpolate_masks[month_number] = missing_fraction > threshold
# Iterate over the months to fill.
current = target_timespan[0]
for target_index in tqdm(range(target_months), disable=not m_verbose):
month_number = current.month
interpolate_mask = (
interpolate_masks[month_number]
& source.cube.data.mask[target_index + n_months]
)
target.cube.data[target_index][interpolate_mask] = _temporal_nn(
source.cube.data,
target_index,
interpolate_mask,
n_months,
verbose=s_verbose,
)
current += relativedelta(months=1)
return target.cubes
def _persistent_gap_filling(cube, thres=0.5, verbose=False):
"""Fill gaps >= (thres * 100)% of months with minimum value at that location.
This is done in-place.
"""
if not cube.coords("month_number"):
iris.coord_categorisation.add_month_number(cube, "time")
combined_mask = np.all(cube.data.mask, axis=0)
nr_inval_cube = cube.copy(
data=np.ma.MaskedArray(
cube.data.mask, mask=match_shape(combined_mask, cube.shape)
)
)
min_cube = cube.collapsed("time", iris.analysis.MIN)
# Month numbers in [1, 12].
month_numbers = cube.coord("month_number").points
for month_number in tqdm(range(1, 13), desc="Months", disable=not verbose):
extracted = iris.Constraint(month_number=month_number).extract(nr_inval_cube)
missing_frac = np.sum(extracted.data, axis=0) / extracted.shape[0]
persistent = ((missing_frac + 1e-5) >= thres).data
persistent[combined_mask] = False
for month_index in np.where(month_numbers == month_number)[0]:
month_data = cube.data[month_index]
fill_mask = persistent & cube.data.mask[month_index]
month_data[fill_mask] = min_cube.data[fill_mask]
cube.data[month_index] = month_data
return cube
def _harmonic_fit(t, params):
"""Sine-based fitting including offset.
Args:
t (int): Time index.
params (array-like):
0th - offset
1st - gradient
(2j, 2j+1) entries - jth component amplitude and phase, j >= 1.
Returns:
float: Fitted function value at `t`.
"""
t = np.asarray(t, dtype=np.float64)
output = np.zeros_like(t, dtype=np.float64)
output += params[0]
output += params[1] * t
for (j, (amplitude, phase)) in enumerate(zip(params[2::2], params[3::2])):
j += 1
output += amplitude * np.sin((2 * np.pi * j * t / 12) + phase)
return output
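# For reference, the model evaluated above is
#     f(t) = params[0] + params[1] * t
#            + sum_{j=1..k} params[2j] * sin(2 * pi * j * t / 12 + params[2j + 1]),
# i.e. a linear trend plus `k` harmonics of an annual (12-month) cycle.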
def _min_fit(x, *args):
"""Function to be minimised.
Args:
x (array-like): Fit parameters.
args: Month indices and corresponding data to fit to.
Returns:
float: MSE fit error.
"""
ts = args[0]
fit_data = args[1]
return np.sum((fit_data - _harmonic_fit(ts, x)) ** 2.0)
def _season_fill(fill_locs, data, k):
ts = np.arange(data.shape[0])
for xi, yi in zip(*np.where(fill_locs)):
sel = data[:, xi, yi]
# Execute minimisation.
res = minimize(_min_fit, np.zeros(2 * k + 2), (ts, sel))
# Replace masked elements with function fit values.
sel[sel.mask] = _harmonic_fit(ts, res.x)[sel.mask]
return data
def _season_model_filling(cube, k=4, ncpus=1, verbose=False):
"""Season-trend filling in-place."""
# Fill where there is some valid data, but not only valid data, since there would
# be nothing to fill in the latter case.
fill_locs = np.any(~cube.data.mask, axis=0) & (~np.all(~cube.data.mask, axis=0))
# Partition the rows of the array into chunks to be processed.
chunk_edges = np.unique(
np.append(np.arange(0, cube.shape[1], 2, dtype=np.int64), cube.shape[1])
)
with concurrent.futures.ProcessPoolExecutor(max_workers=ncpus) as executor:
fs = []
processed_slices = []
for chunk_s, chunk_e in zip(chunk_edges[:-1], chunk_edges[1:]):
chunk_slice = slice(chunk_s, chunk_e)
if not np.any(fill_locs[chunk_slice]):
# Skip those slices without anything to fill.
continue
processed_slices.append(chunk_slice)
fs.append(
executor.submit(
_season_fill, fill_locs[chunk_slice], cube.data[:, chunk_slice], k
)
)
for f in tqdm(
concurrent.futures.as_completed(fs),
desc="Season model filling",
total=len(fs),
disable=not verbose,
):
pass
for f, chunk_slice in zip(fs, processed_slices):
cube.data[:, chunk_slice] = f.result()
return cube
@iris_cache(ignore=["verbose", "ncpus"])
def persistent_season_trend_fill(
dataset, target_timespan=None, persistent_perc=50, k=4, verbose=False, ncpus=None
):
"""Interpolation of missing data using minimum values and season-trend model.
First, persistent gaps are filled using the minimum value observed at that
location. This is followed by the fitting of a season-trend model to fill the
remaining missing values.
Args:
target_timespan (tuple of datetime or None): Start and end datetimes
between which to interpolate. If None, the current temporal range of
data will be used.
persistent_perc (int in [0, 100]): Percentage of data that needs to be
missing for a given month at a given location for the month to be
considered affected by a persistent gap.
k (int): Number of harmonic terms used in the season-trend model.
verbose (bool): If True, show progress bars.
ncpus (int or None): Number of processes to use for the season-trend model
fitting. If None, `wildfires.qstat.get_ncpus()` will be used.
Returns:
iris CubeList: CubeList containing the interpolated data for the time period
`target_timespan` in a single iris Cube.
Raises:
ValueError: If `dataset` does not contain exactly 1 cube.
ValueError: If `persistent_perc` is not an integer.
ValueError: If `k` is not an integer.
"""
if len(dataset.cubes) != 1:
raise ValueError(f"Expected 1 cube, got {len(dataset.cubes)}.")
if not isinstance(persistent_perc, (int, np.integer)):
raise ValueError(
f"`persistent_perc` should be an integer. "
f"Got type '{type(persistent_perc)}'."
)
if not isinstance(k, (int, np.integer)):
raise ValueError(f"`k` should be an integer. Got type '{type(k)}'.")
if ncpus is None:
ncpus = get_ncpus()
# Set up data.
target = dataset.copy(deep=True)
if target_timespan is not None:
target.limit_months(*target_timespan)
if not target.cube.coords("month_number"):
iris.coord_categorisation.add_month_number(target.cube, "time")
target.homogenise_masks()
_season_model_filling(
_persistent_gap_filling(
target.cube, thres=persistent_perc / 100, verbose=verbose
),
k=k,
ncpus=ncpus,
verbose=verbose,
)
return target.cubes
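# Illustrative usage sketch (not part of the original module): `my_dataset` is a
# hypothetical `Dataset` subclass instance holding exactly one cube, as required
# above; the call fills persistent gaps with minima and the rest via the
# season-trend model.
#
#     filled_cubes = persistent_season_trend_fill(
#         my_dataset, persistent_perc=50, k=4, verbose=True
#     )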
regrid_dataset.__doc__ = "Dataset wrapper\n" + regrid.__doc__
def monthly_constraint(
t,
time_range=(PartialDateTime(2000, 1), PartialDateTime(2010, 1)),
inclusive_lower=True,
inclusive_upper=True,
):
"""Constraint function which ignores the day and only considers the
year and month.
"""
lower_op = operator.ge if inclusive_lower else operator.gt
upper_op = operator.le if inclusive_upper else operator.lt
comp_datetime = PartialDateTime(year=t.year, month=t.month)
return lower_op(comp_datetime, time_range[0]) and upper_op(
comp_datetime, time_range[1]
)
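# Illustrative usage sketch (not part of the original module): such a predicate can
# back an iris time constraint, assuming `cube` is a hypothetical cube with a "time"
# coordinate (the lambda passes the cell's point, which carries year and month).
#
#     time_range = (PartialDateTime(2005, 1), PartialDateTime(2006, 12))
#     constraint = iris.Constraint(
#         time=lambda cell: monthly_constraint(cell.point, time_range)
#     )
#     selected = cube.extract(constraint)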
class RegisterDatasets(ABCMeta):
"""Datasets are registered into a central list to keep track of them.
If any subclass (dataset) is used as another class's superclass, it is removed
from the registry in favour of its subclasses.
"""
datasets = None
def __init__(cls, name, bases, namespace):
super().__init__(name, bases, namespace)
if cls.datasets is None:
cls.datasets = set()
cls.datasets.add(cls)
cls.datasets -= set(bases)
def __iter__(cls):
return iter(cls.datasets)
def __str__(cls):
if cls in cls.datasets:
return cls.__name__
return f"{cls.__name__}: {', '.join(map(str, cls))}"
class Dataset(metaclass=RegisterDatasets):
# TODO: Make sure these get overridden by the subclasses, or that every
# dataset uses these consistently (if defining custom date coordinates).
calendar = "gregorian"
time_unit_str = "days since 1970-01-01 00:00:00"
time_unit = cf_units.Unit(time_unit_str, calendar=calendar)
_pretty = None
# Override the `pretty_variable_names` dictionary in each class where bespoke
# pretty variable names are desired. Keys are the raw variables names.
pretty_variable_names = dict()
@abstractmethod
def __init__(self):
"""Instantiate the dataset by loading Iris Cubes."""
def __str__(self):
return "{} ({}, {}, {})".format(
self.name, self.min_time, self.max_time, self.frequency
)
def __repr__(self):
return str(self) + " at {}".format(id(self))
def __eq__(self, other):
"""Equality testing that ignores data values and only looks at metadata.
The equality is derived solely from the `self.cubes` attribute, and the
coordinates and metadata of each cube in this CubeList. This means that
changes in the stored data are ignored!
"""
if isinstance(other, Dataset):
return self.hash_value == other.hash_value
return NotImplemented
def __len__(self):
return len(self.cubes)
def __getitem__(self, index):
"""Return a subset or single cube of `self`.
Args:
index (slice, str, int): Index used to retrieve (one or more) items; see the
`Returns` section below.
Returns:
`Dataset`: If `index` is a slice object, a subset of `self` containing the
cubes at the indices included in the slice will be returned.
`iris.cube.Cube`: If `index` is an integer or a string, the corresponding
cube will be returned, if found. Matching works with pretty and raw
names.
Raises:
VariableNotFoundError: If `index` is an integer or string, but the
specified cube could not be found.
"""
if isinstance(index, slice):
new_dataset = self.copy(deep=False)
new_dataset.cubes = self.cubes[index]
return new_dataset
try:
if isinstance(index, str):
# Substitute pretty name for raw name if needed.
index = self._get_raw_variable_names().get(index, index)
new_index = self.variable_names(which="raw").index(index)
else:
new_index = index
return self.cubes[new_index]
except ValueError as exc:
error_msg = strip_multiline(
f"""No cube could be found for index '{index}'.
Available: integer indices {list(range(len(self.cubes)))},
raw names {self.variable_names(which="raw")},
or pretty names {self.variable_names(which="pretty")}."""
)
raise VariableNotFoundError(error_msg) from exc
@classmethod
def _get_raw_variable_names(cls):
"""The inverse of the pretty variable name dict."""
all_pretty = list(cls.pretty_variable_names.values())
assert len(set(all_pretty)) == len(
all_pretty
), "Mapping pretty to raw names requires unique pretty variable names."
return dict((pretty, raw) for raw, pretty in cls.pretty_variable_names.items())
@property
def hash_value(self):
"""Calculate a hash value.
Returns:
str
"""
return _dataset_hasher.hash(self)
def __check_cubes(self):
"""Verification functions that should be run prior to accessing data.
Cubes are sorted by name and the uniqueness of the variables names is
verified.
"""
self.__cubes = iris.cube.CubeList(
sorted(self.__cubes, key=lambda cube: cube.name())
)
raw_names = tuple(cube.name() for cube in self.__cubes)
all_names = []
for raw_name in raw_names:
all_names.append(raw_name)
if raw_name in self.pretty_variable_names:
all_names.append(self.pretty_variable_names[raw_name])
assert len(set(all_names)) == len(
all_names
), "All variable names should be unique."
for cube in self.__cubes:
n_dim = len(cube.shape)
coord_names = []
if (
hasattr(self, "_special_coord_cubes")
and cube.name() in self._special_coord_cubes
):
coord_names.extend(self._special_coord_cubes[cube.name()])
elif n_dim == 2:
coord_names.extend(("latitude", "longitude"))
elif n_dim == 3:
coord_names.extend(("time", "latitude", "longitude"))
elif n_dim > 3:
warnings.warn(
f"\n{cube}\nin '{type(self)}' at '{id(self)}' has {n_dim} axes "
"with unexpected coordinate names."
)
for coord_name in coord_names:
try:
coord = cube.coord(coord_name)
coord.var_name = coord_name
if coord_name in ("latitude", "longitude"):
# Check that the coordinates are monotonically increasing.
assert np.all(
np.diff(coord.points) > 0
), f"{coord_name.capitalize()}s need to increase monotonically."
if coord_name == "longitude":
# Check that longitudes are in the [-180, 180] system.
assert not (
in_360_longitude_system(coord.points)
), "Longitudes need to be in the [-180, 180] system."
except iris.exceptions.CoordinateNotFoundError:
warnings.warn(
f"{n_dim}-dimensional cube '{cube}' in '{self.name}' did not "
f"have a '{coord_name}' coordinate."
)
# NOTE: This step is necessary to remove discrepancies between cubes
# before and after saving & loading them using iris.save() & iris.load(),
# which seems to change key attributes, like 'Conventions', from CF-1.4 to
# CF-1.5, for example.
cube.attributes["Conventions"] = "CF-1.5"
@property
def cubes(self):
# This might happen when assigning self.cubes to the result of
# self.read_cache(), for example.
# FIXME: Resolve this hack by changing the way the result of self.read_cache()
# FIXME: is used.
if self.__cubes is None:
logger.warning("Cubes is None.")
return None
self.__check_cubes()
return self.__cubes
@cubes.setter
def cubes(self, new_cubes):
"""Assign new cubes.
Raises:
NonUniformCoordError: If one or more coordinates are not uniform.
"""
# This might happen when assigning self.cubes to the result of
# self.read_cache(), for example.
# FIXME: Resolve this hack by changing the way the result of self.read_cache()
# FIXME: is used.
if new_cubes is None:
logger.warning(f"Assigning None to cubes.")
self.__cubes = None
else:
assert isinstance(
new_cubes, iris.cube.CubeList
), "New cube list must be an iris CubeList (`iris.cube.CubeList`)."
# Ensure uniformity of latitudes and longitudes. They should both be
# monotonically increasing, and the longitudes should be in the
# [-180, 180] system.
for i in range(len(new_cubes)):
# Ensure the proper longitude system.
if new_cubes[i].coords("longitude"):
if in_360_longitude_system(new_cubes[i].coord("longitude").points):
# Reorder longitudes into the [-180, 180] system.
tr_longitudes, tr_indices = translate_longitude_system(
new_cubes[i].coord("longitude").points, return_indices=True
)
new_cubes[i] = reorder_cube_coord(
new_cubes[i], tr_indices, tr_longitudes, name="longitude"
)
new_cubes[i].coord("longitude").circular = True
# Ensure longitudes and latitudes are properly ordered.
for coord in ("latitude", "longitude"):
if new_cubes[i].coords(coord):
if not np.all(np.diff(new_cubes[i].coord(coord).points) > 0):
# If the coordinate is not monotonically increasing we
# need to handle this.
if np.all(np.diff(new_cubes[i].coord(coord).points) < 0):
# If they are monotonically decreasing we just need to flip them.
logger.debug(
f"Inverting {coord}s for: {new_cubes[i].name()}."
)
lat_index = [
coord.name() for coord in new_cubes[i].coords()
].index(coord)
slices = [
slice(None) for i in range(len(new_cubes[i].shape))
]
slices[lat_index] = slice(None, None, -1)
new_cubes[i] = new_cubes[i][tuple(slices)]
else:
# If there is another pattern, one could attempt
# regridding, but we will alert the user to this
# instead.
raise NonUniformCoordError(
f"{coord.capitalize()}s for {new_cubes[i].name()} are not "
"uniform."
)
self.__cubes = new_cubes
@property
def cube(self):
"""Convenience method to access a single stored cube."""
if len(self.cubes) != 1:
raise ValueError(f"Expected 1 cube, but found {len(self.cubes)} cubes.")
return self.cubes[0]
def copy_cubes_no_data(self):
"""Copy everything except the cube data.
This includes cube metadata.
Returns:
`Dataset`: The copied dataset.
"""
new_cubelist = iris.cube.CubeList()
for cube in self:
# Copy everything about the cubes but their data.
new_cubelist.append(cube.copy(data=cube.core_data()))
dataset = copy(self)
dataset.cubes = new_cubelist
return dataset
def copy(self, deep=False):
"""Make a copy.
Args:
deep (bool): If False (default), create a shallow copy which will copy the
cube list but not the underlying cubes. If True, create a deep copy of
everything including the underlying cubes and their data.
Returns:
`Dataset`: The copy.
"""
if deep:
return deepcopy(self)
dataset = copy(self)
dataset.cubes = copy(self.cubes)
return dataset
def homogenise_masks(self):
for i, cube in enumerate(self):
self.cubes[i] = homogenise_cube_mask(cube)
def apply_masks(self, *masks):
"""Apply given masks on top of existing masks inplace."""
# Ensure masks are recorded in a format to enable the modifications below.
self.homogenise_masks()
# Check the given masks.
# TODO: If the masks contain cubes, check that their coordinates are
# TODO: consistent.
masks = list(masks)
for i, mask in enumerate(masks):
if isinstance(mask, iris.cube.Cube):
masks[i] = mask.data
for cube in self:
            # Create the combined mask and apply it to each cube in turn.
# TODO: Only calculate the aggregated mask for each unique shape present.
cube.data.mask |= reduce(
np.logical_or, (match_shape(mask, cube.shape) for mask in masks)
)
return self
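    # A minimal usage sketch for `apply_masks`; `coarse_land_mask` is a
    # hypothetical boolean array that `match_shape` can broadcast to each
    # cube's shape:
    #
    #     coarse_land_mask = np.zeros((720, 1440), dtype=bool)
    #     dataset.apply_masks(coarse_land_mask)  # ORed into every cube's mask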
def grid(self, coord="latitude"):
try:
diffs = np.diff(self[0].coord(coord).points)
mean = np.mean(diffs)
if np.all(np.isclose(diffs, mean)):
return np.abs(mean)
except iris.exceptions.CoordinateNotFoundError:
pass
return "N/A"
@property
def lat_grid(self):
return self.grid("latitude")
@property
def lon_grid(self):
return self.grid("longitude")
@property
def _temporal_cubes(self):
temporal_cubes = iris.cube.CubeList()
for cube in self:
if any(coord.name() == "time" for coord in cube.coords()):
temporal_cubes.append(cube)
return temporal_cubes
@property
def frequency(self):
temporal_cubes = self._temporal_cubes
if temporal_cubes:
time_coord = temporal_cubes[0].coord("time")
if len(time_coord.points) == 1:
return "static"
raw_start = time_coord.cell(0).point
raw_end = time_coord.cell(1).point
start = datetime(raw_start.year, raw_start.month, 1)
end = datetime(raw_end.year, raw_end.month, 1)
if (start + relativedelta(months=+1)) == end:
return "monthly"
if (start + relativedelta(months=+12)) == end:
return "yearly"
month_number_coords = temporal_cubes[0].coords("month_number")
if month_number_coords:
assert len(month_number_coords) == 1
if tuple(month_number_coords[0].points) == tuple(range(1, 13)):
return "climatology"
return str(raw_end - raw_start)
else:
return "static"
@property
def min_time(self):
temporal_cubes = self._temporal_cubes
if temporal_cubes:
return ensure_datetime(
max(cube.coord("time").cell(0).point for cube in temporal_cubes)
)
else:
return "static"
@property
def max_time(self):
temporal_cubes = self._temporal_cubes
if temporal_cubes:
return ensure_datetime(
min(cube.coord("time").cell(-1).point for cube in temporal_cubes)
)
else:
return "static"
@property
def name(self):
return type(self).__name__
def names(self, which="all", squeeze=True):
if which == "all":
return (self.name, self.pretty)
if which == "raw":
if squeeze:
return self.name
return (self.name,)
if which == "pretty":
if squeeze:
return self.pretty
return (self.pretty,)
raise ValueError("Unknown format: '{}'.".format(which))
@property
def pretty(self):
if self._pretty is None:
return self.name
return self._pretty
@pretty.setter
def pretty(self, value):
self._pretty = value
def variable_names(self, which="all"):
raw_names = tuple(cube.name() for cube in self.cubes)
if which == "all":
all_names = []
for raw_name in raw_names:
all_names.append(
(raw_name, self.pretty_variable_names.get(raw_name, raw_name))
)
return tuple(all_names)
if which == "raw":
return raw_names
if which == "pretty":
pretty_names = []
for raw_name in raw_names:
pretty_names.append(self.pretty_variable_names.get(raw_name, raw_name))
return tuple(pretty_names)
raise ValueError("Unknown format: '{}'.".format(which))
@property
def cache_filename(self):
return os.path.join(DATA_DIR, "cache", self.name + ".nc")
@classmethod
def _get_cache_filename(cls):
return os.path.join(DATA_DIR, "cache", cls.__name__ + ".nc")
@staticmethod
def save_data(cache_data, target_filename):
"""Save as NetCDF file.
Args:
cache_data (iris.cube.Cube or iris.cube.CubeList): This will be
saved as a NetCDF file.
target_filename (str): The filename that the data will be saved
to. Must end in '.nc', since the data is meant to be saved
as a NetCDF file.
Returns:
str or None: The current hex commit sha hash of the repo if a new file was
created (or the current package version). Otherwise, if the file was
already there and not overwritten, None is returned.
"""
assert (
target_filename[-3:] == ".nc"
), "Data must be saved as a NetCDF file, got:'{:}'".format(target_filename)
assert isinstance(
cache_data, (iris.cube.Cube, iris.cube.CubeList)
), f"Data to be saved must either be a Cube or a CubeList. Got: {cache_data}."
if isinstance(cache_data, iris.cube.Cube):
cache_data = iris.cube.CubeList([cache_data])
if os.path.isfile(target_filename):
# TODO: Want to overwrite if the commit hash is different?
# Maybe add a flag to do this.
logger.info("File exists, not overwriting:'{:}'".format(target_filename))
else:
if repo is not None:
assert (not repo.untracked_files) and (
not repo.is_dirty()
), "All changes must be committed and all files must be tracked."
repo_commit = repo.head.ref.commit.hexsha
else:
# Simply use the package version otherwise.
repo_commit = __version__
# Note down the commit sha hash so that the code used to
# generate the cached data can be retrieved easily later on.
for cube in cache_data:
cube.attributes["commit"] = repo_commit
if not os.path.isdir(os.path.dirname(target_filename)):
os.makedirs(os.path.dirname(target_filename))
logger.info("Realising data.")
cache_data.realise_data()
logger.info("Saving cubes to:'{:}'".format(target_filename))
iris.save(cache_data, target_filename, zlib=False)
            return repo_commit
@staticmethod
def read_data(target_filename):
"""Read from NetCDF file.
Args:
target_filename (str): The filename that the data will be saved to. Must
end in '.nc', since the data is meant to be saved as a NetCDF file.
Raises:
CommitMatchError: If the commit hashes of the cubes that are loaded do not
match.
"""
if os.path.isfile(target_filename):
cubes = iris.load(target_filename)
if not cubes:
os.remove(target_filename)
logger.warning(
"No cubes were found. Deleted file:{:}".format(target_filename)
)
return
commit_hashes = [cube.attributes["commit"] for cube in cubes]
            if len(set(commit_hashes)) != 1:
raise CommitMatchError("Cubes do not stem from the same commit.")
logger.debug("Returning cubes from:'{:}'".format(target_filename))
return cubes
else:
logger.info("File does not exist:'{:}'".format(target_filename))
def write_cache(self):
"""Write list of cubes to disk as a NetCDF file using iris.
Also record the git commit id that the data was generated with,
making sure that there are no uncommitted changes in the repository
at the time.
"""
self.__check_cubes()
self.save_data(self.cubes, self.cache_filename)
def read_cache(self):
cubes = self.read_data(self.cache_filename)
if cubes:
self.cubes = cubes
logger.info(
"Returning cubes from:'{:}' -> Dataset "
"timespan {:} -- {:}. Generated using commit {:}".format(
self.cache_filename,
self.min_time,
self.max_time,
self.cubes[0].attributes["commit"],
)
)
return self.cubes
def select_data(
self, latitude_range=(-90, 90), longitude_range=(-180, 180), inplace=False
):
"""Select a geographical sub-set of the original data for each cube.
Args:
latitude_range (tuple of float): Latitude range to select.
longitude_range (tuple of float): Longitude range to select.
inplace (bool): If True, subset the cubes inplace without returning a copy
of the selected data.
Returns:
`Dataset`: Dataset with cubes containing only data for the selected
region. Depending on `inplace`, this may be a copy of the original
dataset.
"""
def geo_subset(cube):
return cube.intersection(latitude=latitude_range).intersection(
longitude=longitude_range
)
        dataset = self if inplace else self.copy()
for i, cube in enumerate(dataset.cubes):
dataset.cubes[i] = (
geo_subset(cube) if inplace else deepcopy(geo_subset(cube))
)
return dataset
def regrid(
self,
new_latitudes=get_centres(np.linspace(-90, 90, 721)),
new_longitudes=get_centres(np.linspace(-180, 180, 1441)),
**kwargs,
):
"""Replace stored cubes with regridded versions in-place."""
# The time needed for this check is only on the order of ms.
if all(lat_lon_match(cube, new_latitudes, new_longitudes) for cube in self):
logger.info("No regridding needed for '{}'.".format(self.name))
else:
for cube_slice in self.single_cube_slices():
self.cubes[cube_slice] = regrid_dataset(
self[cube_slice],
new_latitudes=new_latitudes,
new_longitudes=new_longitudes,
**kwargs,
)
@abstractmethod
def get_monthly_data(self, start, end):
"""Return monthly cubes between two dates."""
@staticmethod
def date_order_check(start, end):
if start is None and end is None:
return
if not all(
getattr(date, required_type, None) is not None
for date in (start, end)
for required_type in ("year", "month")
):
raise ValueError(
"Both '{}' and '{}' need to define a year and month.".format(start, end)
)
days = [getattr(date, "day", 1) for date in (start, end)]
days = [day if day is not None else 1 for day in days]
assert datetime(start.year, start.month, days[0]) <= datetime(
end.year, end.month, days[1]
), "End date must be greater than start date."
def limit_months(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
"""Discard time period outside the specified bounds.
Crucially, this allows for regridding to take place much faster, as
unused years/months are not considered.
If the dataset consists of monthly data, the corresponding time
period is selected, and the other times discarded.
For yearly data, due to the need of interpolation, start/end
dates are rounded down/up to the previous/next year respectively.
"""
self.date_order_check(start, end)
freq = self.frequency
if freq in ("static", "monthly climatology"):
logger.debug("Not limiting times, as data is static")
return
start = PartialDateTime(start.year, start.month)
end = PartialDateTime(end.year, end.month)
if freq == "yearly":
start = PartialDateTime(start.year)
if end.month != 1:
end = PartialDateTime(end.year + 1)
if freq not in ("monthly",):
logger.warning("Encountered frequency:{:}".format(freq))
self.cubes = self.cubes.extract(
iris.Constraint(time=lambda t: end >= t.point >= start)
)
def select_monthly_from_monthly(
self,
start=PartialDateTime(2000, 1),
end=PartialDateTime(2000, 12),
inclusive_lower=True,
inclusive_upper=True,
):
"""Select specified months from `self.cubes`.
If only a single temporal coordinate matches the range specified by `start`
and `end`, the resulting cube(s) will lack that dimension (ie. it is
squeezed by Iris).
"""
self.date_order_check(start, end)
assert self.frequency == "monthly"
lower_op = operator.ge if inclusive_lower else operator.gt
upper_op = operator.le if inclusive_upper else operator.lt
end = PartialDateTime(end.year, end.month)
start = PartialDateTime(start.year, start.month)
def constraint_func(t):
return lower_op(t, start) and upper_op(t, end)
return self.cubes.extract(
iris.Constraint(time=lambda t: constraint_func(t.point))
)
def broadcast_static_data(self, start, end):
"""Broadcast every cube in 'self.cubes' to monthly intervals.
Daily information is ignored (truncated, ie. days are assumed to be
1).
Limits are inclusive.
"""
self.date_order_check(start, end)
datetimes = [datetime(start.year, start.month, 1)]
while datetimes[-1] != PartialDateTime(end.year, end.month):
datetimes.append(datetimes[-1] + relativedelta(months=+1))
calendar = "gregorian"
time_unit_str = "days since 1970-01-01 00:00:00"
time_unit = cf_units.Unit(time_unit_str, calendar=calendar)
time_coord = iris.coords.DimCoord(
cf_units.date2num(datetimes, time_unit_str, calendar),
standard_name="time",
units=time_unit,
)
new_cubes = iris.cube.CubeList()
for cube in self.cubes:
            new_data = np.ma.vstack([cube.data[np.newaxis] for _ in datetimes])
coords = [
(time_coord, 0),
(cube.coord("latitude"), 1),
(cube.coord("longitude"), 2),
]
new_cubes.append(iris.cube.Cube(new_data, dim_coords_and_dims=coords))
new_cubes[-1].metadata = cube.metadata
return new_cubes
def interpolate_yearly_data(self, start, end):
"""Linear interpolation onto the target months.
Daily information is ignored (truncated, ie. days are assumed to be 1).
Limits are inclusive.
"""
self.date_order_check(start, end)
time_unit = cf_units.Unit(self.time_unit_str, calendar=self.calendar)
datetimes = [datetime(start.year, start.month, 1)]
while datetimes[-1] != PartialDateTime(end.year, end.month):
datetimes.append(datetimes[-1] + relativedelta(months=+1))
time = iris.coords.DimCoord(
cf_units.date2num(datetimes, self.time_unit_str, calendar=self.calendar),
standard_name="time",
units=time_unit,
)
interp_cubes = iris.cube.CubeList()
for i in range(time.points.size):
interp_points = [("time", time[i].points)]
interp_cubes.extend(
iris.cube.CubeList(
[
cube.interpolate(interp_points, iris.analysis.Linear())
for cube in self.cubes
]
)
)
final_cubelist = interp_cubes.concatenate()
# Ignore extrema resulting from the interpolation (e.g. Iris linear
# interpolation suffers from this problem sometimes).
for cube in final_cubelist:
original_data = self.cubes.extract_cube(
iris.Constraint(name=cube.name())
).data
omin, omax = np.min(original_data), np.max(original_data)
# Augment the original mask.
cube.data.mask = (
cube.data.mask | (cube.data.data < omin) | (cube.data.data > omax)
)
return final_cubelist
def single_cube_slices(self):
"""Get slices to select new datasets containing each variable one at a time.
Using these slices in conjunction with cached method calls caches computations
for each variable instead of one specific combination of variables, therefore
making the caching much more flexible.
"""
slices = []
for index in range(len(self)):
slices.append(slice(index, index + 1))
return slices
def get_mean_dataset(self):
"""Return new `Dataset` containing mean cubes between two dates.
Note:
Returned cubes may contain lazy data.
"""
logger.info(f"Getting mean for '{self}'.")
mean_cubes = iris.cube.CubeList()
for cube_slice in self.single_cube_slices():
mean_cubes.extend(get_dataset_mean_cubes(self[cube_slice]))
mean_dataset = self.copy()
mean_dataset.cubes = mean_cubes
logger.debug(f"Finished getting mean for '{self}'.")
return mean_dataset
def get_monthly_dataset(self, start, end):
"""Return new `Dataset` containing monthly cubes between two dates.
Note:
Returned cubes will not contain lazy data, and this operation will realise
all selected data!
"""
logger.info(f"Getting monthly cubes for '{self}'.")
monthly_cubes = iris.cube.CubeList()
for cube_slice in self.single_cube_slices():
monthly_cubes.extend(
get_dataset_monthly_cubes(self[cube_slice], start, end)
)
monthly_dataset = self.copy()
monthly_dataset.cubes = monthly_cubes
logger.debug(f"Finished getting monthly cubes for '{self}'.")
return monthly_dataset
def get_climatology_dataset(self, start, end):
"""Return new `Dataset` containing monthly climatology cubes between two dates.
Note:
Returned cubes may contain lazy data.
"""
logger.info(f"Getting monthly climatology for '{self}'.")
climatology_cubes = iris.cube.CubeList()
for cube_slice in self.single_cube_slices():
climatology_cubes.extend(
get_dataset_climatology_cubes(self[cube_slice], start, end)
)
climatology_dataset = self.copy()
climatology_dataset.cubes = climatology_cubes
logger.debug(f"Finished getting monthly climatology for '{self}'.")
return climatology_dataset
def get_mean_climatology_monthly_dataset(self, start, end):
"""Return new `Dataset` instances containing processed data.
The output will contain the output of the `get_mean_dataset`,
`get_climatology_dataset`, and `get_monthly_dataset` functions, except that it
is slightly more efficient as fewer cache-retrieval operations need to be
carried out.
"""
mean_dataset = self.get_mean_dataset()
logger.info(f"Getting monthly cubes for '{self}'.")
monthly_cubes = iris.cube.CubeList()
for cube_slice in self.single_cube_slices():
monthly_cubes.extend(
get_dataset_monthly_cubes(self[cube_slice], start, end)
)
monthly_dataset = self.copy()
monthly_dataset.cubes = monthly_cubes
logger.debug(f"Finished getting monthly cubes for '{self}'.")
logger.info(f"Getting monthly climatology for '{self}'.")
climatology_cubes = iris.cube.CubeList()
# TODO: Implement this `optional_dataset` parameter which would allow passing
# in the generated monthly cubes without having the function read it from the
# cache.
for cube_slice in self.single_cube_slices():
climatology_cubes.extend(
get_dataset_climatology_cubes(
self[cube_slice],
start,
end,
# TODO:
# optional_dataset=monthly_dataset[cube_slice],
)
)
climatology_dataset = self.copy()
climatology_dataset.cubes = climatology_cubes
logger.debug(f"Finished getting monthly climatology for '{self}'.")
return mean_dataset, climatology_dataset, monthly_dataset
def get_observed_mask(self, thres=0.8, frequency=None):
"""Calculate a mask from the observed area and a minimum threshold.
Args:
thres (float): Minimum observed area threshold in [0, 1].
frequency (str or None): If None, use the native temporal frequency. If
"monthly", average observed fraction to monthly data before applying
the threshold.
Returns:
iris.cube.Cube: Cube containing the Boolean mask.
Raises:
ObservedAreaError: If the `_observed_area` attribute is not defined, or
the cube specified therein does not match one of the supported units
(1,).
"""
assert (
0 <= thres <= 1
), f"Threshold needs to be in range [0, 1], but got '{thres}'."
assert frequency in (None, "monthly"), (
"Expected frequency to be one of 'None' and 'monthly', but got "
f"'{frequency}'."
)
if not hasattr(self, "_observed_area"):
raise ObservedAreaError(
f"The dataset {self} does not specify the information necessary to "
"determine the observed area mask."
)
if self._observed_area["name"] in self.variable_names("raw"):
# If the cube containing the observed fraction is still present.
target_dataset = self.copy()
else:
# Otherwise recreate all cubes.
logger.warning(f"Recreating original cubes for {self}.")
target_dataset = type(self)()
target_dataset.cubes[:] = [
deepcopy(target_dataset[self._observed_area["name"]])
]
# Implement unit conversions here if needed.
if target_dataset.cube.units != cf_units.Unit(1):
raise ObservedAreaError(
"Unsupported observed area unit '{self._observed_area['unit']}'."
)
if frequency == "monthly" and target_dataset.frequency != "monthly":
logger.info("Converting mask to monthly data.")
target_dataset = target_dataset.get_monthly_dataset(
target_dataset.min_time, target_dataset.max_time
)
target_cube = target_dataset.cube
observed_mask = target_cube.data.data < thres
# Exchange data with the original (perhaps averaged) Cube for consistency.
target_cube.data = observed_mask
target_cube.units = cf_units.Unit("1")
return target_cube
@classmethod
def get_obs_masked_dataset(cls, mask_vars, thres=0.8, ndigits=3, cached_only=False):
"""Create a new dataset based on masking of certain variables.
The mask will be based on the observed area and the given threshold.
Args:
mask_vars ([iterable of] str): Variable(s) to mask using the observed area
mask.
thres (float): Minimum observed area threshold in [0, 1].
ndigits (int): Number of digits to round `thres` to.
            cached_only (bool): If True, only use cached data; if no cached data
                exists, return None instead of generating it.
Returns:
Instance of `cls` subclass or None: The name of this class will reflect the masked
variables and the applied threshold.
"""
rounded_thres = round(thres, ndigits)
assert np.isclose(thres - rounded_thres, 0), (
"Supplied threshold has too much precision. Either decrease precision or "
"increase `ndigits`."
)
if isinstance(mask_vars, str):
mask_vars = (mask_vars,)
# Map given names to raw names if needed.
raw_mask_vars = [
cls._get_raw_variable_names()
.get(name, name)
.replace(" ", "_")
.replace("-", "_")
.replace("__", "_")
.strip("_")
for name in mask_vars
]
name_mask_vars = [
raw_name.replace(" ", "_").replace("-", "_").replace("__", "_").strip("_")
for raw_name in raw_mask_vars
]
format_str = "_thres_{rounded_thres:0." + str(ndigits) + "f}"
pretty_format_str = "Thres {rounded_thres:0." + str(ndigits) + "f}"
new_name = (
cls.__name__
+ f"_{'__'.join(name_mask_vars)}_"
+ format_str.format(rounded_thres=rounded_thres)
)
# Initialise new Dataset instance.
new_pretty_dataset_name = (
cls._pretty
+ f" {' '.join(raw_mask_vars)} "
+ pretty_format_str.format(rounded_thres=rounded_thres)
)
# Intercept the cache writing operation in order to modify the cubes with the
# observation mask before they get written to the cache. This will then also
# affect subsequent retrievals of the cache.
def new_cache_func(self):
# Apply the mask. At this point the `cubes` attribute has already been
# populated.
# Retrieve the observation mask at the dataset-native frequency.
obs_mask = cls().get_observed_mask(thres=rounded_thres)
# Apply the mask to the cubes as set out in `mask_vars`.
for var in raw_mask_vars:
self[var].data.mask |= obs_mask.data
# Call the original cache function to actually store the modified CubeList.
cls.write_cache(self)
masked_dataset_class = type(
new_name,
(cls,),
{"_pretty": new_pretty_dataset_name, "write_cache": new_cache_func},
)
if cached_only and not masked_dataset_class.read_data(
masked_dataset_class._get_cache_filename()
):
return
return masked_dataset_class()
def get_temporally_shifted_dataset(self, months=0, deep=False):
"""Derive a new dataset with shifted temporal cubes.
The definition of the sign of the shift is motivated by the investigation of
pre-seasonal vegetation effects. Thus, `months=-1` shifts the data from
January to February. Following this, an unshifted dataset's data from February
could be compared to the shifted dataset's data from January by simply
selecting the month February for both datasets.
Args:
months (int): Number of months to shift the "time" coordinates by.
deep (bool): If True, copy the underlying data when creating the new
dataset.
Returns:
An instance of a subclass of `type(self)` containing the shifted cubes.
Raises:
TypeError: If `months` is not an integer.
UnexpectedCoordinateError: If any cube has multiple temporal coordinates.
UnexpectedCoordinateError: If any cube has a temporal coordinate that
isn't placed along the first axis.
"""
if not isinstance(months, (int, np.integer)):
raise TypeError(
f"`months` should be an integer. Got type '{type(months)}'."
)
# Check temporal coordinates.
for cube in self:
# Cubes without a time coordinate are simply skipped.
if not cube.coords("time"):
continue
# Ensure the time coordinate is first.
if cube.coord_dims(cube.coord("time"))[0] != 0:
raise UnexpectedCoordinateError(
"Temporal coordinate should correspond to the first axis."
)
if len(cube.coords(dimensions=0)) > 1:
raise UnexpectedCoordinateError(
"There should only be a single temporal coordinate."
)
if deep:
# Copy everything.
orig_inst = self.copy(deep=True)
else:
# Copy everything but the underlying data, still making a copy of `self`,
# `self.cubes` and the non-data attributes of every cube therein.
orig_inst = self.copy_cubes_no_data()
if not months:
# No shift to be carried out - return instance of original class.
return orig_inst
if not orig_inst.frequency == "monthly":
orig_inst = orig_inst.get_monthly_dataset(
orig_inst.min_time, orig_inst.max_time
)
shift_dir = "plus" if months > 0 else "minus"
def cube_name_mod_func(s):
return s + f" {months} Month"
        # Handle each cube differently, since each cube may have unique time
        # coordinates (different bands for example).
for cube in orig_inst:
if not cube.coords("time"):
continue
time_coord = cube.coord("time")
time_coord.bounds = None
shifted_dates = [
ensure_datetime(time_coord.cell(i).point) - relativedelta(months=months)
for i in range(len(time_coord.points))
]
time_unit_str = time_coord.units.name
time_unit_cal = time_coord.units.calendar
num_shifted_dates = [
cf_units.date2num(shifted_date, time_unit_str, time_unit_cal)
for shifted_date in shifted_dates
]
time_coord.points = num_shifted_dates
cube.long_name = cube_name_mod_func(cube.name())
cube.standard_name = None
cube.var_name = None
        # Instantiate a new dataset class. Its `__init__` is overridden to do
        # nothing, so the cubes attribute must be assigned manually below.
new_inst = type(
self.name + f"__{shift_dir}_{abs(months)}_month",
(type(self),),
{
"__init__": lambda self: None,
"_pretty": self.pretty + f" {months} Month",
"pretty_variable_names": dict(
(cube_name_mod_func(raw), cube_name_mod_func(pretty))
for raw, pretty in type(self).pretty_variable_names.items()
),
},
)()
new_inst.cubes = orig_inst.cubes
return new_inst
def get_temporally_interpolated_dataset(
self, target_timespan, n_months, mask=None, threshold=0, verbose=False
):
"""Temporal NN interpolation of missing data.
Args:
target_timespan (tuple of datetime): Start and end datetimes between which
to interpolate.
n_months (int): Number of months to look forwards and backwards to find
valid data. The total number of months selected for each interpolation
step will be `2 * n_months + 1` (including the current timestep).
mask ((m, n) array of bool): Mask that is True where interpolation should
occur. If None, interpolation is not carried out over water and south
of 60 degrees latitude.
threshold (float): Threshold in [0, 1] denoting the minimum fraction (not
inclusive) of missing data in `source_masks` for which to carry out
interpolation. Larger thresholds can be used to restrict interpolation
geographically to regions with poor data availability.
verbose (bool or int): If True, show progress bars. Giving 0 is equivalent
to giving False (i.e. no progress bars), giving 1 shows a progress bar
for the individual months, while giving 2 shows an additional progress
bar for the individual samples within each month.
Returns:
An instance of a subclass of `type(self)` containing the interpolated cubes.
Raises:
ValueError: If `n_months` is not an integer.
RuntimeError: If there was insufficient data to satisfy either the target or
the source time period.
"""
def cube_name_mod_func(s):
return s + f" {n_months}NN"
orig_inst = self.copy_cubes_no_data()
interp_cubes = iris.cube.CubeList()
        # Handle each cube differently, since each cube may have unique time
        # coordinates (different bands for example).
for cube_slice in orig_inst.single_cube_slices():
if not orig_inst[cube_slice].cube.coords("time"):
continue
cube = temporal_nn(
orig_inst[cube_slice],
target_timespan,
n_months,
mask=mask,
threshold=threshold,
verbose=verbose,
)[0]
cube.long_name = cube_name_mod_func(cube.name())
cube.standard_name = None
cube.var_name = None
interp_cubes.append(cube)
        # Instantiate a new dataset class. Its `__init__` is overridden to do
        # nothing, so the cubes attribute must be assigned manually below.
new_inst = type(
self.name + f"__{n_months}NN",
(type(self),),
{
"__init__": lambda self: None,
"_pretty": cube_name_mod_func(self.pretty),
"pretty_variable_names": dict(
(cube_name_mod_func(raw), cube_name_mod_func(pretty))
for raw, pretty in type(self).pretty_variable_names.items()
),
},
)()
new_inst.cubes = interp_cubes
return new_inst
def get_persistent_season_trend_dataset(
self, target_timespan=None, persistent_perc=50, k=4, verbose=False, ncpus=None
):
"""Interpolation of missing data using minimum values and season-trend model.
First, persistent gaps are filled using the minimum value observed at that
location. This is followed by the fitting of a season-trend model to fill the
remaining missing values.
Args:
target_timespan (tuple of datetime or None): Start and end datetimes
between which to interpolate. If None, the current temporal range of
data will be used.
persistent_perc (int in [0, 100]): Percentage of data that needs to be
missing for a given month at a given location for the month to be
considered affected by a persistent gap.
k (int): Number of harmonic terms used in the season-trend model.
verbose (bool): If True, show progress bars.
ncpus (int or None): Number of processes to use for the season-trend model
fitting. If None, `wildfires.qstat.get_ncpus()` will be used.
Returns:
An instance of a subclass of `type(self)` containing the interpolated cubes.
Raises:
ValueError: If `persistent_perc` is not an integer.
ValueError: If `k` is not an integer.
"""
def cube_name_mod_func(s):
return s + f" {persistent_perc}P {k}k"
if ncpus is None:
ncpus = get_ncpus()
orig_inst = self.copy_cubes_no_data()
interp_cubes = iris.cube.CubeList()
        # Handle each cube differently, since each cube may have unique time
        # coordinates (different bands for example).
for cube_slice in orig_inst.single_cube_slices():
if not orig_inst[cube_slice].cube.coords("time"):
continue
cube = persistent_season_trend_fill(
orig_inst[cube_slice],
target_timespan=target_timespan,
persistent_perc=persistent_perc,
k=k,
verbose=verbose,
ncpus=ncpus,
)[0]
cube.long_name = cube_name_mod_func(cube.name())
cube.standard_name = None
cube.var_name = None
interp_cubes.append(cube)
        # Instantiate a new dataset class. Its `__init__` is overridden to do
        # nothing, so the cubes attribute must be assigned manually below.
new_inst = type(
self.name + f"__{persistent_perc}P_{k}k",
(type(self),),
{
"__init__": lambda self: None,
"_pretty": cube_name_mod_func(self.pretty),
"pretty_variable_names": dict(
(cube_name_mod_func(raw), cube_name_mod_func(pretty))
for raw, pretty in type(self).pretty_variable_names.items()
),
},
)()
new_inst.cubes = interp_cubes
return new_inst
class MonthlyDataset(Dataset):
"""A `Dataset` containing monthly data."""
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.select_monthly_from_monthly(start, end)
class AvitabileAGB(Dataset):
_pretty = "Avitabile AGB"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "Avitabile_AGB")
self.cubes = iris.cube.CubeList(
[iris.load_cube(os.path.join(self.dir, "Avitabile_AGB_Map_0d25.nc"))]
)
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.broadcast_static_data(start, end)
class AvitabileThurnerAGB(Dataset):
_pretty = "Avitabile Thurner AGB"
pretty_variable_names = {"AGBtree": "AGB Tree"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "AvitabileThurner-merged_AGB")
self.cubes = iris.cube.CubeList(
[
iris.load_cube(
os.path.join(self.dir, "Avi2015-Thu2014-merged_AGBtree.nc")
)
]
)
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.broadcast_static_data(start, end)
class CarvalhaisGPP(Dataset):
_pretty = "Carvalhais GPP"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "Carvalhais_VegC-TotalC-Tau")
raw_cubes = iris.cube.CubeList(
[iris.load_cube(os.path.join(self.dir, "Carvalhais.gpp_50.360.720.1.nc"))]
)
# There is only one time coordinate, and its value is of no relevance.
# Therefore, remove this coordinate.
raw_cubes[0] = raw_cubes[0][0]
raw_cubes[0].remove_coord("time")
self.cubes = raw_cubes
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.broadcast_static_data(start, end)
class CCI_BurnedArea_MERIS_4_1(Dataset):
_pretty = "CCI MERIS 4.1"
pretty_variable_names = {"burned_area": "CCI MERIS BA"}
_special_coord_cubes = {
"vegetation class name": ["vegetation_class"],
"burned area in vegetation class": [
"time",
"vegetation_class",
"latitude",
"longitude",
],
}
_observed_area = {"name": "fraction of observed area"}
def __init__(self):
# Manually input directory name here to maintain this directory for subclasses.
self.dir = os.path.join(DATA_DIR, "CCI_BurnedArea_MERIS_4_1")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
cubes = iris.cube.CubeList()
for f in tqdm(
glob.glob(os.path.join(self.dir, "**", "*.nc"), recursive=True),
desc="Loading cubes",
):
cubes.extend(iris.load(f))
named_cubes = dict(
[
(var_name, cubes.extract(iris.Constraint(var_name)))
for var_name in set([cube.name() for cube in cubes])
]
)
for var_name, var_cubes in tqdm(
named_cubes.items(), desc="Homogenising cube attributes"
):
# TODO: Fuse some of the discarded attributes, like the time coverage.
homogenise_cube_attributes(var_cubes)
var_cube = var_cubes[0]
assert all(
var_cube.is_compatible(var_cubes[i]) for i in range(1, len(var_cubes))
), "Should be able to concatenate cubes now."
if var_name == "vegetation class name":
# All cubes are the same (except for isolated metadata, like timing
# information) so we only deal with one cube.
# Convert '|S1' dtype to 'u1' ('uint8') dtype to avoid errors during storage.
# Replace b'' placeholder values with b' ' to enable usage of `ord'.
var_cube.data.data[var_cube.data.mask] = b" "
int_veg_data = np.asarray(
np.vectorize(ord)(var_cube.data.data), dtype="u1"
)
var_cube.data = np.ma.MaskedArray(
int_veg_data, mask=var_cube.data.mask, dtype="u1", fill_value=32
)
# NOTE: Figure out why the masked data, the mask itself, and the fill
# value are modified when saving the cube with the data created above.
named_cubes[var_name] = iris.cube.CubeList([var_cube])
else:
# The time bounds seem to be wrong in the original data, so remove them.
for cube in var_cubes:
cube.coord("time").bounds = None
raw_cubes = iris.cube.CubeList(
[var_cubes.concatenate_cube() for var_cubes in named_cubes.values()]
)
for i, cube in enumerate(tqdm(raw_cubes, desc="Normalising cubes")):
if cube.name() in [
"burned_area",
"burned area in vegetation class",
"standard error of the estimation of burned area",
]:
# Normalise using the grid cell areas
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=(("Using DEFAULT_SPHERICAL_EARTH_RADIUS."))
)
raw_cubes[i].data /= iris.analysis.cartography.area_weights(
raw_cubes[i]
)
raw_cubes[i].units = cf_units.Unit(1)
# Rewrite coordinate `long_name' values to conform to netCDF variable name
# standards to ensure compatibility with the coordinate `var_name'
# requirements.
if cube.name() == "burned area in vegetation class":
cube.coords()[1].long_name = "vegetation_class"
elif cube.name() == "vegetation class name":
cube.coords()[0].long_name = "vegetation_class"
self.cubes = raw_cubes
self.write_cache()
@property
def vegetation_class_names(self):
"""Retrieve the vegetation class names."""
# Make the vegetation names persist even if the corresponding cube is removed.
if hasattr(self, "_cached_vegetation_class_names"):
return self._cached_vegetation_class_names
if "vegetation class name" in self.variable_names("raw"):
# If the cube containing the names is still present.
target_dataset = self
else:
# Otherwise recreate all cubes and extract the needed data.
target_dataset = type(self)()
vegetation_cube = target_dataset["vegetation class name"]
# Remove artefacts of saving the cube. See note above.
vegetation_class_names = [
"".join(class_name_data).strip().strip(chr(255))
for class_name_data in np.vectorize(chr)(vegetation_cube.data.data)
]
self._cached_vegetation_class_names = vegetation_class_names
return vegetation_class_names
def get_monthly_data(
self,
start=PartialDateTime(2000, 1),
end=PartialDateTime(2000, 12),
inclusive_lower=True,
inclusive_upper=True,
):
"""Transform the data from two samples a month to having just one."""
self.date_order_check(start, end)
lower_op = operator.ge if inclusive_lower else operator.gt
upper_op = operator.le if inclusive_upper else operator.lt
end = PartialDateTime(end.year, end.month)
start = PartialDateTime(start.year, start.month)
def constraint_func(t):
return lower_op(t, start) and upper_op(t, end)
monthly_cubes = iris.cube.CubeList()
for cube in self.cubes.extract(
iris.Constraint(time=lambda t: constraint_func(t.point))
):
try:
iris.coord_categorisation.add_month_number(cube, "time")
iris.coord_categorisation.add_year(cube, "time")
monthly_cubes.append(
cube.aggregated_by(["month_number", "year"], iris.analysis.MEAN)
)
except iris.exceptions.CoordinateNotFoundError:
monthly_cubes.append(cube)
return monthly_cubes
class CCI_BurnedArea_MODIS_5_1(MonthlyDataset):
_pretty = "CCI MODIS 5.1"
pretty_variable_names = {"burned_area": "CCI MODIS BA"}
_special_coord_cubes = {
"vegetation class name": ["vegetation_class"],
"burned area in vegetation class": [
"time",
"vegetation_class",
"latitude",
"longitude",
],
}
def __init__(self):
# Manually input directory name here to maintain this directory for subclasses.
self.dir = os.path.join(DATA_DIR, "CCI_BurnedArea_MODIS_5_1")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
cubes = iris.cube.CubeList()
for f in tqdm(
glob.glob(os.path.join(self.dir, "**", "*.nc"), recursive=True),
desc="Loading cubes",
):
cubes.extend(iris.load(f))
named_cubes = dict(
[
(var_name, cubes.extract(iris.Constraint(var_name)))
for var_name in set([cube.name() for cube in cubes])
]
)
for var_name, var_cubes in tqdm(
named_cubes.items(), desc="Homogenising cube attributes"
):
# TODO: Fuse some of the discarded attributes, like the time coverage.
homogenise_cube_attributes(var_cubes)
var_cube = var_cubes[0]
assert all(
var_cube.is_compatible(var_cubes[i]) for i in range(1, len(var_cubes))
), "Should be able to concatenate cubes now."
if var_name == "vegetation class name":
# All cubes are the same (except for isolated metadata, like timing
# information) so we only deal with one cube.
# Convert '|S1' dtype to 'u1' ('uint8') dtype to avoid errors during storage.
# Replace b'' placeholder values with b' ' to enable usage of `ord'.
var_cube.data.data[var_cube.data.mask] = b" "
int_veg_data = np.asarray(
np.vectorize(ord)(var_cube.data.data), dtype="u1"
)
var_cube.data = np.ma.MaskedArray(
int_veg_data, mask=var_cube.data.mask, dtype="u1", fill_value=32
)
# NOTE: Figure out why the masked data, the mask itself, and the fill
# value are modified when saving the cube with the data created above.
named_cubes[var_name] = iris.cube.CubeList([var_cube])
raw_cubes = iris.cube.CubeList(
[var_cubes.concatenate_cube() for var_cubes in named_cubes.values()]
)
for i, cube in enumerate(tqdm(raw_cubes, desc="Normalising cubes")):
if cube.name() in [
"burned_area",
"burned area in vegetation class",
"standard error of the estimation of burned area",
]:
# Normalise using the grid cell areas
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=(("Using DEFAULT_SPHERICAL_EARTH_RADIUS."))
)
raw_cubes[i].data /= iris.analysis.cartography.area_weights(
raw_cubes[i]
)
raw_cubes[i].units = cf_units.Unit(1)
# Rewrite coordinate `long_name' values to conform to netCDF variable name
# standards to ensure compatibility with the coordinate `var_name'
# requirements.
if cube.name() == "burned area in vegetation class":
cube.coords()[1].long_name = "vegetation_class"
elif cube.name() == "vegetation class name":
cube.coords()[0].long_name = "vegetation_class"
self.cubes = raw_cubes
self.write_cache()
@property
def vegetation_class_names(self):
vegetation_cube = self["vegetation class name"]
# Remove artefacts of saving the cube. See note above.
vegetation_class_names = [
"".join(class_name_data).strip().strip(chr(255))
for class_name_data in np.vectorize(chr)(vegetation_cube.data.data)
]
return vegetation_class_names
class CHELSA(MonthlyDataset):
"""For primary analysis, it is advisable to use hpc
(cx1_scipts/run_chelsa_script.sh) in order to process the tif files
into nc files as a series of jobs, which would take an incredibly long
time otherwise (on the order of days).
Once that script has been run, the resulting nc files can be used to
easily construct a large iris Cube containing all the data.
"""
_pretty = "CHELSA"
pretty_variable_names = {
"maximum temperature": "Max Temp",
"minimum temperature": "Min Temp",
"mean temperature": "Mean Temp",
"monthly precipitation": "Precipitation",
}
def __init__(self, process_slice=slice(None)):
"""Initialise the cubes.
Args:
process_slice (slice): Used to limit the loading/processing of
                raw .tif data files. Slices resulting in single elements
                (eg. slice(i, i+1)) can be provided, with i being the PBS
                array job index, to quickly generate all the required .nc
                files from the .tif files using array jobs on the hpc.
"""
self.dir = os.path.join(DATA_DIR, "CHELSA")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
files = glob.glob(os.path.join(self.dir, "**", "*.tif"), recursive=True)
files.sort()
mapping = {
"prec": {
"scale": 1,
"unit": cf_units.Unit("mm/month"),
"long_name": "monthly precipitation",
},
"tmax": {
"scale": 0.1,
"unit": cf_units.Unit("degrees Celsius"),
"long_name": "maximum temperature",
},
"tmean": {
"scale": 0.1,
"unit": cf_units.Unit("degrees Celsius"),
"long_name": "mean temperature",
},
"tmin": {
"scale": 0.1,
"unit": cf_units.Unit("degrees Celsius"),
"long_name": "minimum temperature",
},
}
year_pattern = re.compile(r"_(\d{4})_")
month_pattern = re.compile(r"_(\d{2})_")
time_unit_str = "hours since 1970-01-01 00:00:00"
calendar = "gregorian"
time_unit = cf_units.Unit(time_unit_str, calendar=calendar)
commit_hashes = set()
cube_list = iris.cube.CubeList()
def update_hashes(commit_hash):
commit_hashes.update([commit_hash])
# TODO: Need to reinstate this constraint!!!!
"""
assert len(commit_hashes) == 1, (
"All loaded data should be from the same commit.")
"""
for f in files[process_slice]:
# If this file has been regridded already and saved as a NetCDF
# file, then do not redo this.
nc_file = f.replace(".tif", ".nc")
try:
cubes = self.read_data(nc_file)
except Exception:
# Try again, removing a potentially corrupt file
# beforehand.
logger.exception("Read failed, recreating:'{:}'".format(nc_file))
cubes = None
try:
os.remove(nc_file)
except Exception:
logger.exception("File did not exist:'{:}'".format(nc_file))
if cubes:
update_hashes(cubes[0].attributes["commit"])
cube_list.extend(cubes)
continue
try:
with rasterio.open(f) as dataset:
pass
except rasterio.RasterioIOError:
logger.exception("Corrupted file.")
# Try to download file again.
url = f.replace(
os.path.join(DATA_DIR, "CHELSA"), "https://www.wsl.ch/lud/chelsa"
)
command = "curl --connect-timeout 20 -L -o {:} {:}".format(f, url)
logger.debug("Executing:{:}".format(command))
os.system(command)
with rasterio.open(f) as dataset:
                # NOTE: Since the data are stored as unsigned 16 bit
                # integers, with temperature (in degrees Celsius) scaled by
                # a factor of 10, space can be saved by storing the data in
                # float16 format.
variable_key = os.path.split(os.path.split(f)[0])[1]
assert dataset.count == 1, "There should only be one band."
data = dataset.read(1).astype("float16")
data = np.ma.MaskedArray(
data * mapping[variable_key]["scale"],
np.isinf(data),
dtype=data.dtype,
)
latitudes = iris.coords.DimCoord(
get_centres(
np.linspace(
dataset.bounds.top,
dataset.bounds.bottom,
dataset.shape[0] + 1,
)
),
standard_name="latitude",
units="degrees",
)
longitudes = iris.coords.DimCoord(
get_centres(
np.linspace(
dataset.bounds.left,
dataset.bounds.right,
dataset.shape[1] + 1,
)
),
standard_name="longitude",
units="degrees",
)
grid_coords = [(latitudes, 0), (longitudes, 1)]
split_f = os.path.split(f)[1]
time_coord = iris.coords.DimCoord(
cf_units.date2num(
datetime(
int(year_pattern.search(split_f).group(1)),
int(month_pattern.search(split_f).group(1)),
1,
),
time_unit_str,
calendar,
),
standard_name="time",
units=time_unit,
)
cube = iris.cube.Cube(
data,
dim_coords_and_dims=grid_coords,
units=mapping[variable_key]["unit"],
var_name=variable_key,
long_name=mapping[variable_key]["long_name"],
aux_coords_and_dims=[(time_coord, None)],
)
# Regrid cubes to the same lat-lon grid.
# TODO: change lat and lon limits and also the number of points!!
# Always work in 0.25 degree steps? From the same starting point?
regrid_cube = regrid(cube)
# Need to save as float64 or float32, choose float64 for future
# interoperability.
regrid_cube.data = regrid_cube.data.astype("float64")
commit_hash = self.save_data(regrid_cube, nc_file)
# If None is returned, then the file already exists and is not
# being overwritten, which should not happen, as we check for
# the existence of the file above, loading the data in that
# case.
assert (
commit_hash is not None
), "Data should have been loaded before, since the file exists."
update_hashes(commit_hash)
cube_list.append(regrid_cube)
# TODO: TEMPORARY, in order to allow merging of data from different
# commits!!
for cube in cube_list:
del cube.attributes["commit"]
self.cubes = cube_list.merge()
assert len(self.cubes) == 4, "There should be 4 variables."
# If all the data has been processed, not just a subset.
if process_slice == slice(None):
self.write_cache()
class Copernicus_SWI(MonthlyDataset):
"""For primary analysis, it is advisable to use hpc
(cx1_scipts/run_swi_script.sh) in order to process the daily nc files
into monthly nc files as a series of jobs, which would take an
incredibly long time and large amounts of RAM otherwise (on the order
of days).
Once that script has been run, the resulting nc files can be used to
easily construct a large iris Cube containing all the desired monthly
data.
There are currently 147 available months of data, from 2007-01 to
2019-03.
"""
_pretty = "Copernicus SWI"
pretty_variable_names = {"Soil Water Index with T=1": "SWI(1)"}
def __init__(self, process_slice=slice(None)):
"""Initialise the cubes.
Args:
process_slice (slice): Used to limit the loading/processing of
raw daily .nc files. Slices resulting in single elements
                (eg. slice(i, i+1)) will select a MONTH of data. This can be
                done with i being the PBS array job index to quickly generate
                all the required monthly .nc files from the daily files using
                array jobs on the hpc.
"""
self.dir = os.path.join(DATA_DIR, "Copernicus_SWI")
logger.debug("Copernicus dir:{:}".format(self.dir))
monthly_dir = os.path.join(self.dir, "monthly")
logger.debug("Monthly dir:{:}".format(monthly_dir))
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
# The raw data is daily data, which has to be averaged to yield
# monthly data.
files = glob.glob(os.path.join(self.dir, "**", "*.nc"), recursive=True)
daily_files = []
monthly_files = []
for f in files:
if "monthly" in f:
monthly_files.append(f)
else:
daily_files.append(f)
logger.debug(
"Found {:} monthly & {:} daily files".format(
len(monthly_files), len(daily_files)
)
)
# Get times from the filenames, instead of having to load the cubes
# and look at the time coordinate that way.
pattern = re.compile(r"(\d{4})(\d{2})(\d{2})")
datetimes = [datetime(*map(int, pattern.search(f).groups())) for f in files]
# Isolate the year and month of each file only, and only in the
# times of the requested slice.
year_months = sorted(
list(set([datetime(dt.year, dt.month, 1) for dt in datetimes]))
)[process_slice]
start_year_month = year_months[0]
end_year_month = year_months[-1] + relativedelta(months=+1)
logger.debug(
"Processing data from {:} to {:}".format(start_year_month, end_year_month)
)
selected_daily_files = []
selected_monthly_files = []
selected_monthly_intervals = []
# Handle monthly files first, in order to eliminate double-counting
# later on.
for f, dt in zip(files, datetimes):
if start_year_month <= dt < end_year_month:
# Prevent loading monthly files into the daily file list
# which will get processed into monthly data.
#
# Only ignore the 1 month interval which is associated with
# each monthly file. If multiple intervals are found, they
# will be merged later.
if "monthly" in f:
selected_monthly_files.append(f)
selected_monthly_intervals.append(
[dt, dt + relativedelta(months=+1)]
)
# Fuse the monthly intervals into easier-to-use contiguous
# intervals.
contiguous_monthly_intervals = join_adjacent_intervals(
selected_monthly_intervals
)
logger.debug(
"Contiguous monthly intervals:{:}".format(contiguous_monthly_intervals)
)
for f, dt in zip(files, datetimes):
if start_year_month <= dt < end_year_month:
monthly_data = False
for interval in contiguous_monthly_intervals:
if interval[0] <= dt < interval[1]:
monthly_data = True
if not monthly_data:
assert (
"monthly" not in f
), "Monthly files should have been separated beforehand."
selected_daily_files.append(f)
logger.debug(
"Using {:} monthly & {:} daily files".format(
len(selected_monthly_files), len(selected_daily_files)
)
)
commit_hashes = set()
monthly_cubes = iris.cube.CubeList()
def update_hashes(commit_hash):
commit_hashes.update([commit_hash])
# TODO: Need to reinstate this constraint!!!!
"""
assert len(commit_hashes) == 1, (
"All loaded data should be from the same commit.")
"""
# Process the daily files here first, then combine with the already
# processed monthly data later. Processing involves regridding to a
# 0.25 degree resolution and averaging over months.
if selected_daily_files:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Skipping global attribute 'long_name': 'long_name' is "
"not a permitted attribute"
),
)
daily_cubes = load_cubes(selected_daily_files)
# Concatenate daily cubes into larger cubes with the same
# information (but with longer time coordinates).
raw_cubes = homogenise_cube_attributes(daily_cubes).concatenate()
while raw_cubes:
logger.debug("Regridding:{:}".format(repr(raw_cubes[0])))
regridded_cube = regrid(raw_cubes.pop(0), scheme=iris.analysis.Linear())
iris.coord_categorisation.add_month_number(regridded_cube, "time")
iris.coord_categorisation.add_year(regridded_cube, "time")
logger.debug("Averaging:{:}".format(repr(regridded_cube)))
averaged_cube = regridded_cube.aggregated_by(
["month_number", "year"], iris.analysis.MEAN
)
assert averaged_cube.core_data().shape[0] == 1, (
"There should be only 1 element in the time dimension "
"(ie. 1 month)."
)
monthly_cubes.append(averaged_cube[0])
logger.debug(
"Remaining nr to regrid & average:{:}".format(len(raw_cubes))
)
# Save these monthly files separately.
datetimes_to_save = []
for cube in monthly_cubes:
for i in range(len(cube.coord("time").points)):
datetimes_to_save.append(cube.coord("time").cell(i).point)
datetimes_to_save = list(set(datetimes_to_save))
for dt in datetimes_to_save:
cubes = monthly_cubes.extract(
iris.Constraint(time=lambda t: dt == t.point)
)
commit_hash = self.save_data(
cubes,
os.path.join(
monthly_dir,
(
"c_gls_SWI_{:04d}{:02d}{:02d}_monthly"
"_GLOBE_ASCAT_V3.1.1.nc"
).format(
# The day is always 1 for monthly files.
dt.year,
dt.month,
1,
),
),
)
# If None is returned, then the file already exists and is not
# being overwritten, which should not happen, as we check for
# the existence of the file above, loading the data in that
# case.
assert (
commit_hash is not None
), "Data should have been loaded before, since the file exists."
update_hashes(commit_hash)
if selected_monthly_files:
monthly_cubes.extend(load_cubes(selected_monthly_files))
# TODO: TEMPORARY, in order to allow merging of data from different
# commits!!
for cube in monthly_cubes:
if "commit" in cube.attributes:
del cube.attributes["commit"]
logger.debug("Merging final cubes.")
# TODO: Verify that this works as expected.
merged_cubes = monthly_cubes.merge()
self.cubes = iris.cube.CubeList(
cube
for cube in merged_cubes
if cube.attributes["processing_mode"] == "Reprocessing"
)
logger.debug("Finished merging.")
# If all the data has been processed, not just a subset.
if process_slice == slice(None):
logger.debug("Writing cache for entire timespan")
self.write_cache()
class CRU(MonthlyDataset):
_pretty = "CRU"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "CRU")
# Ignore warning regarding cloud cover units - they are fixed below.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=("Ignoring netCDF variable 'cld' invalid units 'percentage'"),
)
# TODO: In order to use the 'stn' variable - with information
# about the measurement stations, the files have to be handled
# individually so that we can keep track of which stn cube
# belongs to which data cube.
raw_cubes = iris.load(os.path.join(self.dir, "*.nc"))
# TODO: For now, remove the 'stn' cubes (see above).
self.cubes = iris.cube.CubeList(
[cube for cube in raw_cubes if cube.name() != "stn"]
)
# Fix units for cloud cover.
for cube in self.cubes:
if cube.name() == "cloud cover":
cube.units = cf_units.Unit("percent")
break
# NOTE: Measurement times are listed as being in the middle of the
# month, requiring no further intervention.
class ERA5_Temperature(MonthlyDataset):
_pretty = "ERA5 Temperature"
pretty_variable_names = {
"Mean 2 metre temperature": "Mean Temp",
"Min 2 metre temperature": "Min Temp",
"Max 2 metre temperature": "Max Temp",
"DTR 2 metre temperature": "Diurnal Temp Range",
}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ERA5", "temperature")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
files = sorted(
glob.glob(
os.path.join(self.dir, "**", "*_monthly_mean_min_max_dtr.nc"),
recursive=True,
)
)
if not files:
logger.info("No processed files found. Downloading and processing now.")
retrieval_processing(
retrieve(
variable="2t",
start=PartialDateTime(1990, 1, 1),
end=PartialDateTime(2019, 1, 1),
target_dir=self.dir,
),
processing_class=MonthlyMeanMinMaxDTRWorker,
n_threads=10,
soft_filesize_limit=700,
)
files = sorted(
glob.glob(
os.path.join(self.dir, "**", "*_monthly_mean_min_max_dtr.nc"),
recursive=True,
)
)
assert files
self.cubes = homogenise_cube_attributes(load_cubes(files)).merge()
self.write_cache()
class ERA5_TotalPrecipitation(MonthlyDataset):
_pretty = "ERA5 Total Precipitation"
pretty_variable_names = {"Total precipitation": "Precipitation"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ERA5", "tp")
self.cubes = iris.cube.CubeList(
[iris.load_cube(os.path.join(self.dir, "*.nc"))]
)
class ERA5_DryDayPeriod(MonthlyDataset):
_pretty = "ERA5 Dry Day Period"
pretty_variable_names = {"dry_day_period": "Dry Day Period"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ERA5", "tp_daily")
self.cubes = self.read_cache()
if self.cubes:
return
# Sort so that time is increasing.
filenames = sorted(
glob.glob(os.path.join(self.dir, "**", "*_daily_mean.nc"), recursive=True)
)
logger.info("Constructing dry day period cube.")
dry_day_period_cubes = iris.cube.CubeList()
prev_dry_day_period = None
prev_end = None
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Collapsing a non-contiguous coordinate. Metadata may not "
"be fully descriptive for 'time'."
),
)
for filename in tqdm(filenames):
raw_cube = iris.load_cube(filename)
n_days = raw_cube.shape[0]
n_lats = raw_cube.shape[1]
n_lons = raw_cube.shape[2]
# The first time around only, create empty arrays. This will introduce
# some negative bias for the first month(s), but this should be
# negligible overall (especially since the first year is probably not
# being used anyway).
if prev_dry_day_period is None:
assert prev_end is None
prev_dry_day_period = np.zeros((n_lats, n_lons), dtype=np.int64)
prev_end = np.zeros((n_lats, n_lons), dtype=np.bool_)
# Calculate dry days using metre per hour threshold, since the daily
# data here is an average of the hourly total precipitation data.
dry_days = raw_cube.data < M_PER_HR_THRES
# Find contiguous blocks in the time dimension where dry_days is True.
structure = np.zeros((3, 3, 3), dtype=np.int64)
structure[:, 1, 1] = 1
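                # With this structuring element, `scipy.ndimage.label` connects
                # True values along the time (first) axis only, so each label
                # corresponds to a run of consecutive dry days at one grid cell.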
labelled = scipy.ndimage.label(dry_days, structure=structure)
slices = scipy.ndimage.find_objects(labelled[0])
dry_day_period = np.zeros((n_lats, n_lons), dtype=np.int64)
beginning = np.zeros((n_lats, n_lons), dtype=np.bool_)
end = np.zeros_like(beginning)
for slice_object in slices:
time_slice = slice_object[0]
lat_slice = slice_object[1]
lon_slice = slice_object[2]
assert lat_slice.stop - lat_slice.start == 1
assert lon_slice.stop - lon_slice.start == 1
latitude = lat_slice.start
longitude = lon_slice.start
period_length = time_slice.stop - time_slice.start
if period_length > dry_day_period[latitude, longitude]:
dry_day_period[latitude, longitude] = period_length
if time_slice.start == 0:
beginning[latitude, longitude] = True
else:
beginning[latitude, longitude] = False
if time_slice.stop == n_days:
end[latitude, longitude] = True
else:
end[latitude, longitude] = False
# Once the data for the current month has been processed, look at the
# previous month to see if dry day periods may be joined up.
overlap = prev_end & beginning
dry_day_period[overlap] += prev_dry_day_period[overlap]
# Prepare for the next month's analysis.
prev_dry_day_period = dry_day_period
prev_end = end
# Create new Cube with the same latitudes and longitudes, and an
# averaged time.
coords = [
(raw_cube.coord("latitude"), 0),
(raw_cube.coord("longitude"), 1),
]
# Modify the time coordinate such that it is recorded with
# respect to a common date, as opposed to relative to the
# beginning of the respective month as is the case for the
# cube loaded above.
# Take the new 'mean' time as the average of the first and last time.
min_time = raw_cube.coord("time").cell(0).point
max_time = raw_cube.coord("time").cell(-1).point
centre_datetime = min_time + ((max_time - min_time) / 2)
new_time = cf_units.date2num(
centre_datetime, self.time_unit_str, self.calendar
)
time_coord = iris.coords.DimCoord(
new_time, units=self.time_unit, standard_name="time"
)
dry_day_period_cube = iris.cube.Cube(
dry_day_period,
dim_coords_and_dims=coords,
units=cf_units.Unit("days"),
var_name="dry_day_period",
aux_coords_and_dims=[(time_coord, None)],
)
dry_day_period_cube.units = cf_units.Unit("days")
dry_day_period_cubes.append(dry_day_period_cube)
raw_cubes = iris.cube.CubeList([dry_day_period_cubes.merge_cube()])
self.cubes = raw_cubes
self.write_cache()
class ERA5_CAPEPrecip(MonthlyDataset):
_pretty = "ERA5 Cape x Precip"
pretty_variable_names = {"Product of CAPE and Precipitation": "CAPE x Precip"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ERA5", "CAPE_P")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
files = sorted(glob.glob(os.path.join(self.dir, "**", "*.nc"), recursive=True))
raw_cubes = load_cubes(files)
self.cubes = iris.cube.CubeList([raw_cubes.merge_cube()])
self.write_cache()
class ESA_CCI_Fire(Dataset):
_pretty = "ESA CCI Fire"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ESA-CCI-Fire_burnedarea")
self.cubes = iris.cube.CubeList(
[
iris.load_cube(
os.path.join(
self.dir, "MODIS_cci.BA.2001.2016.1440.720.365days.sum.nc"
)
)
]
)
self.time_unit_str = self.cubes[0].coord("time").units.name
self.calendar = self.cubes[0].coord("time").units.calendar
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.interpolate_yearly_data(start, end)
class ESA_CCI_Landcover(Dataset):
_pretty = "ESA Landcover"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ESA-CCI-LC_landcover", "0d25_landcover")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
filenames = glob.glob(os.path.join(self.dir, "*.nc"))
filenames.sort() # increasing years
self.raw_cubes = iris.load(filenames)
# To concatenate the cubes, take advantage of the fact that there
# are 17 cubes per year, and then simply loop over the years,
# joining the corresponding cubes into lists corresponding to their
# variable.
cube_lists = []
for i in range(17):
cube_lists.append(iris.cube.CubeList())
n_years = len(self.raw_cubes) / 17
assert np.isclose(n_years, int(n_years))
n_years = int(n_years)
years = range(1992, 2016)
assert len(years) == n_years
self.time_unit_str = "hours since 1970-01-01 00:00:00"
self.calendar = "gregorian"
time_unit = cf_units.Unit(self.time_unit_str, calendar=self.calendar)
for i in range(n_years):
time = iris.coords.DimCoord(
[
cf_units.date2num(
datetime(years[i], 1, 1), self.time_unit_str, self.calendar
)
],
standard_name="time",
units=time_unit,
)
for j in range(17):
cube = self.raw_cubes[(17 * i) + j]
cube_coords = cube.coords()
cube2 = iris.cube.Cube(cube.lazy_data().reshape(1, 720, 1440))
cube2.attributes = cube.attributes
cube2.long_name = cube.long_name
cube2.name = cube.name
cube2.standard_name = cube.standard_name
cube2.units = cube.units
cube2.var_name = cube.var_name
for key in ["id", "tracking_id", "date_created"]:
del cube2.attributes[key]
cube2.attributes["time_coverage_start"] = self.raw_cubes[0].attributes[
"time_coverage_start"
]
cube2.attributes["time_coverage_end"] = self.raw_cubes[-1].attributes[
"time_coverage_end"
]
cube2.add_dim_coord(time, 0)
cube2.add_dim_coord(cube_coords[0], 1)
cube2.add_dim_coord(cube_coords[1], 2)
cube_lists[j].append(cube2)
self.cubes = iris.cube.CubeList()
for cube_list in cube_lists:
self.cubes.append(cube_list.concatenate_cube())
self.write_cache()
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.interpolate_yearly_data(start, end)
class Ext_ESA_CCI_Landcover_PFT(Dataset):
"""Extended ESA CCI Landcover dataset."""
_pretty = "Ext ESA Landcover"
def __init__(self, start_year=1992, end_year=2020, max_workers=2, write_cache=True):
"""Load the dataset with downloading and processing carried out as needed.
Args:
start_year (int): Start year (inclusive).
end_year (int): End year (exclusive).
max_workers (int): Maximum number of concurrent file downloads.
write_cache (bool): Whether to write processed data to the cache or not.
"""
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
import cdsapi
client = cdsapi.Client(quiet=True, progress=False, delete=False)
def non_delete_retrieve_extract(year, *request):
"""Retrieve and extract a request without overwriting."""
if os.path.exists(request[2]):
logger.warning(
f"Target file '{request[2]}' already exists. Not downloading."
)
else:
logger.info(f"Downloading to target file '{request[2]}'.")
client.retrieve(*request)
extract_dir = os.path.join(download_dir, f"extracted_{year}")
if os.path.isdir(extract_dir):
logger.warning(
f"Extract directory '{extract_dir}' already exists. "
"Not extracting."
)
else:
logger.info(f"Extracting to directory '{extract_dir}'. ")
# Unpack the downloaded archive.
shutil.unpack_archive(
request[2],
extract_dir=extract_dir,
)
# Download data for processing.
download_dir = os.path.join(
os.environ.get("EPHEMERAL", tempfile.gettempdir()), self.name
)
if not os.path.isdir(download_dir):
os.mkdir(download_dir)
executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
future_to_dest = {
executor.submit(
non_delete_retrieve_extract,
year,
"satellite-land-cover",
{
"variable": "all",
"format": "tgz",
"version": [
"v2.0.7cds",
"v2.1.1",
],
"year": [
f"{year}",
],
},
os.path.join(download_dir, f"download_{year}.tar.gz"),
): (year, os.path.join(download_dir, f"extracted_{year}"))
for year in range(start_year, end_year)
}
all_pfts = get_mapping_pfts(lc_to_pft_map)
regridded_pft_cubes = iris.cube.CubeList()
for future in tqdm(
concurrent.futures.as_completed(future_to_dest),
desc="Processing ESA CCI Landcover",
total=len(future_to_dest),
):
future.result() # Ensure any raised Exceptions are raised again here.
year, extract_dir = future_to_dest[future]
# Cache the results of processing.
processed_file = os.path.join(extract_dir, "processed.nc")
if os.path.isfile(processed_file):
logger.info(f"Loaded processed PFTs from '{processed_file}'.")
regridded_pft_cubes.extend(iris.load(processed_file))
# Do not carry out the normal processing below, as this is contained
# in the loaded file.
continue
# Process the resulting file by converting the landcover categories to
# PFTs, followed by regridding.
logger.info(f"Loading landcover for year {year}.")
category_cube = iris.load_cube(
os.path.join(extract_dir, "*.nc"),
constraint=iris.Constraint("land_cover_lccs"),
)
assert category_cube.shape[0] == 1, "There should only be one time."
logger.info(f"Converting landcover to PFTs for year {year}.")
# 18 MB for 2 GB worker memory.
category_cube = category_cube[0]
category_cube = category_cube.copy(
data=category_cube.core_data().rechunk(("10MiB", -1))
)
pft_cubes = convert_to_pfts(category_cube, lc_to_pft_map, 0, 220)
year_regridded_pft_cubes = iris.cube.CubeList()
logger.info(f"Regridding PFTs for year {year}.")
# 110 MB for 15 GB worker memory (1 thread per worker).
regrid_max_chunk_size = "110MB"
for pft_cube in tqdm(pft_cubes, desc=f"Regridding ({year})"):
tgt_cube = dummy_lat_lon_cube(da.zeros((720, 1440)))
tgt_cube.metadata = pft_cube.metadata
tgt_cube.add_aux_coord(pft_cube.coord("time"))
year_regridded_pft_cubes.append(
spatial_chunked_regrid(
pft_cube,
tgt_cube,
iris.analysis.AreaWeighted(),
max_src_chunk_size=regrid_max_chunk_size,
max_tgt_chunk_size=regrid_max_chunk_size,
)
)
# Cache the data.
logger.info("Realising processed PFTs data.")
year_regridded_pft_cubes.realise_data()
iris.save(year_regridded_pft_cubes, processed_file, zlib=False)
logger.info(f"Cached processed PFTs in '{processed_file}'.")
# Record the processed cubes.
regridded_pft_cubes.extend(year_regridded_pft_cubes)
executor.shutdown()
        # Process attributes that will differ from year to year to allow merging of
# the cubes (id, tracking_id, creation_date, time_coverage_start,
# time_coverage_end).
merged_pft_cubes = iris.cube.CubeList()
for pft_name in all_pfts:
cubes = regridded_pft_cubes.extract(iris.Constraint(pft_name))
assert len(cubes) == len(
future_to_dest
), "There should be as many cubes as there are downloaded years."
join_attributes = (
"id",
"tracking_id",
"creation_date",
"contact",
"geospatial_lon_max",
"geospatial_lon_min",
"history",
"license",
"naming_authority",
"product_version",
"project",
"references",
"source",
"title",
"type",
)
joined_values = {}
for join_attribute in join_attributes:
joined_values[join_attribute] = tuple(
sorted(set(cube.attributes[join_attribute] for cube in cubes))
)
cube_time_coverages = tuple(
"-".join(
(
cube.attributes["time_coverage_start"],
cube.attributes["time_coverage_end"],
)
)
for cube in cubes
)
# Remove the original attributes.
for attribute in list(join_attributes) + [
"time_coverage_start",
"time_coverage_end",
# Removing flag-related attributes since this prevents round-trip
# consistency of the units '1' that will be assigned later. Otherwise
# these units would be reset upon saving/loading.
# See https://github.com/SciTools/iris/commit/d4f9a3a2c5b7125647a3fcb354a754d2a893d08e.
"flag_meanings",
"flag_values",
]:
for cube in cubes:
del cube.attributes[attribute]
# Replace the original attributes with aggregated versions.
for cube in cubes:
for join_attribute, joined in joined_values.items():
cube.attributes[join_attribute] = joined
cube.attributes["time_coverages"] = cube_time_coverages
# Ensure the correct units are set.
cube.units = cf_units.Unit("1")
# Merge and divide by 100 so that the result is in the range [0, 1].
merged_cube = cubes.merge_cube()
rescaled_cube = merged_cube / 100.0
# Avoid losing properties like 'long_name' due to the division.
rescaled_cube.metadata = merged_cube.metadata
merged_pft_cubes.append(rescaled_cube)
assert len(merged_pft_cubes) == len(
all_pfts
), "There should be as many cubes as PFTs."
# Rename variables to match the previous ESA CCI PFT dataset.
for cube in merged_pft_cubes:
cube.long_name = "pft" + cube.long_name.replace(".", "")
# Aggregate PFTs following Forkel et al. 2017.
pft_aggregations = {
# Natural and managed grass & croplands.
"HrbCrp": ("pftHerb", "pftCrop"),
# All Tree PFTs
"TreeAll": ("pftTreeBE", "pftTreeBD", "pftTreeNE", "pftTreeND"),
# All Shrub PFTs
"ShrubAll": ("pftShrubBE", "pftShrubBD", "pftShrubNE"),
# All Broadleaf PFTs
"Broadleaf": ("pftTreeBE", "pftTreeBD", "pftShrubBE", "pftShrubBD"),
# All Needleleaf PFTs
"Needleleaf": ("pftTreeNE", "pftTreeND", "pftShrubNE"),
}
for agg_name, pfts in pft_aggregations.items():
agg_cube = reduce(
operator.add,
(
merged_pft_cubes.extract_cube(iris.Constraint(pft_name))
for pft_name in pfts
),
)
agg_cube.long_name = agg_name
agg_cube.units = cf_units.Unit("1")
merged_pft_cubes.append(agg_cube)
self.cubes = merged_pft_cubes
if write_cache:
self.write_cache()
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.interpolate_yearly_data(start, end)
class ESA_CCI_Landcover_PFT(Dataset):
_pretty = "ESA Landcover"
_not_implemented = True
def __init__(self, override=False):
if not override:
raise NotImplementedError("Use Ext_ESA_CCI_Landcover_PFT instead.")
self.dir = os.path.join(DATA_DIR, "ESA-CCI-LC_landcover", "0d25_lc2pft")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
loaded_cubes = iris.load(os.path.join(self.dir, "*.nc"))
time_coord = None
for cube in loaded_cubes:
if cube.coords()[0].name() == "time":
time_coord = cube.coord("time")
break
assert time_coord.standard_name == "time"
# fix peculiar 'z' coordinate, which should be the number of years
for cube in loaded_cubes:
coord_names = [coord.name() for coord in cube.coords()]
if "z" in coord_names:
assert coord_names[0] == "z"
cube.remove_coord("z")
cube.add_dim_coord(time_coord, 0)
self.cubes = loaded_cubes
self.time_unit_str = time_coord.units.name
self.calendar = time_coord.units.calendar
for cube in self.cubes:
# Fill masked elements with the minimum value.
fill_val = np.min(cube.data)
cube.data.data[cube.data.mask] = fill_val
logger.warning(f"Filling dataset: {self}, cube: {cube} with: {fill_val}")
# Reset mask.
cube.data.mask = np.zeros_like(cube.data.mask)
self.write_cache()
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.interpolate_yearly_data(start, end)
class ESA_CCI_Soilmoisture(MonthlyDataset):
_pretty = "ESA CCI Soil Moisture"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "ESA-CCI-SM_soilmoisture")
self.cubes = iris.load(os.path.join(self.dir, "*.nc"))
class ESA_CCI_Soilmoisture_Daily(Dataset):
_pretty = "ESA CCI Daily Soil Moisture"
_not_implemented = True
def __init__(self):
raise NotImplementedError("Use ESA_CCI_Soilmoisture Dataset for monthly data!")
self.dir = os.path.join(DATA_DIR, "soil-moisture", "daily_files", "COMBINED")
files = sorted(glob.glob(os.path.join(self.dir, "**", "*.nc")))
raw_cubes = load_cubes(files, 100)
# Delete varying attributes.
for cube in raw_cubes:
for attr in ["id", "tracking_id", "date_created"]:
del cube.attributes[attr]
# For the observation timestamp cubes, remove the 'valid_range'
# attribute, which varies from cube to cube. The values of this
# parameter are [-0.5, 0.5] for day 0, [0.5, 1.5] for day 1, etc...
#
# TODO: This seems to work but seems kind of hacky - is it really
# guaranteed that the ordering of the cubes is constant?
for cube in raw_cubes[7:None:8]:
del cube.attributes["valid_range"]
self.cubes = raw_cubes.concatenate()
for cube in self.cubes:
iris.coord_categorisation.add_month_number(cube, "time")
iris.coord_categorisation.add_year(cube, "time")
# Perform averaging over months in each year.
self.monthly_means = iris.cube.CubeList()
for cube in self.cubes:
self.monthly_means.append(
cube.aggregated_by(["month_number", "year"], iris.analysis.MEAN)
)
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
raise NotImplementedError("Use ESA_CCI_Soilmoisture Dataset for monthly data!")
# TODO: Isolate actual soil moisture.
return self.monthly_means.extract(
iris.Constraint(time=lambda t: end >= t.point >= start)
)
class GFEDv4(MonthlyDataset):
"""Without small fires."""
_pretty = "GFED4"
pretty_variable_names = {"monthly burned area": "GFED4 BA"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "gfed4", "data")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
filenames = glob.glob(os.path.join(self.dir, "*MQ*.hdf"))
filenames.sort() # increasing months & years
datetimes = []
data = []
for f in filenames:
hdf = SD(f, SDC.READ)
# TODO: Use 'BurnedAreaUncertainty' dataset, and maybe others,
# like 'FirePersistence' (viewed using hdf.datasets()).
burned_area = hdf.select("BurnedArea")
attributes = burned_area.attributes()
split_f = os.path.split(f)[1]
year = int(split_f[11:15])
month = int(split_f[15:17])
assert 1990 < year < 2030
assert 0 < month < 13
datetimes.append(datetime(year, month, 1))
data.append(
burned_area[:][np.newaxis].astype("float64")
* attributes["scale_factor"]
)
data = np.vstack(data)
unit = cf_units.Unit(attributes["units"])
long_name = attributes["long_name"]
calendar = "gregorian"
time_unit_str = "days since 1970-01-01 00:00:00"
time_unit = cf_units.Unit(time_unit_str, calendar=calendar)
time_coord = iris.coords.DimCoord(
[cf_units.date2num(dt, time_unit_str, calendar) for dt in datetimes],
standard_name="time",
units=time_unit,
)
latitudes = iris.coords.DimCoord(
get_centres(np.linspace(-90, 90, 721)),
standard_name="latitude",
units="degrees",
)
longitudes = iris.coords.DimCoord(
get_centres(np.linspace(-180, 180, 1441)),
standard_name="longitude",
units="degrees",
)
latitudes.guess_bounds()
longitudes.guess_bounds()
burned_area_cube = iris.cube.Cube(
# Reverse latitudes.
data[:, ::-1],
long_name=long_name,
units=unit,
dim_coords_and_dims=[(time_coord, 0), (latitudes, 1), (longitudes, 2)],
)
# Normalise using the grid cell areas, divide by 10000 to convert the scaling
# factors from m2 to hectares (the burned areas are in hectares originally).
# NOTE: Some burned area fractions may be above 1!
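        # Units check (illustrative): burned area [ha] / (cell area [m2] / 10000) =
        # burned area [ha] / cell area [ha], i.e. a dimensionless fraction.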
burned_area_cube.data /= (
iris.analysis.cartography.area_weights(burned_area_cube) / 10000
)
burned_area_cube.units = cf_units.Unit(1)
self.cubes = iris.cube.CubeList([burned_area_cube])
self.write_cache()
class GFEDv4s(MonthlyDataset):
"""Includes small fires."""
_pretty = "GFED4s"
pretty_variable_names = {"Burnt_Area": "GFED4s BA"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "gfed4", "data")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
filenames = glob.glob(os.path.join(self.dir, "*.hdf5"))
filenames.sort() # increasing years
# for each file (each year), load the data, the latitudes and
# longitudes and place them into a cube
years = []
data = []
for f in filenames:
year = int(f[-9:-5])
years.append(year)
container = h5py.File(f, mode="r")
for month_str in [format(m, "02d") for m in range(1, 13)]:
data.append(
container["burned_area"][month_str]["burned_fraction"][()][
None, ...
]
)
assert years == sorted(years), "Should be monotonically increasing"
# use the last file (of previous for loop) to get latitudes and
# longitudes, assuming that they are the same for all the data
# files!
latitudes = container["lat"][()]
longitudes = container["lon"][()]
# make sure that the lats and lons are uniform along the grid
assert np.all(longitudes[0] == longitudes)
assert np.all(latitudes.T[0] == latitudes.T)
longitudes = iris.coords.DimCoord(
longitudes[0], standard_name="longitude", units="degrees"
)
latitudes = iris.coords.DimCoord(
latitudes.T[0], standard_name="latitude", units="degrees"
)
time_unit_str = "hours since 1970-01-01 00:00:00"
calendar = "gregorian"
time_unit = cf_units.Unit(time_unit_str, calendar=calendar)
num_times = []
for m in range(len(data)):
month = (m % 12) + 1
year = (m // 12) + min(years)
assert year <= max(years)
num_times.append(
cf_units.date2num(datetime(year, month, 1), time_unit_str, calendar)
)
time_coord = iris.coords.DimCoord(
num_times, standard_name="time", units=time_unit
)
for coord in (longitudes, latitudes, time_coord):
coord.guess_bounds()
self.cubes = iris.cube.CubeList(
[
iris.cube.Cube(
np.vstack(data),
dim_coords_and_dims=[
(time_coord, 0),
(latitudes, 1),
(longitudes, 2),
],
)
]
)
self.cubes[0].units = cf_units.Unit(1)
self.cubes[0].var_name = "Burnt_Area"
self.write_cache()
class GlobFluo_SIF(MonthlyDataset):
_pretty = "Glob Fluo SIF"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "GlobFluo_SIF")
loaded_cube = iris.load_cube(os.path.join(self.dir, "*.nc"))
        # Need to construct a new time coordinate, as values are relative to
# 1582-10-14, which is not supported by the cf_units gregorian
# calendar (needs to start from 1582-10-15, I think).
# Get the original number of days relative to 1582-10-14 00:00:00.
days_since_1582_10_14 = loaded_cube.coords()[0].points
# Define new time unit relative to a supported date.
new_time_unit = cf_units.Unit(
"days since 1582-10-16 00:00:00", calendar="gregorian"
)
# The corresponding number of days for the new time unit.
days_since_1582_10_16 = days_since_1582_10_14 - 2
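        # Moving the reference date forward by two days while subtracting two days
        # from the values leaves the represented datetimes unchanged.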
loaded_cube.remove_coord("time")
new_time = iris.coords.DimCoord(
days_since_1582_10_16, standard_name="time", units=new_time_unit
)
loaded_cube.add_dim_coord(new_time, 0)
invalid_mask = np.logical_or(
loaded_cube.data.data > 20, loaded_cube.data.data < 0
)
logger.info(f"Masking {np.sum(invalid_mask)} invalid values for SIF.")
loaded_cube.data.mask |= invalid_mask
self.cubes = iris.cube.CubeList([loaded_cube])
class GPW_v4_pop_dens(Dataset):
_pretty = "GPW4 Pop Density"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "GPW_v4_pop_dens")
netcdf_dataset = netCDF4.Dataset(glob.glob(os.path.join(self.dir, "*.nc"))[0])
data = netcdf_dataset[
"Population Density, v4.10 (2000, 2005, 2010,"
" 2015, 2020): 30 arc-minutes"
]
datetimes = [datetime(year, 1, 1) for year in [2000, 2005, 2010, 2015, 2020]]
self.time_unit_str = "days since {:}".format(str(datetime(1970, 1, 1)))
self.calendar = "gregorian"
self.time_unit = cf_units.Unit(self.time_unit_str, calendar=self.calendar)
time = iris.coords.DimCoord(
cf_units.date2num(datetimes, self.time_unit_str, calendar="gregorian"),
standard_name="time",
units=self.time_unit,
)
latitudes = iris.coords.DimCoord(
netcdf_dataset["latitude"][:], standard_name="latitude", units="degrees"
)
longitudes = iris.coords.DimCoord(
netcdf_dataset["longitude"][:], standard_name="longitude", units="degrees"
)
coords = [(time, 0), (latitudes, 1), (longitudes, 2)]
self.cubes = iris.cube.CubeList(
[
iris.cube.Cube(
data[:5],
long_name=data.long_name,
var_name="Population_Density",
units=cf_units.Unit("1/km2"),
dim_coords_and_dims=coords,
)
]
)
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
"""Linear interpolation onto the target months."""
final_cubelist = self.interpolate_yearly_data(start, end)
assert len(final_cubelist) == 1
return final_cubelist
class GSMaP_dry_day_period(MonthlyDataset):
"""Calculate the length of the longest preceding dry day period.
This definition only considers dry day periods within the current month, or dry
day periods that occur within the current month AND previous months, ONLY if these
join up contiguously at the month boundaries.
Other definitions taking into account (only) dry day periods in a certain number
of months leading up to the current month may be possible as well, although this
could also be implemented in post-processing.
"""
_pretty = "GSMaP Dry Day Period"
pretty_variable_names = {"dry_day_period": "Dry Day Period"}
def __init__(self, times="00Z-23Z"):
self.dir = os.path.join(
DATA_DIR,
"GSMaP_Precipitation",
"hokusai.eorc.jaxa.jp",
"realtime_ver",
"v6",
"daily_G",
times,
)
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
# Sort so that time is increasing.
filenames = sorted(glob.glob(os.path.join(self.dir, "**", "*.nc")))
logger.info("Constructing dry day period cube.")
dry_day_period_cubes = iris.cube.CubeList()
prev_dry_day_period = None
prev_end = None
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Collapsing a non-contiguous coordinate. Metadata may not "
"be fully descriptive for 'time'."
),
)
for filename in tqdm(filenames):
# Clip outer values which are duplicated in the data
# selection below and not needed here.
raw_cube = iris.load_cube(filename)[..., 1:1441]
n_days = raw_cube.shape[0]
n_lats = raw_cube.shape[1]
n_lons = raw_cube.shape[2]
# The first time around only, create empty arrays. This will introduce
# some negative bias for the first month(s), but this should be
# negligible overall (especially since the first year is probably not
# being used anyway).
if prev_dry_day_period is None:
assert prev_end is None
prev_dry_day_period = np.zeros((n_lats, n_lons), dtype=np.int64)
prev_end = np.zeros((n_lats, n_lons), dtype=np.bool_)
longitude_points = raw_cube.coord("longitude").points
assert np.min(longitude_points) == 0.125
assert np.max(longitude_points) == 359.875
# No need to calculate mean cube here, as we are only interested in
# the raw daily precipitation data.
# Calculate dry days.
dry_days = raw_cube.data < MM_PER_HR_THRES
# Find contiguous blocks in the time dimension where dry_days is True.
structure = np.zeros((3, 3, 3), dtype=np.int64)
structure[:, 1, 1] = 1
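                # This structuring element connects cells along the time axis only,
                # so dry spells are labelled independently for each (lat, lon) column.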
labelled = scipy.ndimage.label(dry_days, structure=structure)
slices = scipy.ndimage.find_objects(labelled[0])
dry_day_period = np.zeros((n_lats, n_lons), dtype=np.int64)
beginning = np.zeros((n_lats, n_lons), dtype=np.bool_)
end = np.zeros_like(beginning)
for slice_object in slices:
time_slice = slice_object[0]
lat_slice = slice_object[1]
lon_slice = slice_object[2]
assert lat_slice.stop - lat_slice.start == 1
assert lon_slice.stop - lon_slice.start == 1
latitude = lat_slice.start
longitude = lon_slice.start
period_length = time_slice.stop - time_slice.start
if period_length > dry_day_period[latitude, longitude]:
dry_day_period[latitude, longitude] = period_length
if time_slice.start == 0:
beginning[latitude, longitude] = True
else:
beginning[latitude, longitude] = False
if time_slice.stop == n_days:
end[latitude, longitude] = True
else:
end[latitude, longitude] = False
# Once the data for the current month has been processed, look at the
# previous month to see if dry day periods may be joined up.
overlap = prev_end & beginning
dry_day_period[overlap] += prev_dry_day_period[overlap]
# Prepare for the next month's analysis.
prev_dry_day_period = dry_day_period
prev_end = end
# Create new Cube with the same latitudes and longitudes, and an
# averaged time.
coords = [
(raw_cube.coord("latitude"), 0),
(raw_cube.coord("longitude"), 1),
]
# Modify the time coordinate such that it is recorded with
# respect to a common date, as opposed to relative to the
# beginning of the respective month as is the case for the
# cube loaded above.
# Take the new 'mean' time as the average of the first and last time.
min_time = raw_cube.coord("time").cell(0).point
max_time = raw_cube.coord("time").cell(-1).point
centre_datetime = min_time + ((max_time - min_time) / 2)
new_time = cf_units.date2num(
centre_datetime, self.time_unit_str, self.calendar
)
time_coord = iris.coords.DimCoord(
new_time, units=self.time_unit, standard_name="time"
)
dry_day_period_cube = iris.cube.Cube(
dry_day_period,
dim_coords_and_dims=coords,
units=cf_units.Unit("days"),
var_name="dry_day_period",
aux_coords_and_dims=[(time_coord, None)],
)
dry_day_period_cube.units = cf_units.Unit("days")
dry_day_period_cubes.append(dry_day_period_cube)
self.cubes = iris.cube.CubeList([dry_day_period_cubes.merge_cube()])
self.write_cache()
class GSMaP_precipitation(MonthlyDataset):
_pretty = "GSMaP Precipitation"
pretty_variable_names = {"dry_days": "Dry Days", "precip": "Precipitation"}
def __init__(self, times="00Z-23Z"):
self.dir = os.path.join(
DATA_DIR,
"GSMaP_Precipitation",
"hokusai.eorc.jaxa.jp",
"realtime_ver",
"v6",
"daily_G",
times,
)
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
# Sort so that time is increasing.
filenames = sorted(glob.glob(os.path.join(self.dir, "**", "*.nc")))
calendar = "gregorian"
time_unit_str = "days since 1970-01-01 00:00:00"
time_unit = cf_units.Unit(time_unit_str, calendar=calendar)
logger.info("Constructing average precipitation and dry days cubes.")
monthly_average_cubes = iris.cube.CubeList()
dry_days_cubes = iris.cube.CubeList()
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(
"Collapsing a non-contiguous coordinate. Metadata may not "
"be fully descriptive for 'time'."
),
)
for filename in tqdm(filenames):
# Clip outer values which are duplicated in the data
# selection below and not needed here.
raw_cube = iris.load_cube(filename)[..., 1:1441]
monthly_cube = raw_cube.collapsed("time", iris.analysis.MEAN)
longitude_points = monthly_cube.coord("longitude").points
assert np.min(longitude_points) == 0.125
assert np.max(longitude_points) == 359.875
# Modify the time coordinate such that it is recorded with
# respect to a common date, as opposed to relative to the
# beginning of the respective month as is the case for the
# cube loaded above.
centre_datetime = monthly_cube.coord("time").cell(0).point
new_time = cf_units.date2num(centre_datetime, time_unit_str, calendar)
monthly_cube.coord("time").bounds = None
monthly_cube.coord("time").points = [new_time]
monthly_cube.coord("time").units = time_unit
monthly_cube.units = cf_units.Unit("mm/hr")
monthly_average_cubes.append(monthly_cube)
# Calculate dry day statistics.
dry_days_data = np.sum(raw_cube.data < MM_PER_HR_THRES, axis=0)
coords = [
(monthly_cube.coord("latitude"), 0),
(monthly_cube.coord("longitude"), 1),
]
dry_days_cubes.append(
iris.cube.Cube(
dry_days_data,
dim_coords_and_dims=coords,
units=cf_units.Unit("days"),
var_name="dry_days",
aux_coords_and_dims=[(monthly_cube.coord("time"), None)],
)
)
self.cubes = iris.cube.CubeList(
[monthly_average_cubes.merge_cube(), dry_days_cubes.merge_cube()]
)
assert len(self.cubes) == 2
self.write_cache()
class HYDE(Dataset):
_pretty = "HYDE"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "HYDE")
self.time_unit_str = "hours since 1970-01-01 00:00:00"
self.calendar = "gregorian"
self.time_unit = cf_units.Unit(self.time_unit_str, calendar=self.calendar)
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
# TODO: Consider upper and lower estimates as well, not just
# baseline??
files = glob.glob(os.path.join(self.dir, "baseline", "*.asc"), recursive=True)
cube_list = iris.cube.CubeList()
mapping = {
"uopp": {},
"urbc": {},
"tot_rice": {},
"tot_rainfed": {},
"tot_irri": {},
"rurc": {},
"rf_rice": {},
"rf_norice": {},
"rangeland": {},
"popd": {},
"popc": {},
"pasture": {},
"ir_rice": {},
"ir_norice": {},
"grazing": {},
"cropland": {},
"conv_rangeland": {},
}
pattern = re.compile(r"(.*)(\d{4})AD")
for f in tqdm(files):
groups = pattern.search(os.path.split(f)[1]).groups()
variable_key = groups[0].strip("_")
year = int(groups[1])
data = np.loadtxt(f, skiprows=6, ndmin=2)
assert data.shape == (2160, 4320)
data = data.reshape(2160, 4320)
data = np.ma.MaskedArray(data, mask=np.isclose(data, -9999))
new_latitudes = get_centres(np.linspace(90, -90, data.shape[0] + 1))
new_longitudes = get_centres(np.linspace(-180, 180, data.shape[1] + 1))
new_lat_coord = iris.coords.DimCoord(
new_latitudes, standard_name="latitude", units="degrees"
)
new_lon_coord = iris.coords.DimCoord(
new_longitudes, standard_name="longitude", units="degrees"
)
grid_coords = [(new_lat_coord, 0), (new_lon_coord, 1)]
time_coord = iris.coords.DimCoord(
cf_units.date2num(
datetime(year, 1, 1), self.time_unit_str, self.calendar
),
standard_name="time",
units=self.time_unit,
)
cube = iris.cube.Cube(
data,
dim_coords_and_dims=grid_coords,
units=mapping[variable_key].get("unit"),
var_name=variable_key,
long_name=mapping[variable_key].get("long_name"),
aux_coords_and_dims=[(time_coord, None)],
)
regrid_cube = regrid(cube)
cube_list.append(regrid_cube)
self.cubes = cube_list.merge()
self.write_cache()
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
"""Linear interpolation onto the target months."""
return self.interpolate_yearly_data(start, end)
class Ext_HYDE(Dataset):
_pretty = "Ext HYDE"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "HYDE_Ext")
self.time_unit_str = "hours since 1970-01-01 00:00:00"
self.calendar = "gregorian"
self.time_unit = cf_units.Unit(self.time_unit_str, calendar=self.calendar)
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
# TODO: Consider upper and lower estimates as well, not just
# baseline??
files = glob.glob(os.path.join(self.dir, "*.asc"), recursive=True)
result_queue = multiprocessing.Queue()
procs = []
for chunked_files in get_batches(files, n=get_ncpus()):
procs.append(
multiprocessing.Process(
target=self.process_files,
args=(
chunked_files,
self.time_unit_str,
self.calendar,
self.time_unit,
result_queue,
),
)
)
procs[-1].start()
cube_list = iris.cube.CubeList()
prog = tqdm(desc="Processing files", total=len(files))
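        # Each worker puts one regridded cube per input file onto the queue, so
        # collect exactly len(files) results before joining the processes.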
while len(cube_list) < len(files):
cube_list.append(result_queue.get())
prog.update()
prog.close()
for proc in procs:
# Wait for workers to finish.
proc.join()
self.cubes = cube_list.merge()
self.write_cache()
@staticmethod
def process_files(files, time_unit_str, calendar, time_unit, result_queue):
mapping = {
variable: {}
for variable in [
"conv_rangeland",
"cropland",
"grazing",
"ir_norice",
"ir_rice",
"pasture",
"popc",
"popd",
"rangeland",
"rf_norice",
"rf_rice",
"rurc",
"shifting",
"tot_irri",
"tot_rainfed",
"tot_rice",
"uopp",
"urbc",
]
}
pattern = re.compile(r"(.*)(\d{4})AD")
regridder = None
for f in files:
groups = pattern.search(os.path.split(f)[1]).groups()
variable_key = groups[0].strip("_")
year = int(groups[1])
data = np.loadtxt(f, skiprows=6, ndmin=2)
assert data.shape == (2160, 4320)
data = np.ma.MaskedArray(data, mask=np.isclose(data, -9999))
new_latitudes = get_centres(np.linspace(90, -90, data.shape[0] + 1))
new_longitudes = get_centres(np.linspace(-180, 180, data.shape[1] + 1))
new_lat_coord = iris.coords.DimCoord(
new_latitudes, standard_name="latitude", units="degrees"
)
new_lon_coord = iris.coords.DimCoord(
new_longitudes, standard_name="longitude", units="degrees"
)
grid_coords = [(new_lat_coord, 0), (new_lon_coord, 1)]
time_coord = iris.coords.DimCoord(
cf_units.date2num(datetime(year, 1, 1), time_unit_str, calendar),
standard_name="time",
units=time_unit,
)
cube = iris.cube.Cube(
data,
dim_coords_and_dims=grid_coords,
units=mapping[variable_key].get("unit"),
var_name=variable_key,
long_name=mapping[variable_key].get("long_name"),
aux_coords_and_dims=[(time_coord, None)],
)
regrid_cube, regridder = regrid(
cube, area_weighted=True, regridder=regridder, return_regridder=True
)
result_queue.put(regrid_cube)
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
"""Linear interpolation onto the target months."""
return self.interpolate_yearly_data(start, end)
class LIS_OTD_lightning_climatology(Dataset):
_pretty = "LIS/OTD"
pretty_variable_names = {
"Combined Flash Rate Monthly Climatology": "Lightning Climatology"
}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "LIS_OTD_lightning_climatology")
loaded_cube = iris.load(os.path.join(self.dir, "*.nc")).extract_cube(
iris.Constraint(name="Combined Flash Rate Monthly Climatology")
)
        # Fix time units so they do not refer to months, as this can't be processed by
# iris / cf_units.
# Realign times so they are at the centre of each month.
# Check that existing time coordinate is as expected.
assert loaded_cube.coord("time").units.origin == "months since 2014-1-1 0:0:0"
assert all(loaded_cube.coord("time").points == np.arange(1, 13))
datetimes = [
(
(
(datetime(2014, month, 1) + relativedelta(months=+1))
- datetime(2014, month, 1)
)
/ 2
)
+ datetime(2014, month, 1)
for month in np.arange(1, 13)
]
time_unit_str = "days since {:}".format(str(datetime(2014, 1, 1)))
time_unit = cf_units.Unit(time_unit_str, calendar="gregorian")
time_coord = iris.coords.DimCoord(
cf_units.date2num(datetimes, time_unit_str, calendar="gregorian"),
standard_name="time",
units=time_unit,
)
loaded_cube.coord("time").points = time_coord.points
loaded_cube.coord("time").units = time_coord.units
invalid_mask = loaded_cube.data.data < 0
logger.info(f"Masking {np.sum(invalid_mask)} invalid values for LIS/OTD.")
loaded_cube.data.mask |= invalid_mask
self.cubes = iris.cube.CubeList([loaded_cube])
# Make sure that the time coordinate is the first coordinate.
self.cubes = self.get_monthly_data(
start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
)
@property
def frequency(self):
return "monthly climatology"
@property
def min_time(self):
# FIXME: Find beginning of data validity!
return "N/A"
@property
def max_time(self):
# FIXME: Find end of data validity!
return "N/A"
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
"""'Broadcast' monthly climatology across the requested time
period.
NOTE: This is a climatology, and not true monthly data!
This method ignores days.
"""
# TODO: Make this work with lazy data?
cube = self.cube
assert (
len(cube.coord("time").points) == 12
), "Only meant to be run starting from the initial state, which as 12 months."
# Time index will vary from the first run (simply re-shuffling the coordinate
# order) to the second run (which will then actually expand the months to the
# desired range).
time_index = cube.coords().index(cube.coord("time"))
datetimes = [datetime(start.year, start.month, 1)]
while datetimes[-1] != PartialDateTime(end.year, end.month):
datetimes.append(datetimes[-1] + relativedelta(months=+1))
output_arrs = []
for dt in datetimes:
selection = [slice(None)] * 3
selection[time_index] = (dt.month - 1) % 12
output_arrs.append(cube[tuple(selection)].data[np.newaxis])
output_data = np.vstack(output_arrs)
time_coord = iris.coords.DimCoord(
cf_units.date2num(datetimes, self.time_unit_str, calendar=self.calendar),
standard_name="time",
units=self.time_unit,
)
new_coords = [
(time_coord, 0),
(cube.coord("latitude"), 1),
(cube.coord("longitude"), 2),
]
output_cube = iris.cube.Cube(
output_data,
dim_coords_and_dims=new_coords,
standard_name=cube.standard_name,
long_name=cube.long_name,
var_name=cube.var_name,
units=cube.units,
attributes=cube.attributes,
)
return iris.cube.CubeList([output_cube])
class LIS_OTD_lightning_time_series(MonthlyDataset):
_pretty = "LIS/OTD Time Series"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "LIS_OTD_lightning_time_series")
self.cubes = self.read_cache()
# Exit __init__ if we have loaded the data.
if self.cubes:
return
# Otherwise keep loading the data.
raw_cubes = iris.load(os.path.join(self.dir, "*.nc"))
# TODO: Use other attributes as well? Eg. separate LIS / OTD data,
# grid cell area, or Time Series Sampling (km^2 / day)?
# Isolate single combined flash rate.
raw_cubes = raw_cubes.extract(
iris.Constraint(name="Combined Flash Rate Time Series")
)
for cube in raw_cubes:
iris.coord_categorisation.add_month_number(cube, "time")
iris.coord_categorisation.add_year(cube, "time")
monthly_cubes = [
cube.aggregated_by(["month_number", "year"], iris.analysis.MEAN)
for cube in raw_cubes
]
# Create new cube(s) where the time dimension is the first
# dimension. To do this, the cube metadata can be copied, while new
# coordinates and corresponding data (both simply
# reshaped/reordered) are assigned.
new_coords = [
(monthly_cubes[0].coord("time"), 0),
(monthly_cubes[0].coord("latitude"), 1),
(monthly_cubes[0].coord("longitude"), 2),
]
self.cubes = iris.cube.CubeList()
for cube in monthly_cubes:
# NOTE: This does not use any lazy data whatsoever, starting
# with the monthly aggregation above.
assert cube.shape[-1] == len(
cube.coord("time").points
), "Old and new time dimension should have the same length"
data_arrs = []
for time_index in range(cube.shape[-1]):
data_arrs.append(cube.data[..., time_index][np.newaxis])
new_data = np.ma.vstack(data_arrs)
new_cube = iris.cube.Cube(new_data, dim_coords_and_dims=new_coords)
new_cube.metadata = deepcopy(cube.metadata)
self.cubes.append(new_cube)
self.write_cache()
class Liu_VOD(MonthlyDataset):
_pretty = "Liu VOD"
pretty_variable_names = {"VODorig": "VOD"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "Liu_VOD")
loaded_cubes = iris.cube.CubeList(
[iris.load_cube(os.path.join(self.dir, "*.nc"))]
)
        # Need to construct a new time coordinate, as values are relative to
# 1582-10-14, which is not supported by the cf_units gregorian
# calendar (needs to start from 1582-10-15, I think).
# Get the original number of days relative to 1582-10-14 00:00:00.
days_since_1582_10_14 = loaded_cubes[0].coords()[0].points
# Define new time unit relative to a supported date.
new_time_unit = cf_units.Unit(
"days since 1582-10-16 00:00:00", calendar="gregorian"
)
# The corresponding number of days for the new time unit.
days_since_1582_10_16 = days_since_1582_10_14 - 2
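        # Moving the reference date forward by two days while subtracting two days
        # from the values leaves the represented datetimes unchanged.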
loaded_cubes[0].remove_coord("time")
new_time = iris.coords.DimCoord(
days_since_1582_10_16, standard_name="time", units=new_time_unit
)
loaded_cubes[0].add_dim_coord(new_time, 0)
self.cubes = loaded_cubes
class MCD64CMQ_C6(MonthlyDataset):
_pretty = "MCD64CMQ C6"
pretty_variable_names = {"Burned Area": "MCD64CMQ BA"}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "MCD64CMQ_C6")
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
filenames = glob.glob(os.path.join(self.dir, "*MQ*.hdf"))
filenames.sort() # increasing months & years
datetimes = []
data = []
for f in filenames:
hdf = SD(f, SDC.READ)
# TODO: Use 'QA' and 'UnmappedFraction' datasets (see hdf.datasets()).
burned_area = hdf.select("BurnedArea")
split_f = os.path.split(f)[1].split(".")[1][1:]
year = int(split_f[:4])
day = int(split_f[4:])
date = datetime(year, 1, 1) + timedelta(day - 1)
assert 2000 <= date.year <= 2030
assert 0 < date.month < 13
datetimes.append(date)
data.append(
burned_area[:][np.newaxis].astype("float64")
# Scale factor from MODIS_C6_BA_User_Guide_1.2, August 2018, to
# yield burnt area in hectares.
* 0.01
)
data = np.vstack(data)
time_coord = iris.coords.DimCoord(
[
cf_units.date2num(dt, self.time_unit_str, self.calendar)
for dt in datetimes
],
standard_name="time",
units=self.time_unit,
)
latitudes = iris.coords.DimCoord(
get_centres(np.linspace(90, -90, 721)),
standard_name="latitude",
units="degrees",
)
longitudes = iris.coords.DimCoord(
get_centres(np.linspace(-180, 180, 1441)),
standard_name="longitude",
units="degrees",
)
latitudes.guess_bounds()
longitudes.guess_bounds()
burned_area_cube = iris.cube.Cube(
data,
long_name="Burned Area",
dim_coords_and_dims=[(time_coord, 0), (latitudes, 1), (longitudes, 2)],
)
# Normalise using the grid cell areas, divide by 10000 to convert the scaling
# factors from m2 to hectares (the burned areas are in hectares originally).
# NOTE: Some burned area fractions may be above 1!
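        # Units check (illustrative): burned area [ha] / (cell area [m2] / 10000) =
        # burned area [ha] / cell area [ha], i.e. a dimensionless fraction.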
burned_area_cube.data /= (
iris.analysis.cartography.area_weights(burned_area_cube) / 10000
)
burned_area_cube.units = cf_units.Unit(1)
self.cubes = iris.cube.CubeList([burned_area_cube])
self.write_cache()
class MOD15A2H_LAI_fPAR(MonthlyDataset):
_pretty = "MOD15A2H"
pretty_variable_names = {
"Fraction of Absorbed Photosynthetically Active Radiation": "FAPAR",
"Leaf Area Index": "LAI",
}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "MOD15A2H_LAI-fPAR")
self.cubes = iris.load(os.path.join(self.dir, "*.nc"))
months = []
for i in range(self.cubes[0].shape[0]):
months.append(self.cubes[0].coords()[0].cell(i).point.month)
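        # Positions where the month number does not increase by 1 mark year
        # boundaries; consecutive boundaries should be 12 samples apart.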
assert np.all(
np.diff(np.where(np.diff(months) != 1)) == 12
), "The year should increase every 12 samples!"
# TODO: Since the day in the month for which the data is provided
# is variable, take into account neighbouring months as well in a
# weighted average (depending on how many days away from the middle
# of the month these other samples are)?
class Ext_MOD15A2H_fPAR(MonthlyDataset):
_pretty = "Ext MOD15A2H"
pretty_variable_names = {
"Fraction of Absorbed Photosynthetically Active Radiation": "FAPAR",
}
def __init__(self):
self.cubes = self.read_cache()
# If a CubeList has been loaded successfully, exit __init__
if self.cubes:
return
# Location of temporary source files.
# See wildfires/data/mosaic_modis_tiles.py
raw_dir = os.path.join(os.environ["EPHEMERAL"], "MOD15A2Hv006_0d25")
files = [
# Ignore 2000217 because it appears to be missing some data which is not
# reflected in the extreme bounds, making processing difficult.
f
for f in glob.glob(os.path.join(raw_dir, "*.nc"))
if "_2000217_" not in f
]
files.sort()
raw_cubes = load_cubes(files)
raw_dates = []
centr_offset = timedelta(
days=3,
seconds=86399,
# The microseconds are not registered when using 'days since ...' units.
# microseconds=500000
)
for f in files:
match = re.search("Fpar_500m_(\d{7})_0d25.nc", os.path.split(f)[-1]).group(
1
)
# The filenames contains the starting date of the 8 day interval - compute
# the midpoint here.
# TODO: Actually propagate and use the original information from the HDF
# files, e.g.
# RANGEBEGINNINGDATE=2014-10-08
# RANGEBEGINNINGTIME=00:00:00
# RANGEENDINGDATE=2014-10-15
# RANGEENDINGTIME=23:59:59
# Note that some end dates overlap the next start dates, e.g. at year
# ends.
raw_dates.append(
datetime(int(match[:4]), 1, 1)
+ timedelta(days=(int(match[4:]) - 1))
+ centr_offset
)
cube_time_coords = np.array(
[
iris.coords.DimCoord(
self.time_unit.date2num(raw_date),
standard_name="time",
units=self.time_unit,
)
for raw_date in raw_dates
]
)
for cube, time_coord in zip(raw_cubes, cube_time_coords):
del cube.attributes["history"]
cube.add_aux_coord(time_coord)
merged_cube = raw_cubes.merge_cube()
merged_time_coord = merged_cube.coord("time")
assert merged_cube.coord_dims(merged_time_coord) == (0,)
assert np.all(merged_cube.data >= 0), "All data should be >= 0"
assert np.all(merged_cube.data <= 1), "All data should be <= 1"
        # Composite dates expected to be missing from the data (given as start dates).
expected_missing = (
datetime(2000, 8, 4),
datetime(2001, 6, 18),
datetime(2001, 6, 26),
datetime(2016, 2, 18),
)
missing_indices = np.where(np.diff(merged_time_coord.points) > (8 + 1e-5))[0]
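        # Time steps larger than the regular 8-day spacing mark missing composites;
        # each must correspond to one of the expected missing dates below.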
assert len(missing_indices) <= len(
expected_missing
), "There should usually be at most 8 days between samples."
for missing_index in missing_indices:
# Convert the centred date from above back to the starting date before
# adding the expected 8-day period.
# TODO: This may need to be modified should any missing values appear
# across year boundaries.
missing_date = raw_dates[missing_index] - centr_offset + timedelta(days=8)
assert missing_date in expected_missing, (
f"Missing date '{missing_date}' was not expected: "
f"{expected_missing}"
)
# TODO: Since the day in the month for which the data is provided
# is variable, take into account neighbouring months as well in a
# weighted average (depending on how many days away from the middle
# of the month these other samples are)?
iris.coord_categorisation.add_year(merged_cube, "time")
iris.coord_categorisation.add_month_number(merged_cube, "time")
averaged_cube = merged_cube.aggregated_by(
["year", "month_number"], iris.analysis.MEAN
)
# Verify the integrity of the time coordinate.
avg_time_coord = averaged_cube.coord("time")
months = []
years = []
for i in range(avg_time_coord.shape[0]):
months.append(avg_time_coord.cell(i).point.month)
years.append(avg_time_coord.cell(i).point.year)
assert np.all(
np.diff(np.where(np.diff(months) != 1)) == 12
), "The year should change every 12 samples!"
assert np.all(
np.diff(np.where(np.diff(years) == 1)) == 12
), "The year should increase every 12 samples!"
averaged_cube.units = "1"
self.cubes = iris.cube.CubeList([averaged_cube])
self.write_cache()
class Simard_canopyheight(Dataset):
_pretty = "Simard Canopy Height"
def __init__(self):
self.dir = os.path.join(DATA_DIR, "Simard_canopyheight")
self.cubes = iris.cube.CubeList(
[iris.load_cube(os.path.join(self.dir, "*.nc"))]
)
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.broadcast_static_data(start, end)
class Thurner_AGB(Dataset):
_pretty = "Thurner AGB"
# TODO: Look at data values - seems like there is a major issue there!
def __init__(self):
self.dir = os.path.join(DATA_DIR, "Thurner_AGB")
# Ignore warning about units, which are fixed below.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=(r"Ignoring netCDF variable.*invalid units 'kg\[C]/m2'"),
)
self.cubes = iris.load(os.path.join(self.dir, "*.nc"))
for cube in self.cubes:
cube.units = cf_units.Unit("kg(C)/m2")
def get_monthly_data(
self, start=PartialDateTime(2000, 1), end=PartialDateTime(2000, 12)
):
return self.broadcast_static_data(start, end)
class VODCA(MonthlyDataset):
"""Global VOD Dataset.
See: https://zenodo.org/record/2575599#.XO6qXHVKibI
"""
_pretty = "VODCA"
pretty_variable_names = {
"Vegetation optical depth Ku-band (18.7 GHz - 19.35 GHz)": "VOD Ku-band",
"Vegetation optical depth X-band (10.65 GHz - 10.7 GHz)": "VOD X-band",
}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "VODCA")
self.cubes = self.read_cache()
# Exit __init__ if we have loaded the data.
if self.cubes:
return
daily_dirs = glob.glob(os.path.join(self.dir, "daily", "*", "*"))
# Calculate monthly averages using the daily data.
assert all(len(os.path.split(dir_name)[1]) == 4 for dir_name in daily_dirs)
mean_cubes = iris.cube.CubeList(
# TODO: Check if using multi-processing here instead of using multiple
# threads has the potential to speed up the averaging.
# Parallel(n_jobs=get_ncpus(), prefer="threads")(
Parallel(n_jobs=1, prefer="threads")(
delayed(self._monthly_average_in_dir)(directory)
for directory in tqdm(daily_dirs)
)
)
mean_cubes = mean_cubes.concatenate()
for cube in mean_cubes:
# Add the band name to the cube name to prevent all variables (cubes)
# having the same name, ie. to differentiate the cubes.
cube.long_name = f"{cube.long_name} {cube.attributes['band']}"
# TODO: Isolate different VOD bands, ignore masks (maybe put in different
# `Dataset` instance?)
self.cubes = mean_cubes
self.write_cache()
@staticmethod
def _monthly_average_in_dir(directory):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=((r".*'vod' invalid units 'unitless'.*"))
)
warnings.filterwarnings(
"ignore", message=((r".*'calendar' is not a permitted attribute.*"))
)
raw_cubes = iris.load(os.path.join(directory, "*.nc"))
raw_cubes = iris.cube.CubeList(
[
cube
for cube in raw_cubes
if "vegetation optical depth" in cube.name().lower()
]
)
raw_cubes = raw_cubes.concatenate()
assert len(raw_cubes) == 1
raw_cube = raw_cubes[0]
iris.coord_categorisation.add_month_number(raw_cube, "time")
iris.coord_categorisation.add_year(raw_cube, "time")
return raw_cube.aggregated_by(["month_number", "year"], iris.analysis.MEAN)
class WWLLN(MonthlyDataset):
_pretty = "WWLLN Lightning"
pretty_variable_names = {
"frequency of lightning flashes per unit area": "lightning"
}
def __init__(self):
self.dir = os.path.join(DATA_DIR, "WWLLN")
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=((r".*'lightning' invalid units 'strokes km-2'.*"))
)
raw_cube = iris.load_cube(os.path.join(self.dir, "WWLLN_monthly.nc"))
raw_cube.units = cf_units.Unit("1/km2")
self.cubes = iris.cube.CubeList([raw_cube])
def get_implemented_datasets(
pretty_dataset_names=None, pretty_variable_names=None, ignore_names=IGNORED_DATASETS
):
"""Get all valid datasets defined in the `wildfires.data.datasets` module.
    Args:
        pretty_dataset_names (dict): Dictionary mapping raw to pretty dataset names
            ({raw: pretty, ...}).
        pretty_variable_names (dict): Dictionary mapping raw to pretty variable names
            ({raw: pretty, ...}).
        ignore_names (iterable of str): Names of datasets to exclude.
Returns:
list of `Dataset`: List of datasets.
"""
# TODO: Implement pretty dataset and variable names.
if pretty_dataset_names is None:
pretty_dataset_names = {}
if pretty_variable_names is None:
pretty_variable_names = {}
if ignore_names is None:
ignore_names = []
dataset_list = []
for dataset_cls in Dataset:
if str(dataset_cls) in ignore_names:
logger.debug(f"Ignoring {dataset_cls}.")
continue
try:
dataset_list.append(dataset_cls())
except NotImplementedError:
logger.info(f"{dataset_cls} is not implemented.")
return dataset_list
def dataset_times(
datasets=None, dataset_names=None, lat_lon=False, bounds_from_monthly=True
):
"""Compile dataset time span information.
Args:
datasets (iterable of `Dataset`): If no value is given, defaults to using all
available datasets.
dataset_names (iterable of `str` or None): The names used for the datasets,
the number of which should match the number of items in `datasets`. If
None, use the `dataset.pretty` name for each `Dataset` in `datasets`.
lat_lon (bool): Collect lat/lon grid information.
bounds_from_monthly (bool): Only use monthly datasets to set the minimum and
maximum temporal bounds.
Returns:
If valid starting and end times can be found for at least one of the datasets:
- min_time: Minimum shared time of all datasets.
- max_time: Maximum shared time of all datasets.
- times_df: Pandas DataFrame encapsulating the timespan information.
Otherwise the 3-tuple (None, None, None) will be returned.
"""
if datasets is None:
datasets = get_implemented_datasets()
if dataset_names is None:
dataset_names = tuple(dataset.pretty for dataset in datasets)
attributes = ["min_time", "max_time", "frequency"]
if lat_lon:
attributes.extend(("lat_grid", "lon_grid"))
time_dict = OrderedDict(
(name, list(map(str, (getattr(dataset, attr) for attr in attributes))))
for dataset, name in zip(datasets, dataset_names)
)
min_times, max_times = [], []
for dataset in datasets:
if dataset.frequency in ("static", "yearly", "climatology"):
continue
dataset_times = tuple(
getattr(dataset, time_type) for time_type in ("min_time", "max_time")
)
# If there are any undefined dates they will be represented by strings and
# should be skipped here.
if any(isinstance(dataset_time, str) for dataset_time in dataset_times):
assert all(
isinstance(dataset_time, str) for dataset_time in dataset_times
), (
"If there is no valid start date, there should not be a valid "
"end date and vice versa (Dataset={}).".format(dataset)
)
continue
assert (
dataset_times[0] < dataset_times[1]
), "Maximum date should be after the minimum date (Dataset={}).".format(dataset)
min_times.append(dataset_times[0])
max_times.append(dataset_times[1])
if not min_times and not max_times:
logger.debug("No valid start or end times found.")
return None, None, None
# This timespan will encompass all the datasets.
min_time = np.max(min_times)
max_time = np.min(max_times)
overall_placeholders = ["N/A"]
if lat_lon:
overall_placeholders.extend(("N/A", "N/A"))
time_dict["Overall"] = list(map(str, (min_time, max_time, *overall_placeholders)))
dataset_names = pd.Series(list(time_dict.keys()), name="Dataset")
df_names = ["Minimum", "Maximum", "Frequency"]
if lat_lon:
df_names.extend(("Latitude Grid", "Longitude Grid"))
df_series = [dataset_names]
df_series.extend(
[
pd.Series([time_dict[name][i] for name in dataset_names], name=df_name)
for i, df_name in enumerate(df_names)
]
)
times_df = pd.DataFrame(df_series).T
if min_time >= max_time:
limited_df = times_df[:-1]
min_mask = limited_df["Minimum"].values.astype("str") == str(min_time)
max_mask = limited_df["Maximum"].values.astype("str") == str(max_time)
raise ValueError(
"Maximum date should be after the minimum date. This suggests the datasets "
"are improperly selected. Offending datasets:\n{}".format(
limited_df.loc[min_mask | max_mask].to_string(index=False)
)
)
return min_time, max_time, times_df
def regions_GFED():
"""Return cube describing the geographic regions used in GFED."""
regions = dummy_lat_lon_cube(
h5py.File(
os.path.join(DATA_DIR, "gfed4", "data", "GFED4.1s_1997.hdf5"), mode="r"
)["ancill"]["basis_regions"][:][::-1]
)
regions.long_name = "Basis-regions used for GFED analyses"
regions.attributes["regions"] = {
0: "Ocean",
1: "BONA (Boreal North America)",
2: "TENA (Temperate North America)",
3: "CEAM (Central America)",
4: "NHSA (Northern Hemisphere South America)",
5: "SHSA (Southern Hemisphere South America)",
6: "EURO (Europe)",
7: "MIDE (Middle East)",
8: "NHAF (Northern Hemisphere Africa)",
9: "SHAF (Southern Hemisphere Africa)",
10: "BOAS (Boreal Asia)",
11: "CEAS (Central Asia)",
12: "SEAS (Southeast Asia)",
13: "EQAS (Equatorial Asia)",
14: "AUST (Australia and New Zealand)",
}
regions.attributes["short_regions"] = {
0: "Ocean",
1: "BONA",
2: "TENA",
3: "CEAM",
4: "NHSA",
5: "SHSA",
6: "EURO",
7: "MIDE",
8: "NHAF",
9: "SHAF",
10: "BOAS",
11: "CEAS",
12: "SEAS",
13: "EQAS",
14: "AUST",
}
# Invert the previous mapping to facilitate masking later.
regions.attributes["region_codes"] = {
code: index for index, code in regions.attributes["short_regions"].items()
}
return regions
# Automatically export all Dataset subclass leaves defining individual datasets.
__all__ = list(set(__all__).union(set(map(str, Dataset))))
| [] | [] | ["EPHEMERAL"] | [] | ["EPHEMERAL"] | python | 1 | 0 |
client.go | package nri
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"sync"
"github.com/containerd/containerd"
"github.com/containerd/containerd/oci"
types "github.com/containerd/nri/types/v1"
"github.com/pkg/errors"
)
const (
// DefaultBinaryPath for nri plugins
DefaultBinaryPath = "/opt/nri/bin"
// DefaultConfPath for the global nri configuration
DefaultConfPath = "/etc/nri/conf.json"
// Version of NRI
Version = "0.1"
)
var appendPathOnce sync.Once
// New nri client
func New() (*Client, error) {
conf, err := loadConfig(DefaultConfPath)
if err != nil {
return nil, err
}
appendPathOnce.Do(func() {
err = os.Setenv("PATH", fmt.Sprintf("%s:%s", os.Getenv("PATH"), DefaultBinaryPath))
})
if err != nil {
return nil, err
}
return &Client{
conf: conf,
}, nil
}
// Client for calling nri plugins
type Client struct {
conf *types.ConfigList
}
// Sandbox information
type Sandbox struct {
// ID of the sandbox
ID string
// Labels of the sandbox
Labels map[string]string
}
// Invoke the ConfList of nri plugins
func (c *Client) Invoke(ctx context.Context, task containerd.Task, state types.State) ([]*types.Result, error) {
return c.InvokeWithSandbox(ctx, task, state, nil)
}
// InvokeWithSandbox invokes the ConfList of nri plugins, additionally passing sandbox information when provided
func (c *Client) InvokeWithSandbox(ctx context.Context, task containerd.Task, state types.State, sandbox *Sandbox) ([]*types.Result, error) {
if len(c.conf.Plugins) == 0 {
return nil, nil
}
spec, err := task.Spec(ctx)
if err != nil {
return nil, err
}
rs, err := createSpec(spec)
if err != nil {
return nil, err
}
r := &types.Request{
Version: c.conf.Version,
ID: task.ID(),
Pid: int(task.Pid()),
State: state,
Spec: rs,
}
if sandbox != nil {
r.SandboxID = sandbox.ID
r.Labels = sandbox.Labels
}
for _, p := range c.conf.Plugins {
r.Conf = p.Conf
result, err := c.invokePlugin(ctx, p.Type, r)
if err != nil {
return nil, errors.Wrapf(err, "plugin: %s", p.Type)
}
r.Results = append(r.Results, result)
}
return r.Results, nil
}
func (c *Client) invokePlugin(ctx context.Context, name string, r *types.Request) (*types.Result, error) {
payload, err := json.Marshal(r)
if err != nil {
return nil, err
}
cmd := exec.CommandContext(ctx, name, "invoke")
cmd.Stdin = bytes.NewBuffer(payload)
cmdout := bytes.Buffer{}
cmderr := bytes.Buffer{}
cmd.Stdout = &cmdout
cmd.Stderr = &cmderr
err = cmd.Run()
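	// Capture the plugin's stdout/stderr in a diagnostic string; it is only
	// surfaced as part of the error messages built below.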
msg := "plugin exec details:"
if cmdout.Len() > 0 {
msg = fmt.Sprintf("%s plugin output: \"%s\"", msg, cmdout.Bytes())
} else {
msg = fmt.Sprintf("%s plugin output: <empty>", msg)
}
if cmderr.Len() > 0 {
msg = fmt.Sprintf("%s plugin error: \"%s\"", msg, cmderr.Bytes())
} else {
msg = fmt.Sprintf("%s plugin error: <empty>", msg)
}
if err != nil {
// ExitError is returned when there is a non-zero exit status
if exitErr, ok := err.(*exec.ExitError); ok {
msg = fmt.Sprintf("%s plugin exit code: %d", msg, exitErr.ExitCode())
var pe types.PluginError
if err := json.Unmarshal(cmdout.Bytes(), &pe); err != nil {
return nil, errors.Errorf("failed to unmarshal plugin error: %s: %s", err.Error(), msg)
}
return nil, &pe
}
return nil, err
}
var result types.Result
if err := json.Unmarshal(cmdout.Bytes(), &result); err != nil {
return nil, errors.Errorf("failed to unmarshal plugin result: %s: %s", err.Error(), msg)
}
return &result, nil
}
func loadConfig(path string) (*types.ConfigList, error) {
f, err := os.Open(path)
if err != nil {
// if we don't have a config list on disk, create a new one for use
if os.IsNotExist(err) {
return &types.ConfigList{
Version: Version,
}, nil
}
return nil, err
}
var c types.ConfigList
err = json.NewDecoder(f).Decode(&c)
f.Close()
if err != nil {
return nil, err
}
return &c, nil
}
func createSpec(spec *oci.Spec) (*types.Spec, error) {
s := types.Spec{
Namespaces: make(map[string]string),
Annotations: spec.Annotations,
}
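	// Only one platform-specific resources section is marshalled, depending on
	// whether the spec describes a Linux or a Windows container.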
switch {
case spec.Linux != nil:
s.CgroupsPath = spec.Linux.CgroupsPath
data, err := json.Marshal(spec.Linux.Resources)
if err != nil {
return nil, err
}
s.Resources = json.RawMessage(data)
for _, ns := range spec.Linux.Namespaces {
s.Namespaces[string(ns.Type)] = ns.Path
}
case spec.Windows != nil:
data, err := json.Marshal(spec.Windows.Resources)
if err != nil {
return nil, err
}
s.Resources = json.RawMessage(data)
}
return &s, nil
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
tools/linter/clang_tidy/generate_build_files.py | import subprocess
import sys
import os
from typing import List
def run_cmd(cmd: List[str]) -> None:
print(f"Running: {cmd}")
result = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = (
result.stdout.decode("utf-8").strip(),
result.stderr.decode("utf-8").strip(),
)
print(stdout)
print(stderr)
if result.returncode != 0:
print(f"Failed to run {cmd}")
exit(1)
def run_timed_cmd(cmd: List[str]) -> None:
run_cmd(["time"] + cmd)
def update_submodules() -> None:
run_cmd(["git", "submodule", "update", "--init", "--recursive"])
def gen_compile_commands() -> None:
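    # Configure the CMake-only build: disable NCCL, enable the deploy build, and
    # force clang/clang++ as the compilers before generating build files.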
os.environ["USE_NCCL"] = "0"
os.environ["USE_DEPLOY"] = "1"
os.environ["CC"] = "clang"
os.environ["CXX"] = "clang++"
run_timed_cmd([sys.executable, "setup.py", "--cmake-only", "build"])
def run_autogen() -> None:
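    # Run the code generators (torchgen and generate_code.py) to produce the
    # generated sources that the build files reference.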
run_timed_cmd(
[
sys.executable,
"-m",
"torchgen.gen",
"-s",
"aten/src/ATen",
"-d",
"build/aten/src/ATen",
"--per-operator-headers",
]
)
run_timed_cmd(
[
sys.executable,
"tools/setup_helpers/generate_code.py",
"--native-functions-path",
"aten/src/ATen/native/native_functions.yaml",
"--tags-path",
"aten/src/ATen/native/tags.yaml",
"--gen_lazy_ts_backend",
]
)
def generate_build_files() -> None:
update_submodules()
gen_compile_commands()
run_autogen()
if __name__ == "__main__":
generate_build_files()
| [] | [] | ["USE_NCCL", "CC", "USE_DEPLOY", "CXX"] | [] | ["USE_NCCL", "CC", "USE_DEPLOY", "CXX"] | python | 4 | 0 | |
final_project/machinetranslation/machinetranslation/translator.py | """Translator from / to English and French"""
#import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
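# Service credentials are read from the environment; load_dotenv() above allows
# them to be supplied via a local .env file.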
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(request):
"""English to French"""
    response = language_translator.translate(text=request, model_id='en-fr').get_result()
translation = response['translations'][0]['translation']
return translation
def french_to_english(request):
    """French to English"""
    response = language_translator.translate(text=request, model_id='fr-en').get_result()
translation = response['translations'][0]['translation']
return translation
| [] | [] | ["url", "apikey"] | [] | ["url", "apikey"] | python | 2 | 0 | |
Use_Cases/VPS_Popcorn_Production/Kubernetes/src/L3_PC_Evaluation.py | import os
from time import sleep
import traceback
import pandas as pd
from Use_Cases.VPS_Popcorn_Production.Kubernetes.src.classes.CognitionPC import CognitionPC
pd.set_option("display.max_columns", None)
pd.options.display.float_format = "{:.3f}".format
debugging = False
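# When debugging locally, use a hard-coded configuration; otherwise read the
# config path and config sections from the environment (e.g. set by the deployment).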
if debugging is True:
env_vars = {
"config_path": "./Use_Cases/VPS_Popcorn_Production/Kubernetes/src/configurations/config_local.yml",
"config_section": "General, Initial_Design, Objective_Function, 3_pc_evaluation"
}
else:
env_vars = {
"config_path": os.getenv("config_path"),
"config_section": os.getenv("config_section"),
}
new_cog = CognitionPC(**env_vars)
sleep(3)
print(
f"Creating initial design of the system by applying {new_cog.N_INITIAL_DESIGN} equally distributed\n"
f"values x over the whole working area of the CPPS."
f"\nSend x={new_cog.X[new_cog.nr_of_iterations]} to Adaption."
)
"""
"name": "New X",
"fields": [
{"name": "algorithm", "type": ["string"]},
{"name": "new_x", "type": ["float"]}
]
"""
# QUESTION continue sending initial design?
new_cog.send_point_from_initial_design()
new_cog.nr_of_iterations += 1
try:
while True:
msg = new_cog.consumer.poll(0.1)
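        # Poll the consumer for the next message; a None result simply means
        # the poll timed out without receiving anything.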
if msg is None:
continue
elif msg.error() is not None:
print(f"Error occured: {str(msg.error())}")
else:
try:
topic = msg.topic()
except Exception as e:
print(f"Error retrieving topic: {repr(e)}")
try:
new_message = new_cog.decode_msg(msg)
# print(f"Received on topic '{msg.topic()}': {new_message}")
except Exception as e:
                print(f"Error decoding message received on topic {msg.topic()}")
print(f"Error: {repr(e)}")
try:
new_cog.func_dict[msg.topic()](msg)
except Exception as e:
print(
f"Error accessing the function for topic {msg.topic()}: {repr(e)}")
print(traceback.format_exc())
except KeyboardInterrupt:
pass
finally:
new_cog.consumer.close()
| [] | [] | ["config_path", "config_section"] | [] | ["config_path", "config_section"] | python | 2 | 0 | |
tests/integration/loginlogout/cmd_login_logout_test.go | package integration
import (
"os"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/redhat-developer/odo/tests/helper"
)
var _ = Describe("odo login and logout command tests", func() {
// user related constants
const loginTestUserForNoProject = "odologinnoproject"
const loginTestUserPassword = "password@123"
var session1 string
var testUserToken string
var oc helper.OcRunner
var currentUserToken string
BeforeEach(func() {
SetDefaultEventuallyTimeout(10 * time.Minute)
SetDefaultConsistentlyDuration(30 * time.Second)
oc = helper.NewOcRunner("oc")
})
Context("when running help for login command", func() {
It("should display the help", func() {
appHelp := helper.Cmd("odo", "login", "-h").ShouldPass().Out()
Expect(appHelp).To(ContainSubstring("Login to cluster"))
})
})
Context("when running help for logout command", func() {
It("should display the help", func() {
appHelp := helper.Cmd("odo", "logout", "-h").ShouldPass().Out()
Expect(appHelp).To(ContainSubstring("Logout of the cluster"))
})
})
Context("when running login tests", func() {
It("should successful with correct credentials and fails with incorrect token", func() {
// skip if requested
skipLogin := os.Getenv("SKIP_USER_LOGIN_TESTS")
if skipLogin == "true" {
Skip("Skipping login command tests as SKIP_USER_LOGIN_TESTS is true")
}
// Current user login token
currentUserToken = oc.GetToken()
// Login successful without any projects with appropriate message
session1 = helper.Cmd("odo", "login", "-u", loginTestUserForNoProject, "-p", loginTestUserPassword).ShouldPass().Out()
Expect(session1).To(ContainSubstring("Login successful"))
Expect(session1).To(ContainSubstring("You don't have any projects. You can try to create a new project, by running"))
Expect(session1).To(ContainSubstring("odo project create <project-name>"))
session1 = oc.GetLoginUser()
Expect(session1).To(ContainSubstring(loginTestUserForNoProject))
// odologinnoproject user login token
testUserToken = oc.GetToken()
// Login successful with token without any projects with appropriate message
session1 = helper.Cmd("odo", "login", "-t", testUserToken).ShouldPass().Out()
Expect(session1).To(ContainSubstring("Logged into"))
Expect(session1).To(ContainSubstring("You don't have any projects. You can try to create a new project, by running"))
Expect(session1).To(ContainSubstring("odo project create <project-name>"))
session1 = oc.GetLoginUser()
Expect(session1).To(ContainSubstring(loginTestUserForNoProject))
// Login fails on invalid token with appropriate message
sessionErr := helper.Cmd("odo", "login", "-t", "verybadtoken").ShouldFail().Err()
Expect(sessionErr).To(ContainSubstring("The token provided is invalid or expired"))
			// logging back in as the current user
helper.Cmd("odo", "login", "--token", currentUserToken).ShouldPass()
})
})
})
| ["\"SKIP_USER_LOGIN_TESTS\""] | [] | ["SKIP_USER_LOGIN_TESTS"] | [] | ["SKIP_USER_LOGIN_TESTS"] | go | 1 | 0 | |
src/src/data_entry/admin_reg.java | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package data_entry;
import java.sql.*;
import java.util.*;
/**
*
* @author kanishk
*/
public class admin_reg extends javax.swing.JFrame {
/**
     * Creates new form admin_reg
*/
private static final String rootUsername="root";//System.getenv("SE_SQL_USER");
private static final String rootPassword="iit2017";//System.getenv("SE_SQL_PASS");
private static final String conn_string= "jdbc:mysql://localhost:3306/demodb";//System.getenv("SE_SQL_CONN");
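    // NOTE: the database credentials are currently hard-coded; the commented-out
    // System.getenv() calls show the intended environment-variable configuration.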
private Connection conn;
public admin_reg() {
this.setVisible(true);
this.setSize(600,400);
initComponents();
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jButton1 = new javax.swing.JButton();
jPanel1 = new javax.swing.JPanel();
jLabel2 = new javax.swing.JLabel();
jLabel3 = new javax.swing.JLabel();
jTextField2 = new javax.swing.JTextField();
jButton2 = new javax.swing.JButton();
jPasswordField1 = new javax.swing.JPasswordField();
jButton1.setText("jButton1");
setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
setBackground(new java.awt.Color(165, 55, 253));
jPanel1.setBackground(new java.awt.Color(1, 50, 67));
jPanel1.setBorder(javax.swing.BorderFactory.createTitledBorder(null, "WELCOME NEW ADMIN", javax.swing.border.TitledBorder.CENTER, javax.swing.border.TitledBorder.DEFAULT_POSITION, new java.awt.Font("Liberation Sans", 1, 14), new java.awt.Color(241, 90, 34))); // NOI18N
jLabel2.setFont(new java.awt.Font("Liberation Sans", 1, 12)); // NOI18N
jLabel2.setForeground(new java.awt.Color(241, 90, 34));
jLabel2.setText("Enter Username:");
jLabel3.setFont(new java.awt.Font("Liberation Sans", 1, 12)); // NOI18N
jLabel3.setForeground(new java.awt.Color(241, 90, 34));
jLabel3.setText("Enter Password:");
jButton2.setBackground(new java.awt.Color(241, 90, 34));
jButton2.setFont(new java.awt.Font("Liberation Sans", 1, 12)); // NOI18N
jButton2.setForeground(java.awt.Color.black);
jButton2.setText("Register");
jButton2.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jButton2ActionPerformed(evt);
}
});
javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(113, 113, 113)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel3, javax.swing.GroupLayout.PREFERRED_SIZE, 95, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel2))
.addGap(47, 47, 47)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jButton2, javax.swing.GroupLayout.PREFERRED_SIZE, 90, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jTextField2, javax.swing.GroupLayout.PREFERRED_SIZE, 135, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jPasswordField1, javax.swing.GroupLayout.PREFERRED_SIZE, 135, javax.swing.GroupLayout.PREFERRED_SIZE))
.addContainerGap(194, Short.MAX_VALUE))
);
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addContainerGap(82, Short.MAX_VALUE)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jTextField2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel2, javax.swing.GroupLayout.PREFERRED_SIZE, 34, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(18, 18, 18)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jPasswordField1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel3, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(54, 54, 54)
.addComponent(jButton2)
.addGap(41, 41, 41))
);
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jPanel1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jPanel1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
);
pack();
}// </editor-fold>//GEN-END:initComponents
private void jButton2ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jButton2ActionPerformed
        // Register the new admin using the username and password entered in the form.
String table="USERS";
String username=jTextField2.getText();
String pass=String.valueOf(jPasswordField1.getPassword());
Admin a=new Admin();
Integer success = a.add_new(table, username, pass);
if (success == 1){
javax.swing.JOptionPane.showMessageDialog(getContentPane(),"You Are Now Registered!");
}
else{
javax.swing.JOptionPane.showMessageDialog(getContentPane(),"An Admin For The System Already Exists!");
}
this.setVisible(false);
this.dispose();
new login();
}//GEN-LAST:event_jButton2ActionPerformed
/**
* @param args the command line arguments
*/
public static void main(String args[]) {
/* Set the Nimbus look and feel */
//<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
/* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.
* For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
*/
try {
for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
if ("Nimbus".equals(info.getName())) {
javax.swing.UIManager.setLookAndFeel(info.getClassName());
break;
}
}
        } catch (ClassNotFoundException ex) {
            java.util.logging.Logger.getLogger(admin_reg.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        } catch (InstantiationException ex) {
            java.util.logging.Logger.getLogger(admin_reg.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        } catch (IllegalAccessException ex) {
            java.util.logging.Logger.getLogger(admin_reg.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        } catch (javax.swing.UnsupportedLookAndFeelException ex) {
            java.util.logging.Logger.getLogger(admin_reg.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
        }
//</editor-fold>
/* Create and display the form */
java.awt.EventQueue.invokeLater(new Runnable() {
public void run() {
new admin_reg().setVisible(true);
}
});
}
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton jButton1;
private javax.swing.JButton jButton2;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JPanel jPanel1;
private javax.swing.JPasswordField jPasswordField1;
private javax.swing.JTextField jTextField2;
// End of variables declaration//GEN-END:variables
}
| ["\"SE_SQL_USER\"", "\"SE_SQL_PASS\"", "\"SE_SQL_CONN\""] | [] | ["SE_SQL_USER", "SE_SQL_CONN", "SE_SQL_PASS"] | [] | ["SE_SQL_USER", "SE_SQL_CONN", "SE_SQL_PASS"] | java | 3 | 0 | |
contrib/gitian-build.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
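    # Install host dependencies, clone the gitian/signature repositories and the
    # source tree, then build the gitian base image (KVM, LXC or Docker).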
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs.ltc'):
subprocess.check_call(['git', 'clone', 'https://github.com/cbreezycoin-project/gitian.sigs.ltc.git'])
if not os.path.isdir('cbreezycoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/cbreezycoin-project/cbreezycoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('cbreezycoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/cbreezycoin-project/cbreezycoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
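    # Build the unsigned binaries for the selected platforms inside gitian and
    # optionally commit the resulting assert signatures.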
global args, workdir
os.makedirs('cbreezycoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../cbreezycoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cbreezycoin='+args.commit, '--url', 'cbreezycoin='+args.url, '../cbreezycoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/cbreezycoin-*.tar.gz build/out/src/cbreezycoin-*.tar.gz ../cbreezycoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cbreezycoin='+args.commit, '--url', 'cbreezycoin='+args.url, '../cbreezycoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/cbreezycoin-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/cbreezycoin-*.zip build/out/cbreezycoin-*.exe ../cbreezycoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'cbreezycoin='+args.commit, '--url', 'cbreezycoin='+args.url, '../cbreezycoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/cbreezycoin-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/cbreezycoin-*.tar.gz build/out/cbreezycoin-*.dmg ../cbreezycoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs.ltc')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/cbreezycoin-' + args.version + '-win-unsigned.tar.gz inputs/cbreezycoin-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../cbreezycoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/cbreezycoin-*win64-setup.exe ../cbreezycoin-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/cbreezycoin-*win32-setup.exe ../cbreezycoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/cbreezycoin-' + args.version + '-osx-unsigned.tar.gz inputs/cbreezycoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../cbreezycoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs.ltc/', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/cbreezycoin-osx-signed.dmg ../cbreezycoin-binaries/'+args.version+'/cbreezycoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs.ltc')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-linux', '../cbreezycoin/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-win-unsigned', '../cbreezycoin/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-osx-unsigned', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-win-signed', '../cbreezycoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs.ltc/', '-r', args.version+'-osx-signed', '../cbreezycoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/cbreezycoin-project/cbreezycoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
# Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
if args.setup:
setup()
os.chdir('cbreezycoin')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/cbreezycoin')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
| [] | [] | ["USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP"] | [] | ["USE_DOCKER", "GITIAN_HOST_IP", "USE_LXC", "LXC_GUEST_IP"] | python | 4 | 0 | |
examples/suppression/GetAllGlobalSuppressions.java | import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sendgrid.*;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
//////////////////////////////////////////////////////////////////
// Retrieve all global suppressions
// GET /suppression/unsubscribes
public class GetAllGlobalSuppressions {
public static void main(String[] args) throws IOException {
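        // Authenticate with the API key from the SENDGRID_API_KEY environment variable,
        // then query the global unsubscribes endpoint with time-range and paging parameters.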
try {
SendGrid sg = new SendGrid(System.getenv("SENDGRID_API_KEY"));
Request request = new Request();
request.setMethod(Method.GET);
request.setEndpoint("suppression/unsubscribes");
request.addQueryParam("start_time", "1");
request.addQueryParam("limit", "1");
request.addQueryParam("end_time", "1");
request.addQueryParam("offset", "1");
Response response = sg.api(request);
System.out.println(response.getStatusCode());
System.out.println(response.getBody());
System.out.println(response.getHeaders());
} catch (IOException ex) {
throw ex;
}
}
} | ["\"SENDGRID_API_KEY\""] | [] | ["SENDGRID_API_KEY"] | [] | ["SENDGRID_API_KEY"] | java | 1 | 0 | |
pkg/cli/run_test.go | /*
Copyright (C) 2020 Accurics, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cli
import (
"io/ioutil"
"log"
"os"
"path/filepath"
"testing"
"github.com/accurics/terrascan/pkg/config"
"github.com/accurics/terrascan/pkg/iac-providers/output"
"github.com/accurics/terrascan/pkg/policy"
"github.com/accurics/terrascan/pkg/results"
"github.com/accurics/terrascan/pkg/runtime"
"github.com/accurics/terrascan/pkg/utils"
)
func TestMain(m *testing.M) {
setup()
code := m.Run()
shutdown()
os.Exit(code)
}
func setup() {
// set default config values before policy download
config.LoadGlobalConfig("")
// to download the policies for Run test
// downloads the policies at $HOME/.terrascan
initial(nil, nil, false)
}
func shutdown() {
// remove the downloaded policies
os.RemoveAll(config.GetPolicyBasePath())
// cleanup the loaded config values
}
var testDataDir = "testdata"
var runTestDir = filepath.Join(testDataDir, "run-test")
func TestRun(t *testing.T) {
// disable terraform logs when TF_LOG env variable is not set
if os.Getenv("TF_LOG") == "" {
log.SetOutput(ioutil.Discard)
}
kustomizeTestDirPath := filepath.Join(runTestDir, "kustomize-test")
testTerraformFilePath := filepath.Join(runTestDir, "config-only.tf")
testRemoteModuleFilePath := filepath.Join(runTestDir, "remote-modules.tf")
testTFJSONFilePath := filepath.Join(runTestDir, "tf-plan.json")
ruleSlice := []string{"AWS.ECR.DataSecurity.High.0579", "AWS.SecurityGroup.NetworkPortsSecurity.Low.0561"}
table := []struct {
name string
configFile string
scanOptions *ScanOptions
stdOut string
want string
wantErr bool
}{
{
name: "normal terraform run",
scanOptions: &ScanOptions{
// policy type terraform is not supported, error expected
policyType: []string{"terraform"},
iacDirPath: runTestDir,
},
wantErr: true,
},
{
name: "normal terraform run with successful output",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacDirPath: runTestDir,
outputType: "json",
},
},
{
name: "terraform run with --non-recursive flag",
scanOptions: &ScanOptions{
iacType: "terraform",
policyType: []string{"all"},
iacDirPath: testDataDir,
outputType: "json",
nonRecursive: true,
},
wantErr: true,
},
{
name: "normal k8s run",
scanOptions: &ScanOptions{
policyType: []string{"k8s"},
// kustomization.y(a)ml file not present under the dir path, error expected
iacDirPath: runTestDir,
},
wantErr: true,
},
{
name: "normal k8s run with successful output",
scanOptions: &ScanOptions{
policyType: []string{"k8s"},
iacDirPath: kustomizeTestDirPath,
outputType: "human",
},
},
{
name: "normal k8s run with successful output for junit-xml with passed tests",
scanOptions: &ScanOptions{
policyType: []string{"k8s"},
iacDirPath: kustomizeTestDirPath,
outputType: "junit-xml",
showPassedRules: true,
},
},
{
name: "config-only flag terraform",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacFilePath: testTerraformFilePath,
configOnly: true,
outputType: "yaml",
},
},
{
// test for https://github.com/accurics/terrascan/issues/718
// a valid tfplan file is supplied, error is not expected
name: "iac type is tfplan and -f option used to specify the tfplan.json",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacType: "tfplan",
iacFilePath: testTFJSONFilePath,
outputType: "yaml",
},
wantErr: false,
},
{
name: "config-only flag k8s",
scanOptions: &ScanOptions{
policyType: []string{"k8s"},
iacDirPath: kustomizeTestDirPath,
configOnly: true,
outputType: "json",
},
},
{
// xml doesn't support config-only, error expected
// modify the test results when xml supports config-only
name: "config-only flag true with xml output format",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacFilePath: testTerraformFilePath,
configOnly: true,
outputType: "xml",
},
wantErr: true,
},
{
name: "fail to download remote repository",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacFilePath: testTerraformFilePath,
remoteURL: "test",
remoteType: "test",
},
wantErr: true,
},
{
name: "incorrect config file",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacDirPath: testTerraformFilePath,
outputType: "json",
configFile: "invalidFile",
},
wantErr: true,
},
{
name: "run with skip rules",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacDirPath: runTestDir,
outputType: "json",
skipRules: ruleSlice,
},
},
{
name: "run with scan rules",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacDirPath: runTestDir,
outputType: "yaml",
scanRules: ruleSlice,
},
},
{
name: "config file with rules",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacDirPath: runTestDir,
outputType: "yaml",
configFile: filepath.Join(testDataDir, "configFile.toml"),
},
},
{
name: "scan file with remote module",
scanOptions: &ScanOptions{
policyType: []string{"all"},
iacFilePath: testRemoteModuleFilePath,
outputType: "human",
configFile: filepath.Join(testDataDir, "configFile.toml"),
},
},
{
name: "invalid remote type",
scanOptions: &ScanOptions{
policyType: []string{"all"},
remoteType: "test",
remoteURL: "test",
outputType: "human",
},
wantErr: true,
},
{
name: "valid remote type with invalid remote url",
scanOptions: &ScanOptions{
policyType: []string{"all"},
remoteType: "terraform-registry",
remoteURL: "terraform-aws-modules/eks",
outputType: "human",
},
wantErr: true,
},
}
for _, tt := range table {
t.Run(tt.name, func(t *testing.T) {
config.LoadGlobalConfig(tt.scanOptions.configFile)
err := tt.scanOptions.Run()
if (err != nil) != tt.wantErr {
t.Errorf("ScanOptions.Run() error = %v, wantErr %v", err, tt.wantErr)
return
}
})
}
}
func TestScanOptionsDownloadRemoteRepository(t *testing.T) {
testTempdir := filepath.Join(os.TempDir(), utils.GenRandomString(6))
defer os.RemoveAll(testTempdir)
type fields struct {
RemoteType string
RemoteURL string
}
tests := []struct {
name string
fields fields
tempDir string
want string
wantErr bool
}{
{
name: "blank input parameters",
fields: fields{
RemoteType: "",
RemoteURL: "",
},
tempDir: "",
},
{
name: "invalid input parameters",
fields: fields{
RemoteType: "test",
RemoteURL: "test",
},
tempDir: "test",
wantErr: true,
},
{
name: "valid input parameters",
fields: fields{
RemoteType: "git",
RemoteURL: "github.com/accurics/terrascan",
},
tempDir: testTempdir,
want: testTempdir,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ScanOptions{
remoteType: tt.fields.RemoteType,
remoteURL: tt.fields.RemoteURL,
}
err := s.downloadRemoteRepository(tt.tempDir)
if (err != nil) != tt.wantErr {
t.Errorf("ScanOptions.downloadRemoteRepository() error = %v, wantErr %v", err, tt.wantErr)
return
}
if s.iacDirPath != tt.want {
t.Errorf("ScanOptions.downloadRemoteRepository() = %v, want %v", s.iacDirPath, tt.want)
}
})
}
}
func TestScanOptionsWriteResults(t *testing.T) {
testInput := runtime.Output{
ResourceConfig: output.AllResourceConfigs{},
Violations: policy.EngineOutput{
ViolationStore: &results.ViolationStore{},
},
}
type fields struct {
ConfigOnly bool
OutputType string
}
tests := []struct {
name string
fields fields
args runtime.Output
wantErr bool
}{
{
name: "config only true",
fields: fields{
ConfigOnly: true,
OutputType: "yaml",
},
args: testInput,
},
{
name: "config only false",
fields: fields{
ConfigOnly: false,
OutputType: "json",
},
args: testInput,
},
{
// until we support config only flag for xml, this test case is for expected failure
name: "config only true for xml",
fields: fields{
ConfigOnly: true,
OutputType: "xml",
},
args: testInput,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ScanOptions{
configOnly: tt.fields.ConfigOnly,
outputType: tt.fields.OutputType,
}
if err := s.writeResults(tt.args); (err != nil) != tt.wantErr {
t.Errorf("ScanOptions.writeResults() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestScanOptionsValidate(t *testing.T) {
type fields struct {
configOnly bool
outputType string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "validate --config-only with human readable output",
fields: fields{
configOnly: true,
outputType: "human",
},
wantErr: true,
},
{
name: "validate --config-only with non human readable output",
fields: fields{
configOnly: true,
outputType: "json",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := ScanOptions{
configOnly: tt.fields.configOnly,
outputType: tt.fields.outputType,
}
if err := s.validate(); (err != nil) != tt.wantErr {
t.Errorf("ScanOptions.validate() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestScanOptionsInitColor(t *testing.T) {
type fields struct {
useColors string
}
tests := []struct {
name string
fields fields
want bool
}{
{
name: "test for auto as input",
fields: fields{
useColors: "auto",
},
},
{
name: "test for true as input",
fields: fields{
useColors: "true",
},
want: true,
},
{
name: "test for 1 as input",
fields: fields{
useColors: "1",
},
want: true,
},
{
name: "test for false as input",
fields: fields{
useColors: "false",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &ScanOptions{
useColors: tt.fields.useColors,
}
s.initColor()
if s.useColors != "auto" {
if s.UseColors != tt.want {
t.Errorf("ScanOptions.initColor() incorrect value for UseColors, got: %v, want %v", s.useColors, tt.want)
}
}
})
}
}
func TestScanOptionsInit(t *testing.T) {
type fields struct {
configOnly bool
outputType string
useColors string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "test for init fail",
fields: fields{
useColors: "auto",
outputType: "human",
configOnly: true,
},
wantErr: true,
},
{
name: "test for init success",
fields: fields{
useColors: "auto",
outputType: "human",
configOnly: false,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &ScanOptions{
configOnly: tt.fields.configOnly,
outputType: tt.fields.outputType,
useColors: tt.fields.useColors,
}
if err := s.Init(); (err != nil) != tt.wantErr {
t.Errorf("ScanOptions.Init() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestScanOptionsScan(t *testing.T) {
type fields struct {
policyType []string
iacDirPath string
configOnly bool
outputType string
}
tests := []struct {
name string
fields fields
wantErr bool
}{
{
name: "failure in init",
fields: fields{
configOnly: true,
outputType: "human",
},
wantErr: true,
},
{
name: "failure in run",
fields: fields{
policyType: []string{"terraform"},
iacDirPath: runTestDir,
},
wantErr: true,
},
{
name: "successful scan",
fields: fields{
policyType: []string{"all"},
iacDirPath: runTestDir,
outputType: "json",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &ScanOptions{
policyType: tt.fields.policyType,
iacDirPath: tt.fields.iacDirPath,
configOnly: tt.fields.configOnly,
outputType: tt.fields.outputType,
}
if err := s.Scan(); (err != nil) != tt.wantErr {
t.Errorf("ScanOptions.Scan() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
| ["\"TF_LOG\""] | [] | ["TF_LOG"] | [] | ["TF_LOG"] | go | 1 | 0 | |
cmd/abapAddonAssemblyKitCheckCVs_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperenv"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapAddonAssemblyKitCheckCVsOptions struct {
AbapAddonAssemblyKitEndpoint string `json:"abapAddonAssemblyKitEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
AddonDescriptorFileName string `json:"addonDescriptorFileName,omitempty"`
AddonDescriptor string `json:"addonDescriptor,omitempty"`
}
type abapAddonAssemblyKitCheckCVsCommonPipelineEnvironment struct {
abap struct {
addonDescriptor string
}
}
func (p *abapAddonAssemblyKitCheckCVsCommonPipelineEnvironment) persist(path, resourceName string) {
content := []struct {
category string
name string
value interface{}
}{
{category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor},
}
errCount := 0
for _, param := range content {
err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value)
if err != nil {
log.Entry().WithError(err).Error("Error persisting piper environment.")
errCount++
}
}
if errCount > 0 {
log.Entry().Fatal("failed to persist Piper environment")
}
}
// AbapAddonAssemblyKitCheckCVsCommand This step checks the validity of ABAP Software Component Versions.
func AbapAddonAssemblyKitCheckCVsCommand() *cobra.Command {
const STEP_NAME = "abapAddonAssemblyKitCheckCVs"
metadata := abapAddonAssemblyKitCheckCVsMetadata()
var stepConfig abapAddonAssemblyKitCheckCVsOptions
var startTime time.Time
var commonPipelineEnvironment abapAddonAssemblyKitCheckCVsCommonPipelineEnvironment
var logCollector *log.CollectorHook
var createAbapAddonAssemblyKitCheckCVsCmd = &cobra.Command{
Use: STEP_NAME,
Short: "This step checks the validity of ABAP Software Component Versions.",
		Long: `This step takes the list of ABAP Software Component Versions (repositories) from the addonDescriptor configuration file specified via addonDescriptorFileName (e.g. addon.yml) and checks by calling AAKaaS whether they exist or are a valid successor of an existing Software Component Version.
It resolves the dotted version string into version, support package level and patch level and writes it to the addonDescriptor structure in the Piper commonPipelineEnvironment for usage of subsequent pipeline steps.
<br />
For Terminology refer to the [Scenario Description](https://www.project-piper.io/scenarios/abapEnvironmentAddons/).`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment")
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
abapAddonAssemblyKitCheckCVs(stepConfig, &telemetryData, &commonPipelineEnvironment)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapAddonAssemblyKitCheckCVsFlags(createAbapAddonAssemblyKitCheckCVsCmd, &stepConfig)
return createAbapAddonAssemblyKitCheckCVsCmd
}
func addAbapAddonAssemblyKitCheckCVsFlags(cmd *cobra.Command, stepConfig *abapAddonAssemblyKitCheckCVsOptions) {
cmd.Flags().StringVar(&stepConfig.AbapAddonAssemblyKitEndpoint, "abapAddonAssemblyKitEndpoint", `https://apps.support.sap.com`, "Base URL to the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for the Addon Assembly Kit as a Service (AAKaaS) system")
cmd.Flags().StringVar(&stepConfig.AddonDescriptorFileName, "addonDescriptorFileName", `addon.yml`, "File name of the YAML file which describes the Product Version and corresponding Software Component Versions")
cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "Structure in the commonPipelineEnvironment containing information about the Product Version and corresponding Software Component Versions")
cmd.MarkFlagRequired("abapAddonAssemblyKitEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("addonDescriptorFileName")
}
// retrieve step metadata
func abapAddonAssemblyKitCheckCVsMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapAddonAssemblyKitCheckCVs",
Aliases: []config.Alias{},
Description: "This step checks the validity of ABAP Software Component Versions.",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "abapAddonAssemblyKitCredentialsId", Description: "CredentialsId stored in Jenkins for the Addon Assembly Kit as a Service (AAKaaS) system", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "abapAddonAssemblyKitEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `https://apps.support.sap.com`,
},
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "addonDescriptorFileName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: `addon.yml`,
},
{
Name: "addonDescriptor",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "abap/addonDescriptor",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_addonDescriptor"),
},
},
},
Outputs: config.StepOutputs{
Resources: []config.StepResources{
{
Name: "commonPipelineEnvironment",
Type: "piperEnvironment",
Parameters: []map[string]interface{}{
{"Name": "abap/addonDescriptor"},
},
},
},
},
},
}
return theMetaData
}
| ["\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_addonDescriptor\"", "\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_addonDescriptor\""] | [] | ["PIPER_addonDescriptor", "PIPER_password", "PIPER_username"] | [] | ["PIPER_addonDescriptor", "PIPER_password", "PIPER_username"] | go | 3 | 0 | |
lambda_function.py | import base64
import json
import os
from io import BytesIO
import boto3
from PIL import Image
MAX_SIZE = (1080, 810)
BUCKET_NAME = os.getenv("BUCKET_NAME", "TODO")
AWS_REGION = os.getenv("AWS_REGION", "ap-northeast-2")
s3_client = boto3.client('s3', region_name=AWS_REGION)
def _post_to_s3(byte_string, size, path, name, extension):
img = Image.open(BytesIO(byte_string))
if img.mode != 'RGB':
img = img.convert('RGB')
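    # NOTE: Image.ANTIALIAS is an alias of Image.LANCZOS and was removed in
    # Pillow 10; newer Pillow releases require Image.LANCZOS here.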
img.thumbnail(size, Image.ANTIALIAS)
output = BytesIO()
img.save(output, format=extension, quality=100)
output.seek(0)
key = f'{path}/{name}.{extension}'
s3_client.put_object(Bucket=BUCKET_NAME, Key=key, Body=output)
def lambda_handler(payloads, context):
"""
Create compressed image from origin && save to S3
:param payloads: dict
{
"image": utf-8 encoded byte string,
"path": string, path of target image,
"name": string, name of image
}
:return: void
"""
_image = base64.b64decode(bytes(payloads.get('image'), encoding='utf8'))
_path = payloads.get('path')
_name = payloads.get('name')
_origin_img_name, _extension = _name.rsplit('.', 1)
_post_to_s3(_image, MAX_SIZE, _path, _origin_img_name, _extension)
for prefix, size in ('xs', (100, 100)), ('sm', (200, 200)), ('md', (400, 400)), ('lg', (800, 800)):
_name = f'{prefix}_{_origin_img_name}'
_post_to_s3(_image, size, _path, _name, _extension)
| [] | [] | ["AWS_REGION", "BUCKET_NAME"] | [] | ["AWS_REGION", "BUCKET_NAME"] | python | 2 | 0 | |
openslides_backend/main.py | import logging
import multiprocessing
import os
import signal
import sys
import time
from typing import Any
from datastore.reader.app import register_services
from gunicorn.app.base import BaseApplication
from .shared.env import is_dev_mode
from .shared.interfaces.logging import LoggingModule
from .shared.interfaces.wsgi import WSGIApplication
register_services()
# ATTENTION: We use the Python builtin logging module. To change this use
# something like "import custom_logging as logging".
DEFAULT_ADDRESSES = {
"ActionView": "0.0.0.0:9002",
"PresenterView": "0.0.0.0:9003",
}
class OpenSlidesBackendGunicornApplication(BaseApplication): # pragma: no cover
"""
Standalone application class for Gunicorn. It prepares Gunicorn for using
OpenSlidesBackendWSGIApplication via OpenSlidesBackendWSGIContainer either
with action component or with presenter component.
"""
def __init__(self, view_name: str, *args: Any, **kwargs: Any) -> None:
# Setup global loglevel.
if is_dev_mode():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
self.view_name = view_name
if self.view_name not in ("ActionView", "PresenterView"):
raise ValueError(
f"View name has to be ActionView or PresenterView, not {self.view_name}."
)
logger.debug(f"Create gunicorn application for {self.view_name}.")
super().__init__(*args, **kwargs)
def load_config(self) -> None:
dev_mode = is_dev_mode()
options = {
"bind": DEFAULT_ADDRESSES[self.view_name],
"worker_tmp_dir": "/dev/shm", # See https://pythonspeed.com/articles/gunicorn-in-docker/
"timeout": int(os.environ.get("OPENSLIDES_BACKEND_WORKER_TIMEOUT", "30")),
"loglevel": "debug" if dev_mode else "info",
"reload": dev_mode,
"reload_engine": "auto", # This is the default however.
}
for key, value in options.items():
self.cfg.set(key, value)
def load(self) -> WSGIApplication:
# We import this here so Gunicorn can use its reload feature properly.
from .wsgi import create_wsgi_application
# TODO: Fix this typing problem.
logging_module: LoggingModule = logging # type: ignore
return create_wsgi_application(logging_module, self.view_name)
def start_action_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="ActionView").run()
def start_presenter_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="PresenterView").run()
def start_them_all() -> None: # pragma: no cover
print(
f"Start all components in child processes. Parent process id is {os.getpid()}."
)
processes = {
"action": multiprocessing.Process(target=start_action_server),
"presenter": multiprocessing.Process(target=start_presenter_server),
}
for process in processes.values():
process.start()
def sigterm_handler(signalnum: int, current_stack_frame: Any) -> None:
strsignal = signal.strsignal # type: ignore
print(
f"Parent process {os.getpid()} received {strsignal(signalnum)} "
"signal. Terminate all child processes first."
)
for child in multiprocessing.active_children():
child.terminate()
child.join()
print(f"Parent process {os.getpid()} terminated successfully.")
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
while True:
for name, process in processes.items():
if not process.is_alive():
process.join()
print(
f"Component {name} terminated. Terminate all other components now."
)
for other_name, other_process in processes.items():
if name != other_name:
other_process.terminate()
other_process.join()
print("Parent process terminated.")
sys.exit(1)
time.sleep(0.1)
def main() -> None: # pragma: no cover
component = os.environ.get("OPENSLIDES_BACKEND_COMPONENT", "all")
if component == "action":
start_action_server()
elif component == "presenter":
start_presenter_server()
elif component == "all":
start_them_all()
else:
print(
f"Error: OPENSLIDES_BACKEND_COMPONENT must not be {component}.",
file=sys.stderr,
)
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
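# Hypothetical local entry point (an assumption; the packaged service may start this
# module differently): choose the component via the environment and hand off to main().
if __name__ == "__main__":  # pragma: no cover
    os.environ.setdefault("OPENSLIDES_BACKEND_COMPONENT", "all")
    main()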
| []
| []
| [
"OPENSLIDES_BACKEND_COMPONENT",
"OPENSLIDES_BACKEND_WORKER_TIMEOUT"
]
| [] | ["OPENSLIDES_BACKEND_COMPONENT", "OPENSLIDES_BACKEND_WORKER_TIMEOUT"] | python | 2 | 0 | |
railgun/test/integration/suite_test.go | //+build integration_tests
package integration
import (
"context"
"fmt"
"net/url"
"os"
"os/exec"
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
ktfkind "github.com/kong/kubernetes-testing-framework/pkg/kind"
"github.com/kong/kubernetes-ingress-controller/railgun/controllers"
)
var (
// cluster is the object which contains a Kubernetes client for the testing cluster
cluster ktfkind.Cluster
	// proxyReady is the channel that indicates when the Kong proxy is ready to use.
proxyReady = make(chan *url.URL)
)
func TestMain(m *testing.M) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// create a new cluster for tests
config := ktfkind.ClusterConfigurationWithKongProxy{EnableMetalLB: true}
newCluster, ready, err := config.Deploy(ctx)
if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
os.Exit(10)
}
defer newCluster.Cleanup()
cluster = newCluster
// deploy the Kong Kubernetes Ingress Controller (KIC) to the cluster
if err := deployControllers(ctx, ready, cluster.Client(), os.Getenv("KONG_CONTROLLER_TEST_IMAGE"), controllers.DefaultNamespace); err != nil {
newCluster.Cleanup()
		fmt.Fprintln(os.Stderr, err.Error())
os.Exit(11)
}
code := m.Run()
newCluster.Cleanup()
os.Exit(code)
}
// FIXME: this is a total hack for now, in the future we should deploy the controller into the cluster via image or run it as a goroutine.
func deployControllers(ctx context.Context, ready chan ktfkind.ProxyReadinessEvent, kc *kubernetes.Clientset, containerImage, namespace string) error {
// ensure the controller namespace is created
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
if _, err := kc.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}); err != nil {
if !errors.IsAlreadyExists(err) {
return err
}
}
// run the controller in the background
go func() {
event := <-ready
if event.Err != nil {
panic(event.Err)
}
u := event.URL
proxyReady <- u
cmd := exec.CommandContext(ctx, "go", "run", "../../main.go", "--kong-url", fmt.Sprintf("http://%s:8001", u.Hostname()))
if err := cmd.Run(); err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
}
}()
return nil
}
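// Illustrative invocation (an assumption about the local workflow, not part of this
// file): build or pick a controller image, then run the suite with the build tag
// declared at the top of this file, e.g.
//
//   KONG_CONTROLLER_TEST_IMAGE=kong/kubernetes-ingress-controller:local \
//       go test -tags integration_tests ./railgun/test/integration/...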
| [
"\"KONG_CONTROLLER_TEST_IMAGE\""
]
| []
| [
"KONG_CONTROLLER_TEST_IMAGE"
]
| [] | ["KONG_CONTROLLER_TEST_IMAGE"] | go | 1 | 0 | |
cpu/cpu_test.go | package cpu
import (
"errors"
"fmt"
"os"
"runtime"
"testing"
"time"
"github.com/shirou/gopsutil/v3/internal/common"
"github.com/stretchr/testify/assert"
)
func skipIfNotImplementedErr(t *testing.T, err error) {
if errors.Is(err, common.ErrNotImplementedError) {
t.Skip("not implemented")
}
}
func TestCpu_times(t *testing.T) {
v, err := Times(false)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(v) == 0 {
t.Error("could not get CPUs ", err)
}
empty := TimesStat{}
for _, vv := range v {
if vv == empty {
t.Errorf("could not get CPU User: %v", vv)
}
}
// test sum of per cpu stats is within margin of error for cpu total stats
cpuTotal, err := Times(false)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(cpuTotal) == 0 {
t.Error("could not get CPUs", err)
}
perCPU, err := Times(true)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(perCPU) == 0 {
t.Error("could not get CPUs", err)
}
var perCPUUserTimeSum float64
var perCPUSystemTimeSum float64
var perCPUIdleTimeSum float64
for _, pc := range perCPU {
perCPUUserTimeSum += pc.User
perCPUSystemTimeSum += pc.System
perCPUIdleTimeSum += pc.Idle
}
margin := 2.0
t.Log(cpuTotal[0])
if cpuTotal[0].User == 0 && cpuTotal[0].System == 0 && cpuTotal[0].Idle == 0 {
t.Error("could not get cpu values")
}
if cpuTotal[0].User != 0 {
assert.InEpsilon(t, cpuTotal[0].User, perCPUUserTimeSum, margin)
}
if cpuTotal[0].System != 0 {
assert.InEpsilon(t, cpuTotal[0].System, perCPUSystemTimeSum, margin)
}
if cpuTotal[0].Idle != 0 {
assert.InEpsilon(t, cpuTotal[0].Idle, perCPUIdleTimeSum, margin)
}
}
func TestCpu_counts(t *testing.T) {
v, err := Counts(true)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if v == 0 {
t.Errorf("could not get logical CPU counts: %v", v)
}
t.Logf("logical cores: %d", v)
v, err = Counts(false)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if v == 0 {
t.Errorf("could not get physical CPU counts: %v", v)
}
t.Logf("physical cores: %d", v)
}
func TestCPUTimeStat_String(t *testing.T) {
v := TimesStat{
CPU: "cpu0",
User: 100.1,
System: 200.1,
Idle: 300.1,
}
e := `{"cpu":"cpu0","user":100.1,"system":200.1,"idle":300.1,"nice":0.0,"iowait":0.0,"irq":0.0,"softirq":0.0,"steal":0.0,"guest":0.0,"guestNice":0.0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("CPUTimesStat string is invalid: %v", v)
}
}
func TestCpuInfo(t *testing.T) {
v, err := Info()
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
if len(v) == 0 {
t.Errorf("could not get CPU Info")
}
for _, vv := range v {
if vv.ModelName == "" {
t.Errorf("could not get CPU Info: %v", vv)
}
}
}
func testCPUPercent(t *testing.T, percpu bool) {
numcpu := runtime.NumCPU()
testCount := 3
if runtime.GOOS != "windows" {
testCount = 100
v, err := Percent(time.Millisecond, percpu)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
		// Skip CircleCI, where the CPU count differs
if os.Getenv("CIRCLECI") != "true" {
if (percpu && len(v) != numcpu) || (!percpu && len(v) != 1) {
t.Fatalf("wrong number of entries from CPUPercent: %v", v)
}
}
}
for i := 0; i < testCount; i++ {
duration := time.Duration(10) * time.Microsecond
v, err := Percent(duration, percpu)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
for _, percent := range v {
			// Check for slightly greater than 100% to account for any rounding issues.
if percent < 0.0 || percent > 100.0001*float64(numcpu) {
t.Fatalf("CPUPercent value is invalid: %f", percent)
}
}
}
}
func testCPUPercentLastUsed(t *testing.T, percpu bool) {
numcpu := runtime.NumCPU()
testCount := 10
if runtime.GOOS != "windows" {
testCount = 2
v, err := Percent(time.Millisecond, percpu)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
		// Skip CircleCI, where the CPU count differs
if os.Getenv("CIRCLECI") != "true" {
if (percpu && len(v) != numcpu) || (!percpu && len(v) != 1) {
t.Fatalf("wrong number of entries from CPUPercent: %v", v)
}
}
}
for i := 0; i < testCount; i++ {
v, err := Percent(0, percpu)
skipIfNotImplementedErr(t, err)
if err != nil {
t.Errorf("error %v", err)
}
time.Sleep(1 * time.Millisecond)
for _, percent := range v {
			// Check for slightly greater than 100% to account for any rounding issues.
if percent < 0.0 || percent > 100.0001*float64(numcpu) {
t.Fatalf("CPUPercent value is invalid: %f", percent)
}
}
}
}
func TestCPUPercent(t *testing.T) {
testCPUPercent(t, false)
}
func TestCPUPercentPerCpu(t *testing.T) {
testCPUPercent(t, true)
}
func TestCPUPercentIntervalZero(t *testing.T) {
testCPUPercentLastUsed(t, false)
}
func TestCPUPercentIntervalZeroPerCPU(t *testing.T) {
testCPUPercentLastUsed(t, true)
}
| [
"\"CIRCLECI\"",
"\"CIRCLECI\""
]
| []
| [
"CIRCLECI"
]
| [] | ["CIRCLECI"] | go | 1 | 0 | |
Back-end/fog_node/fog_node/wsgi.py | """
WSGI config for fog_node project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fog_node.settings')
application = get_wsgi_application()
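# Illustrative serving command (a deployment assumption, not part of this module):
#   gunicorn fog_node.wsgi:application --bind 0.0.0.0:8000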
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/update.go | package pkg
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/litmuschaos/litmus-e2e/pkg/log"
"github.com/litmuschaos/litmus-e2e/pkg/types"
"github.com/pkg/errors"
)
//UpdateResultTable will update the result of pipelines in a table on github using python update script
func UpdateResultTable(experimentDetails, testVerdict string, testsDetails *types.TestDetails) error {
var out bytes.Buffer
var stderr bytes.Buffer
//Updating the result table
log.Infof("The job_id for the job is: %v", os.Getenv("CI_JOB_ID"))
log.Infof("The testVerdict for the experiment is: %v", testVerdict+"ed")
//Setup emoji with test result
if testVerdict == "Pass" {
testVerdict = testVerdict + "ed :smiley:"
} else if testVerdict == "Fail" {
testVerdict = testVerdict + "ed :worried:"
} else {
testVerdict = testVerdict + " :cold_sweat:"
}
imageTag := GetImageTag(testsDetails.GoExperimentImage)
//Running python script to update result table
cmd := exec.Command("python3", "-u", "../utils/result_update.py", "--job_id", os.Getenv("CI_JOB_ID"), "--tag", imageTag, "--test_desc", experimentDetails, "--test_result", testVerdict, "--time_stamp", (time.Now().Format(time.ANSIC))+"(IST)", "--token", os.Getenv("GITHUB_TOKEN"), "--test_name", testsDetails.ExperimentName)
cmd.Stdout = &out
cmd.Stderr = &stderr
	err := cmd.Run()
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
return err
}
log.Infof("Result: " + out.String())
log.Info("[Table]: Pipeline Result table updated successfully !!!")
return nil
}
//UpdatePipelineStatus will update the status of pipeline at the end of all jobs
func UpdatePipelineStatus(testsDetails *types.TestDetails, coverageData string) error {
var out, stderr bytes.Buffer
var pipelineName string
//Updating the result table
log.Infof("The pipeline id is:", os.Getenv("CI_PIPELINE_ID"))
if os.Getenv("POD_LEVEL") == "true" {
pipelineName = "pod-level"
} else if os.Getenv("NODE_LEVEL") == "true" {
pipelineName = "node-level"
} else if os.Getenv("COMPONENT_TEST") == "true" {
pipelineName = "component"
}
imageTag := GetImageTag(testsDetails.GoExperimentImage)
// Recording job number for pipeline update
cmd := exec.Command("python3", "-u", "../utils/pipeline_status_update.py", "--pipeline_id", os.Getenv("CI_PIPELINE_ID"), "--tag", imageTag, "--time_stamp", (time.Now().Format(time.ANSIC))+"(IST)", "--coverage", coverageData, "--pipeline", pipelineName, "--token", os.Getenv("GITHUB_TOKEN"))
cmd.Stdout = &out
cmd.Stderr = &stderr
	err := cmd.Run()
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
return err
}
fmt.Println("Result: " + out.String())
return nil
}
// GetImageTag returns the Go experiment image tag
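// Example (illustrative): GetImageTag("litmuschaos/go-runner:ci") returns "ci".
// The image reference is assumed to always contain a ":<tag>" suffix.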
func GetImageTag(goExperimentImage string) string {
tag := strings.Split((goExperimentImage), ":")
return tag[1]
}
// AddAnnotation will add or update annotation on an application
func AddAnnotation(deployment, key, value, ns string) error {
command := []string{"annotate", "--overwrite", "deploy/" + deployment, key + "=" + value, "-n", ns}
err := Kubectl(command...)
if err != nil {
return errors.Errorf("fail to modify annotation, err: %v", err)
}
return nil
}
| [
"\"CI_JOB_ID\"",
"\"CI_JOB_ID\"",
"\"GITHUB_TOKEN\"",
"\"CI_PIPELINE_ID\"",
"\"POD_LEVEL\"",
"\"NODE_LEVEL\"",
"\"COMPONENT_TEST\"",
"\"CI_PIPELINE_ID\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"CI_PIPELINE_ID",
"NODE_LEVEL",
"COMPONENT_TEST",
"CI_JOB_ID",
"POD_LEVEL",
"GITHUB_TOKEN"
]
| [] | ["CI_PIPELINE_ID", "NODE_LEVEL", "COMPONENT_TEST", "CI_JOB_ID", "POD_LEVEL", "GITHUB_TOKEN"] | go | 6 | 0 | |
src/co/workamerica/functionality/shared/EMFUtil.java | package co.workamerica.functionality.shared;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import java.util.HashMap;
import java.util.Map;
/*
 * This class provides a thread-safe way to obtain entity managers on servers that do not provide a full Java EE container
*/
public class EMFUtil {
private static Map<String, String> env = System.getenv();
private static Map<String, Object> configOverrides = new HashMap<String, Object>();
private static EntityManagerFactory emf = buildFactory();
public static EntityManagerFactory getEMFactory() {
return emf;
}
private static EntityManagerFactory buildFactory () {
configOverrides.put("javax.persistence.jdbc.user", System.getProperty("DATABASE_USERNAME"));
configOverrides.put("javax.persistence.jdbc.password", System.getProperty("DATABASE_PASSWORD"));
configOverrides.put("javax.persistence.jdbc.url", System.getProperty("DATABASE_CONNECTION"));
return Persistence.createEntityManagerFactory("FinalPlatform", configOverrides);
}
}
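// Hypothetical usage sketch (caller-side code, not part of this class): obtain a
// short-lived EntityManager from the shared factory and always close it when done.
//
//   EntityManager em = EMFUtil.getEMFactory().createEntityManager();
//   try {
//       // ... run queries or transactions ...
//   } finally {
//       em.close();
//   }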
| []
| []
| []
| [] | [] | java | 0 | 0 | |
main.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import sys
import json
import sqlite3
import subprocess
# SQLite3 aggregate class
class Concatenate():
def __init__(self):
self.itemList = []
def step(self, value):
# print 'step(%r)' % value
self.itemList.append(value)
def finalize(self):
# print("final: %r" % ",".join(self.itemList))
return "!@#$".join(self.itemList)
# SQLite3 aggregate class
class IdentifiersConcat():
def __init__(self):
self.itemList = []
def step(self, key, val):
# print 'step(%r)' % value
        # When val contains a colon, idJsonStr in id2weblink would become {"aa":"bb":"xx", ...} and fail to parse,
        # so #kvSeparator# is used as the separator between key and val
self.itemList.append(u'%s#kvSeparator#%s' % (key, val))
def finalize(self):
# print("final: %r" % ",".join(self.itemList))
return "!@#$".join(self.itemList)
# book's website id to link
def id2weblink(idStrs):
# idStrs amazon:0596005954, douban:1850938, isbn:9780596005955
websiteLinkDict = {"douban": "https://book.douban.com/subject/{}/",
"amazon": "https://www.amazon.com/dp/{}",
"amazon_cn": "https://www.amazon.cn/dp/{}",
"google": "https://books.google.com/books?id={}",
"isbn": "https://www.worldcat.org/isbn/{}"}
websiteOrderedList = ["douban", "amazon_cn", "amazon", "google", "isbn"]
idJsonStr = '{"' + idStrs.replace('#kvSeparator#', '":"').replace(', ', '", "') + '"}'
# {"isbn":"xxxx", "amazon":"xxxxx"}
idJsonObj = json.loads(idJsonStr)
bookWebsite = os.getenv("BookWebsite")
# if the env exists and the book has the website id
if bookWebsite and bookWebsite in idJsonObj:
return websiteLinkDict[bookWebsite].format(idJsonObj[bookWebsite])
    # If no website is specified, or the book has no id for that website, fall back in a fixed priority order
for website in websiteOrderedList:
if website in idJsonObj:
return websiteLinkDict[website].format(idJsonObj[website])
    # If none of the websites above have an id, just take the first entry in idJsonObj
return idJsonObj[idJsonObj.keys()[0]]
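# Illustrative example (not executed): with the BookWebsite environment variable unset,
#   id2weblink(u'douban#kvSeparator#1850938, isbn#kvSeparator#9780596005955')
# returns "https://book.douban.com/subject/1850938/", because douban comes first in
# websiteOrderedList.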
def main(querySQL):
libraryPath = subprocess.check_output(
'/Applications/calibre.app/Contents/MacOS/calibre-debug -c "from calibre.utils.config import prefs; print(prefs.get(\'library_path\'),end=\'\')"', shell=True)
metaDbPath = os.path.join(libraryPath, 'metadata.db')
con = sqlite3.connect(metaDbPath)
con.create_aggregate("concat", 1, Concatenate)
con.create_aggregate("identifiers_concat", 2, IdentifiersConcat)
cur = con.cursor()
cur.execute(querySQL)
queryResult = cur.fetchall()
workflowResult = {"items": []}
for item in queryResult:
# bookCalibreID = item[0]
bookTitle = item[1]
# if the book has no author, error will occur: "AttributeError: 'NoneType' object has no attribute 'replace'"
bookAuthors = item[2].replace("!@#$", ", ") if item[2] else ""
# bookSize = item[3]
bookTags = item[4].replace("!@#$", ", ") if item[4] else "No Tags"
bookFormatList = item[5].split("!@#$") if item[5] else ""
bookFilenameList = item[6].split("!@#$") if item[6] else ""
bookRating = (str(item[7]) + " ") if isinstance(item[7], int) else "N/A"
bookIdentifiers = item[8].replace("!@#$", ", ").replace("#kvSeparator#", ": ") if item[8] else "No data"
bookWeblink = id2weblink(item[8].replace("!@#$", ", ")) if item[8] else ""
bookPath = item[9]
bookFormat = bookFormatList[0] if bookFormatList else "No book file"
bookFilename = bookFilenameList[0] if bookFormatList else "No book file"
        # When there is no book file, this value becomes "/librarypath/author/booktitle/No book file.No book file"
        # but since temp["type"] = "file", Alfred checks whether the file exists, so it should not error out
bookFullPath = os.path.join(libraryPath, bookPath, bookFilename + "." + bookFormat.lower())
temp = {}
temp["type"] = "file"
temp["title"] = bookTitle
temp["icon"] = {"path": os.path.join(libraryPath, bookPath, "cover.jpg")}
temp["subtitle"] = u"📙 {:<7} ⭐️ {:<5} ✍️ {}".format(bookFormat, bookRating, bookAuthors)
temp["arg"] = bookFullPath
temp["mods"] = {
"alt": {"valid": True, "arg": bookWeblink, "subtitle": u"🎫 " + bookIdentifiers},
"cmd": {"valid": True, "arg": bookFullPath, "subtitle": u"🏷 " + bookTags}}
workflowResult['items'].append(temp)
# if more than one format
if len(bookFormatList) > 1:
for i in range(1, len(bookFormatList)):
bookFormat = bookFormatList[i]
bookFilename = bookFilenameList[i]
temp = {}
temp["type"] = "file"
temp["title"] = bookTitle
temp["icon"] = {"path": os.path.join(
libraryPath, bookPath, "cover.jpg")}
temp["subtitle"] = u"📙 {:<7} ⭐️ {:<5} ✍️ {}".format(
bookFormat, bookRating, bookAuthors)
temp["arg"] = os.path.join(
libraryPath, bookPath, bookFilename + "." + bookFormat.lower())
temp["mods"] = {
"alt": {"valid": True, "arg": bookWeblink, "subtitle": u"🎫 " + bookIdentifiers},
"cmd": {"valid": True, "arg": bookTags, "subtitle": u"🏷 " + bookTags}}
workflowResult['items'].append(temp)
if workflowResult["items"]:
print(json.dumps(workflowResult, indent=4, sort_keys=True))
else:
print('{"items": [{"title": "None found","subtitle": "(*´・д・)?"}]}')
if __name__ == '__main__':
queryScope = sys.argv[1].strip()
queryStr = sys.argv[2].strip()
if not queryStr:
sys.exit()
if queryScope == "all":
querySQL = """SELECT id, title,
(SELECT concat(name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
(SELECT concat(name) FROM data WHERE data.book=books.id) filename,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
(SELECT identifiers_concat(type,val) FROM identifiers WHERE identifiers.book=books.id) ids,
path
FROM books
WHERE title like '%{qs}%' or tags like '%{qs}%'""".format(qs=queryStr)
elif queryScope == "title":
querySQL = """SELECT id, title,
(SELECT concat(name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
(SELECT concat(name) FROM data WHERE data.book=books.id) filename,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
(SELECT identifiers_concat(type,val) FROM identifiers WHERE identifiers.book=books.id) ids,
path
FROM books
WHERE title like '%{qs}%'""".format(qs=queryStr)
elif queryScope == "tags":
querySQL = """SELECT id, title,
(SELECT concat(name) FROM books_authors_link AS bal JOIN authors ON(author = authors.id) WHERE book = books.id) authors,
(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
(SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
(SELECT concat(format) FROM data WHERE data.book=books.id) formats,
(SELECT concat(name) FROM data WHERE data.book=books.id) filename,
(SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
(SELECT identifiers_concat(type,val) FROM identifiers WHERE identifiers.book=books.id) ids,
path
FROM books
WHERE title like '%{qs}%' or tags like '%{qs}%'""".format(qs=queryStr)
main(querySQL)
| []
| []
| [
"BookWebsite"
]
| [] | ["BookWebsite"] | python | 1 | 0 | |
pkg/auth/manager/ldap/ldap.go | // Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package ldap
import (
"context"
"crypto/tls"
"fmt"
"strings"
user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/cs3org/reva/pkg/appctx"
"github.com/cs3org/reva/pkg/auth"
"github.com/cs3org/reva/pkg/auth/manager/registry"
"github.com/cs3org/reva/pkg/errtypes"
"github.com/cs3org/reva/pkg/logger"
"github.com/cs3org/reva/pkg/rgrpc/todo/pool"
"github.com/cs3org/reva/pkg/sharedconf"
"github.com/go-ldap/ldap/v3"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
)
func init() {
registry.Register("ldap", New)
}
type mgr struct {
c *config
}
type config struct {
Hostname string `mapstructure:"hostname"`
Port int `mapstructure:"port"`
BaseDN string `mapstructure:"base_dn"`
UserFilter string `mapstructure:"userfilter"`
LoginFilter string `mapstructure:"loginfilter"`
BindUsername string `mapstructure:"bind_username"`
BindPassword string `mapstructure:"bind_password"`
Idp string `mapstructure:"idp"`
GatewaySvc string `mapstructure:"gatewaysvc"`
Schema attributes `mapstructure:"schema"`
}
type attributes struct {
// DN is the distinguished name in ldap, e.g. `cn=einstein,ou=users,dc=example,dc=org`
DN string `mapstructure:"dn"`
// UID is an immutable user id, see https://docs.microsoft.com/en-us/azure/active-directory/hybrid/plan-connect-design-concepts
UID string `mapstructure:"uid"`
// CN is the username, typically `cn`, `uid` or `samaccountname`
CN string `mapstructure:"cn"`
// Mail is the email address of a user
Mail string `mapstructure:"mail"`
// Displayname is the Human readable name, e.g. `Albert Einstein`
DisplayName string `mapstructure:"displayName"`
// UIDNumber is a numeric id that maps to a filesystem uid, eg. 123546
UIDNumber string `mapstructure:"uidNumber"`
// GIDNumber is a numeric id that maps to a filesystem gid, eg. 654321
GIDNumber string `mapstructure:"gidNumber"`
}
// Default attributes (Active Directory)
var ldapDefaults = attributes{
DN: "dn",
UID: "ms-DS-ConsistencyGuid", // you can fall back to objectguid or even samaccountname but you will run into trouble when user names change. You have been warned.
CN: "cn",
Mail: "mail",
DisplayName: "displayName",
UIDNumber: "uidNumber",
GIDNumber: "gidNumber",
}
func parseConfig(m map[string]interface{}) (*config, error) {
c := &config{
Schema: ldapDefaults,
}
if err := mapstructure.Decode(m, c); err != nil {
err = errors.Wrap(err, "error decoding conf")
return nil, err
}
return c, nil
}
// New returns an auth manager implementation that connects to a LDAP server to validate the user.
func New(m map[string]interface{}) (auth.Manager, error) {
c, err := parseConfig(m)
if err != nil {
return nil, err
}
// backwards compatibility
if c.UserFilter != "" {
logger.New().Warn().Msg("userfilter is deprecated, use a loginfilter like `(&(objectclass=posixAccount)(|(cn={{login}}))(mail={{login}}))`")
}
if c.LoginFilter == "" {
c.LoginFilter = c.UserFilter
c.LoginFilter = strings.ReplaceAll(c.LoginFilter, "%s", "{{login}}")
}
c.GatewaySvc = sharedconf.GetGatewaySVC(c.GatewaySvc)
return &mgr{
c: c,
}, nil
}
func (am *mgr) Authenticate(ctx context.Context, clientID, clientSecret string) (*user.User, error) {
log := appctx.GetLogger(ctx)
l, err := ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", am.c.Hostname, am.c.Port), &tls.Config{InsecureSkipVerify: true})
if err != nil {
return nil, err
}
defer l.Close()
// First bind with a read only user
err = l.Bind(am.c.BindUsername, am.c.BindPassword)
if err != nil {
log.Error().Err(err).Msg("bind with system user failed")
return nil, err
}
// Search for the given clientID
searchRequest := ldap.NewSearchRequest(
am.c.BaseDN,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
am.getLoginFilter(clientID),
[]string{am.c.Schema.DN, am.c.Schema.UID, am.c.Schema.CN, am.c.Schema.Mail, am.c.Schema.DisplayName, am.c.Schema.UIDNumber, am.c.Schema.GIDNumber},
nil,
)
sr, err := l.Search(searchRequest)
if err != nil {
return nil, err
}
if len(sr.Entries) != 1 {
return nil, errtypes.NotFound(clientID)
}
userdn := sr.Entries[0].DN
// Bind as the user to verify their password
err = l.Bind(userdn, clientSecret)
if err != nil {
log.Debug().Err(err).Interface("userdn", userdn).Msg("bind with user credentials failed")
return nil, err
}
userID := &user.UserId{
Idp: am.c.Idp,
OpaqueId: sr.Entries[0].GetEqualFoldAttributeValue(am.c.Schema.UID),
}
gwc, err := pool.GetGatewayServiceClient(am.c.GatewaySvc)
if err != nil {
return nil, errors.Wrap(err, "ldap: error getting gateway grpc client")
}
getGroupsResp, err := gwc.GetUserGroups(ctx, &user.GetUserGroupsRequest{
UserId: userID,
})
if err != nil {
return nil, errors.Wrap(err, "ldap: error getting user groups")
}
if getGroupsResp.Status.Code != rpc.Code_CODE_OK {
return nil, errors.Wrap(err, "ldap: grpc getting user groups failed")
}
u := &user.User{
Id: userID,
// TODO add more claims from the StandardClaims, eg EmailVerified
Username: sr.Entries[0].GetEqualFoldAttributeValue(am.c.Schema.CN),
// TODO groups
Groups: getGroupsResp.Groups,
Mail: sr.Entries[0].GetEqualFoldAttributeValue(am.c.Schema.Mail),
DisplayName: sr.Entries[0].GetEqualFoldAttributeValue(am.c.Schema.DisplayName),
Opaque: &types.Opaque{
Map: map[string]*types.OpaqueEntry{
"uid": {
Decoder: "plain",
Value: []byte(sr.Entries[0].GetEqualFoldAttributeValue(am.c.Schema.UIDNumber)),
},
"gid": {
Decoder: "plain",
Value: []byte(sr.Entries[0].GetEqualFoldAttributeValue(am.c.Schema.GIDNumber)),
},
},
},
}
log.Debug().Interface("entry", sr.Entries[0]).Interface("user", u).Msg("authenticated user")
return u, nil
}
func (am *mgr) getLoginFilter(login string) string {
return strings.ReplaceAll(am.c.LoginFilter, "{{login}}", login)
}
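// Illustrative configuration sketch (all values are placeholders, not defaults),
// using the mapstructure keys defined on the config struct above:
//
//   m := map[string]interface{}{
//       "hostname":      "ldap.example.org",
//       "port":          636,
//       "base_dn":       "ou=users,dc=example,dc=org",
//       "loginfilter":   "(&(objectclass=posixAccount)(|(cn={{login}})(mail={{login}})))",
//       "bind_username": "cn=reva,ou=sysusers,dc=example,dc=org",
//       "bind_password": "secret",
//       "idp":           "https://idp.example.org",
//   }
//   mgr, err := New(m)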
| []
| []
| []
| [] | [] | go | null | null | null |
runtime/runtime.go | package runtime
import (
"container/list"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemonconfig"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/graph"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/graphdb"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/runtime/execdriver/execdrivers"
"github.com/dotcloud/docker/runtime/execdriver/lxc"
"github.com/dotcloud/docker/runtime/graphdriver"
_ "github.com/dotcloud/docker/runtime/graphdriver/vfs"
_ "github.com/dotcloud/docker/runtime/networkdriver/lxc"
"github.com/dotcloud/docker/runtime/networkdriver/portallocator"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"os"
"path"
"regexp"
"sort"
"strings"
"sync"
"time"
)
// Set the max depth to the aufs default that most
// kernels are compiled with
// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk
const MaxImageDepth = 127
var (
DefaultDns = []string{"8.8.8.8", "8.8.4.4"}
validContainerNameChars = `[a-zA-Z0-9_.-]`
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
)
type Runtime struct {
repository string
sysInitPath string
containers *list.List
graph *graph.Graph
repositories *graph.TagStore
idIndex *utils.TruncIndex
sysInfo *sysinfo.SysInfo
volumes *graph.Graph
srv Server
eng *engine.Engine
config *daemonconfig.Config
containerGraph *graphdb.Database
driver graphdriver.Driver
execDriver execdriver.Driver
}
// List returns an array of all containers registered in the runtime.
func (runtime *Runtime) List() []*Container {
containers := new(History)
for e := runtime.containers.Front(); e != nil; e = e.Next() {
containers.Add(e.Value.(*Container))
}
return *containers
}
func (runtime *Runtime) getContainerElement(id string) *list.Element {
for e := runtime.containers.Front(); e != nil; e = e.Next() {
container := e.Value.(*Container)
if container.ID == id {
return e
}
}
return nil
}
// Get looks for a container by the specified ID or name, and returns it.
// If the container is not found, or if an error occurs, nil is returned.
func (runtime *Runtime) Get(name string) *Container {
if c, _ := runtime.GetByName(name); c != nil {
return c
}
id, err := runtime.idIndex.Get(name)
if err != nil {
return nil
}
e := runtime.getContainerElement(id)
if e == nil {
return nil
}
return e.Value.(*Container)
}
// Exists returns a true if a container of the specified ID or name exists,
// false otherwise.
func (runtime *Runtime) Exists(id string) bool {
return runtime.Get(id) != nil
}
func (runtime *Runtime) containerRoot(id string) string {
return path.Join(runtime.repository, id)
}
// Load reads the contents of a container from disk
// This is typically done at startup.
func (runtime *Runtime) load(id string) (*Container, error) {
container := &Container{root: runtime.containerRoot(id)}
if err := container.FromDisk(); err != nil {
return nil, err
}
if container.ID != id {
return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
}
if container.State.IsRunning() {
container.State.SetGhost(true)
}
return container, nil
}
// Register makes a container object usable by the runtime as <container.ID>
func (runtime *Runtime) Register(container *Container) error {
if container.runtime != nil || runtime.Exists(container.ID) {
return fmt.Errorf("Container is already loaded")
}
if err := validateID(container.ID); err != nil {
return err
}
if err := runtime.ensureName(container); err != nil {
return err
}
container.runtime = runtime
// Attach to stdout and stderr
container.stderr = utils.NewWriteBroadcaster()
container.stdout = utils.NewWriteBroadcaster()
// Attach to stdin
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
} else {
container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
}
// done
runtime.containers.PushBack(container)
runtime.idIndex.Add(container.ID)
// FIXME: if the container is supposed to be running but is not, auto restart it?
// if so, then we need to restart monitor and init a new lock
// If the container is supposed to be running, make sure of it
if container.State.IsRunning() {
if container.State.IsGhost() {
utils.Debugf("killing ghost %s", container.ID)
container.State.SetGhost(false)
container.State.SetStopped(0)
// We only have to handle this for lxc because the other drivers will ensure that
// no ghost processes are left when docker dies
if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
lxc.KillLxc(container.ID, 9)
if err := container.Unmount(); err != nil {
utils.Debugf("ghost unmount error %s", err)
}
}
}
info := runtime.execDriver.Info(container.ID)
if !info.IsRunning() {
utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
if runtime.config.AutoRestart {
utils.Debugf("Restarting")
if err := container.Unmount(); err != nil {
utils.Debugf("restart unmount error %s", err)
}
container.State.SetGhost(false)
container.State.SetStopped(0)
if err := container.Start(); err != nil {
return err
}
} else {
utils.Debugf("Marking as stopped")
container.State.SetStopped(-127)
if err := container.ToDisk(); err != nil {
return err
}
}
}
} else {
// When the container is not running, we still initialize the waitLock
// chan and close it. Receiving on nil chan blocks whereas receiving on a
// closed chan does not. In this case we do not want to block.
container.waitLock = make(chan struct{})
close(container.waitLock)
}
return nil
}
func (runtime *Runtime) ensureName(container *Container) error {
if container.Name == "" {
name, err := generateRandomName(runtime)
if err != nil {
name = utils.TruncateID(container.ID)
}
container.Name = name
if err := container.ToDisk(); err != nil {
utils.Debugf("Error saving container name %s", err)
}
if !runtime.containerGraph.Exists(name) {
if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err)
}
}
}
return nil
}
func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
if err != nil {
return err
}
src.AddWriter(log, stream)
return nil
}
// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem.
func (runtime *Runtime) Destroy(container *Container) error {
if container == nil {
return fmt.Errorf("The given container is <nil>")
}
element := runtime.getContainerElement(container.ID)
if element == nil {
return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
}
if err := container.Stop(3); err != nil {
return err
}
if err := runtime.driver.Remove(container.ID); err != nil {
return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
}
initID := fmt.Sprintf("%s-init", container.ID)
if err := runtime.driver.Remove(initID); err != nil {
return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err)
}
if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
utils.Debugf("Unable to remove container from link graph: %s", err)
}
// Deregister the container before removing its directory, to avoid race conditions
runtime.idIndex.Delete(container.ID)
runtime.containers.Remove(element)
if err := os.RemoveAll(container.root); err != nil {
return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
}
return nil
}
func (runtime *Runtime) restore() error {
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Printf("Loading containers: ")
}
dir, err := ioutil.ReadDir(runtime.repository)
if err != nil {
return err
}
containers := make(map[string]*Container)
currentDriver := runtime.driver.String()
for _, v := range dir {
id := v.Name()
container, err := runtime.load(id)
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Print(".")
}
if err != nil {
utils.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
utils.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
register := func(container *Container) {
if err := runtime.Register(container); err != nil {
utils.Debugf("Failed to register container %s: %s", container.ID, err)
}
}
if entities := runtime.containerGraph.List("/", -1); entities != nil {
for _, p := range entities.Paths() {
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Print(".")
}
e := entities[p]
if container, ok := containers[e.ID()]; ok {
register(container)
delete(containers, e.ID())
}
}
}
// Any containers that are left over do not exist in the graph
for _, container := range containers {
// Try to set the default name for a container if it exists prior to links
container.Name, err = generateRandomName(runtime)
if err != nil {
container.Name = utils.TruncateID(container.ID)
}
if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil {
utils.Debugf("Setting default id - %s", err)
}
register(container)
}
if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
fmt.Printf(": done.\n")
}
return nil
}
// Create creates a new container from the given configuration with a given name.
func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) {
// Lookup image
img, err := runtime.repositories.LookupImage(config.Image)
if err != nil {
return nil, nil, err
}
// We add 2 layers to the depth because the container's rw and
// init layer add to the restriction
depth, err := img.Depth()
if err != nil {
return nil, nil, err
}
if depth+2 >= MaxImageDepth {
return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
}
checkDeprecatedExpose := func(config *runconfig.Config) bool {
if config != nil {
if config.PortSpecs != nil {
for _, p := range config.PortSpecs {
if strings.Contains(p, ":") {
return true
}
}
}
}
return false
}
warnings := []string{}
if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) {
warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
}
if img.Config != nil {
if err := runconfig.Merge(config, img.Config); err != nil {
return nil, nil, err
}
}
if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
return nil, nil, fmt.Errorf("No command specified")
}
// Generate id
id := utils.GenerateRandomID()
if name == "" {
name, err = generateRandomName(runtime)
if err != nil {
name = utils.TruncateID(id)
}
} else {
if !validContainerNamePattern.MatchString(name) {
return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
}
}
if name[0] != '/' {
name = "/" + name
}
	// Set the entity in the graph using the default name specified
if _, err := runtime.containerGraph.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) {
return nil, nil, err
}
conflictingContainer, err := runtime.GetByName(name)
if err != nil {
if strings.Contains(err.Error(), "Could not find entity") {
return nil, nil, err
}
// Remove name and continue starting the container
if err := runtime.containerGraph.Delete(name); err != nil {
return nil, nil, err
}
} else {
nameAsKnownByUser := strings.TrimPrefix(name, "/")
return nil, nil, fmt.Errorf(
"Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser,
utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser)
}
}
// Generate default hostname
// FIXME: the lxc template no longer needs to set a default hostname
if config.Hostname == "" {
config.Hostname = id[:12]
}
var args []string
var entrypoint string
if len(config.Entrypoint) != 0 {
entrypoint = config.Entrypoint[0]
args = append(config.Entrypoint[1:], config.Cmd...)
} else {
entrypoint = config.Cmd[0]
args = config.Cmd[1:]
}
container := &Container{
// FIXME: we should generate the ID here instead of receiving it as an argument
ID: id,
Created: time.Now().UTC(),
Path: entrypoint,
Args: args, //FIXME: de-duplicate from config
Config: config,
hostConfig: &runconfig.HostConfig{},
Image: img.ID, // Always use the resolved image id
NetworkSettings: &NetworkSettings{},
Name: name,
Driver: runtime.driver.String(),
ExecDriver: runtime.execDriver.Name(),
}
container.root = runtime.containerRoot(container.ID)
// Step 1: create the container directory.
// This doubles as a barrier to avoid race conditions.
if err := os.Mkdir(container.root, 0700); err != nil {
return nil, nil, err
}
initID := fmt.Sprintf("%s-init", container.ID)
if err := runtime.driver.Create(initID, img.ID); err != nil {
return nil, nil, err
}
initPath, err := runtime.driver.Get(initID)
if err != nil {
return nil, nil, err
}
defer runtime.driver.Put(initID)
if err := graph.SetupInitLayer(initPath); err != nil {
return nil, nil, err
}
if err := runtime.driver.Create(container.ID, initID); err != nil {
return nil, nil, err
}
resolvConf, err := utils.GetResolvConf()
if err != nil {
return nil, nil, err
}
if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
runtime.config.Dns = DefaultDns
}
// If custom dns exists, then create a resolv.conf for the container
if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 {
dns := utils.GetNameservers(resolvConf)
dnsSearch := utils.GetSearchDomains(resolvConf)
if len(config.Dns) > 0 {
dns = config.Dns
} else if len(runtime.config.Dns) > 0 {
dns = runtime.config.Dns
}
if len(config.DnsSearch) > 0 {
dnsSearch = config.DnsSearch
} else if len(runtime.config.DnsSearch) > 0 {
dnsSearch = runtime.config.DnsSearch
}
container.ResolvConfPath = path.Join(container.root, "resolv.conf")
f, err := os.Create(container.ResolvConfPath)
if err != nil {
return nil, nil, err
}
defer f.Close()
for _, dns := range dns {
if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
return nil, nil, err
}
}
if len(dnsSearch) > 0 {
if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil {
return nil, nil, err
}
}
} else {
container.ResolvConfPath = "/etc/resolv.conf"
}
// Step 2: save the container json
if err := container.ToDisk(); err != nil {
return nil, nil, err
}
// Step 3: register the container
if err := runtime.Register(container); err != nil {
return nil, nil, err
}
return container, warnings, nil
}
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
// FIXME: freeze the container before copying it to avoid data corruption?
if err := container.Mount(); err != nil {
return nil, err
}
defer container.Unmount()
rwTar, err := container.ExportRw()
if err != nil {
return nil, err
}
defer rwTar.Close()
// Create a new image from the container's base layers + a new layer from container changes
var (
containerID, containerImage string
containerConfig *runconfig.Config
)
if container != nil {
containerID = container.ID
containerImage = container.Image
containerConfig = container.Config
}
img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
if err != nil {
return nil, err
}
// Register the image if needed
if repository != "" {
if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil {
return img, err
}
}
return img, nil
}
func GetFullContainerName(name string) (string, error) {
if name == "" {
return "", fmt.Errorf("Container name cannot be empty")
}
if name[0] != '/' {
name = "/" + name
}
return name, nil
}
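// Illustrative examples: GetFullContainerName("web") returns "/web", while an
// already-prefixed "/web" is returned unchanged.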
func (runtime *Runtime) GetByName(name string) (*Container, error) {
fullName, err := GetFullContainerName(name)
if err != nil {
return nil, err
}
entity := runtime.containerGraph.Get(fullName)
if entity == nil {
return nil, fmt.Errorf("Could not find entity for %s", name)
}
e := runtime.getContainerElement(entity.ID())
if e == nil {
return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID())
}
return e.Value.(*Container), nil
}
func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
name, err := GetFullContainerName(name)
if err != nil {
return nil, err
}
children := make(map[string]*Container)
err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
c := runtime.Get(e.ID())
if c == nil {
return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p)
}
children[p] = c
return nil
}, 0)
if err != nil {
return nil, err
}
return children, nil
}
func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if !runtime.containerGraph.Exists(fullName) {
_, err := runtime.containerGraph.Set(fullName, child.ID)
return err
}
return nil
}
// FIXME: harmonize with NewGraph()
func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
runtime, err := NewRuntimeFromDirectory(config, eng)
if err != nil {
return nil, err
}
return runtime, nil
}
func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
// Set the default driver
graphdriver.DefaultDriver = config.GraphDriver
// Load storage driver
driver, err := graphdriver.New(config.Root)
if err != nil {
return nil, err
}
utils.Debugf("Using graph driver %s", driver)
runtimeRepo := path.Join(config.Root, "containers")
if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
// Migrate the container if it is aufs and aufs is enabled
if err = migrateIfAufs(driver, config.Root); err != nil {
return nil, err
}
utils.Debugf("Creating images graph")
g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
if err != nil {
return nil, err
}
// We don't want to use a complex driver like aufs or devmapper
// for volumes, just a plain filesystem
volumesDriver, err := graphdriver.GetDriver("vfs", config.Root)
if err != nil {
return nil, err
}
utils.Debugf("Creating volumes graph")
volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
if err != nil {
return nil, err
}
utils.Debugf("Creating repository list")
repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
}
if !config.DisableNetwork {
job := eng.Job("init_networkdriver")
job.SetenvBool("EnableIptables", config.EnableIptables)
job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication)
job.SetenvBool("EnableIpForward", config.EnableIpForward)
job.Setenv("BridgeIface", config.BridgeIface)
job.Setenv("BridgeIP", config.BridgeIP)
job.Setenv("DefaultBindingIP", config.DefaultIp.String())
if err := job.Run(); err != nil {
return nil, err
}
}
graphdbPath := path.Join(config.Root, "linkgraph.db")
graph, err := graphdb.NewSqliteConn(graphdbPath)
if err != nil {
return nil, err
}
localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
sysInitPath := utils.DockerInitPath(localCopy)
if sysInitPath == "" {
return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.")
}
if sysInitPath != localCopy {
// When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade).
if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) {
return nil, err
}
if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil {
return nil, err
}
if err := os.Chmod(localCopy, 0700); err != nil {
return nil, err
}
sysInitPath = localCopy
}
sysInfo := sysinfo.New(false)
ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInitPath, sysInfo)
if err != nil {
return nil, err
}
runtime := &Runtime{
repository: runtimeRepo,
containers: list.New(),
graph: g,
repositories: repositories,
idIndex: utils.NewTruncIndex(),
sysInfo: sysInfo,
volumes: volumes,
config: config,
containerGraph: graph,
driver: driver,
sysInitPath: sysInitPath,
execDriver: ed,
eng: eng,
}
if err := runtime.restore(); err != nil {
return nil, err
}
return runtime, nil
}
func (runtime *Runtime) Close() error {
errorsStrings := []string{}
if err := portallocator.ReleaseAll(); err != nil {
utils.Errorf("portallocator.ReleaseAll(): %s", err)
errorsStrings = append(errorsStrings, err.Error())
}
if err := runtime.driver.Cleanup(); err != nil {
utils.Errorf("runtime.driver.Cleanup(): %s", err.Error())
errorsStrings = append(errorsStrings, err.Error())
}
if err := runtime.containerGraph.Close(); err != nil {
utils.Errorf("runtime.containerGraph.Close(): %s", err.Error())
errorsStrings = append(errorsStrings, err.Error())
}
if len(errorsStrings) > 0 {
return fmt.Errorf("%s", strings.Join(errorsStrings, ", "))
}
return nil
}
func (runtime *Runtime) Mount(container *Container) error {
dir, err := runtime.driver.Get(container.ID)
if err != nil {
return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err)
}
if container.basefs == "" {
container.basefs = dir
} else if container.basefs != dir {
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
runtime.driver, container.ID, container.basefs, dir)
}
return nil
}
func (runtime *Runtime) Unmount(container *Container) error {
runtime.driver.Put(container.ID)
return nil
}
func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) {
if differ, ok := runtime.driver.(graphdriver.Differ); ok {
return differ.Changes(container.ID)
}
cDir, err := runtime.driver.Get(container.ID)
if err != nil {
return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
}
defer runtime.driver.Put(container.ID)
initDir, err := runtime.driver.Get(container.ID + "-init")
if err != nil {
return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
}
defer runtime.driver.Put(container.ID + "-init")
return archive.ChangesDirs(cDir, initDir)
}
func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) {
if differ, ok := runtime.driver.(graphdriver.Differ); ok {
return differ.Diff(container.ID)
}
changes, err := runtime.Changes(container)
if err != nil {
return nil, err
}
cDir, err := runtime.driver.Get(container.ID)
if err != nil {
return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err)
}
archive, err := archive.ExportChanges(cDir, changes)
if err != nil {
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
err := archive.Close()
runtime.driver.Put(container.ID)
return err
}), nil
}
func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
return runtime.execDriver.Run(c.command, pipes, startCallback)
}
func (runtime *Runtime) Kill(c *Container, sig int) error {
return runtime.execDriver.Kill(c.command, sig)
}
// Nuke kills all containers then removes all content
// from the content root, including images, volumes and
// container filesystems.
// Again: this will remove your entire docker runtime!
func (runtime *Runtime) Nuke() error {
var wg sync.WaitGroup
for _, container := range runtime.List() {
wg.Add(1)
go func(c *Container) {
c.Kill()
wg.Done()
}(container)
}
wg.Wait()
runtime.Close()
return os.RemoveAll(runtime.config.Root)
}
// FIXME: this is a convenience function for integration tests
// which need direct access to runtime.graph.
// Once the tests switch to using engine and jobs, this method
// can go away.
func (runtime *Runtime) Graph() *graph.Graph {
return runtime.graph
}
func (runtime *Runtime) Repositories() *graph.TagStore {
return runtime.repositories
}
func (runtime *Runtime) Config() *daemonconfig.Config {
return runtime.config
}
func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo {
return runtime.sysInfo
}
func (runtime *Runtime) SystemInitPath() string {
return runtime.sysInitPath
}
func (runtime *Runtime) GraphDriver() graphdriver.Driver {
return runtime.driver
}
func (runtime *Runtime) ExecutionDriver() execdriver.Driver {
return runtime.execDriver
}
func (runtime *Runtime) Volumes() *graph.Graph {
return runtime.volumes
}
func (runtime *Runtime) ContainerGraph() *graphdb.Database {
return runtime.containerGraph
}
func (runtime *Runtime) SetServer(server Server) {
runtime.srv = server
}
// History is a convenience type for storing a list of containers,
// ordered by creation date.
type History []*Container
func (history *History) Len() int {
return len(*history)
}
func (history *History) Less(i, j int) bool {
containers := *history
return containers[j].When().Before(containers[i].When())
}
func (history *History) Swap(i, j int) {
containers := *history
tmp := containers[i]
containers[i] = containers[j]
containers[j] = tmp
}
func (history *History) Add(container *Container) {
*history = append(*history, container)
sort.Sort(history)
}
| [
"\"DEBUG\"",
"\"TEST\"",
"\"DEBUG\"",
"\"TEST\"",
"\"DEBUG\"",
"\"TEST\"",
"\"DEBUG\"",
"\"TEST\""
]
| []
| [
"TEST",
"DEBUG"
]
| [] | ["TEST", "DEBUG"] | go | 2 | 0 | |
python/ray/util/collective/collective.py | """APIs exposed under the namespace ray.util.collective."""
import logging
import os
from typing import List
import numpy as np
import ray
from ray.util.collective import types
_GLOO_AVAILABLE = False
_NCCL_AVAILABLE = True
try:
from ray.util.collective.collective_group import NCCLGroup
except ImportError:
_NCCL_AVAILABLE = False
logger = logging.getLogger(__name__)
def nccl_available():
return _NCCL_AVAILABLE
def gloo_available():
return _GLOO_AVAILABLE
class GroupManager(object):
"""Use this class to manage the collective groups we created so far.
Each process will have an instance of `GroupManager`. Each process
could belong to multiple collective groups. The membership information
and other metadata are stored in the global `_group_mgr` object.
"""
def __init__(self):
self._name_group_map = {}
self._group_name_map = {}
def create_collective_group(self, backend, world_size, rank, group_name):
"""The entry to create new collective groups in the manager.
Put the registration and the group information into the manager
metadata as well.
"""
backend = types.Backend(backend)
if backend == types.Backend.MPI:
raise RuntimeError("Ray does not support MPI.")
elif backend == types.Backend.GLOO:
raise NotImplementedError()
elif backend == types.Backend.NCCL:
logger.debug("Creating NCCL group: '{}'...".format(group_name))
g = NCCLGroup(world_size, rank, group_name)
self._name_group_map[group_name] = g
self._group_name_map[g] = group_name
return self._name_group_map[group_name]
def is_group_exist(self, group_name):
return group_name in self._name_group_map
def get_group_by_name(self, group_name):
"""Get the collective group handle by its name."""
if not self.is_group_exist(group_name):
logger.warning(
"The group '{}' is not initialized.".format(group_name))
return None
return self._name_group_map[group_name]
def destroy_collective_group(self, group_name):
"""Group destructor."""
if not self.is_group_exist(group_name):
logger.warning("The group '{}' does not exist.".format(group_name))
return
# release the collective group resource
g = self._name_group_map[group_name]
# clean up the dicts
del self._group_name_map[g]
del self._name_group_map[group_name]
# Release the communicator resources
g.destroy_group()
_group_mgr = GroupManager()
def is_group_initialized(group_name):
"""Check if the group is initialized in this process by the group name."""
return _group_mgr.is_group_exist(group_name)
def init_collective_group(world_size: int,
rank: int,
backend=types.Backend.NCCL,
group_name: str = "default"):
"""Initialize a collective group inside an actor process.
Args:
world_size (int): the total number of processes in the group.
rank (int): the rank of the current process.
backend: the CCL backend to use, NCCL or GLOO.
group_name (str): the name of the collective group.
Returns:
None
"""
_check_inside_actor()
backend = types.Backend(backend)
_check_backend_availability(backend)
global _group_mgr
# TODO(Hao): implement a group auto-counter.
if not group_name:
raise ValueError("group_name '{}' needs to be a string."
.format(group_name))
if _group_mgr.is_group_exist(group_name):
raise RuntimeError("Trying to initialize a group twice.")
assert (world_size > 0)
assert (rank >= 0)
assert (rank < world_size)
_group_mgr.create_collective_group(backend, world_size, rank, group_name)
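# --- Illustrative usage sketch (not part of the original module): how an actor
# --- might call init_collective_group and then a collective op. The `Worker`
# --- actor, the cupy import and the tensor shape are assumptions made for this
# --- example, not names defined in this file.
#
# import cupy as cp
# import ray.util.collective as col
#
# @ray.remote(num_gpus=1)
# class Worker:
#     def setup(self, world_size, rank):
#         col.init_collective_group(world_size, rank,
#                                   backend="nccl", group_name="default")
#
#     def compute(self):
#         buf = cp.ones((4,), dtype=cp.float32)
#         col.allreduce(buf, group_name="default")
#         return buf  # every rank now holds the elementwise sum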
def create_collective_group(actors,
world_size: int,
ranks: List[int],
backend=types.Backend.NCCL,
group_name: str = "default"):
"""Declare a list of actors as a collective group.
Note: This function should be called in a driver process.
Args:
actors (list): a list of actors to be set in a collective group.
world_size (int): the total number of processes in the group.
ranks (List[int]): the rank of each actor.
backend: the CCL backend to use, NCCL or GLOO.
group_name (str): the name of the collective group.
Returns:
None
"""
backend = types.Backend(backend)
_check_backend_availability(backend)
name = "info_" + group_name
try:
ray.get_actor(name)
raise RuntimeError("Trying to initialize a group twice.")
except ValueError:
pass
if len(ranks) != len(actors):
raise RuntimeError(
"Each actor should correspond to one rank. Got '{}' "
"ranks but '{}' actors".format(len(ranks), len(actors)))
if set(ranks) != set(range(len(ranks))):
raise RuntimeError(
"Ranks must be a permutation from 0 to '{}'. Got '{}'.".format(
len(ranks), "".join([str(r) for r in ranks])))
if world_size <= 0:
raise RuntimeError("World size must be greater than zero. "
"Got '{}'.".format(world_size))
    if not all(r >= 0 for r in ranks):
        raise RuntimeError("Ranks must be non-negative.")
    if not all(r < world_size for r in ranks):
        raise RuntimeError("Ranks must be smaller than world_size.")
# avoid a circular dependency
from ray.util.collective.util import Info
# store the information into a NamedActor that can be accessed later.
name = "info_" + group_name
actors_id = [a._ray_actor_id for a in actors]
# TODO (Dacheng): how do we recycle this name actor?
info = Info.options(name=name, lifetime="detached").remote()
ray.get([info.set_info.remote(actors_id, world_size, ranks, backend)])
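# --- Illustrative driver-side sketch (not part of the original module): instead
# --- of calling init_collective_group inside each actor, the group can be
# --- declared from the driver with create_collective_group. `Worker` refers to
# --- the hypothetical actor class sketched above.
#
# workers = [Worker.remote() for _ in range(2)]
# col.create_collective_group(workers, world_size=2, ranks=[0, 1],
#                             backend="nccl", group_name="default")
# # The group is then instantiated lazily inside each actor the first time a
# # collective call looks it up (see _check_and_get_group below).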
# TODO (we need a declarative destroy() API here.)
def destroy_collective_group(group_name: str = "default") -> None:
"""Destroy a collective group given its group name."""
_check_inside_actor()
global _group_mgr
_group_mgr.destroy_collective_group(group_name)
def get_rank(group_name: str = "default") -> int:
"""Return the rank of this process in the given group.
Args:
group_name (str): the name of the group to query
Returns:
the rank of this process in the named group,
-1 if the group does not exist or the process does
not belong to the group.
"""
_check_inside_actor()
if not is_group_initialized(group_name):
return -1
g = _group_mgr.get_group_by_name(group_name)
return g.rank
def get_collective_group_size(group_name: str = "default") -> int:
"""Return the size of the collective group with the given name.
Args:
group_name: the name of the group to query
Returns:
The world size of the collective group, -1 if the group does
not exist or the process does not belong to the group.
"""
_check_inside_actor()
if not is_group_initialized(group_name):
return -1
g = _group_mgr.get_group_by_name(group_name)
return g.world_size
def allreduce(tensor, group_name: str = "default", op=types.ReduceOp.SUM):
"""Collective allreduce the tensor across the group.
Args:
tensor: the tensor to be all-reduced on this process.
group_name (str): the collective group name to perform allreduce.
op: The reduce operation.
Returns:
None
"""
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
opts = types.AllReduceOptions
opts.reduceOp = op
g.allreduce([tensor], opts)
def allreduce_multigpu(tensor_list: list,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Collective allreduce a list of tensors across the group.
Args:
tensor_list (List[tensor]): list of tensors to be allreduced,
each on a GPU.
group_name (str): the collective group name to perform allreduce.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
opts = types.AllReduceOptions
opts.reduceOp = op
g.allreduce(tensor_list, opts)
def barrier(group_name: str = "default"):
"""Barrier all processes in the collective group.
Args:
group_name (str): the name of the group to barrier.
Returns:
None
"""
g = _check_and_get_group(group_name)
g.barrier()
def reduce(tensor,
dst_rank: int = 0,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Reduce the tensor across the group to the destination rank.
Args:
tensor: the tensor to be reduced on this process.
dst_rank (int): the rank of the destination process.
group_name (str): the collective group name to perform reduce.
op: The reduce operation.
Returns:
None
"""
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
# check dst rank
_check_rank_valid(g, dst_rank)
opts = types.ReduceOptions()
opts.reduceOp = op
opts.root_rank = dst_rank
opts.root_tensor = 0
g.reduce([tensor], opts)
def reduce_multigpu(tensor_list: list,
dst_rank: int = 0,
dst_tensor: int = 0,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Reduce the tensor across the group to the destination rank
and destination tensor.
Args:
tensor_list: the list of tensors to be reduced on this process;
each tensor located on a GPU.
dst_rank (int): the rank of the destination process.
dst_tensor: the index of GPU at the destination.
group_name (str): the collective group name to perform reduce.
op: The reduce operation.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
# check dst rank
_check_rank_valid(g, dst_rank)
_check_root_tensor_valid(len(tensor_list), dst_tensor)
opts = types.ReduceOptions()
opts.reduceOp = op
opts.root_rank = dst_rank
opts.root_tensor = dst_tensor
g.reduce(tensor_list, opts)
def broadcast(tensor, src_rank: int = 0, group_name: str = "default"):
"""Broadcast the tensor from a source process to all others.
Args:
tensor: the tensor to be broadcasted (src) or received (destination).
src_rank (int): the rank of the source process.
group_name (str): the collective group name to perform broadcast.
Returns:
None
"""
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
# check src rank
_check_rank_valid(g, src_rank)
opts = types.BroadcastOptions()
opts.root_rank = src_rank
opts.root_tensor = 0
g.broadcast([tensor], opts)
def broadcast_multigpu(tensor_list,
src_rank: int = 0,
src_tensor: int = 0,
group_name: str = "default"):
"""Broadcast the tensor from a source GPU to all other GPUs.
Args:
tensor_list: the tensors to broadcast (src) or receive (dst).
src_rank (int): the rank of the source process.
src_tensor (int): the index of the source GPU on the source process.
group_name (str): the collective group name to perform broadcast.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
# check src rank
_check_rank_valid(g, src_rank)
_check_root_tensor_valid(len(tensor_list), src_tensor)
opts = types.BroadcastOptions()
opts.root_rank = src_rank
opts.root_tensor = src_tensor
g.broadcast(tensor_list, opts)
def allgather(tensor_list: list, tensor, group_name: str = "default"):
"""Allgather tensors from each process of the group into a list.
Args:
tensor_list (list): the results, stored as a list of tensors.
tensor: the tensor (to be gathered) in the current process
group_name (str): the name of the collective group.
Returns:
None
"""
_check_single_tensor_input(tensor)
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
if len(tensor_list) != g.world_size:
        # Typically the CCL lib requires len(tensor_list) >= world_size;
# Here we make it more strict: len(tensor_list) == world_size.
raise RuntimeError(
"The length of the tensor list operands to allgather "
"must be equal to world_size.")
opts = types.AllGatherOptions()
g.allgather([tensor_list], [tensor], opts)
def allgather_multigpu(output_tensor_lists: list,
input_tensor_list: list,
group_name: str = "default"):
"""Allgather tensors from each gpus of the group into lists.
Args:
output_tensor_lists (List[List[tensor]]): gathered results, with shape
must be num_gpus * world_size * shape(tensor).
input_tensor_list: (List[tensor]): a list of tensors, with shape
num_gpus * shape(tensor).
group_name (str): the name of the collective group.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_lists_input(output_tensor_lists)
_check_tensor_list_input(input_tensor_list)
g = _check_and_get_group(group_name)
opts = types.AllGatherOptions()
g.allgather(output_tensor_lists, input_tensor_list, opts)
def reducescatter(tensor,
tensor_list: list,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Reducescatter a list of tensors across the group.
Reduce the list of the tensors across each process in the group, then
scatter the reduced list of tensors -- one tensor for each process.
Args:
        tensor: the resulting tensor on this process.
tensor_list (list): The list of tensors to be reduced and scattered.
group_name (str): the name of the collective group.
op: The reduce operation.
Returns:
None
"""
_check_single_tensor_input(tensor)
_check_tensor_list_input(tensor_list)
g = _check_and_get_group(group_name)
if len(tensor_list) != g.world_size:
raise RuntimeError(
"The length of the tensor list operands to reducescatter "
"must not be equal to world_size.")
opts = types.ReduceScatterOptions()
opts.reduceOp = op
g.reducescatter([tensor], [tensor_list], opts)
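# --- Worked example (an assumption-laden sketch, not original code), run inside
# --- each actor of a 2-process group; `cp` and `col` are as in the earlier
# --- sketch. Every rank contributes a list of world_size tensors and receives
# --- the reduction of exactly one slot.
#
# out = cp.zeros((4,), dtype=cp.float32)
# parts = [cp.ones((4,), dtype=cp.float32) * (i + 1) for i in range(2)]
# col.reducescatter(out, parts, group_name="default")
# # rank 0 ends up with the sum over ranks of parts[0], rank 1 with parts[1]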
def reducescatter_multigpu(output_tensor_list,
input_tensor_lists,
group_name: str = "default",
op=types.ReduceOp.SUM):
"""Reducescatter a list of tensors across all GPUs.
Args:
        output_tensor_list: the resulting list of tensors, with
shape: num_gpus * shape(tensor).
input_tensor_lists: the original tensors, with shape:
num_gpus * world_size * shape(tensor).
group_name (str): the name of the collective group.
op: The reduce operation.
Returns:
None.
"""
if not types.cupy_available():
raise RuntimeError("Multigpu calls requires NCCL and Cupy.")
_check_tensor_lists_input(input_tensor_lists)
_check_tensor_list_input(output_tensor_list)
g = _check_and_get_group(group_name)
opts = types.ReduceScatterOptions()
opts.reduceOp = op
g.reducescatter(output_tensor_list, input_tensor_lists, opts)
def send(tensor, dst_rank: int, group_name: str = "default"):
"""Send a tensor to a remote process synchronously.
Args:
tensor: the tensor to send.
dst_rank (int): the rank of the destination process.
group_name (str): the name of the collective group.
Returns:
None
"""
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
_check_rank_valid(g, dst_rank)
if dst_rank == g.rank:
raise RuntimeError(
"The destination rank '{}' is self.".format(dst_rank))
opts = types.SendOptions()
opts.dst_rank = dst_rank
g.send([tensor], opts)
def send_multigpu(tensor,
dst_rank: int,
dst_gpu_index: int,
group_name: str = "default"):
"""Send a tensor to a remote GPU synchronously.
    The function assumes each process owns more than one GPU, and that the
    sender and receiver processes have an equal number of GPUs.
Args:
tensor: the tensor to send, located on a GPU.
dst_rank (int): the rank of the destination process.
dst_gpu_index (int): the destination gpu index.
group_name (str): the name of the collective group.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("send_multigpu call requires NCCL.")
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
_check_rank_valid(g, dst_rank)
if dst_rank == g.rank:
raise RuntimeError("The dst_rank '{}' is self. Considering "
"doing GPU to GPU memcpy instead?".format(dst_rank))
opts = types.SendOptions()
opts.dst_rank = dst_rank
opts.dst_gpu_index = dst_gpu_index
g.send([tensor], opts)
def recv(tensor, src_rank: int, group_name: str = "default"):
"""Receive a tensor from a remote process synchronously.
Args:
tensor: the received tensor.
src_rank (int): the rank of the source process.
group_name (str): the name of the collective group.
Returns:
None
"""
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
_check_rank_valid(g, src_rank)
if src_rank == g.rank:
raise RuntimeError(
"The destination rank '{}' is self.".format(src_rank))
opts = types.RecvOptions()
opts.src_rank = src_rank
g.recv([tensor], opts)
def recv_multigpu(tensor,
src_rank: int,
src_gpu_index: int,
group_name: str = "default"):
"""Receive a tensor from a remote GPU synchronously.
    The function assumes each process owns more than one GPU, and that the
    sender and receiver processes have an equal number of GPUs.
Args:
tensor: the received tensor, located on a GPU.
src_rank (int): the rank of the source process.
src_gpu_index (int): the index of the source gpu on the src process.
group_name (str): the name of the collective group.
Returns:
None
"""
if not types.cupy_available():
raise RuntimeError("recv_multigpu call requires NCCL.")
_check_single_tensor_input(tensor)
g = _check_and_get_group(group_name)
_check_rank_valid(g, src_rank)
if src_rank == g.rank:
raise RuntimeError("The dst_rank '{}' is self. Considering "
"doing GPU to GPU memcpy instead?".format(src_rank))
opts = types.RecvOptions()
opts.src_rank = src_rank
opts.src_gpu_index = src_gpu_index
g.recv([tensor], opts)
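# --- Illustrative point-to-point pairing (assumption, not original code):
# --- rank 0 sends the GPU tensor `t` to rank 1's GPU 0. Both sides must post
# --- the matching call, otherwise the group blocks; `t` and `col` are the
# --- hypothetical names used in the sketches above.
#
# # on rank 0:
# col.send_multigpu(t, dst_rank=1, dst_gpu_index=0, group_name="default")
# # on rank 1:
# col.recv_multigpu(t, src_rank=0, src_gpu_index=0, group_name="default")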
def _check_and_get_group(group_name):
"""Check the existence and return the group handle."""
_check_inside_actor()
global _group_mgr
if not is_group_initialized(group_name):
# try loading from remote info store
try:
# if the information is stored in an Info object,
# get and create the group.
name = "info_" + group_name
mgr = ray.get_actor(name=name)
ids, world_size, rank, backend = ray.get(mgr.get_info.remote())
worker = ray.worker.global_worker
id_ = worker.core_worker.get_actor_id()
r = rank[ids.index(id_)]
_group_mgr.create_collective_group(backend, world_size, r,
group_name)
except ValueError as exc:
# check if this group is initialized using options()
if "collective_group_name" in os.environ and \
os.environ["collective_group_name"] == group_name:
rank = int(os.environ["collective_rank"])
world_size = int(os.environ["collective_world_size"])
backend = os.environ["collective_backend"]
_group_mgr.create_collective_group(backend, world_size, rank,
group_name)
else:
raise RuntimeError(
"The collective group '{}' is not "
"initialized in the process.".format(group_name)) from exc
g = _group_mgr.get_group_by_name(group_name)
return g
def _check_single_tensor_input(tensor):
"""Check if the tensor is with a supported type."""
if isinstance(tensor, np.ndarray):
return
if types.cupy_available():
if isinstance(tensor, types.cp.ndarray):
return
if types.torch_available():
if isinstance(tensor, types.th.Tensor):
return
raise RuntimeError("Unrecognized tensor type '{}'. Supported types are: "
"np.ndarray, torch.Tensor, cupy.ndarray.".format(
type(tensor)))
def _check_backend_availability(backend: types.Backend):
"""Check whether the backend is available."""
if backend == types.Backend.GLOO:
if not gloo_available():
raise RuntimeError("GLOO is not available.")
elif backend == types.Backend.NCCL:
if not nccl_available():
raise RuntimeError("NCCL is not available.")
def _check_inside_actor():
"""Check if currently it is inside a Ray actor/task."""
worker = ray.worker.global_worker
if worker.mode == ray.WORKER_MODE:
return
else:
raise RuntimeError("The collective APIs shall be only used inside "
"a Ray actor or task.")
def _check_rank_valid(g, rank: int):
"""Check the rank: 0 <= rank < world_size."""
if rank < 0:
raise ValueError("rank '{}' is negative.".format(rank))
if rank >= g.world_size:
raise ValueError("rank '{}' must be less than world size "
"'{}'".format(rank, g.world_size))
def _check_tensor_list_input(tensor_list):
"""Check if the input is a list of supported tensor types."""
if not isinstance(tensor_list, list):
raise RuntimeError("The input must be a list of tensors. "
"Got '{}'.".format(type(tensor_list)))
if not tensor_list:
raise RuntimeError("Got an empty list of tensors.")
for t in tensor_list:
_check_single_tensor_input(t)
def _check_tensor_lists_input(tensor_lists):
"""Check if the input is a list of lists of supported tensor types."""
if not isinstance(tensor_lists, list):
raise RuntimeError("The input must be a list of lists of tensors. "
"Got '{}'.".format(type(tensor_lists)))
if not tensor_lists:
raise RuntimeError(f"Did not receive tensors. Got: {tensor_lists}")
for t in tensor_lists:
_check_tensor_list_input(t)
def _check_root_tensor_valid(length, root_tensor):
"""Check the root_tensor device is 0 <= root_tensor < length"""
if root_tensor < 0:
raise ValueError("root_tensor '{}' is negative.".format(root_tensor))
if root_tensor >= length:
raise ValueError(
"root_tensor '{}' is greater than the number of GPUs: "
"'{}'".format(root_tensor, length))
| []
| []
| [
"collective_backend",
"collective_rank",
"collective_group_name",
"collective_world_size"
]
| [] | ["collective_backend", "collective_rank", "collective_group_name", "collective_world_size"] | python | 4 | 0 | |
Kotlin.ideplugin/Contents/Resources/konan_lldb_config.py |
import lldb
import struct
import os
os.environ['CLIENT_TYPE'] = 'Xcode' | []
| []
| [
"CLIENT_TYPE"
]
| [] | ["CLIENT_TYPE"] | python | 1 | 0 | |
qa/pull-tester/rpc-tests.py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
rpc-tests.py - run regression test suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
RPC tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'dip3-deterministicmns.py', # NOTE: needs rhypton_hash to pass
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py', # NOTE: needs rhypton_hash to pass
'fundrawtransaction.py',
'fundrawtransaction-hd.py',
'p2p-autoinstantsend.py',
'autois-mempool.py',
# vv Tests less than 2m vv
'p2p-instantsend.py',
'wallet.py',
'wallet-accounts.py',
'wallet-dump.py',
'listtransactions.py',
'multikeysporks.py',
'llmq-signing.py', # NOTE: needs rhypton_hash to pass
'llmq-chainlocks.py', # NOTE: needs rhypton_hash to pass
'llmq-simplepose.py', # NOTE: needs rhypton_hash to pass
'llmq-is-cl-conflicts.py', # NOTE: needs rhypton_hash to pass
'llmq-dkgerrors.py', # NOTE: needs rhypton_hash to pass
'dip4-coinbasemerkleroots.py', # NOTE: needs rhypton_hash to pass
# vv Tests less than 60s vv
'sendheaders.py', # NOTE: needs rhypton_hash to pass
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'keypool-hd.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs rhypton_hash to pass
'invalidtxrequest.py', # NOTE: needs rhypton_hash to pass
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'p2p-compactblocks.py',
'sporks.py',
'p2p-fingerprint.py',
]
ZMQ_SCRIPTS = [
# ZMQ test can only be run if RHYPTON Core was built with zmq-enabled.
# call rpc_tests.py with -nozmq to explicitly exclude these tests.
"zmq_test.py"]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
# 'pruning.py', # Prune mode is incompatible with -txindex.
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs rhypton_hash to pass
'bipdersig-p2p.py', # NOTE: needs rhypton_hash to pass
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs rhypton_hash to pass
]
ALL_SCRIPTS = BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [rpc-test.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated list of scripts to exclude. Do not include the .py extension in the name.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
args, unknown_args = parser.parse_known_args()
# Create a set to store arguments and create the passon string
tests = set(arg for arg in unknown_args if arg[:2] != "--")
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
config.read_file(open(os.path.dirname(__file__) + "/tests_config.ini"))
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if enable_zmq:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Use -nozmq to run without the ZMQ tests."
"To run zmq tests, see dependency info in /qa/README.md.")
raise
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
test_list = [t for t in ALL_SCRIPTS if
(t in tests or re.sub(".py$", "", t) in tests)]
else:
# No individual tests have been specified. Run base tests, and
# optionally ZMQ tests and extended tests.
test_list = BASE_SCRIPTS
if enable_zmq:
test_list += ZMQ_SCRIPTS
if args.extended:
test_list += EXTENDED_SCRIPTS
# TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime
        # (for parallel running efficiency). This combined list is no
        # longer sorted.
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
for exclude_test in args.exclude.split(','):
if exclude_test + ".py" in test_list:
test_list.remove(exclude_test + ".py")
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in rpc-tests.py, or run rpc-tests.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for rpc-tests.py, then print help of the first script and exit.
parser.print_help()
subprocess.check_call((config["environment"]["SRCDIR"] + '/qa/rpc-tests/' + test_list[0]).split() + ['-h'])
sys.exit(0)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]):
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/rhyptond' + exeext
tests_dir = src_dir + '/qa/rpc-tests/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s/qa/cache" % build_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
print("Initializing coverage directory at %s\n" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
#Run Tests
all_passed = True
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(jobs, tests_dir, test_list, flags)
max_len_name = len(max(test_list, key=len))
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((self.tests_dir + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| []
| []
| [
"BITCOIND"
]
| [] | ["BITCOIND"] | python | 1 | 0 | |
internal/version/version.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The version package permits running a specific version of Go.
package version
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"crypto/sha256"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"os/user"
"path"
"path/filepath"
"runtime"
"strings"
"time"
)
func init() {
http.DefaultTransport = &userAgentTransport{http.DefaultTransport}
}
// Run runs the "go" tool of the provided Go version.
func Run(version string) {
log.SetFlags(0)
root, err := goroot(version)
if err != nil {
log.Fatalf("%s: %v", version, err)
}
if len(os.Args) == 2 && os.Args[1] == "download" {
if err := install(root, version); err != nil {
log.Fatalf("%s: download failed: %v", version, err)
}
os.Exit(0)
}
if _, err := os.Stat(filepath.Join(root, unpackedOkay)); err != nil {
log.Fatalf("%s: not downloaded. Run '%s download' to install to %v", version, version, root)
}
runGo(root)
}
func runGo(root string) {
gobin := filepath.Join(root, "bin", "go"+exe())
cmd := exec.Command(gobin, os.Args[1:]...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
newPath := filepath.Join(root, "bin")
if p := os.Getenv("PATH"); p != "" {
newPath += string(filepath.ListSeparator) + p
}
cmd.Env = dedupEnv(caseInsensitiveEnv, append(os.Environ(), "GOROOT="+root, "PATH="+newPath))
handleSignals()
if err := cmd.Run(); err != nil {
// TODO: return the same exit status maybe.
os.Exit(1)
}
os.Exit(0)
}
// install installs a version of Go to the named target directory, creating the
// directory as needed.
func install(targetDir, version string) error {
if _, err := os.Stat(filepath.Join(targetDir, unpackedOkay)); err == nil {
log.Printf("%s: already downloaded in %v", version, targetDir)
return nil
}
if err := os.MkdirAll(targetDir, 0755); err != nil {
return err
}
goURL := versionArchiveURL(version)
res, err := http.Head(goURL)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
return fmt.Errorf("no binary release of %v for %v/%v at %v", version, getOS(), runtime.GOARCH, goURL)
}
if res.StatusCode != http.StatusOK {
return fmt.Errorf("server returned %v checking size of %v", http.StatusText(res.StatusCode), goURL)
}
base := path.Base(goURL)
archiveFile := filepath.Join(targetDir, base)
if fi, err := os.Stat(archiveFile); err != nil || fi.Size() != res.ContentLength {
if err != nil && !os.IsNotExist(err) {
// Something weird. Don't try to download.
return err
}
if err := copyFromURL(archiveFile, goURL); err != nil {
return fmt.Errorf("error downloading %v: %v", goURL, err)
}
fi, err = os.Stat(archiveFile)
if err != nil {
return err
}
if fi.Size() != res.ContentLength {
return fmt.Errorf("downloaded file %s size %v doesn't match server size %v", archiveFile, fi.Size(), res.ContentLength)
}
}
wantSHA, err := slurpURLToString(goURL + ".sha256")
if err != nil {
return err
}
if err := verifySHA256(archiveFile, strings.TrimSpace(wantSHA)); err != nil {
return fmt.Errorf("error verifying SHA256 of %v: %v", archiveFile, err)
}
log.Printf("Unpacking %v ...", archiveFile)
if err := unpackArchive(targetDir, archiveFile); err != nil {
return fmt.Errorf("extracting archive %v: %v", archiveFile, err)
}
if err := ioutil.WriteFile(filepath.Join(targetDir, unpackedOkay), nil, 0644); err != nil {
return err
}
log.Printf("Success. You may now run '%v'", version)
return nil
}
// unpackArchive unpacks the provided archive zip or tar.gz file to targetDir,
// removing the "go/" prefix from file entries.
func unpackArchive(targetDir, archiveFile string) error {
switch {
case strings.HasSuffix(archiveFile, ".zip"):
return unpackZip(targetDir, archiveFile)
case strings.HasSuffix(archiveFile, ".tar.gz"):
return unpackTarGz(targetDir, archiveFile)
default:
return errors.New("unsupported archive file")
}
}
// unpackTarGz is the tar.gz implementation of unpackArchive.
func unpackTarGz(targetDir, archiveFile string) error {
r, err := os.Open(archiveFile)
if err != nil {
return err
}
defer r.Close()
madeDir := map[string]bool{}
zr, err := gzip.NewReader(r)
if err != nil {
return err
}
tr := tar.NewReader(zr)
for {
f, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if !validRelPath(f.Name) {
return fmt.Errorf("tar file contained invalid name %q", f.Name)
}
rel := filepath.FromSlash(strings.TrimPrefix(f.Name, "go/"))
abs := filepath.Join(targetDir, rel)
fi := f.FileInfo()
mode := fi.Mode()
switch {
case mode.IsRegular():
// Make the directory. This is redundant because it should
// already be made by a directory entry in the tar
// beforehand. Thus, don't check for errors; the next
// write will fail with the same error.
dir := filepath.Dir(abs)
if !madeDir[dir] {
if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil {
return err
}
madeDir[dir] = true
}
wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm())
if err != nil {
return err
}
n, err := io.Copy(wf, tr)
if closeErr := wf.Close(); closeErr != nil && err == nil {
err = closeErr
}
if err != nil {
return fmt.Errorf("error writing to %s: %v", abs, err)
}
if n != f.Size {
return fmt.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size)
}
if !f.ModTime.IsZero() {
if err := os.Chtimes(abs, f.ModTime, f.ModTime); err != nil {
// benign error. Gerrit doesn't even set the
// modtime in these, and we don't end up relying
// on it anywhere (the gomote push command relies
// on digests only), so this is a little pointless
// for now.
log.Printf("error changing modtime: %v", err)
}
}
case mode.IsDir():
if err := os.MkdirAll(abs, 0755); err != nil {
return err
}
madeDir[abs] = true
default:
return fmt.Errorf("tar file entry %s contained unsupported file type %v", f.Name, mode)
}
}
return nil
}
// unpackZip is the zip implementation of unpackArchive.
func unpackZip(targetDir, archiveFile string) error {
zr, err := zip.OpenReader(archiveFile)
if err != nil {
return err
}
defer zr.Close()
for _, f := range zr.File {
name := strings.TrimPrefix(f.Name, "go/")
outpath := filepath.Join(targetDir, name)
if f.FileInfo().IsDir() {
if err := os.MkdirAll(outpath, 0755); err != nil {
return err
}
continue
}
rc, err := f.Open()
if err != nil {
return err
}
// File
if err := os.MkdirAll(filepath.Dir(outpath), 0755); err != nil {
return err
}
out, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
_, err = io.Copy(out, rc)
rc.Close()
if err != nil {
out.Close()
return err
}
if err := out.Close(); err != nil {
return err
}
}
return nil
}
// verifySHA256 reports whether the named file has contents with
// SHA-256 of the given wantHex value.
func verifySHA256(file, wantHex string) error {
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
hash := sha256.New()
if _, err := io.Copy(hash, f); err != nil {
return err
}
if fmt.Sprintf("%x", hash.Sum(nil)) != wantHex {
return fmt.Errorf("%s corrupt? does not have expected SHA-256 of %v", file, wantHex)
}
return nil
}
// slurpURLToString downloads the given URL and returns it as a string.
func slurpURLToString(url_ string) (string, error) {
res, err := http.Get(url_)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("%s: %v", url_, res.Status)
}
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", fmt.Errorf("reading %s: %v", url_, err)
}
return string(slurp), nil
}
// copyFromURL downloads srcURL to dstFile.
func copyFromURL(dstFile, srcURL string) (err error) {
f, err := os.Create(dstFile)
if err != nil {
return err
}
defer func() {
if err != nil {
f.Close()
os.Remove(dstFile)
}
}()
c := &http.Client{
Transport: &userAgentTransport{&http.Transport{
// It's already compressed. Prefer accurate ContentLength.
// (Not that GCS would try to compress it, though)
DisableCompression: true,
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
}},
}
res, err := c.Get(srcURL)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return errors.New(res.Status)
}
pw := &progressWriter{w: f, total: res.ContentLength}
n, err := io.Copy(pw, res.Body)
if err != nil {
return err
}
if res.ContentLength != -1 && res.ContentLength != n {
return fmt.Errorf("copied %v bytes; expected %v", n, res.ContentLength)
}
pw.update() // 100%
return f.Close()
}
type progressWriter struct {
w io.Writer
n int64
total int64
last time.Time
}
func (p *progressWriter) update() {
end := " ..."
if p.n == p.total {
end = ""
}
fmt.Fprintf(os.Stderr, "Downloaded %5.1f%% (%*d / %d bytes)%s\n",
(100.0*float64(p.n))/float64(p.total),
ndigits(p.total), p.n, p.total, end)
}
func ndigits(i int64) int {
var n int
for ; i != 0; i /= 10 {
n++
}
return n
}
func (p *progressWriter) Write(buf []byte) (n int, err error) {
n, err = p.w.Write(buf)
p.n += int64(n)
if now := time.Now(); now.Unix() != p.last.Unix() {
p.update()
p.last = now
}
return
}
// getOS returns runtime.GOOS. It exists as a function just for lazy
// testing of the Windows zip path when running on Linux/Darwin.
func getOS() string {
return runtime.GOOS
}
// versionArchiveURL returns the zip or tar.gz URL of the given Go version.
func versionArchiveURL(version string) string {
goos := getOS()
ext := ".tar.gz"
if goos == "windows" {
ext = ".zip"
}
arch := runtime.GOARCH
if goos == "linux" && runtime.GOARCH == "arm" {
arch = "armv6l"
}
return "https://dl.google.com/go/" + version + "." + goos + "-" + arch + ext
}
const caseInsensitiveEnv = runtime.GOOS == "windows"
// unpackedOkay is a sentinel zero-byte file to indicate that the Go
// version was downloaded and unpacked successfully.
const unpackedOkay = ".unpacked-success"
func exe() string {
if runtime.GOOS == "windows" {
return ".exe"
}
return ""
}
func goroot(version string) (string, error) {
root := os.Getenv("GODLROOT")
if len(root) != 0 {
return root, nil
}
home, err := homedir()
if err != nil {
return "", fmt.Errorf("failed to get home directory: %v", err)
}
return filepath.Join(home, ".godl", version), nil
}
func homedir() (string, error) {
// This could be replaced with os.UserHomeDir, but it was introduced too
// recently, and we want this to work with go as packaged by Linux
// distributions. Note that user.Current is not enough as it does not
// prioritize $HOME. See also Issue 26463.
switch getOS() {
case "plan9":
return "", fmt.Errorf("%q not yet supported", runtime.GOOS)
case "windows":
if dir := os.Getenv("USERPROFILE"); dir != "" {
return dir, nil
}
return "", errors.New("can't find user home directory; %USERPROFILE% is empty")
default:
if dir := os.Getenv("HOME"); dir != "" {
return dir, nil
}
if u, err := user.Current(); err == nil && u.HomeDir != "" {
return u.HomeDir, nil
}
return "", errors.New("can't find user home directory; $HOME is empty")
}
}
func validRelPath(p string) bool {
if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") {
return false
}
return true
}
type userAgentTransport struct {
rt http.RoundTripper
}
func (uat userAgentTransport) RoundTrip(r *http.Request) (*http.Response, error) {
version := runtime.Version()
if strings.Contains(version, "devel") {
// Strip the SHA hash and date. We don't want spaces or other tokens (see RFC2616 14.43)
version = "devel"
}
r.Header.Set("User-Agent", "golang-x-build-version/"+version)
return uat.rt.RoundTrip(r)
}
// dedupEnv returns a copy of env with any duplicates removed, in favor of
// later values.
// Items are expected to be on the normal environment "key=value" form.
// If caseInsensitive is true, the case of keys is ignored.
//
// This function is unnecessary when the binary is
// built with Go 1.9+, but keep it around for now until Go 1.8
// is no longer seen in the wild in common distros.
//
// This is copied verbatim from golang.org/x/build/envutil.Dedup at CL 10301
// (commit a91ae26).
func dedupEnv(caseInsensitive bool, env []string) []string {
out := make([]string, 0, len(env))
saw := map[string]int{} // to index in the array
for _, kv := range env {
eq := strings.Index(kv, "=")
if eq < 1 {
out = append(out, kv)
continue
}
k := kv[:eq]
if caseInsensitive {
k = strings.ToLower(k)
}
if dupIdx, isDup := saw[k]; isDup {
out[dupIdx] = kv
} else {
saw[k] = len(out)
out = append(out, kv)
}
}
return out
}
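// Worked example (illustrative, not from the original source): with
// caseInsensitive == false,
//
//	dedupEnv(false, []string{"A=1", "B=2", "A=3"})
//
// returns ["A=3", "B=2"]: the later value wins, but it keeps the position of
// the first occurrence of its key.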
func handleSignals() {
// Ensure that signals intended for the child process are not handled by
// this process' runtime (e.g. SIGQUIT). See issue #36976.
signal.Notify(make(chan os.Signal), signalsToIgnore...)
}
| [
"\"PATH\"",
"\"GODLROOT\"",
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"HOME",
"GODLROOT",
"PATH",
"USERPROFILE"
]
| [] | ["HOME", "GODLROOT", "PATH", "USERPROFILE"] | go | 4 | 0 | |
src/main/java/net/jacobpeterson/util/OSUtil.java | package net.jacobpeterson.util;
import java.io.File;
public class OSUtil {
private static OS os = OS.UNKNOWN;
static {
String osName = System.getProperty("os.name").toLowerCase();
if (osName.contains("mac")) {
os = OS.MAC;
} else if (osName.contains("windows")) {
os = OS.WINDOWS;
} else if (osName.contains("linux")) {
os = OS.LINUX;
}
}
public static File getApplicationDataDirectory() {
if (os == OS.MAC) {
return new File(System.getProperty("user.home"), "Library/Application Support/");
} else if (os == OS.WINDOWS) {
return new File(System.getenv("APPDATA"));
} else {
return new File(System.getProperty("user.home"));
}
}
public static OS getOS() {
return os;
}
enum OS {
MAC,
WINDOWS,
LINUX,
UNKNOWN
}
}
| [
"\"APPDATA\""
]
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | java | 1 | 0 | |
model.go | // Copyright 2016 Matthew Baird
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glik
import (
"fmt"
"github.com/gorilla/websocket"
"io/ioutil"
"os"
"strings"
"time"
)
var (
connectTimeOut = time.Duration(30 * time.Second)
readWriteTimeout = time.Duration(30 * time.Second)
)
const DEFAULT_SERVER = "192.168.99.5"
const CRLF = "\r\n"
const DEFAULT_USER = "atscale"
const DEFAULT_DIR = "WIN8-VBOX"
const DEFAULT_QRS_PORT = 4242
const DEFAULT_AUTH_PORT = 4243
const DEFAULT_WEBSOCKET_PORT = 4747
type API struct {
Server string
QrsPort int
AuthPort int
WebsocketPort int
Version string
Directory string
QlikUser string
ClientKey string
ClientCert string
XrfKey string
CertAuth string
WebsocketConnection *websocket.Conn
}
func DefaultApi() API {
certLocation := os.Getenv("atscale_http_sslcert")
if len(certLocation) == 0 {
certLocation = "client.pem"
}
keyLocation := os.Getenv("atscale_http_sslkey")
if len(keyLocation) == 0 {
keyLocation = "client_key.pem"
}
caFileLocation := os.Getenv("atscale_ca_file")
if len(caFileLocation) == 0 {
caFileLocation = "root.pem"
}
api := NewAPI(DEFAULT_SERVER, DEFAULT_DIR, DEFAULT_USER, DEFAULT_QRS_PORT, DEFAULT_AUTH_PORT, DEFAULT_WEBSOCKET_PORT)
api.SetTLSItemLocations(certLocation, keyLocation, caFileLocation)
return api
}
func (api *API) SetTLSItemLocations(certLocation, keyLocation, caFile string) error {
_, err := ioutil.ReadFile(keyLocation)
if err != nil {
return fmt.Errorf("error reading client key bytes from [%s]:%v\n", keyLocation, err)
}
_, err = ioutil.ReadFile(certLocation)
if err != nil {
return fmt.Errorf("error reading client cert bytes from [%s]:%v\n", certLocation, err)
}
_, err = ioutil.ReadFile(caFile)
if err != nil {
return fmt.Errorf("error reading ca bytes from [%s]:%v\n", caFile, err)
}
api.ClientKey = keyLocation
api.ClientCert = certLocation
api.CertAuth = caFile
return nil
}
func NewAPI(server string, directory, user string, qrsPort, authPort, websocketPort int) API {
fixedUpServer := server
if strings.HasSuffix(server, "/") {
fixedUpServer = server[0 : len(server)-1]
}
api := API{Server: fixedUpServer}
api.QlikUser = user
api.Directory = directory
api.QrsPort = qrsPort
api.AuthPort = authPort
api.WebsocketPort = websocketPort
return api
}
type About struct {
BuildVersion string `json:"buildVersion,omitempty"`
BuildDate string `json:"buildDate,omitempty"`
DatabaseProvider string `json:"databaseProvider,omitempty"`
NodeType int `json:"nodeType,omitempty"`
SchemaPath string `json:"schemaPath,omitempty"`
}
type ApplicationResult struct {
Id string `json:"id,omitempty"`
CreatedDate string `json:"createdDate,omitempty"`
ModifiedDate string `json:"modifiedDate,omitempty"`
ModifiedByUserName string `json:"modifiedByUserName,omitempty"`
CustomProperties []CustomProperty `json:"customProperites,omitempty"`
Owner *Owner `json:"owner,omitempty"`
Name string `json:"name,omitempty"`
AppId string `json:"appId,omitempty"`
PublishTime string `json:"publishTime,omitempty"`
Published bool `json:"published,omitempty"`
Tags []string `json:"tags,omitempty"`
Description string `json:"description,omitempty"`
Stream *Stream `json:"stream,omitempty"`
FileSize int `json:"fileSize,omitempty"`
LastReloadTime string `json:"lastReloadTime,omitempty"`
Thumbnail string `json:"thumbnail,omitempty"`
SavedInProductVersion string `json:"savedInProductVersion,omitempty"`
MigrationHash string `json:"migrationHash,omitempty"`
Privileges *Privileges `json:"privileges,omitempty"`
SchemaPath string `json:"schemaPath,omitempty"`
}
type CustomProperty struct {
}
type Owner struct {
UserId string `json:"userId,omitempty"`
UserDirectory string `json:"userDirectory,omitempty"`
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
}
type Stream struct {
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
Privileges *Privileges `json:"privileges,omitempty"`
}
type Privileges struct {
}
| [
"\"atscale_http_sslcert\"",
"\"atscale_http_sslkey\"",
"\"atscale_ca_file\""
]
| []
| [
"atscale_http_sslkey",
"atscale_ca_file",
"atscale_http_sslcert"
]
| [] | ["atscale_http_sslkey", "atscale_ca_file", "atscale_http_sslcert"] | go | 3 | 0 | |
train.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, [3]))
import time
import torch
import argparse
import numpy as np
from inference import infer
from utils.util import mode
from hparams import hparams as hps
from torch.utils.data import DataLoader
from utils.logger import Tacotron2Logger
from utils.dataset import ljdataset, ljcollate
from model.model import Tacotron2, Tacotron2Loss
np.random.seed(hps.seed)
torch.manual_seed(hps.seed)
torch.cuda.manual_seed(hps.seed)
def prepare_dataloaders(fdir):
trainset = ljdataset(fdir)
collate_fn = ljcollate(hps.n_frames_per_step)
train_loader = DataLoader(trainset, num_workers = hps.n_workers, shuffle = True,
batch_size = hps.batch_size, pin_memory = hps.pin_mem,
drop_last = True, collate_fn = collate_fn)
return train_loader
def load_checkpoint(ckpt_pth, model, optimizer):
ckpt_dict = torch.load(ckpt_pth)
model.load_state_dict(ckpt_dict['model'])
optimizer.load_state_dict(ckpt_dict['optimizer'])
iteration = ckpt_dict['iteration']
return model, optimizer, iteration
def save_checkpoint(model, optimizer, iteration, ckpt_pth):
torch.save({'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'iteration': iteration}, ckpt_pth)
def train(args):
# build model
model = Tacotron2()
mode(model, True)
optimizer = torch.optim.Adam(model.parameters(), lr = hps.lr,
betas = hps.betas, eps = hps.eps,
weight_decay = hps.weight_decay)
criterion = Tacotron2Loss()
# load checkpoint
iteration = 1
if args.ckpt_pth != '':
model, optimizer, iteration = load_checkpoint(args.ckpt_pth, model, optimizer)
iteration += 1 # next iteration is iteration+1
# get scheduler
if hps.sch:
lr_lambda = lambda step: hps.sch_step**0.5*min((step+1)*hps.sch_step**-1.5, (step+1)**-0.5)
if args.ckpt_pth != '':
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch = iteration)
else:
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
# make dataset
train_loader = prepare_dataloaders(args.data_dir)
# get logger ready
if args.log_dir != '':
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
os.chmod(args.log_dir, 0o775)
logger = Tacotron2Logger(args.log_dir)
# get ckpt_dir ready
if args.ckpt_dir != '' and not os.path.isdir(args.ckpt_dir):
os.makedirs(args.ckpt_dir)
os.chmod(args.ckpt_dir, 0o775)
model.train()
	# ================ MAIN TRAINING LOOP! ===================
while iteration <= hps.max_iter:
for batch in train_loader:
if iteration > hps.max_iter:
break
start = time.perf_counter()
x, y = model.parse_batch(batch)
y_pred = model(x)
# loss
loss, item = criterion(y_pred, y, iteration)
# zero grad
model.zero_grad()
# backward, grad_norm, and update
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hps.grad_clip_thresh)
optimizer.step()
if hps.sch:
scheduler.step()
# info
dur = time.perf_counter()-start
print('Iter: {} Loss: {:.2e} Grad Norm: {:.2e} {:.1f}s/it'.format(
iteration, item, grad_norm, dur))
# log
if args.log_dir != '' and (iteration % hps.iters_per_log == 0):
learning_rate = optimizer.param_groups[0]['lr']
logger.log_training(item, grad_norm, learning_rate, iteration)
# sample
if args.log_dir != '' and (iteration % hps.iters_per_sample == 0):
model.eval()
output = infer(hps.eg_text, model)
model.train()
logger.sample_training(output, iteration)
# save ckpt
if args.ckpt_dir != '' and (iteration % hps.iters_per_ckpt == 0):
ckpt_pth = os.path.join(args.ckpt_dir, 'ckpt_{}'.format(iteration))
save_checkpoint(model, optimizer, iteration, ckpt_pth)
iteration += 1
if args.log_dir != '':
logger.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# path
parser.add_argument('-d', '--data_dir', type = str, default = 'data',
help = 'directory to load data')
parser.add_argument('-l', '--log_dir', type = str, default = 'log',
help = 'directory to save tensorboard logs')
parser.add_argument('-cd', '--ckpt_dir', type = str, default = 'ckpt',
help = 'directory to save checkpoints')
parser.add_argument('-cp', '--ckpt_pth', type = str, default = '',
help = 'path to load checkpoints')
args = parser.parse_args()
torch.backends.cudnn.enabled = True
	torch.backends.cudnn.benchmark = False # benchmark disabled: input shapes vary between batches
train(args)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tests/UsersClientTest.py | from okta import UsersClient
from okta.models.user import User
import random
import unittest
import os
class UsersClientTest(unittest.TestCase):
def setUp(self):
self.client = UsersClient(os.environ.get('OKTA_TEST_URL'), os.environ.get('OKTA_TEST_KEY'))
def test_paging(self):
users = self.client.get_paged_users(limit=1)
first_page_hit = subsequent_page_hit = False
for user in users.result:
first_page_hit = True
while not users.is_last_page():
users = self.client.get_paged_users(url=users.next_url)
for user in users.result:
subsequent_page_hit = True
self.assertTrue(first_page_hit and subsequent_page_hit, "The first and subsequent pages weren't hit")
def test_single_user(self):
user = User(login='fake' + str(random.random()) + '@asdf.com',
email='[email protected]',
firstName='Joe',
lastName='Schmoe')
user = self.client.create_user(user, activate=False)
self.assertEqual(user.status, "STAGED", "User should be staged")
user = User(login='fake' + str(random.random()) + '@asdf.com',
email='[email protected]',
firstName='Joe',
lastName='Schmoe')
user = self.client.create_user(user, activate=True)
self.assertEqual(user.status, "PROVISIONED", "User should be provisioned") | []
| []
| [
"OKTA_TEST_URL",
"OKTA_TEST_KEY"
]
| [] | ["OKTA_TEST_URL", "OKTA_TEST_KEY"] | python | 2 | 0 | |
Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/115_poposta_de_projeto-calculando-redes-ipv4/classes/calcipv4.py | import re
class CalcIPv4:
def __init__(self, ip, mascara=None, prefixo=None):
self.ip = ip
self.mascara = mascara
self.prefixo = prefixo
@property
def ip(self):
return self._ip
@property
def mascara(self):
return self._mascara
@property
def prefixo(self):
return self._prefixo
@ip.setter
def ip(self, valor):
self._valida_ip(valor)
        self._ip = valor
@mascara.setter
def mascara(self, valor):
self._mascara = valor
@prefixo.setter
def prefixo(self, valor):
self._prefixo = valor
@staticmethod
def _valida_ip(ip):
        regexp = re.compile(
            r'^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$'
        )
        if not regexp.search(ip):
            raise ValueError(f'Invalid IP: {ip}')
| []
| []
| []
| [] | [] | python | null | null | null |
service/iam/api_op_DeleteSAMLProvider.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package iam
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
"github.com/aws/aws-sdk-go-v2/private/protocol"
"github.com/aws/aws-sdk-go-v2/private/protocol/query"
)
type DeleteSAMLProviderInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the SAML provider to delete.
//
// SAMLProviderArn is a required field
SAMLProviderArn *string `min:"20" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteSAMLProviderInput) String() string {
return awsutil.Prettify(s)
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteSAMLProviderInput) Validate() error {
invalidParams := aws.ErrInvalidParams{Context: "DeleteSAMLProviderInput"}
if s.SAMLProviderArn == nil {
invalidParams.Add(aws.NewErrParamRequired("SAMLProviderArn"))
}
if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 {
invalidParams.Add(aws.NewErrParamMinLen("SAMLProviderArn", 20))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
type DeleteSAMLProviderOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteSAMLProviderOutput) String() string {
return awsutil.Prettify(s)
}
const opDeleteSAMLProvider = "DeleteSAMLProvider"
// DeleteSAMLProviderRequest returns a request value for making API operation for
// AWS Identity and Access Management.
//
// Deletes a SAML provider resource in IAM.
//
// Deleting the provider resource from IAM does not update any roles that reference
// the SAML provider resource's ARN as a principal in their trust policies.
// Any attempt to assume a role that references a non-existent provider resource
// ARN fails.
//
// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
//
// // Example sending a request using DeleteSAMLProviderRequest.
// req := client.DeleteSAMLProviderRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSAMLProvider
func (c *Client) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) DeleteSAMLProviderRequest {
op := &aws.Operation{
Name: opDeleteSAMLProvider,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteSAMLProviderInput{}
}
req := c.newRequest(op, input, &DeleteSAMLProviderOutput{})
req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
return DeleteSAMLProviderRequest{Request: req, Input: input, Copy: c.DeleteSAMLProviderRequest}
}
// DeleteSAMLProviderRequest is the request type for the
// DeleteSAMLProvider API operation.
type DeleteSAMLProviderRequest struct {
*aws.Request
Input *DeleteSAMLProviderInput
Copy func(*DeleteSAMLProviderInput) DeleteSAMLProviderRequest
}
// Send marshals and sends the DeleteSAMLProvider API request.
func (r DeleteSAMLProviderRequest) Send(ctx context.Context) (*DeleteSAMLProviderResponse, error) {
r.Request.SetContext(ctx)
err := r.Request.Send()
if err != nil {
return nil, err
}
resp := &DeleteSAMLProviderResponse{
DeleteSAMLProviderOutput: r.Request.Data.(*DeleteSAMLProviderOutput),
response: &aws.Response{Request: r.Request},
}
return resp, nil
}
// DeleteSAMLProviderResponse is the response type for the
// DeleteSAMLProvider API operation.
type DeleteSAMLProviderResponse struct {
*DeleteSAMLProviderOutput
response *aws.Response
}
// SDKResponseMetdata returns the response metadata for the
// DeleteSAMLProvider request.
func (r *DeleteSAMLProviderResponse) SDKResponseMetdata() *aws.Response {
return r.response
}
| []
| []
| []
| [] | [] | go | null | null | null |
lib/scitools/PrmDictBase.py | #!/usr/bin/env python
"""
Module for managing parameters.
"""
import re, os, sys
import collections
import numbers
def message(m):
if os.environ.get('DEBUG', '0') == '1':
print m
class PrmDictBase(object):
"""
Base class for managing parameters stored in dictionaries.
Typical use includes data or solver classes for solving physical
problems numerically. One may then choose to hold all physical
parameters in a dictionary physical_prm, containing
(parameter name, value) pairs, and all numerical parameters in
a dictionary numerical_prm. The physical_prm and numerical_prm
dictionaries can then be defined in a subclass of PrmDictBase
and managed by PrmDictBase. The management includes several
convenient features:
- keeping all input data in one place
- setting of one or more parameters where the type of the value
must match the type of the previous (initial) value
- pretty print of all defined parameters
- copying parameters from dictionaries to, e.g., local variables
and back again, or to local namespaces and back again
- easy transition from parameter dictionaries to more sophisticated
handling of input data, e.g., class scitools.ParameterInterface
(GUI, CGI, command-line args)
The subclass typically defines the dictionaries, say
self.physical_prm and self.numerical_prm. Then these are
appended to the inherited self._prm_list list to be registered.
All members of this list are dictionaries that will not accept
new keys (i.e., all parameters must be defined prior to registering
them in self._prm_list). With this list one has a collection of all
parameter dictionaries in the application.
self._type_check[prm] is defined if we want to type check
a parameter prm.
if self._type_check[prm] is True (or False), prm must either
be None, of the same type as the previously registered
value of prm, or any number (float, int, complex) if
the previous value prm was any number. Instead of a boolean
value, self._type_check[prm] may hold a tuple of class types
(to be used in isinstance checks), or a function which takes
    the value as argument and returns True if that value is
    of the right type (otherwise False).
In addition to the parameter dictionaries with fixed keys, class
PrmDictBase also holds a self.user_prm, which is a dictionary
of "meta data", i.e., an arbitrary set of keys and values that
    can be arbitrarily extended anywhere. If self.user_prm is None,
    no such meta data can exist (implying that only parameters
registered in the dictionaries in self._prm_list are allowed - the
programmer of subclasses can of course extend these parameter
sets whenever desired; disallowing a parameter name is only a
feature of the set function for setting the value of a (registered)
parameter).
Here is an example::
from scitools.PrmDictBase import PrmDictBase
class SomeSolver(PrmDictBase):
def __init__(self, **kwargs):
PrmDictBase.__init__(self)
# register parameters in dictionaries:
self.physical_prm = {'density': 1.0, 'Cp': 1.0,
'k': 1.0, 'L': 1.0}
self.numerical_prm = {'n': 10, 'dt': 0.1, 'tstop': 3}
# attach dictionaries to base class list (required):
self._prm_list = [self.physical_prm, self.numerical_prm]
# specify parameters to be type checked when set:
self._type_check.update({'n': True, 'dt': (float,),
                                     'k': lambda k: isinstance(k, (int, float)) and k > 0})
# disallow arbitrary meta data
self.user_prm = None # set to {} if meta data are allowed
# initialize parameters according to keyword arguments:
self.set(**kwargs)
def _update(self):
# dt depends on n, L, k; update dt in case the three
            # other parameters have been changed
# (in general this method is used to check consistency
# between parameters and perform updates if necessary)
n = self.numerical_prm['n']
L = self.physical_prm['L']
k = self.physical_prm['k']
self.u = zeros(n+1, Float)
h = L/float(n)
dt_limit = h**2/(2*k)
if self.numerical_prm['dt'] > dt_limit:
self.numerical_prm['dt'] = dt_limit
def compute1(self):
# compute something
return self.physical_prm['k']/self.physical_prm['Cp']
def compute2(self):
# turn numerical parameters into local variables:
exec self.dicts2variables(self._prm_list)
# or exec self.dicts2variables(self.numerical_prm) # selected prms
# now we have local variables n, dt, tstop, density, Cp, k, L
# that we can compute with, say
Q = k/Cp
dt = 0.9*dt
# if some of the local variables are changed, say dt, they must
# be inserted back into the parameter dictionaries:
self.variables2dicts(self.numerical_prm, dt=dt)
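
    A minimal usage sketch of the SomeSolver example above (the parameter
    values are illustrative only, not taken from any real solver)::

        solver = SomeSolver(n=20, dt=0.05)   # set(...) is called in __init__
        solver.dump()                        # pretty-print all parameters
        solver.set(k=2.0, tstop=5)           # type-checked updates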
"""
def __init__(self):
# dicts whose keys are fixed (non-extensible):
self._prm_list = [] # fill in subclass
self.user_prm = None # user's meta data
self._type_check = {} # fill in subclass
def _prm_dict_names(self):
"""Return the name of all self.*_prm dictionaries."""
return [attr for attr in self.__dict__ if \
re.search(r'^[^_].*_prm$', attr)]
def usage(self):
"""Print the name of all parameters that can be set."""
prm_dict_names = self._prm_dict_names()
prm_names = []
for name in prm_dict_names:
d = self.__dict__[name]
if isinstance(d, dict):
k = list(d.keys())
k.sort(lambda a,b: cmp(a.lower(),b.lower()))
prm_names += k
print 'registered parameters:\n'
for i in prm_names:
print i
# alternative (sort all in one bunch):
# names = []
# for d in self._prm_list:
# names += list(d.keys())
        # names.sort()
# print names
def dump(self):
"""Dump all parameters and their values."""
for d in self._prm_list:
keys = list(d.keys())
keys.sort(lambda a,b: cmp(a.lower(),b.lower()))
for prm in keys:
print '%s = %s' % (prm, d[prm])
def set(self, **kwargs):
"""Set kwargs data in parameter dictionaries."""
# print usage message if no arguments:
if len(kwargs) == 0:
self.usage()
return
for prm in kwargs:
_set = False
for d in self._prm_list:
if len(list(d.keys())) == 0:
raise ValueError('self._prm_list is wrong (empty)')
try:
if self.set_in_dict(prm, kwargs[prm], d):
_set = True
break
except TypeError, msg:
print msg
#break
sys.exit(1) # type error is fatal
if not _set: # maybe set prm as meta data?
if isinstance(self.user_prm, dict):
# not a registered parameter:
self.user_prm[prm] = kwargs[prm]
message('%s=%s assigned in self.user_prm' % \
(prm, kwargs[prm]))
else:
raise NameError('parameter "%s" not registered' % prm)
self._update()
def set_in_dict(self, prm, value, d):
"""
Set d[prm]=value, but check if prm is registered in class
dictionaries, if the type is acceptable, etc.
"""
can_set = False
# check that prm is a registered key
if prm in d:
if prm in self._type_check:
# prm should be type-checked
if isinstance(self._type_check[prm], (int,float)):
# (bool is subclass of int)
if self._type_check[prm]:
# type check against prev. value or None:
                        if value is None or isinstance(value, type(d[prm])):
can_set = True
# allow mixing int, float, complex:
elif isinstance(value, numbers.Number) and\
isinstance(d[prm], numbers.Number):
can_set = True
elif isinstance(self._type_check[prm], (tuple,list,type)):
# self._type_check[prm] holds either the type or
# a tuple/list of types; test against them
#print 'testing %s=%s against type %s' % (prm,value,self._type_check[prm])
if isinstance(value, self._type_check[prm]):
can_set = True
else:
raise TypeError('\n\n%s=%s: %s has type %s, not %s' % \
(prm, value, prm, self._type_check[prm],
type(value)))
elif callable(self._type_check[prm]):
can_set = self._type_check[prm](value)
else:
raise TypeError('self._type_check["%s"] has an '\
'illegal value %s' % \
(prm, self._type_check[prm]))
else:
can_set = True
else:
message('%s is not registered in\n%s' % (prm, d))
if can_set:
d[prm] = value
message('%s=%s is assigned' % (prm, value))
return True
return False
def _update(self):
"""Check data consistency and make updates."""
# to be implemented in subclasses
pass
def get(self, **kwargs):
return [self._solver_prm[prm] \
for prm in kwargs if prm in self._solver_prm]
def properties(self, global_namespace):
"""Make properties out of local dictionaries."""
for ds in self._prm_dict_names():
d = eval('self.' + ds)
for prm in d: # or for prm in self.__dict__[ds]
# properties cannot have whitespace:
prm = prm.replace(' ', '_')
cmd = '%s.%s = property(fget='\
'lambda self: self.%s["%s"], %s)' % \
(self.__class__.__name__, prm, ds, prm,
' doc="read-only property"')
print cmd
exec(cmd, global_namespace, locals())
def dicts2namespace(self, namespace, dicts, overwrite=True):
"""
Make namespace variables out of dict items.
That is, for all dicts, insert all (key,value) pairs in
the namespace dict.
namespace is a dictionary, dicts is a list of dictionaries.
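
        A minimal sketch (the physical_prm/numerical_prm names follow the
        class docstring example and are assumptions here)::

            ns = {}
            self.dicts2namespace(ns, [self.physical_prm, self.numerical_prm])
            # ns now holds e.g. ns['dt'], ns['n'], ns['k'], ...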
"""
# can be tuned in subclasses
# allow dicts to be a single dictionary:
if not isinstance(dicts, (list,tuple)):
dicts = [dicts]
for d in dicts:
if overwrite:
namespace.update(d)
else:
for key in d:
if key in namespace and not overwrite:
print 'cannot overwrite %s' % key
else:
namespace[key] = d[key]
def dicts2namespace2(self, namespace, dicts):
"""As dicts2namespace2, but use exec."""
# can be tuned in subclasses
# allow dicts to be a single dictionary:
if not isinstance(dicts, (list,tuple)):
dicts = [dicts]
for d in dicts:
for key in d:
exec('%s=%s' % (key,repr(d[key])), globals(), namespace)
def namespace2dicts(self, namespace, dicts):
"""
Update dicts from variables in a namespace.
That is, for all keys in namespace, insert (key,value) pair
in the dict in dicts that has the same key registered.
namespace is a dictionary, dicts is a list of dictionaries.
"""
# allow dicts to be a single dictionary:
if not isinstance(dicts, (list,tuple)):
dicts = [dicts]
keys = [] # all keys in namespace that are keys in dicts
for key in namespace:
for d in dicts:
if key in d:
d[key] = namespace[key] # update value
keys.append(key) # mark for delete
# clean up what we made in self.dicts2namespace:
for key in keys:
del namespace[key]
def dicts2variables(self, dicts):
"""
Make Python code string that defines local variables from
all parameters in dicts (list of dictionaries of parameters).
For example, if dicts[1] has a key n with value 1.0, the
statement 'n=1.0' will be included in the returned string.
The calling code will typically exec this returned string
to make local variables (short hands) from parameters stored
in dictionaries. (Note that such local variables are read-only,
changing their values will not be reflected in the dictionaries!).
"""
# allow dicts to be a single dictionary:
if not isinstance(dicts, (list,tuple)):
dicts = [dicts]
s = ''
for d in dicts:
for name in d:
s += '%s = %s\n' % (name, d[name])
return s
def variables2dicts(self, dicts, **variables):
"""
Insert the name=value keyword arguments in variables into
the dictionaries in dicts (list of dictionaries).
This is the inverse of the dicts2variables function.
Usage:
exec self.dicts2variables(self.numerical_prm)
# work with read-only n, dt, tstop
...
# update (in case n, dt, tstop was changed):
self.variables2dicts(self.numerical_prm, n=n, dt=dt, tstop=tstop)
"""
for name in variables:
for d in dicts:
if name in d:
d[name] = variables[name]
# initial tests are found in src/py/examples/classdicts.py
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
torch/testing/_internal/common_quantization.py | r"""Importing this file includes common utility methods and base classes for
checking the quantization API and properties of the resulting modules.
"""
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from torch.nn.intrinsic import _FusedModule
import torch.distributed as dist
from torch.testing._internal.common_utils import TestCase
from torch.quantization import QuantWrapper, QuantStub, DeQuantStub, \
default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \
propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \
get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, QConfigDynamic, QuantType
from torch.quantization.quantization_mappings import (
get_default_dynamic_quant_module_mappings,
get_default_qconfig_propagation_list,
get_default_qat_module_mappings,
)
try:
# graph mode quantization based on fx
from torch.quantization.quantize_fx import (
prepare_fx,
prepare_qat_fx,
convert_fx,
)
from torch.quantization.ns.ns_types import NSSingleResultValuesType, NSSubgraph
from torch.fx.graph import Node
from torch.fx import GraphModule
HAS_FX = True
except ImportError:
HAS_FX = False
import copy
import io
import functools
import time
import os
import unittest
import numpy as np
from torch.testing import FileCheck
from typing import Callable, Tuple, Dict, Any, Union
class NodeSpec:
''' Used for checking GraphModule Node
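
    A construction sketch (the targets below are illustrative only)::

        NodeSpec.call_function(torch.quantize_per_tensor)
        NodeSpec.call_module(torch.nn.quantized.Conv2d)
        NodeSpec.call_method('dequantize')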
'''
def __init__(self, op, target):
'''
op: call_function | call_module
target:
for call_function, target would be a function
for call_module, target would be the type of PyTorch module
'''
self.op = op
self.target = target
@classmethod
def call_function(cls, target):
return NodeSpec('call_function', target)
@classmethod
def call_method(cls, target):
return NodeSpec('call_method', target)
@classmethod
def call_module(cls, target):
return NodeSpec('call_module', target)
def __hash__(self):
return hash((self.op, self.target))
def __eq__(self, other):
if not isinstance(other, NodeSpec):
return NotImplemented
return self.op == other.op and self.target == other.target
def __repr__(self):
return repr(self.op) + " " + repr(self.target)
def test_only_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
    input Tensors and runs the model on the dataset
"""
for inp in calib_data:
output = model(*inp)
_default_loss_fn = torch.nn.CrossEntropyLoss()
def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):
r"""
    Default train function takes a torch.utils.data.Dataset and trains the model
on the dataset
"""
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
train_loss, correct, total = 0, 0, 0
for i in range(10):
model.train()
for data, target in train_data:
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return train_loss, correct, total
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):
model.train()
cnt = 0
for image, target in data_loader:
start_time = time.time()
print('.', end='')
cnt += 1
image, target = image.to(device), target.to(device)
output = model(image)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if cnt >= ntrain_batches:
return
return
def ddp_setup(rank, world_size):
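    # Single-node rendezvous settings: every rank connects to the same local
    # TCP endpoint before the process group is initialized below.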
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
def ddp_cleanup():
dist.destroy_process_group()
def run_ddp(rank, world_size, prepared):
ddp_setup(rank, world_size)
prepared.cuda()
prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank])
prepared.to(rank)
model_with_ddp = prepared
optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)
train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)
ddp_cleanup()
def convert_dynamic(module):
convert(module, get_default_dynamic_quant_module_mappings(), inplace=True)
def prepare_dynamic(model, qconfig_dict=None):
propagate_qconfig_(model, qconfig_dict)
def _make_conv_test_input(
batch_size, in_channels_per_group, input_feature_map_size,
out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale,
W_zero_point, use_bias, use_channelwise,
):
in_channels = in_channels_per_group * groups
out_channels = out_channels_per_group * groups
(X_value_min, X_value_max) = (0, 4)
X_init = torch.randint(
X_value_min, X_value_max,
(batch_size, in_channels,) + input_feature_map_size)
X = X_scale * (X_init - X_zero_point).float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)
W_scale = W_scale * out_channels
W_zero_point = W_zero_point * out_channels
    # Truncate the W_scale and W_zero_point arrays to out_channels entries
W_scale = W_scale[:out_channels]
W_zero_point = W_zero_point[:out_channels]
# For testing, we use small values for weights and for activations so that
# no overflow occurs in vpmaddubsw instruction. If the overflow occurs in
    # qconv implementation and if there is no overflow in reference,
    # we can't exactly match the results with reference.
# Please see the comment in qconv implementation file
# aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.
(W_value_min, W_value_max) = (-5, 5)
# The operator expects them in the format
# (out_channels, in_channels/groups,) + kernel_size
W_init = torch.randint(
W_value_min, W_value_max,
(out_channels, in_channels_per_group,) + kernel_size)
b_init = torch.randint(0, 10, (out_channels,))
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernel_size)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
W_q = torch.quantize_per_channel(
W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0,
dtype=torch.qint8)
else:
W = W_scale[0] * (W_init - W_zero_point[0]).float()
b = X_scale * W_scale[0] * b_init.float()
W_q = torch.quantize_per_tensor(
W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8)
return (X, X_q, W, W_q, b if use_bias else None)
def skipIfNoFBGEMM(fn):
reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.'
if isinstance(fn, type):
if 'fbgemm' not in torch.backends.quantized.supported_engines:
fn.__unittest_skip__ = True
fn.__unittest_skip_why__ = reason
return fn
@functools.wraps(fn)
def wrapper(*args, **kwargs):
if 'fbgemm' not in torch.backends.quantized.supported_engines:
raise unittest.SkipTest(reason)
else:
fn(*args, **kwargs)
return wrapper
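# Typical use of skipIfNoFBGEMM (a sketch, not tied to any test below):
#   @skipIfNoFBGEMM
#   def test_something(self):
#       ...
# It can decorate either a TestCase subclass or an individual test method.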
try:
import torchvision # noqa: F401
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
def get_script_module(model, tracing, data):
return torch.jit.trace(model, data) if tracing else torch.jit.script(model)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
"""
Convert lengths to offsets for embedding_bag
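
    A worked example (values are illustrative): lengths [2, 3, 1] produce
    cumulative offsets [0, 2, 5, 6]; with use_begin_offset=True the result is
    [0, 2, 5], otherwise it is [2, 5, 6].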
"""
tt = np.zeros((t.shape[0] + 1,), dtype=offset_type)
tt[1:] = t
tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type))
if use_begin_offset:
return tt[:-1]
return tt[1:]
# QuantizationTestCase used as a base class for testing quantization on modules
class QuantizationTestCase(TestCase):
def setUp(self):
super().setUp()
self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)]
self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)]
self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)]
for _ in range(2)]
self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)]
for _ in range(2)]
self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)]
for _ in range(2)]
self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long)]
for _ in range(2)]
self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long)]
for _ in range(2)]
self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float),
torch.randint(0, 1, (1,), dtype=torch.long)]
for _ in range(2)]
self.img_data_dict = {1 : self.img_data_1d,
2 : self.img_data_2d,
3 : self.img_data_3d}
# Quant types that produce statically quantized ops
self.static_quant_types = [QuantType.STATIC, QuantType.QAT]
# All quant types for (fx based) graph mode quantization
self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT]
def checkNoPrepModules(self, module):
r"""Checks the module does not contain child
        modules for quantization preparation, e.g.
quant, dequant and observer
"""
self.assertFalse(hasattr(module, 'quant'))
self.assertFalse(hasattr(module, 'dequant'))
def checkNoQconfig(self, module):
r"""Checks the module does not contain qconfig
"""
self.assertFalse(hasattr(module, 'qconfig'))
for child in module.children():
self.checkNoQconfig(child)
def checkHasPrepModules(self, module):
r"""Checks the module contains child
        modules for quantization preparation, e.g.
quant, dequant and observer
"""
self.assertTrue(hasattr(module, 'module'))
self.assertTrue(hasattr(module, 'quant'))
self.assertTrue(hasattr(module, 'dequant'))
def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None):
r"""Checks the module or module's leaf descendants
        have observers in preparation for quantization
"""
if propagate_qconfig_list is None:
propagate_qconfig_list = get_default_qconfig_propagation_list()
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {})
# check if a module is a leaf module, ignoring activation_post_process attribute
def is_leaf_module(module):
submodule_name_count = 0
for name, _ in module.named_children():
if name != 'activation_post_process':
submodule_name_count += 1
return submodule_name_count == 0
if hasattr(module, 'qconfig') and module.qconfig is not None and \
((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential)
and type(module) in propagate_qconfig_list) or
type(module) in float_to_observed_module_class_mapping.keys()) and \
not isinstance(module, torch.quantization.DeQuantStub):
self.assertTrue(hasattr(module, 'activation_post_process'),
'module: ' + str(type(module)) + ' do not have observer')
# we don't need to check observers for child modules of the
# qat modules
if type(module) not in get_default_qat_module_mappings().values() and \
type(module) not in float_to_observed_module_class_mapping.values() and \
not isinstance(module, _FusedModule):
for child in module.children():
self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict)
def checkQuantDequant(self, mod):
r"""Checks that mod has nn.Quantize and
nn.DeQuantize submodules inserted
"""
self.assertEqual(type(mod.quant), nnq.Quantize)
self.assertEqual(type(mod.dequant), nnq.DeQuantize)
def checkWrappedQuantizedLinear(self, mod):
r"""Checks that mod has been swapped for an nnq.Linear
module, the bias is qint32, and that the module
has Quantize and DeQuantize submodules
"""
self.assertEqual(type(mod.module), nnq.Linear)
self.checkQuantDequant(mod)
def checkQuantizedLinear(self, mod):
self.assertEqual(type(mod), nnq.Linear)
def checkDynamicQuantizedLinear(self, mod, dtype):
r"""Checks that mod has been swapped for an nnqd.Linear
module, the bias is float.
"""
self.assertEqual(type(mod), nnqd.Linear)
self.assertEqual(mod._packed_params.dtype, dtype)
def check_eager_serialization(self, ref_model, loaded_model, x):
# Check state dict serialization and torch.save APIs
model_dict = ref_model.state_dict()
b = io.BytesIO()
torch.save(model_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
loaded_model.load_state_dict(loaded_dict)
ref_out = ref_model(*x)
load_out = loaded_model(*x)
def check_outputs(ref_out, load_out):
self.assertEqual(ref_out[0], load_out[0])
if isinstance(ref_out[1], tuple):
self.assertEqual(ref_out[1][0], load_out[1][0])
self.assertEqual(ref_out[1][1], load_out[1][1])
else:
self.assertEqual(ref_out[1], load_out[1])
check_outputs(ref_out, load_out)
b = io.BytesIO()
torch.save(ref_model, b)
b.seek(0)
loaded = torch.load(b)
load_out = loaded(*x)
check_outputs(ref_out, load_out)
def check_weight_bias_api(self, ref_model, weight_keys, bias_keys):
weight = ref_model.get_weight()
bias = ref_model.get_bias()
self.assertEqual(weight_keys ^ weight.keys(), set())
self.assertEqual(bias_keys ^ bias.keys(), set())
def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype):
r"""Checks that mod has been swapped for an nnqd.LSTM type
module, the bias is float.
"""
wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}
self.assertEqual(type(mod), reference_module_type)
for packed_params in mod._all_weight_values:
self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])
def checkLinear(self, mod):
self.assertEqual(type(mod), torch.nn.Linear)
def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype):
r"""Checks that mod has been swapped for an nnqd.Linear
module, the bias is float.
"""
wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}
self.assertEqual(type(mod), reference_module_type)
if hasattr(mod, '_all_weight_values'):
for packed_params in mod._all_weight_values:
self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])
def checkScriptable(self, orig_mod, calib_data, check_save_load=False):
scripted = torch.jit.script(orig_mod)
self._checkScriptable(orig_mod, scripted, calib_data, check_save_load)
# Use first calib_data entry as trace input
traced = torch.jit.trace(orig_mod, calib_data[0])
self._checkScriptable(orig_mod, traced, calib_data, check_save_load)
# Call this twice: once for a scripted module and once for a traced module
def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load):
self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data)
# Test save/load
buffer = io.BytesIO()
torch.jit.save(script_mod, buffer)
buffer.seek(0)
loaded_mod = torch.jit.load(buffer)
        # Pending __getstate__ and __setstate__ support
# See tracking task https://github.com/pytorch/pytorch/issues/23984
if check_save_load:
self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data)
def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data):
for inp in calib_data:
ref_output = orig_mod(*inp)
scripted_output = test_mod(*inp)
self.assertEqual(scripted_output, ref_output)
def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False,
check=True, eval_mode=True, dynamic=False, qconfig=None):
if debug:
print('Testing:', str(module))
qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)}
if eval_mode:
module = module.eval()
if dynamic:
qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig}
model = get_script_module(module, tracing, inputs[0]).eval()
if debug:
print('input graph:', model.graph)
models = {}
outputs = {}
for debug in [True, False]:
if dynamic:
models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug)
# make sure it runs
outputs[debug] = models[debug](inputs)
else:
# module under test can contain in-place ops, and we depend on
# input data staying constant for comparisons
inputs_copy = copy.deepcopy(inputs)
models[debug] = quantize_jit(
model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False,
debug=debug)
# make sure it runs
outputs[debug] = models[debug](*inputs[0])
if debug:
print('debug graph:', models[True].graph)
print('non debug graph:', models[False].graph)
if check:
# debug and non-debug option should have the same numerics
self.assertEqual(outputs[True], outputs[False])
# non debug graph should produce quantized op
FileCheck().check(quantized_op) \
.run(models[False].graph)
return models[False]
def checkGraphModuleNodes(
self, graph_module,
expected_node=None,
expected_node_occurrence=None,
expected_node_list=None):
""" Check if GraphModule contains the target node
Args:
graph_module: the GraphModule instance we want to check
expected_node, expected_node_occurrence, expected_node_list:
see docs for checkGraphModeFxOp
"""
nodes_in_graph = dict()
node_list = []
modules = dict(graph_module.named_modules(remove_duplicate=False))
for node in graph_module.graph.nodes:
n = None
if node.op == 'call_function' or node.op == 'call_method':
n = NodeSpec(node.op, node.target)
elif node.op == 'call_module':
n = NodeSpec(node.op, type(modules[node.target]))
if n is not None:
node_list.append(n)
if n in nodes_in_graph:
nodes_in_graph[n] += 1
else:
nodes_in_graph[n] = 1
if expected_node is not None:
self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) +
' not found in the graph module')
if expected_node_occurrence is not None:
for expected_node, occurrence in expected_node_occurrence.items():
if occurrence != 0:
self.assertTrue(
expected_node in nodes_in_graph,
'Check failed for node:' + str(expected_node) +
' not found')
self.assertTrue(
nodes_in_graph[expected_node] == occurrence,
'Check failed for node:' + str(expected_node) +
' Expected occurrence:' + str(occurrence) +
' Found occurrence:' + str(nodes_in_graph[expected_node]))
else:
self.assertTrue(
expected_node not in nodes_in_graph,
'Check failed for node:' + str(expected_node) +
' expected no occurrence but found')
if expected_node_list is not None:
cur_index = 0
for n in node_list:
if cur_index == len(expected_node_list):
return
if n == expected_node_list[cur_index]:
cur_index += 1
self.assertTrue(
cur_index == len(expected_node_list),
"Check failed for graph:" +
self.printGraphModule(graph_module, print_str=False) +
"Expected ordered list:" +
str(expected_node_list))
def printGraphModule(self, graph_module, print_str=True):
modules = dict(graph_module.named_modules())
node_infos = []
for n in graph_module.graph.nodes:
node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs]))
if n.op == 'call_module':
node_info += ' module type: ' + repr(type(modules[n.target]))
node_infos.append(node_info)
str_to_print = '\n'.join(node_infos)
if print_str:
print(str_to_print)
return str_to_print
if HAS_FX:
def assert_types_for_matched_subgraph_pairs(
self,
matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]],
gm_a: GraphModule,
gm_b: GraphModule,
) -> None:
"""
Verifies that the types specified in expected_types match
the underlying objects pointed to by the nodes in matched_subgraph_pairs.
An example successful test case:
matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)}
expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)}
The function tests for key equivalence, and verifies types with
instance checks.
"""
def _get_underlying_op_type(
node: Node, gm: GraphModule
) -> Union[Callable, str]:
if node.op == 'call_module':
mod = getattr(gm, node.target)
return type(mod)
else:
assert node.op in ('call_function', 'call_method')
return node.target
self.assertTrue(
len(matched_subgraph_pairs) == len(expected_types),
'Expected length of results to match, but got %d and %d' %
(len(matched_subgraph_pairs), len(expected_types))
)
for k, v in expected_types.items():
expected_types_a, expected_types_b = v
exp_type_start_a, exp_type_end_a = expected_types_a
exp_type_start_b, exp_type_end_b = expected_types_b
subgraph_a, subgraph_b = matched_subgraph_pairs[k]
act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a)
act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b)
act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a)
act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b)
types_match = (exp_type_start_a is act_type_start_a) and \
(exp_type_end_a is act_type_end_a) and \
(exp_type_start_b is act_type_start_b) and \
(exp_type_end_b is act_type_end_b)
self.assertTrue(
types_match,
'Type mismatch at %s: expected %s, got %s' %
(k, (exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b),
(act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b))
)
def assert_ns_compare_dict_valid(
self,
act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]],
) -> None:
"""
Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid:
1. for each layer, results are recorded for two models
2. number of seen tensors match
3. shapes of each pair of seen tensors match
"""
for layer_name, result_type_to_data in act_compare_dict.items():
for result_type, layer_data in result_type_to_data.items():
self.assertTrue(
len(layer_data) == 2,
f"Layer {layer_name} does not have exactly two model results.")
model_name_0, model_name_1 = layer_data.keys()
for res_idx in range(len(layer_data[model_name_0])):
layer_data_0 = layer_data[model_name_0][res_idx]
layer_data_1 = layer_data[model_name_1][res_idx]
self.assertTrue(
                        layer_data_0['type'] == layer_data_1['type'],
f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.")
self.assertTrue(
len(layer_data_0['values']) ==
len(layer_data_1['values']),
f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.")
# F.conv1d weight has rank 3, and toq.conv1d unpacked weight
# has rank 4. For now, skip the length check for conv1d only.
is_weight_functional_conv1d = (
result_type == NSSingleResultValuesType.WEIGHT.value and
(
'conv1d' in layer_data_0['prev_node_target_type'] or
'conv1d' in layer_data_1['prev_node_target_type']
)
)
if not is_weight_functional_conv1d:
for idx in range(len(layer_data_0['values'])):
values_0 = layer_data_0['values'][idx]
values_1 = layer_data_1['values'][idx]
if isinstance(values_0, torch.Tensor):
self.assertTrue(
values_0.shape == values_1.shape,
f"Layer {layer_name}, {model_name_0} and {model_name_1} " +
f"have a shape mismatch at idx {idx}.")
else:
assert isinstance(values_0, tuple), \
f"unhandled type {type(values_0)}"
assert len(values_0) == 2
assert len(values_0[1]) == 2
assert values_0[0].shape == values_1[0].shape
assert values_0[1][0].shape == values_1[1][0].shape
assert values_0[1][1].shape == values_1[1][1].shape
# verify that ref_node_name is valid
ref_node_name_0 = layer_data_0['ref_node_name']
ref_node_name_1 = layer_data_1['ref_node_name']
prev_node_name_0 = layer_data_0['prev_node_name']
prev_node_name_1 = layer_data_1['prev_node_name']
if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value:
self.assertTrue(ref_node_name_0 == prev_node_name_0)
self.assertTrue(ref_node_name_1 == prev_node_name_1)
elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value:
self.assertTrue(ref_node_name_0 != prev_node_name_0)
self.assertTrue(ref_node_name_1 != prev_node_name_1)
def checkGraphModeFxOp(self, model, inputs, quant_type,
expected_node=None,
expected_node_occurrence=None,
expected_node_list=None,
is_reference=False,
print_debug_info=False,
custom_qconfig_dict=None,
prepare_expected_node=None,
prepare_expected_node_occurrence=None,
prepare_expected_node_list=None,
prepare_custom_config_dict=None):
""" Quantizes model with graph mode quantization on fx and check if the
quantized model contains the quantized_node
Args:
model: floating point torch.nn.Module
inputs: one positional sample input arguments for model
expected_node: NodeSpec
e.g. NodeSpec.call_function(torch.quantize_per_tensor)
expected_node_occurrence: a dict from NodeSpec to
                expected number of occurrences (int)
e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1,
NodeSpec.call_method('dequantize'): 1}
expected_node_list: a list of NodeSpec, used to check the order
of the occurrence of Node
e.g. [NodeSpec.call_function(torch.quantize_per_tensor),
NodeSpec.call_module(nnq.Conv2d),
NodeSpec.call_function(F.hardtanh_),
NodeSpec.call_method('dequantize')]
is_reference: if True, enables reference mode
print_debug_info: if True, prints debug info
custom_qconfig_dict: overrides default qconfig_dict
prepare_expected_node: same as expected_node, but for prepare
prepare_expected_node_occurrence: same as
expected_node_occurrence, but for prepare
prepare_expected_node_list: same as expected_node_list, but
for prepare
"""
# TODO: make img_data a single example instead of a list
if type(inputs) == list:
inputs = inputs[0]
if quant_type == QuantType.QAT:
qconfig = get_default_qat_qconfig(torch.backends.quantized.engine)
model.train()
elif quant_type == QuantType.STATIC:
qconfig = get_default_qconfig(torch.backends.quantized.engine)
model.eval()
else:
qconfig = default_dynamic_qconfig
model.eval()
if quant_type == QuantType.QAT:
prepare = prepare_qat_fx
else:
prepare = prepare_fx
qconfig_dict = {"": qconfig}
# overwrite qconfig_dict with custom_qconfig_dict
if custom_qconfig_dict is not None:
qconfig_dict = custom_qconfig_dict
prepared = prepare(
model, qconfig_dict,
prepare_custom_config_dict=prepare_custom_config_dict)
if not quant_type == QuantType.DYNAMIC:
prepared(*inputs)
if print_debug_info:
print()
print('quant type:\n', quant_type)
print('original model:\n', model)
print()
print('prepared model:\n', prepared)
self.checkGraphModuleNodes(
prepared, prepare_expected_node,
prepare_expected_node_occurrence, prepare_expected_node_list)
prepared_copy = copy.deepcopy(prepared)
qgraph = convert_fx(prepared)
qgraph_reference = convert_fx(prepared_copy, is_reference=True)
result = qgraph(*inputs)
result_reference = qgraph_reference(*inputs)
qgraph_to_check = qgraph_reference if is_reference else qgraph
if print_debug_info:
print()
print('quantized model:\n', qgraph_to_check)
self.printGraphModule(qgraph_to_check)
print()
self.checkGraphModuleNodes(
qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list)
return result
def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets,
set_qconfig, is_emb_bag, dtype=torch.quint8):
# Test serialization of dynamic EmbeddingBag module using state_dict
if is_emb_bag:
inputs = [indices, offsets]
else:
inputs = [indices]
emb_dict = qemb.state_dict()
b = io.BytesIO()
torch.save(emb_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
embedding_unpack = torch.ops.quantized.embedding_bag_unpack
# Check unpacked weight values explicitly
for key in emb_dict:
if isinstance(emb_dict[key], torch._C.ScriptObject):
assert isinstance(loaded_dict[key], torch._C.ScriptObject)
emb_weight = embedding_unpack(emb_dict[key])
loaded_weight = embedding_unpack(loaded_dict[key])
self.assertEqual(emb_weight, loaded_weight)
# Check state dict serialization and torch.save APIs
if is_emb_bag:
loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
include_last_offset=True, mode='sum', dtype=dtype)
else:
loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)
self.check_eager_serialization(qemb, loaded_qemb, inputs)
loaded_qemb.load_state_dict(loaded_dict)
self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight),
embedding_unpack(loaded_qemb._packed_params._packed_weight))
# Test JIT serialization
self.checkScriptable(qemb, [inputs], check_save_load=True)
# Test from_float call
if is_emb_bag:
float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
else:
float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)
if set_qconfig:
float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,
qscheme=torch.per_channel_affine_float_qparams,
ch_axis=0)
float_embedding.qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
weight=float_qparams_observer)
prepare_dynamic(float_embedding)
float_embedding(*inputs)
if is_emb_bag:
q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding)
expected_name = "QuantizedEmbeddingBag"
else:
q_embeddingbag = nnq.Embedding.from_float(float_embedding)
expected_name = "QuantizedEmbedding"
q_embeddingbag(*inputs)
self.assertTrue(expected_name in str(q_embeddingbag))
# Below is a series of toy models used in testing quantization
class SingleLayerLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
return x
class AnnotatedSingleLayerLinearModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
def forward(self, x):
x = self.fc1(x)
return x
class SingleLayerLinearDynamicModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
return x
class RNNDynamicModel(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
if mod_type == 'GRU':
self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)
if mod_type == 'LSTM':
self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)
def forward(self, x):
x = self.mod(x)
return x
class RNNCellDynamicModel(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
if mod_type == 'GRUCell':
self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float)
if mod_type == 'LSTMCell':
self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float)
if mod_type == 'RNNReLU':
self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float)
if mod_type == 'RNNTanh':
self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float)
def forward(self, x):
x = self.mod(x)
return x
class LSTMwithHiddenDynamicModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float)
def forward(self, x, hid):
x, hid = self.lstm(x, hid)
return x, hid
class ConvModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
return x
class ConvTransposeModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
return x
class AnnotatedConvModel(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
class AnnotatedConvTransposeModel(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.dequant(x)
return x
class ConvBnModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class AnnotatedConvBnModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.qconfig = default_qconfig
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
x = self.dequant(x)
return x
class ConvBnReLUModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class AnnotatedConvBnReLUModel(torch.nn.Module):
def __init__(self, qengine='fbgemm'):
super(AnnotatedConvBnReLUModel, self).__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=True)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.dequant(x)
return x
def fuse_model(self):
torch.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True)
class TwoLayerLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
class LinearModelWithSubmodule(nn.Module):
def __init__(self):
super(LinearModelWithSubmodule, self).__init__()
self.subm = TwoLayerLinearModel()
self.fc = nn.Linear(5, 5)
def forward(self, x):
x = self.subm(x)
x = self.fc(x)
return x
class AnnotatedTwoLayerLinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))
self.fc2.qconfig = torch.quantization.get_default_qconfig("fbgemm")
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
class ActivationsTestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig("fbgemm")
self.quant = torch.quantization.QuantStub()
self.hardswish = torch.nn.Hardswish().to(dtype=torch.float)
self.elu = torch.nn.ELU().to(dtype=torch.float)
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.hardswish(x)
x = self.elu(x)
x = self.dequant(x)
return x
class LinearReluModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(self.fc(x))
return x
class NormalizationTestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.quantization.QuantStub()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.layer_norm = torch.nn.LayerNorm((8))
self.group_norm = torch.nn.GroupNorm(2, 8)
self.instance_norm1d = torch.nn.InstanceNorm1d(8)
self.instance_norm2d = torch.nn.InstanceNorm2d(8)
self.instance_norm3d = torch.nn.InstanceNorm3d(8)
def forward(self, x):
x = self.quant(x)
x = self.fc1(x)
x = self.layer_norm(x)
x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3))
x = self.instance_norm1d(x)
x = self.instance_norm2d(x.unsqueeze(-1))
x = self.instance_norm3d(x.unsqueeze(-1))
return x
class NestedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class AnnotatedNestedModel(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
if qengine == 'fbgemm':
self.sub2.fc1.qconfig = default_per_channel_qconfig
else:
self.sub2.fc1.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class AnnotatedSubNestedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = QuantWrapper(TwoLayerLinearModel())
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class AnnotatedCustomConfigNestedModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.qconfig = default_qconfig
custom_options = {
'dtype': torch.quint8,
'qscheme': torch.per_tensor_affine
}
custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),
weight=default_weight_observer)
self.sub2.fc1.qconfig = custom_qconfig
self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
self.sub2.fc2 = QuantWrapper(self.sub2.fc2)
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class QuantSubModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = QuantWrapper(TwoLayerLinearModel())
self.sub2.qconfig = default_qconfig
self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
self.fc3.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
class InnerModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)
self.relu1 = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)
self.relu2 = torch.nn.ReLU()
def forward(self, x):
return self.relu2(self.fc2(self.relu1(self.fc1(x))))
def fuse_modules(self):
fusable_layers = []
named_children = list(self.named_children())
for idx, (current_name, layer) in enumerate(named_children):
if isinstance(layer, torch.nn.Linear):
if idx >= len(named_children) - 1:
break
if isinstance(named_children[idx + 1][1], torch.nn.ReLU):
fusable_layers.append([current_name,
named_children[idx + 1][0]])
torch.quantization.fuse_modules(self, fusable_layers, inplace=True)
class SkipQuantModel(torch.nn.Module):
r"""We can skip quantization by explicitly
setting qconfig of a submodule to None
"""
def __init__(self):
super().__init__()
self.sub = InnerModule()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
return self.fc(self.sub(x))
def fuse_modules(self):
self.sub.fuse_modules()
class AnnotatedSkipQuantModel(torch.nn.Module):
r"""We can skip quantization by explicitly
setting qconfig of a submodule to None
"""
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig(qengine)
self.sub = QuantWrapper(InnerModule())
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
# don't quantize this fc
self.fc.qconfig = None
def forward(self, x):
return self.fc(self.sub(x))
def fuse_modules(self):
self.sub.module.fuse_modules()
class QuantStubModel(torch.nn.Module):
r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
"""
def __init__(self):
super().__init__()
self.qconfig = torch.quantization.get_default_qconfig("qnnpack")
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.fc(x)
return self.dequant(x)
class ManualLinearQATModel(torch.nn.Module):
r"""A Module with manually inserted `QuantStub` and `DeQuantStub`
"""
def __init__(self, qengine):
super().__init__()
self.qconfig = torch.quantization.get_default_qat_qconfig(qengine)
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.fc1(x)
x = self.fc2(x)
return self.dequant(x)
class ManualConvLinearQATModel(torch.nn.Module):
r"""A module with manually inserted `QuantStub` and `DeQuantStub`
and contains both linear and conv modules
"""
def __init__(self):
super().__init__()
self.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)
self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)
self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = x.view(-1, 64).contiguous()
x = self.fc1(x)
x = self.fc2(x)
return self.dequant(x)
class SubModelForFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.bn = nn.BatchNorm2d(2).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class SubModelWithoutFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.relu = nn.ReLU(inplace=False).to(dtype=torch.float)
def forward(self, x):
return self.relu(self.conv(x))
class ModelForFusion(nn.Module):
def __init__(self, qconfig):
super().__init__()
self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float)
self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.sub1 = SubModelForFusion()
self.sub2 = SubModelWithoutFusion()
self.fc = nn.Linear(36, 10).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.qconfig = qconfig
self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float)
self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float)
self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float)
self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float)
self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float)
# don't quantize sub2
self.sub2.qconfig = None
self.fc.qconfig = None
def forward(self, x):
x = x.squeeze(2)
x = self.quant(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu4(x)
x = x.unsqueeze(2)
y = x.unsqueeze(2)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.sub1(x)
x = self.dequant(x)
x = self.sub2(x)
x = x.view(-1, 36).contiguous()
x = self.fc(x)
y = self.conv2(y)
y = self.relu2(y)
y = self.bn2(y)
y = self.relu3(y)
y = self.dequant(y)
return x
class ConvBNReLU(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(3, 3, 1, 1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=False)
)
class ModelWithSequentialFusion(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1)
self.relu1 = nn.ReLU(inplace=False)
layers = []
for i in range(3):
layers.append(ConvBNReLU())
self.features = nn.Sequential(*layers)
head = [nn.Linear(300, 10), nn.ReLU(inplace=False)]
self.classifier = nn.Sequential(*head)
self.seq = nn.Sequential()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.relu1(x)
x = self.features(x)
x = torch.reshape(x, (-1, 3 * 10 * 10))
x = self.classifier(x)
x = self.seq(x)
x = self.dequant(x)
return x
class ModelForFusionWithBias(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float)
self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float)
self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.dequant(x)
return x
class ModelForLinearBNFusion(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(20, 10)
self.bn = nn.BatchNorm1d(10)
nn.init.uniform_(self.bn.weight)
nn.init.uniform_(self.bn.bias)
def forward(self, x):
return self.bn(self.fc(x))
class DummyObserver(torch.nn.Module):
def calculate_qparams(self):
return 1.0, 0
def forward(self, x):
return x
class ModelWithFunctionals(torch.nn.Module):
def __init__(self):
super().__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
# Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
# self.my_scalar_add = nnq.FloatFunctional()
# self.my_scalar_mul = nnq.FloatFunctional()
def forward(self, x):
y = self.mycat.cat([x, x, x])
z = self.myadd.add(y, y)
w = self.myadd_relu.add_relu(z, z)
# Tracing doesn't work yet for c10 ops with scalar inputs
# https://github.com/pytorch/pytorch/issues/27097
# w = self.my_scalar_add.add_scalar(w, -0.5)
# w = self.my_scalar_mul.mul_scalar(w, 0.5)
return w
class ResNetBase(torch.nn.Module):
def __init__(self):
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.downsample = torch.nn.Identity()
self.myop = nn.quantized.FloatFunctional()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = torch.nn.Linear(inplanes, 1)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
identity = self.downsample(x)
out = self.myop.add(out, identity)
out = self.relu2(out)
out = self.avgpool(out)
out = torch.flatten(out, 1)
out = self.fc(out)
return out
def fuse_model(self):
torch.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True)
class ModelMultipleOps(torch.nn.Module):
def __init__(self):
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.downsample = torch.nn.Identity()
self.skip_add = nn.quantized.FloatFunctional()
self.cat = nn.quantized.FloatFunctional()
self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
self.fc = nn.Linear(12, 6)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
identity = self.downsample(x)
out = self.skip_add.add(out, identity)
out = self.relu2(out)
out = self.avgpool(out)
out = self.conv2(out)
out = torch.nn.functional.max_pool2d(out, 2, 2)
out = self.cat.cat([out, out])
out = out.reshape(-1, 3 * 2 * 2)
out = self.fc(out)
return out
# Model to ensure consistency of fake quant with true quant
# Average pooling and mean operations are not modelled
# accurately with fake-quant so this model does not
# contain those operations
class ModelMultipleOpsNoAvgPool(torch.nn.Module):
def __init__(self):
super().__init__()
norm_layer = nn.BatchNorm2d
inplanes = 3
self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)
self.bn1 = norm_layer(inplanes)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.skip_add = nn.quantized.FloatFunctional()
self.cat = nn.quantized.FloatFunctional()
self.maxpool = nn.MaxPool2d((4, 4))
self.fc = nn.Linear(12, 6)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
skip = self.conv2(x)
out = self.skip_add.add(out, skip)
out = self.relu2(out)
out = self.maxpool(out)
out = self.conv2(out)
out = torch.nn.functional.max_pool2d(out, 2, 2)
out = self.cat.cat([out, out])
out = out.reshape(-1, 3 * 2 * 2)
out = self.fc(out)
return out
class EmbeddingBagModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,
include_last_offset=True, scale_grad_by_freq=False, mode='sum')
def forward(self, indices, offsets, per_sample_weights):
return self.emb(indices, offsets, per_sample_weights)
class EmbeddingModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
def forward(self, indices):
return self.emb(indices)
class EmbeddingWithLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
self.fc = torch.nn.Linear(5, 5)
self.emb.qconfig = float_qparams_weight_only_qconfig
self.qconfig = default_qconfig
def forward(self, indices, linear_in):
return self.emb(indices), self.fc(linear_in)
class DenseTopMLP(nn.Module):
def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None:
super(DenseTopMLP, self).__init__()
self.dense_mlp = nn.Sequential(
nn.Linear(dense_dim, dense_out),
)
self.top_mlp = nn.Sequential(
nn.Linear(dense_out + embedding_dim, top_out_in),
nn.Linear(top_out_in, top_out_out),
)
def forward(
self,
sparse_feature: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
dense_feature = self.dense_mlp(dense)
features = torch.cat([dense_feature] + [sparse_feature], dim=1)
out = self.top_mlp(features)
return out
# thin wrapper around nn.EmbeddingBag, because tracing inside nn.EmbeddingBag is not
# supported at the moment and this is the top-level module
class EmbBagWrapper(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum')
def forward(self, indices, offsets):
return self.emb_bag(indices, offsets)
class SparseNNModel(nn.Module):
_NUM_EMBEDDINGS = 10
_EMBEDDING_DIM = 5
_DENSE_DIM = 4
_DENSE_OUTPUT = 2
_TOP_OUT_IN = 2
_TOP_OUT_OUT = 2
_TOP_MLP_DIM = 1
def __init__(self) -> None:
super(SparseNNModel, self).__init__()
self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM)
self.dense_top = DenseTopMLP(
self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN,
self._TOP_OUT_OUT)
def forward(
self,
sparse_indices: torch.Tensor,
sparse_offsets: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
sparse_feature = self.model_sparse(sparse_indices, sparse_offsets)
out = self.dense_top(sparse_feature, dense)
return out
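# Illustrative sketch (not part of the original test file): one way a model such as
# QuantStubModel above is typically exercised with the eager-mode post-training
# static quantization API. The calibration tensor shape (4, 5) is an arbitrary choice
# matching the Linear(5, 5) layer, and the helper name is made up for illustration.
def _quantstub_model_ptq_sketch():
    torch.backends.quantized.engine = "qnnpack"            # match the model's qconfig backend
    float_model = QuantStubModel().eval()
    prepared = torch.quantization.prepare(float_model)     # insert observers per qconfig
    prepared(torch.randn(4, 5))                            # run sample data to calibrate
    quantized = torch.quantization.convert(prepared)       # swap in quantized modules
    return quantized(torch.randn(4, 5))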
| []
| []
| [
"MASTER_ADDR",
"MASTER_PORT"
]
| [] | ["MASTER_ADDR", "MASTER_PORT"] | python | 2 | 0 | |
chain/stmgr/utils.go | package stmgr
import (
"bytes"
"context"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/rt"
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.NetworkName, error) {
act, err := sm.LoadActorRaw(ctx, init_.Address, st)
if err != nil {
return "", err
}
ias, err := init_.Load(sm.cs.Store(ctx), act)
if err != nil {
return "", err
}
return ias.NetworkName()
}
func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) {
state, err := sm.StateTree(st)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load state tree: %w", err)
}
act, err := state.GetActor(maddr)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
info, err := mas.Info()
if err != nil {
return address.Undef, xerrors.Errorf("failed to load actor info: %w", err)
}
return vm.ResolveToKeyAddr(state, sm.cs.Store(ctx), info.Worker)
}
func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, bool, error) {
return GetPowerRaw(ctx, sm, ts.ParentState(), maddr)
}
func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (power.Claim, power.Claim, bool, error) {
act, err := sm.LoadActorRaw(ctx, power.Address, st)
if err != nil {
return power.Claim{}, power.Claim{}, false, xerrors.Errorf("(get sset) failed to load power actor state: %w", err)
}
pas, err := power.Load(sm.cs.Store(ctx), act)
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
tpow, err := pas.TotalPower()
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
var mpow power.Claim
var minpow bool
if maddr != address.Undef {
var found bool
mpow, found, err = pas.MinerPower(maddr)
if err != nil || !found {
// TODO: return an error when not found?
return power.Claim{}, power.Claim{}, false, err
}
minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr)
if err != nil {
return power.Claim{}, power.Claim{}, false, err
}
}
return mpow, tpow, minpow, nil
}
func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorPreCommitOnChainInfo, error) {
act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return mas.GetPrecommittedSector(sid)
}
func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorOnChainInfo, error) {
act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return mas.GetSector(sid)
}
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
act, err := sm.LoadActorRaw(ctx, maddr, st)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
var provingSectors bitfield.BitField
if nv < network.Version8 {
allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
if err != nil {
return nil, xerrors.Errorf("get all sectors: %w", err)
}
faultySectors, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
if err != nil {
return nil, xerrors.Errorf("get faulty sectors: %w", err)
}
provingSectors, err = bitfield.SubtractBitField(allSectors, faultySectors)
if err != nil {
return nil, xerrors.Errorf("calc proving sectors: %w", err)
}
} else {
provingSectors, err = miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
if err != nil {
return nil, xerrors.Errorf("get active sectors sectors: %w", err)
}
}
numProvSect, err := provingSectors.Count()
if err != nil {
return nil, xerrors.Errorf("failed to count bits: %w", err)
}
// TODO(review): is this right? feels fishy to me
if numProvSect == 0 {
return nil, nil
}
info, err := mas.Info()
if err != nil {
return nil, xerrors.Errorf("getting miner info: %w", err)
}
mid, err := address.IDFromAddress(maddr)
if err != nil {
return nil, xerrors.Errorf("getting miner ID: %w", err)
}
proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType)
if err != nil {
return nil, xerrors.Errorf("determining winning post proof type: %w", err)
}
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect)
if err != nil {
return nil, xerrors.Errorf("generating winning post challenges: %w", err)
}
iter, err := provingSectors.BitIterator()
if err != nil {
return nil, xerrors.Errorf("iterating over proving sectors: %w", err)
}
// Select winning sectors by _index_ in the all-sectors bitfield.
selectedSectors := bitfield.New()
prev := uint64(0)
for _, n := range ids {
sno, err := iter.Nth(n - prev)
if err != nil {
return nil, xerrors.Errorf("iterating over proving sectors: %w", err)
}
selectedSectors.Set(sno)
prev = n
}
sectors, err := mas.LoadSectors(&selectedSectors)
if err != nil {
return nil, xerrors.Errorf("loading proving sectors: %w", err)
}
out := make([]builtin.SectorInfo, len(sectors))
for i, sinfo := range sectors {
out[i] = builtin.SectorInfo{
SealProof: sinfo.SealProof,
SectorNumber: sinfo.SectorNumber,
SealedCID: sinfo.SealedCID,
}
}
return out, nil
}
func StateMinerInfo(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*miner.MinerInfo, error) {
act, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
mi, err := mas.Info()
if err != nil {
return nil, err
}
return &mi, err
}
func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) {
act, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return false, xerrors.Errorf("failed to load power actor: %w", err)
}
spas, err := power.Load(sm.cs.Store(ctx), act)
if err != nil {
return false, xerrors.Errorf("failed to load power actor state: %w", err)
}
_, ok, err := spas.MinerPower(maddr)
if err != nil {
return false, xerrors.Errorf("getting miner power: %w", err)
}
if !ok {
return true, nil
}
return false, nil
}
func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) {
act, err := sm.LoadActor(ctx, market.Address, ts)
if err != nil {
return nil, xerrors.Errorf("failed to load market actor: %w", err)
}
state, err := market.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load market actor state: %w", err)
}
proposals, err := state.Proposals()
if err != nil {
return nil, err
}
proposal, found, err := proposals.Get(dealID)
if err != nil {
return nil, err
} else if !found {
return nil, xerrors.Errorf(
"deal %d not found "+
"- deal may not have completed sealing before deal proposal "+
"start epoch, or deal may have been slashed",
dealID)
}
states, err := state.States()
if err != nil {
return nil, err
}
st, found, err := states.Get(dealID)
if err != nil {
return nil, err
}
if !found {
st = market.EmptyDealState()
}
return &api.MarketDeal{
Proposal: *proposal,
State: *st,
}, nil
}
func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([]address.Address, error) {
act, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return nil, xerrors.Errorf("failed to load power actor: %w", err)
}
powState, err := power.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load power actor state: %w", err)
}
return powState.ListAllMiners()
}
func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, msgs []*types.Message, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
}
base, trace, err := sm.ExecutionTrace(ctx, ts)
if err != nil {
return cid.Undef, nil, err
}
for i := ts.Height(); i < height; i++ {
// handle state forks
base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
}
// TODO: should we also run cron here?
}
r := store.NewChainRand(sm.cs, ts.Cids())
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: height,
Rand: r,
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)
if err != nil {
return cid.Undef, nil, err
}
for i, msg := range msgs {
// TODO: Use the signed message length for secp messages
ret, err := vmi.ApplyMessage(ctx, msg)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("applying message %s: %w", msg.Cid(), err)
}
if ret.ExitCode != 0 {
log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr)
}
}
root, err := vmi.Flush(ctx)
if err != nil {
return cid.Undef, nil, err
}
return root, trace, nil
}
func LookbackStateGetterForTipset(sm *StateManager, ts *types.TipSet) vm.LookbackStateGetter {
return func(ctx context.Context, round abi.ChainEpoch) (*state.StateTree, error) {
_, st, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
if err != nil {
return nil, err
}
return sm.StateTree(st)
}
}
func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, cid.Cid, error) {
var lbr abi.ChainEpoch
lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round))
if round > lb {
lbr = round - lb
}
// more null blocks than our lookback
if lbr >= ts.Height() {
// This should never happen at this point, but may happen before
// network version 3 (where the lookback was only 10 blocks).
st, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return nil, cid.Undef, err
}
return ts, st, nil
}
// Get the tipset after the lookback tipset, or the next non-null one.
nextTs, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr+1, ts, false)
if err != nil {
return nil, cid.Undef, xerrors.Errorf("failed to get lookback tipset+1: %w", err)
}
if lbr > nextTs.Height() {
return nil, cid.Undef, xerrors.Errorf("failed to find non-null tipset %s (%d) which is known to exist, found %s (%d)", ts.Key(), ts.Height(), nextTs.Key(), nextTs.Height())
}
lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents())
if err != nil {
return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err)
}
return lbts, nextTs.ParentState(), nil
}
func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) {
ts, err := sm.ChainStore().LoadTipSet(tsk)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err)
}
prev, err := sm.ChainStore().GetLatestBeaconEntry(ts)
if err != nil {
if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" {
return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
prev = &types.BeaconEntry{}
}
entries, err := beacon.BeaconEntriesForBlock(ctx, bcs, round, ts.Height(), *prev)
if err != nil {
return nil, err
}
rbase := *prev
if len(entries) > 0 {
rbase = entries[len(entries)-1]
}
lbts, lbst, err := GetLookbackTipSetForRound(ctx, sm, ts, round)
if err != nil {
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
}
act, err := sm.LoadActorRaw(ctx, maddr, lbst)
if xerrors.Is(err, types.ErrActorNotFound) {
_, err := sm.LoadActor(ctx, maddr, ts)
if err != nil {
return nil, xerrors.Errorf("loading miner in current state: %w", err)
}
return nil, nil
}
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor: %w", err)
}
mas, err := miner.Load(sm.cs.Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load miner actor state: %w", err)
}
buf := new(bytes.Buffer)
if err := maddr.MarshalCBOR(buf); err != nil {
return nil, xerrors.Errorf("failed to marshal miner address: %w", err)
}
prand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
if err != nil {
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
}
nv := sm.GetNtwkVersion(ctx, ts.Height())
sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand)
if err != nil {
return nil, xerrors.Errorf("getting winning post proving set: %w", err)
}
if len(sectors) == 0 {
return nil, nil
}
mpow, tpow, _, err := GetPowerRaw(ctx, sm, lbst, maddr)
if err != nil {
return nil, xerrors.Errorf("failed to get power: %w", err)
}
info, err := mas.Info()
if err != nil {
return nil, err
}
worker, err := sm.ResolveToKeyAddress(ctx, info.Worker, ts)
if err != nil {
return nil, xerrors.Errorf("resolving worker address: %w", err)
}
// TODO: Not ideal performance...This method reloads miner and power state (already looked up here and in GetPowerRaw)
eligible, err := MinerEligibleToMine(ctx, sm, maddr, ts, lbts)
if err != nil {
return nil, xerrors.Errorf("determining miner eligibility: %w", err)
}
return &api.MiningBaseInfo{
MinerPower: mpow.QualityAdjPower,
NetworkPower: tpow.QualityAdjPower,
Sectors: sectors,
WorkerKey: worker,
SectorSize: info.SectorSize,
PrevBeaconEntry: *prev,
BeaconEntries: entries,
EligibleForMining: eligible,
}, nil
}
type MethodMeta struct {
Name string
Params reflect.Type
Ret reflect.Type
}
var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{}
func init() {
// TODO: combine with the runtime actor registry.
var actors []rt.VMActor
actors = append(actors, exported0.BuiltinActors()...)
actors = append(actors, exported2.BuiltinActors()...)
actors = append(actors, exported3.BuiltinActors()...)
for _, actor := range actors {
exports := actor.Exports()
methods := make(map[abi.MethodNum]MethodMeta, len(exports))
// Explicitly add send, it's special.
methods[builtin.MethodSend] = MethodMeta{
Name: "Send",
Params: reflect.TypeOf(new(abi.EmptyValue)),
Ret: reflect.TypeOf(new(abi.EmptyValue)),
}
// Iterate over exported methods. Some of these _may_ be nil and
// must be skipped.
for number, export := range exports {
if export == nil {
continue
}
ev := reflect.ValueOf(export)
et := ev.Type()
// Extract the method names using reflection. These
// method names always match the field names in the
// `builtin.Method*` structs (tested in the specs-actors
// tests).
fnName := runtime.FuncForPC(ev.Pointer()).Name()
fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
switch abi.MethodNum(number) {
case builtin.MethodSend:
panic("method 0 is reserved for Send")
case builtin.MethodConstructor:
if fnName != "Constructor" {
panic("method 1 is reserved for Constructor")
}
}
methods[abi.MethodNum(number)] = MethodMeta{
Name: fnName,
Params: et.In(1),
Ret: et.Out(0),
}
}
MethodsMap[actor.Code()] = methods
}
}
func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
act, err := sm.LoadActor(ctx, to, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
m, found := MethodsMap[act.Code][method]
if !found {
return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code)
}
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
m, found := MethodsMap[actCode][method]
if !found {
return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode)
}
return reflect.New(m.Params.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
func minerHasMinPower(ctx context.Context, sm *StateManager, addr address.Address, ts *types.TipSet) (bool, error) {
pact, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return false, xerrors.Errorf("loading power actor state: %w", err)
}
ps, err := power.Load(sm.cs.Store(ctx), pact)
if err != nil {
return false, err
}
return ps.MinerNominalPowerMeetsConsensusMinimum(addr)
}
func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Address, baseTs *types.TipSet, lookbackTs *types.TipSet) (bool, error) {
hmp, err := minerHasMinPower(ctx, sm, addr, lookbackTs)
// TODO: We're blurring the lines between a "runtime network version" and a "Lotus upgrade epoch", is that unavoidable?
if sm.GetNtwkVersion(ctx, baseTs.Height()) <= network.Version4 {
return hmp, err
}
if err != nil {
return false, err
}
if !hmp {
return false, nil
}
// Post actors v2, also check MinerEligibleForElection with base ts
pact, err := sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
return false, xerrors.Errorf("loading power actor state: %w", err)
}
pstate, err := power.Load(sm.cs.Store(ctx), pact)
if err != nil {
return false, err
}
mact, err := sm.LoadActor(ctx, addr, baseTs)
if err != nil {
return false, xerrors.Errorf("loading miner actor state: %w", err)
}
mstate, err := miner.Load(sm.cs.Store(ctx), mact)
if err != nil {
return false, err
}
// Non-empty power claim.
if claim, found, err := pstate.MinerPower(addr); err != nil {
return false, err
} else if !found {
return false, err
} else if claim.QualityAdjPower.LessThanEqual(big.Zero()) {
return false, err
}
// No fee debt.
if debt, err := mstate.FeeDebt(); err != nil {
return false, err
} else if !debt.IsZero() {
return false, err
}
// No active consensus faults.
if mInfo, err := mstate.Info(); err != nil {
return false, err
} else if baseTs.Height() <= mInfo.ConsensusFaultElapsed {
return false, nil
}
return true, nil
}
func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) {
str, err := state.LoadStateTree(sm.ChainStore().Store(ctx), ts.ParentState())
if err != nil {
return abi.TokenAmount{}, err
}
sum := types.NewInt(0)
err = str.ForEach(func(a address.Address, act *types.Actor) error {
sum = types.BigAdd(sum, act.Balance)
return nil
})
if err != nil {
return abi.TokenAmount{}, err
}
return sum, nil
}
func MakeMsgGasCost(msg *types.Message, ret *vm.ApplyRet) api.MsgGasCost {
return api.MsgGasCost{
Message: msg.Cid(),
GasUsed: big.NewInt(ret.GasUsed),
BaseFeeBurn: ret.GasCosts.BaseFeeBurn,
OverEstimationBurn: ret.GasCosts.OverEstimationBurn,
MinerPenalty: ret.GasCosts.MinerPenalty,
MinerTip: ret.GasCosts.MinerTip,
Refund: ret.GasCosts.Refund,
TotalCost: big.Sub(msg.RequiredFunds(), ret.GasCosts.Refund),
}
}
| [
"\"LOTUS_IGNORE_DRAND\""
]
| []
| [
"LOTUS_IGNORE_DRAND"
]
| [] | ["LOTUS_IGNORE_DRAND"] | go | 1 | 0 | |
backend/smithson_31899/wsgi.py | """
WSGI config for smithson_31899 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smithson_31899.settings')
application = get_wsgi_application()
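# Illustrative note (not part of the generated file): for a quick local smoke test the
# `application` callable above can be served with the stdlib wsgiref server (gunicorn or
# uwsgi would normally front it in production). Kept as a comment so importing this
# module stays side-effect free:
#
#   from wsgiref.simple_server import make_server
#   make_server("127.0.0.1", 8000, application).serve_forever()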
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/commands/cloud_init.go | /*
* Copyright (c) 2022.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package commands
import (
"fmt"
"github.com/kameshsampath/kluster/pkg/utils"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"path"
"strings"
)
type User struct {
Name string `yaml:"profile,omitempty"`
Groups string `yaml:"groups,omitempty"`
Shell string `yaml:"shell,omitempty"`
Sudo string `yaml:"sudo,omitempty"`
SSHAuthorizedKeys []string `yaml:"ssh-authorized-keys,omitempty"`
Password string `yaml:"password,omitempty"`
}
type CloudInit struct {
PackageUpdate bool `yaml:"package_update,omitempty"`
Packages []string `yaml:"packages,omitempty"`
Bootcmd []string `yaml:"bootcmd,omitempty"`
Runcmd []string `yaml:"runcmd,omitempty"`
Users []User `yaml:"users"`
K3sUtil *utils.K3sVersionInfo `yaml:"-"`
}
var _ yaml.Unmarshaler = (*User)(nil)
func NewCloudInitUtil(k3sUtil *utils.K3sVersionInfo) (*CloudInit, error) {
var err error
if k3sUtil == nil {
k3sUtil, err = utils.NewK3sVersionUtil("", "")
if err != nil {
return nil, err
}
}
if err := k3sUtil.QueryAndCacheK3sReleases(); err != nil {
return nil, err
}
return &CloudInit{
K3sUtil: k3sUtil,
}, nil
}
//UnmarshalYAML implements yaml.Unmarshaler to unmarshal User YAML data
func (u *User) UnmarshalYAML(unmarshal func(interface{}) error) error {
var t interface{}
if err := unmarshal(&t); err != nil {
return err
}
if s, ok := t.(string); ok {
u.Name = s
} else if m, ok := t.(map[interface{}]interface{}); ok {
for k, v := range m {
switch key := k.(string); key {
case "profile":
u.Name = v.(string)
case "groups":
u.Groups = v.(string)
case "password":
u.Password = v.(string)
case "shell":
u.Shell = v.(string)
case "sudo":
u.Sudo = v.(string)
case "ssh-authorized-keys":
iArr := v.([]interface{})
var sshAuthKeys []string
for _, i := range iArr {
sshAuthKeys = append(sshAuthKeys, i.(string))
}
u.SSHAuthorizedKeys = sshAuthKeys
}
}
}
return nil
}
//configureAndGetCloudInitFile builds the cloud-init config for the profile, writes it to a temporary file, and records the path in opts
func (c *CloudInit) configureAndGetCloudInitFile(opts *StartOptions) error {
log.Debugf("Runner Temp is %s and user temp dir is %s ", os.Getenv("TMPDIR"), os.TempDir())
var b = []byte(k3sDefaultCloudInit)
if err := yaml.Unmarshal(b, c); err != nil {
log.Errorf("Error unmarshalling Cloud Init %v", err)
return err
}
if opts.k3sVersion == "" {
opts.k3sVersion = c.K3sUtil.Versions[0]
}
installExec := fmt.Sprintf(k3sInstallCmd, opts.k3sVersion, strings.Join(opts.k3sServerFlags, " "))
c.Runcmd = append(c.Runcmd, installExec, k3sKubeConfigCopyCmd)
dir, err := ioutil.TempDir("", "kluster-start")
if err != nil {
return err
}
cloudInitFile := path.Join(dir, fmt.Sprintf("%s-cloud-init", opts.profile))
if _, err := os.Create(cloudInitFile); err != nil {
log.Errorf("Unable to create cloud init file %s, %v", cloudInitFile, err)
return err
}
b, err = yaml.Marshal(&c)
if err != nil {
log.Errorf("Error marshalling cloud init file %v", err)
return err
}
if err := ioutil.WriteFile(cloudInitFile, b, 0600); err != nil {
log.Errorf("Error writing to cloud init file %v", err)
return err
}
log.Debugf("Generated clou-init file %s", cloudInitFile)
opts.cloudInitFile = cloudInitFile
return nil
}
| [
"\"TMPDIR\""
]
| []
| [
"TMPDIR"
]
| [] | ["TMPDIR"] | go | 1 | 0 | |
completers/cargo_completer/cmd/publish.go | package cmd
import (
"github.com/rsteube/carapace"
"github.com/rsteube/carapace-bin/completers/cargo_completer/cmd/action"
"github.com/spf13/cobra"
)
var publishCmd = &cobra.Command{
Use: "publish",
Short: "Upload a package to the registry",
Run: func(cmd *cobra.Command, args []string) {},
}
func init() {
carapace.Gen(publishCmd).Standalone()
publishCmd.Flags().StringS("Z", "Z", "", "Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details")
publishCmd.Flags().Bool("all-features", false, "Activate all available features")
publishCmd.Flags().Bool("allow-dirty", false, "Allow dirty working directories to be packaged")
publishCmd.Flags().String("color", "", "Coloring: auto, always, never")
publishCmd.Flags().Bool("dry-run", false, "Perform all checks without uploading")
publishCmd.Flags().String("features", "", "Space or comma separated list of features to activate")
publishCmd.Flags().Bool("frozen", false, "Require Cargo.lock and cache are up to date")
publishCmd.Flags().BoolP("help", "h", false, "Prints help information")
publishCmd.Flags().String("index", "", "Registry index URL to upload the package to")
publishCmd.Flags().StringP("jobs", "j", "", "Number of parallel jobs, defaults to # of CPUs")
publishCmd.Flags().Bool("locked", false, "Require Cargo.lock is up to date")
publishCmd.Flags().String("manifest-path", "", "Path to Cargo.toml")
publishCmd.Flags().Bool("no-default-features", false, "Do not activate the `default` feature")
publishCmd.Flags().Bool("no-verify", false, "Don't verify the contents by building them")
publishCmd.Flags().Bool("offline", false, "Run without accessing the network")
publishCmd.Flags().BoolP("quiet", "q", false, "No output printed to stdout")
publishCmd.Flags().String("registry", "", "Registry to publish to")
publishCmd.Flags().String("target", "", "Build for the target triple")
publishCmd.Flags().String("target-dir", "", "Directory for all generated artifacts")
publishCmd.Flags().String("token", "", "Token to use when uploading")
publishCmd.Flags().BoolP("verbose", "v", false, "Use verbose output (-vv very verbose/build.rs output)")
rootCmd.AddCommand(publishCmd)
carapace.Gen(publishCmd).FlagCompletion(carapace.ActionMap{
"color": action.ActionColorModes(),
"features": carapace.ActionMultiParts(",", func(c carapace.Context) carapace.Action {
return action.ActionFeatures(publishCmd).Invoke(c).Filter(c.Parts).ToA()
}),
"manifest-path": carapace.ActionFiles(),
"registry": action.ActionRegistries(),
"target-dir": carapace.ActionDirectories(),
})
}
| []
| []
| []
| [] | [] | go | null | null | null |
aws-inventory/lambda/inventory-transit-gateway.py | import boto3
from botocore.exceptions import ClientError
import json
import os
import time
from datetime import datetime, timezone
from dateutil import tz
from antiope.aws_account import *
from common import *
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
RESOURCE_PATH = "ec2/transitgateway"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
for r in target_account.get_regions():
try:
discover_transit_gateways(target_account, r)
except ClientError as e:
# Move onto next region if we get access denied. This is probably SCPs
if e.response['Error']['Code'] == 'AccessDeniedException':
logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})")
continue
else:
raise # pass on to the next handler
except AntiopeAssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
if e.response['Error']['Code'] == 'UnauthorizedOperation':
logger.error("Antiope doesn't have proper permissions to this account")
return(event)
logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
raise
except Exception as e:
logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
raise
def discover_transit_gateways(target_account, region):
'''Discover transit gateways in the given region for the target account'''
ec2_client = target_account.get_client('ec2', region=region)
response = ec2_client.describe_transit_gateways()
if response['TransitGateways']:
for tg in response['TransitGateways']:
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::EC2::TransitGateway"
resource_item['source'] = "Antiope"
resource_item['awsRegion'] = region
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['configuration'] = tg
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = tg['TransitGatewayId']
resource_item['ARN'] = tg['TransitGatewayArn']
resource_item['resourceCreationTime'] = tg['CreationTime']
resource_item['errors'] = {}
if 'Tags' in tg:
resource_item['tags'] = parse_tags(tg['Tags'])
# Get the transit gateway attachments for this gateway ID and add them to the supplementary configuration
attachments = discover_transit_gateway_attachments(ec2_client, tg['TransitGatewayId'])
for a in attachments:
resource_item['supplementaryConfiguration'] = a
if a['ResourceType'] == 'tgw-peering':
tg_peering_response = discover_transit_gateway_peering_attachments(ec2_client, a['TransitGatewayId'], a['TransitGatewayAttachmentId'])[0]
resource_item['supplementaryConfiguration']['TransitGatewayPeeringAttachments'] = tg_peering_response
if a['ResourceType'] == 'vpc':
vpc_attachments_response = discover_transit_gateway_vpc_attachments(ec2_client, a['TransitGatewayId'], a['ResourceId'])[0]
resource_item['supplementaryConfiguration']['TransitGatewayVpcAttachments'] = vpc_attachments_response
# Save files to S3
save_resource_to_s3(RESOURCE_PATH, tg['TransitGatewayId'], resource_item)
logger.info("Discovered Transit Gateways ({}) in account {} for region {}".format(tg['TransitGatewayId'], target_account.account_id, region))
logger.debug("Data: {}".format(resource_item))
else:
logger.debug("No Transit Gateways found for account {} in region {}".format(target_account.account_id, region))
def discover_transit_gateway_attachments(ec2_client, tgId):
''' Get transit gateway attachments based on transit gateway ID'''
response = ec2_client.describe_transit_gateway_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [
tgId,
]
},
]
)
return(response['TransitGatewayAttachments'])
def discover_transit_gateway_vpc_attachments(ec2_client, tgId, resourceId):
''' Get transit gateway VPC attachment information based on transit gateway ID and VPC ID'''
response = ec2_client.describe_transit_gateway_vpc_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [
tgId,
]
},
{
'Name': 'vpc-id',
'Values': [
resourceId,
]
},
]
)
return(response['TransitGatewayVpcAttachments'])
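# Illustrative helper (not part of the original Lambda): EC2 describe_* calls take
# Filters as a list of {'Name': ..., 'Values': [...]} dicts; separate dicts are ANDed
# together while entries inside a single 'Values' list are ORed, so each filter name
# belongs in its own dict. The helper name and keyword style are assumptions for
# illustration only.
def build_ec2_filters(**name_to_values):
    '''Build an EC2 Filters list, e.g. build_ec2_filters(transit_gateway_id=tg_id, vpc_id=vpc_id)'''
    return [
        {'Name': name.replace('_', '-'), 'Values': values if isinstance(values, list) else [values]}
        for name, values in name_to_values.items()
    ]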
def discover_transit_gateway_peering_attachments(ec2_client, tgId, tgAttachId):
'''Get Transit Gateway Peering Attachment configuration based on the Transit Gateway ID and attachment ID'''
response = ec2_client.describe_transit_gateway_peering_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [
tgId,
]
},
{
'Name': 'transit-gateway-attachment-id',
'Values': [
tgAttachId,
]
},
]
)
return(response['TransitGatewayPeeringAttachments'])
| []
| []
| [
"LOG_LEVEL"
]
| [] | ["LOG_LEVEL"] | python | 1 | 0 | |
python/setup.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import os
import os.path as osp
import re
import shlex
import shutil
import sys
from Cython.Distutils import build_ext as _build_ext
import Cython
import pkg_resources
from setuptools import setup, Extension, Distribution
from os.path import join as pjoin
from distutils.command.clean import clean as _clean
from distutils.util import strtobool
from distutils import sysconfig
# Check if we're running 64-bit Python
is_64_bit = sys.maxsize > 2**32
if Cython.__version__ < '0.27':
raise Exception('Please upgrade to Cython 0.27 or newer')
setup_dir = os.path.abspath(os.path.dirname(__file__))
@contextlib.contextmanager
def changed_dir(dirname):
oldcwd = os.getcwd()
os.chdir(dirname)
try:
yield
finally:
os.chdir(oldcwd)
class clean(_clean):
def run(self):
_clean.run(self)
for x in []:
try:
os.remove(x)
except OSError:
pass
class build_ext(_build_ext):
_found_names = ()
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
self.extensions = [ext for ext in self.extensions
if ext.name != '__dummy__']
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def run(self):
self._run_cmake()
_build_ext.run(self)
# adapted from cmake_build_ext in dynd-python
# github.com/libdynd/dynd-python
description = "Build the C-extensions for arrow"
user_options = ([('cmake-generator=', None, 'CMake generator'),
('extra-cmake-args=', None, 'extra arguments for CMake'),
('build-type=', None,
'build type (debug or release), default release'),
('boost-namespace=', None,
'namespace of boost (default: boost)'),
('with-parquet', None, 'build the Parquet extension'),
('with-static-parquet', None, 'link parquet statically'),
('with-static-boost', None, 'link boost statically'),
('with-plasma', None, 'build the Plasma extension'),
('with-orc', None, 'build the ORC extension'),
('generate-coverage', None,
'enable Cython code coverage'),
('bundle-boost', None,
'bundle the (shared) Boost libraries'),
('bundle-arrow-cpp', None,
'bundle the Arrow C++ libraries')] +
_build_ext.user_options)
def initialize_options(self):
_build_ext.initialize_options(self)
self.cmake_generator = os.environ.get('PYARROW_CMAKE_GENERATOR')
if not self.cmake_generator and sys.platform == 'win32':
self.cmake_generator = 'Visual Studio 14 2015 Win64'
self.extra_cmake_args = os.environ.get('PYARROW_CMAKE_OPTIONS', '')
self.build_type = os.environ.get('PYARROW_BUILD_TYPE',
'release').lower()
self.boost_namespace = os.environ.get('PYARROW_BOOST_NAMESPACE',
'boost')
self.cmake_cxxflags = os.environ.get('PYARROW_CXXFLAGS', '')
if sys.platform == 'win32':
# Cannot do debug builds in Windows unless Python itself is a debug
# build
if not hasattr(sys, 'gettotalrefcount'):
self.build_type = 'release'
self.with_parquet = strtobool(
os.environ.get('PYARROW_WITH_PARQUET', '0'))
self.with_static_parquet = strtobool(
os.environ.get('PYARROW_WITH_STATIC_PARQUET', '0'))
self.with_static_boost = strtobool(
os.environ.get('PYARROW_WITH_STATIC_BOOST', '0'))
self.with_plasma = strtobool(
os.environ.get('PYARROW_WITH_PLASMA', '0'))
self.with_orc = strtobool(
os.environ.get('PYARROW_WITH_ORC', '0'))
self.generate_coverage = strtobool(
os.environ.get('PYARROW_GENERATE_COVERAGE', '0'))
self.bundle_arrow_cpp = strtobool(
os.environ.get('PYARROW_BUNDLE_ARROW_CPP', '0'))
self.bundle_boost = strtobool(
os.environ.get('PYARROW_BUNDLE_BOOST', '0'))
CYTHON_MODULE_NAMES = [
'lib',
'_parquet',
'_orc',
'_plasma']
def _run_cmake(self):
# The directory containing this setup.py
source = osp.dirname(osp.abspath(__file__))
# The staging directory for the module being built
build_temp = pjoin(os.getcwd(), self.build_temp)
build_lib = os.path.join(os.getcwd(), self.build_lib)
saved_cwd = os.getcwd()
if not os.path.isdir(self.build_temp):
self.mkpath(self.build_temp)
# Change to the build directory
with changed_dir(self.build_temp):
# Detect if we built elsewhere
if os.path.isfile('CMakeCache.txt'):
cachefile = open('CMakeCache.txt', 'r')
cachedir = re.search('CMAKE_CACHEFILE_DIR:INTERNAL=(.*)',
cachefile.read()).group(1)
cachefile.close()
if (cachedir != build_temp):
return
static_lib_option = ''
cmake_options = [
'-DPYTHON_EXECUTABLE=%s' % sys.executable,
static_lib_option,
]
if self.cmake_generator:
cmake_options += ['-G', self.cmake_generator]
if self.with_parquet:
cmake_options.append('-DPYARROW_BUILD_PARQUET=on')
if self.with_static_parquet:
cmake_options.append('-DPYARROW_PARQUET_USE_SHARED=off')
if not self.with_static_boost:
cmake_options.append('-DPYARROW_BOOST_USE_SHARED=on')
else:
cmake_options.append('-DPYARROW_BOOST_USE_SHARED=off')
if self.with_plasma:
cmake_options.append('-DPYARROW_BUILD_PLASMA=on')
if self.with_orc:
cmake_options.append('-DPYARROW_BUILD_ORC=on')
if len(self.cmake_cxxflags) > 0:
cmake_options.append('-DPYARROW_CXXFLAGS={0}'
.format(self.cmake_cxxflags))
if self.generate_coverage:
cmake_options.append('-DPYARROW_GENERATE_COVERAGE=on')
if self.bundle_arrow_cpp:
cmake_options.append('-DPYARROW_BUNDLE_ARROW_CPP=ON')
# ARROW-1090: work around CMake rough edges
if 'ARROW_HOME' in os.environ and sys.platform != 'win32':
pkg_config = pjoin(os.environ['ARROW_HOME'], 'lib',
'pkgconfig')
os.environ['PKG_CONFIG_PATH'] = pkg_config
del os.environ['ARROW_HOME']
if self.bundle_boost:
cmake_options.append('-DPYARROW_BUNDLE_BOOST=ON')
cmake_options.append('-DCMAKE_BUILD_TYPE={0}'
.format(self.build_type.lower()))
if self.boost_namespace != 'boost':
cmake_options.append('-DBoost_NAMESPACE={}'
.format(self.boost_namespace))
extra_cmake_args = shlex.split(self.extra_cmake_args)
build_tool_args = []
if sys.platform == 'win32':
if not is_64_bit:
raise RuntimeError('Not supported on 32-bit Windows')
else:
build_tool_args.append('--')
if os.environ.get('PYARROW_BUILD_VERBOSE', '0') == '1':
build_tool_args.append('VERBOSE=1')
if os.environ.get('PYARROW_PARALLEL'):
build_tool_args.append(
'-j{0}'.format(os.environ['PYARROW_PARALLEL']))
# Generate the build files
print("-- Runnning cmake for pyarrow")
self.spawn(['cmake'] + extra_cmake_args + cmake_options + [source])
print("-- Finished cmake for pyarrow")
# Do the build
print("-- Running cmake --build for pyarrow")
self.spawn(['cmake', '--build', '.', '--config', self.build_type]
+ build_tool_args)
print("-- Finished cmake --build for pyarrow")
if self.inplace:
# a bit hacky
build_lib = saved_cwd
# Move the libraries to the place expected by the Python build
try:
os.makedirs(pjoin(build_lib, 'pyarrow'))
except OSError:
pass
if sys.platform == 'win32':
build_prefix = ''
else:
build_prefix = self.build_type
if self.bundle_arrow_cpp:
print(pjoin(build_lib, 'pyarrow'))
move_shared_libs(build_prefix, build_lib, "arrow")
move_shared_libs(build_prefix, build_lib, "arrow_python")
if self.with_plasma:
move_shared_libs(build_prefix, build_lib, "plasma")
if self.with_parquet and not self.with_static_parquet:
move_shared_libs(build_prefix, build_lib, "parquet")
if not self.with_static_boost and self.bundle_boost:
move_shared_libs(
build_prefix, build_lib,
"{}_filesystem".format(self.boost_namespace))
move_shared_libs(
build_prefix, build_lib,
"{}_system".format(self.boost_namespace))
move_shared_libs(
build_prefix, build_lib,
"{}_regex".format(self.boost_namespace))
print('Bundling includes: ' + pjoin(build_prefix, 'include'))
if os.path.exists(pjoin(build_lib, 'pyarrow', 'include')):
shutil.rmtree(pjoin(build_lib, 'pyarrow', 'include'))
shutil.move(pjoin(build_prefix, 'include'),
pjoin(build_lib, 'pyarrow'))
# Move the built C-extension to the place expected by the Python
# build
self._found_names = []
for name in self.CYTHON_MODULE_NAMES:
built_path = self.get_ext_built(name)
if not os.path.exists(built_path):
print(built_path)
if self._failure_permitted(name):
print('Cython module {0} failure permitted'
.format(name))
continue
raise RuntimeError('pyarrow C-extension failed to build:',
os.path.abspath(built_path))
cpp_generated_path = self.get_ext_generated_cpp_source(name)
if not os.path.exists(cpp_generated_path):
raise RuntimeError('expected to find generated C++ file '
'in {0!r}'.format(cpp_generated_path))
# The destination path to move the generated C++ source to
# (for Cython source coverage)
cpp_path = pjoin(build_lib, self._get_build_dir(),
os.path.basename(cpp_generated_path))
if os.path.exists(cpp_path):
os.remove(cpp_path)
# The destination path to move the built C extension to
ext_path = pjoin(build_lib, self._get_cmake_ext_path(name))
if os.path.exists(ext_path):
os.remove(ext_path)
self.mkpath(os.path.dirname(ext_path))
print('Moving generated C++ source', cpp_generated_path,
'to build path', cpp_path)
shutil.move(cpp_generated_path, cpp_path)
print('Moving built C-extension', built_path,
'to build path', ext_path)
shutil.move(built_path, ext_path)
self._found_names.append(name)
if os.path.exists(self.get_ext_built_api_header(name)):
shutil.move(self.get_ext_built_api_header(name),
pjoin(os.path.dirname(ext_path),
name + '_api.h'))
# Move the plasma store
if self.with_plasma:
source = os.path.join(self.build_type, "plasma_store")
target = os.path.join(build_lib,
self._get_build_dir(),
"plasma_store")
shutil.move(source, target)
def _failure_permitted(self, name):
if name == '_parquet' and not self.with_parquet:
return True
if name == '_plasma' and not self.with_plasma:
return True
if name == '_orc' and not self.with_orc:
return True
return False
def _get_build_dir(self):
# Get the package directory from build_py
build_py = self.get_finalized_command('build_py')
return build_py.get_package_dir('pyarrow')
def _get_cmake_ext_path(self, name):
# This is the name of the arrow C-extension
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if suffix is None:
suffix = sysconfig.get_config_var('SO')
filename = name + suffix
return pjoin(self._get_build_dir(), filename)
def get_ext_generated_cpp_source(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
return pjoin(head, tail + ".cpp")
else:
return pjoin(name + ".cpp")
def get_ext_built_api_header(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
return pjoin(head, tail + "_api.h")
else:
return pjoin(name + "_api.h")
def get_ext_built(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
suffix = sysconfig.get_config_var('SO')
# Visual Studio seems to differ from other generators in
# where it places output files.
if self.cmake_generator.startswith('Visual Studio'):
return pjoin(head, self.build_type, tail + suffix)
else:
return pjoin(head, tail + suffix)
else:
suffix = sysconfig.get_config_var('SO')
return pjoin(self.build_type, name + suffix)
def get_names(self):
return self._found_names
def get_outputs(self):
# Just the C extensions
# regular_exts = _build_ext.get_outputs(self)
return [self._get_cmake_ext_path(name)
for name in self.get_names()]
def move_shared_libs(build_prefix, build_lib, lib_name):
if sys.platform == 'win32':
# Move all .dll and .lib files
libs = glob.glob(pjoin(build_prefix, lib_name) + '*')
for filename in libs:
shutil.move(pjoin(build_prefix, filename),
pjoin(build_lib, 'pyarrow', filename))
else:
_move_shared_libs_unix(build_prefix, build_lib, lib_name)
def _move_shared_libs_unix(build_prefix, build_lib, lib_name):
shared_library_prefix = 'lib'
if sys.platform == 'darwin':
shared_library_suffix = '.dylib'
else:
shared_library_suffix = '.so'
lib_filename = (shared_library_prefix + lib_name +
shared_library_suffix)
# Also copy libraries with ABI/SO version suffix
if sys.platform == 'darwin':
lib_pattern = (shared_library_prefix + lib_name +
".*" + shared_library_suffix[1:])
libs = glob.glob(pjoin(build_prefix, lib_pattern))
else:
libs = glob.glob(pjoin(build_prefix, lib_filename) + '*')
if not libs:
raise Exception('Could not find library:' + lib_filename +
' in ' + build_prefix)
# Longest suffix library should be copied, all others symlinked
libs.sort(key=lambda s: -len(s))
print(libs, libs[0])
lib_filename = os.path.basename(libs[0])
shutil.move(pjoin(build_prefix, lib_filename),
pjoin(build_lib, 'pyarrow', lib_filename))
for lib in libs[1:]:
filename = os.path.basename(lib)
link_name = pjoin(build_lib, 'pyarrow', filename)
if not os.path.exists(link_name):
os.symlink(lib_filename, link_name)
# In the case of a git-archive, we don't have any version information
# from the SCM to infer a version. The only source is the java/pom.xml.
#
# Note that this is only the case for git-archives. sdist tarballs have
# all relevant information (but not the Java sources).
if not os.path.exists('../.git') and os.path.exists('../java/pom.xml'):
import xml.etree.ElementTree as ET
tree = ET.parse('../java/pom.xml')
version_tag = list(tree.getroot().findall(
'{http://maven.apache.org/POM/4.0.0}version'))[0]
os.environ["SETUPTOOLS_SCM_PRETEND_VERSION"] = version_tag.text.replace(
"-SNAPSHOT", "a0")
with open('README.md') as f:
long_description = f.read()
class BinaryDistribution(Distribution):
def has_ext_modules(foo):
return True
install_requires = (
'numpy >= 1.10',
'six >= 1.0.0',
'futures;python_version<"3.2"'
)
def parse_version(root):
from setuptools_scm import version_from_scm
import setuptools_scm.git
describe = (setuptools_scm.git.DEFAULT_DESCRIBE +
" --match 'apache-arrow-[0-9]*'")
# Strip catchall from the commandline
describe = describe.replace("--match *.*", "")
version = setuptools_scm.git.parse(root, describe)
if not version:
return version_from_scm(root)
else:
return version
# Only include pytest-runner in setup_requires if we're invoking tests
if {'pytest', 'test', 'ptr'}.intersection(sys.argv):
setup_requires = ['pytest-runner']
else:
setup_requires = []
setup(
name="pyarrow",
packages=['pyarrow', 'pyarrow.tests'],
zip_safe=False,
package_data={'pyarrow': ['*.pxd', '*.pyx', 'includes/*.pxd']},
include_package_data=True,
distclass=BinaryDistribution,
# Dummy extension to trigger build_ext
ext_modules=[Extension('__dummy__', sources=[])],
cmdclass={
'clean': clean,
'build_ext': build_ext
},
entry_points={
'console_scripts': [
'plasma_store = pyarrow:_plasma_store_entry_point'
]
},
use_scm_version={"root": "..", "relative_to": __file__,
"parse": parse_version},
setup_requires=['setuptools_scm', 'cython >= 0.27'] + setup_requires,
install_requires=install_requires,
tests_require=['pytest', 'pandas'],
description="Python library for Apache Arrow",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='Apache License, Version 2.0',
maintainer="Apache Arrow Developers",
maintainer_email="[email protected]",
test_suite="pyarrow.tests",
url="https://arrow.apache.org/"
)
| []
| []
| [
"PYARROW_WITH_STATIC_BOOST",
"ARROW_HOME",
"PYARROW_CMAKE_GENERATOR",
"PYARROW_CMAKE_OPTIONS",
"PYARROW_WITH_STATIC_PARQUET",
"PYARROW_BUILD_TYPE",
"PYARROW_PARALLEL",
"PYARROW_GENERATE_COVERAGE",
"PYARROW_WITH_ORC",
"PYARROW_BUNDLE_ARROW_CPP",
"PYARROW_BOOST_NAMESPACE",
"PKG_CONFIG_PATH",
"PYARROW_BUILD_VERBOSE",
"PYARROW_CXXFLAGS",
"SETUPTOOLS_SCM_PRETEND_VERSION",
"PYARROW_WITH_PLASMA",
"PYARROW_BUNDLE_BOOST",
"PYARROW_WITH_PARQUET"
]
| [] | ["PYARROW_WITH_STATIC_BOOST", "ARROW_HOME", "PYARROW_CMAKE_GENERATOR", "PYARROW_CMAKE_OPTIONS", "PYARROW_WITH_STATIC_PARQUET", "PYARROW_BUILD_TYPE", "PYARROW_PARALLEL", "PYARROW_GENERATE_COVERAGE", "PYARROW_WITH_ORC", "PYARROW_BUNDLE_ARROW_CPP", "PYARROW_BOOST_NAMESPACE", "PKG_CONFIG_PATH", "PYARROW_BUILD_VERBOSE", "PYARROW_CXXFLAGS", "SETUPTOOLS_SCM_PRETEND_VERSION", "PYARROW_WITH_PLASMA", "PYARROW_BUNDLE_BOOST", "PYARROW_WITH_PARQUET"] | python | 18 | 0 | |
dckrclstrpanic/panic.py | #! /usr/bin/env python
from __future__ import print_function
import sh
from io import StringIO
import sys
import boto3
import os
import requests
import time
import uuid
import logging
import datetime
from dateutil.tz import tzutc
import json
from sh import docker_machine
import pprint
from threading import Semaphore
from botocore.exceptions import ClientError
#----------------------------------------------------------------------------#
#--- SUBPROCESS w/ ASYNC READ -----------------------------------------------#
#----------------------------------------------------------------------------#
from zope.component import getGlobalSiteManager, getUtility
from zope.interface import implements, Interface
import threading
from threading import Thread
import subprocess
from subprocess import Popen, PIPE
gsm = getGlobalSiteManager()
def dbgprnt(msg):
print('/////// msg ///////')
print(msg)
print('///////////////////')
class IFWUtils(Interface):
pass
class HoverException(Exception):
pass
class HoverAPI(object):
def __init__(self):
username = os.environ.get('hover_username')
password = os.environ.get('hover_password')
params = {"username": username, "password": password}
r = requests.post("https://www.hover.com/api/login", json=params)
if not r.ok or "hoverauth" not in r.cookies:
raise HoverException(r)
self.cookies = {"hoverauth": r.cookies["hoverauth"]}
res = self.call("get", "dns")
self.entries_by_domain = {}
self.ids_by_domain = {}
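        # Index the account's domains by name so later calls can resolve a domain name to its Hover id.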
if res.get('succeeded'):
for item in res.get('domains'):
domain = item.get('domain_name')
domain_id = item.get('id')
self.entries_by_domain[domain] = item
self.ids_by_domain[domain] = domain_id
def call(self, method, resource, data=None):
url = "https://www.hover.com/api/{0}".format(resource)
r = requests.request(method, url, data=data, cookies=self.cookies)
if not r.ok:
raise HoverException(r)
if r.content:
body = r.json()
if "succeeded" not in body or body["succeeded"] is not True:
raise HoverException(body)
return body
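        # Example: self.call("get", "dns") returns the decoded JSON body, e.g. {"succeeded": True, "domains": [...]}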
def getEntryById(self, domain_id):
return self.call("get", "domains/{}".format(domain_id))
def getDnsById(self, domain_id):
return self.call("get", "domains/{}/dns".format(domain_id))
def getDnsByDomain(self, domain):
#print(self.ids_by_domain.keys())
domain_id = self.ids_by_domain.get(domain)
return self.getDnsById(domain_id)
def createDnsARecord(self):
record = {"name": "mysubdomain", "type": "A", "content": "127.0.0.1"}
self.call("post", "domains/dom123456/dns", record)
def setNameservers(self, nameservers=[]):
        params = {"field": "nameservers", "value": nameservers}
r = requests.put(
"https://www.hover.com/control_panel/domain/entomoph.me",
cookies=self.cookies,
json=params
)
class Route53API(object):
def __init__(self):
self.conn = boto3.client('route53')
def listAllZones(self):
return self.conn.list_hosted_zones().get('HostedZones')
def createZone(self, zone_name, caller_ref):
return self.conn.create_hosted_zone(
Name=zone_name,
CallerReference=caller_ref
)
    def changeRecordSets(self, zone_id, change_list):
self.conn.change_resource_record_sets(
HostedZoneId = zone_id,
ChangeBatch = {'Changes': change_list}
)
def deleteZoneById(self, zone_id):
for zone in self.listAllZones():
if zone.get('Id') == zone_id:
self.conn.delete_hosted_zone(Id=zone_id)
def deleteAllZones(self):
for zone in self.listAllZones():
zone_id = zone.get('Id')
self.conn.delete_hosted_zone(Id=zone_id)
return json.dumps({"succeeded":True})
class HoverAWS(object):
def __init__(self):
self.r53 = Route53API()
self.hov = HoverAPI()
def __call__(self):
        print(self.r53.listAllZones())
        print(self.hov.entries_by_domain)
class EC2(object):
def __init__(self):
#boto3.set_stream_logger(name='botocore')
self.conn = boto3.client('ec2')
self.rsrc = boto3.resource('ec2')
#self.conn = boto3.resource('ec2')
def getInstances(self):
# running instance --> Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]
        return list(self.rsrc.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running', 'stopped']}]
))
def listSecurityGroups(self):
return self.conn.describe_security_groups().get('SecurityGroups')
def getSecurityGroupByName(self, name):
for group in self.listSecurityGroups():
group_name = group.get('GroupName')
if group_name == name:
return group
return None
def authorizeSwarmIngress(self, group_id):
self.conn.authorize_security_group_ingress(
GroupId=group_id,
IpPermissions=[
{
'IpProtocol': 'tcp',
'FromPort': 2377,
'ToPort': 2377,
'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
}
])
def getInstancesWithTag(self, tag_name, tag_value):
filters = [{'Name': 'tag:{}'.format(tag_name), 'Values': [tag_value]}]
return self.conn.describe_instances(Filters=filters)
def getInstancesWithTagKey(self, tag_name):
"returns ec2.instancesCollection"
filters = [{'Name':'tag-key', 'Values':["{}".format(tag_name)]}]
return self.rsrc.instances.filter(Filters=filters)
def getInstancesWithTagValue(self, tag_value):
"returns ec2.instancesCollection"
filters = [{'Name':'tag-value', 'Values':["{}".format(tag_value)]}]
return self.rsrc.instances.filter(Filters=filters)
def getInstanceTags(self, instance_id):
instance = self.getInstanceById(instance_id)
tags = {}
if instance.tags is None:
return []
for tag in instance.tags:
key = tag['Key']
tags[key] = tag['Value']
return tags
def applyTagToInstance(self, instance_id, tags):
#tags is in this format --> [{'Key':'foo', 'Value':'bar'}]
self.conn.create_tags(Resources=[instance_id], Tags=tags)
def getInstanceById(self, instance_id):
return self.rsrc.Instance(instance_id)
def getVolumesByInstanceId(self, instance_id):
return self.conn.describe_instance_attribute(
InstanceId=instance_id,
Attribute='blockDeviceMapping'
)
def rebootInstance(self, instance_id):
instance = self.getInstanceById(instance_id)
instance.reboot()
instance.wait_until_running()
return
def getPrivateDns(self, instance_id):
instance = self.getInstanceById(instance_id)
return instance.private_dns_name
def getPrivateIpAddress(self, instance_id):
instance = self.getInstanceById(instance_id)
return instance.private_ip_address.strip()
def getPublicIpAddress(self, instance_id):
instance = self.getInstanceById(instance_id)
return instance.public_ip_address.strip()
def listVPC(self):
        return list(self.rsrc.vpcs.all())
#vpcs = list(self.conn.vpcs.filter(Filters=filters))
ec2 = EC2()
gsm.registerUtility(ec2, IFWUtils, 'ec2')
# //////////////////////////////////////////////////////////////////////////// #
# //////////////////////////////////////////////////////////////////////////// #
# //////////////////////////////////////////////////////////////////////////// #
class IPFSCluster(object):
def __init__(self):
pass
    def createClusterSecret(self):
        # sh does not spawn a shell, so the original `| tr -d ' \n'` pipeline
        # cannot work; strip the whitespace in Python instead.
        out = sh.od("-vN", "32", "-An", "-tx1", "/dev/urandom")
        return out.strip().replace(' ', '').replace('\n', '')
def initCluster(self, mchn_name, env={}):
cmd = "ipfs-cluster-service init"
self.runCmd(mchn_name, cmd.split(' '), env=env)
def runClusterDaemon(self, mchn_name, env={}):
cmd = "ipfs-cluster-service daemon"
self.runCmd(mchn_name, cmd.split(' '), env=env)
def setClusterMaster(self, mchn_name, env={}):
if "CLUSTER_SECRET" in env.keys():
self.initCluster(mchn_name, env=env)
self.runClusterDaemon(mchn_name, env=env)
else:
print("/// NO CLUSTER SECRET EXISTS ///")
sys.exit(0)
def setClusterWorker(self, mchn_name, env={}):
if "CLUSTER_SECRET" in env.keys():
            self.runClusterDaemon(mchn_name, env=env)
else:
print("/// NO CLUSTER SECRET EXISTS ///")
sys.exit(0)
_ipfs = IPFSCluster()
gsm.registerUtility(_ipfs, IFWUtils, '_ipfs')
class Swarm(object):
def __init__(self):
self.ec2 = getUtility(IFWUtils, 'ec2')
self.ipfs = getUtility(IFWUtils, '_ipfs')
#patches
self.ipfs.runCmd = self.runCmd
def getSwarmMasters(self, cluster_uid):
cluster = self.getClusterInfo(cluster_uid)
masters = []
for mchn_name in cluster:
entry = cluster[mchn_name]
if 'swarm_master' in entry.keys():
masters.append(mchn_name)
return masters
def checkService(self, srvc_name, cluster_uid):
mchn_name = self.getSwarmMasters(cluster_uid)[0]
cmd = \
"sudo docker service ps {}".format(srvc_name)
self.runCmd(mchn_name, cmd, quoted=False)
def createRegistryService(self, cluster_uid):
mchn_name = self.getSwarmMasters(cluster_uid)[0]
cmd = \
"docker service create --name registry --publish published=5000,target=5000 registry:2"
self.runCmd(mchn_name, cmd, quoted=False)
    def getInstanceTagsDict(self, instance):
        tags_lst = [{x['Key']:x['Value']} for x in instance.tags]
        tags_dict = {}
        for item in tags_lst:
            key = item.keys()[0]
            val = item.get(key)
            tags_dict[key] = val
        return tags_dict
def getClusters(self):
clusters = {}
        for instance in self.ec2.getInstancesWithTagKey('cluster_uid').all():
instance_tags = instance.tags
instance_id = instance.id
tags_lst = [{x['Key']:x['Value']} for x in instance_tags]
tags_dict = {}
for item in tags_lst:
key = item.keys()[0]
val = item.get(key)
tags_dict[key] = val
cluster_uid = tags_dict.pop('cluster_uid')
mchn_name = tags_dict.pop('Name')
if cluster_uid not in clusters.keys():
clusters[cluster_uid] = {}
clusters[cluster_uid][mchn_name] = {'instance_id':instance_id}
if 'swarm_master' in tags_dict.keys():
clusters[cluster_uid][mchn_name]['swarm_master'] = True
clusters[cluster_uid][mchn_name]['volumes'] = \
[x.id for x in instance.volumes.all()]
print("/// CLUSTER INFO ///")
print(clusters)
return clusters
def getClusterInfo(self, cluster_uid):
return self.getClusters().get(cluster_uid)
def getSwarmSecurityGroupId(self):
        return self.ec2.getSecurityGroupByName('docker-machine').get('GroupId')
def listAllSwarms(self):
for instance in self.ec2.getInstances():
print(self.ec2.getInstanceTags(instance.id))
def listAWSDockerMachines(self):
#filter='state=Running',
return sh.docker_machine.ls(
filter='driver=amazonec2',
format="{{.Name}}"
).strip().split('\n')
def inspectDckrMchn(self, mchn_name):
return json.loads(sh.docker_machine.inspect(mchn_name).strip())
def getAwsInstanceId(self, mchn_name):
return self.inspectDckrMchn(mchn_name).get('Driver').get('InstanceId')
def rmDckrMchn(self, mchn_name):
return sh.docker_machine.rm(mchn_name, force=True)
    def rmSwarm(self):
for mchn_name in self.listAWSDockerMachines():
if('dckrmchn') in mchn_name:
                self.rmDckrMchn(mchn_name)
print(mchn_name)
def printCmd(self, running_command):
print("///////// runCmd /////////")
print(' '.join(running_command.cmd).strip())
print("///////// end res /////////")
def runCmd(self, mchn_name, cmd, env={}, quoted=False):
#hostname env VAR1=VALUE1 VAR2=VALUE thecommand the args
#ssh username@machine VAR=value cmd cmdargs
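        # i.e. effectively: docker-machine ssh <mchn_name> VAR1=VALUE1 VAR2=VALUE2 ... <cmd>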
args = []
if env:
for kv in env.items():
args.append("{}={}".format(kv[0], kv[1]))
baked = sh.docker_machine.ssh.bake(mchn_name, *args)
if quoted:
res = baked(cmd)
            self.printCmd(res)
else:
res = baked(cmd)
self.printCmd(res)
return res
def getJoinToken(self, mchn_name, role="worker"):
cmd = 'docker swarm join-token --quiet {}'.format(role)
return self.runCmd(mchn_name, cmd)
def getMachinePublicIp(self, mchn_name):
instance_id = self.getAwsInstanceId(mchn_name)
return self.ec2.getPublicIpAddress(instance_id)
def setSwarmWorker(self, mchn_name, join_token, mgr_ip):
print(">>> setSwarmWorker <<<")
advertised_addr = "{}:2377".format(mgr_ip)
print(advertised_addr)
cmd = \
"""
docker swarm join --token {} {}
""".format(join_token, advertised_addr)
self.runCmd(mchn_name, cmd, quoted=True)
def setSwarmMaster(self, mchn_name):
mgr_ip = self.getMachinePublicIp(mchn_name)
#advertised_addr = "tcp://{}:2377".format(mgr_ip)
instance_id = self.getAwsInstanceId(mchn_name)
advertised_addr = "{}:2377".format(mgr_ip)
cmd = "docker swarm init --advertise-addr {}".format(mgr_ip)
self.runCmd(mchn_name, cmd)
master_tags = [
{'Key':'swarm_master', 'Value':'true'}
]
self.ec2.applyTagToInstance(instance_id, master_tags)
worker_token = self.getJoinToken(mchn_name, role="worker")
return (worker_token, mgr_ip)
def getSwarmNodes(self, cluster_uid):
return self.ec2.getInstancesWithTag('cluster_uid', cluster_uid)
def getSwarmNodeInstances(self, cluster_uid):
nodes = self.getSwarmNodes(cluster_uid).get('Reservations')
res = []
for entry in nodes:
node = entry.get('Instances')[0]
instance_id = node.get('InstanceId')
res.append(self.ec2.rsrc.Instance(instance_id))
return res
def applySecurityGroupRules(self):
#2377/tcp: Swarm mode api
#7946/both: Overlay networking control
#4789/udp: Overlay networking data
#protocol 50 for ipsec (secure option) of overlay networking
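        #8080/80: http endpoints; 4001-4002: likely the IPFS swarm/API ports used by the cluster nodes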
rules = [
("tcp",[2377]),
("tcp",[7946]),
("udp",[7946]),
("tcp",[4789]),
("udp",[4789]),
("tcp",[8080]),
("tcp",[80]),
("tcp",[4001,4002])
]
        sg_id = self.getSwarmSecurityGroupId()
ip_permissions = []
for rule in rules:
if len(rule[1]) == 1:
protocol, from_port, to_port = \
rule[0], rule[1][0], rule[1][0]
else:
protocol, from_port, to_port = \
rule[0], rule[1][0], rule[1][1]
entry = {
'IpProtocol':protocol,
'FromPort':from_port,
'ToPort':to_port,
'IpRanges':[{'CidrIp': '0.0.0.0/0'}]
}
ip_permissions.append(entry)
try:
self.ec2.conn.authorize_security_group_ingress(
GroupId=sg_id,
IpPermissions=ip_permissions
)
except ClientError:
pass
def createMachineName(self, uid=None):
if not uid:
uid = str(uuid.uuid4())
return "dckrmchn-{}".format(uid)
def addUserToDockerGroup(self, mchn_name):
cmd = "sudo usermod -aG docker $USER"
self.runCmd(mchn_name, cmd)
def onSwarmNodeCreated(self, cluster_uid, proc, success, exit_code):
"""
I run when aws instance creation has succeeded
I am an asynchronous callback originated by sh.RunningCommand
"""
mchn_name = proc.cmd[-1]
tags = [
{'Key':'cluster_uid', 'Value':cluster_uid}
]
instance_id = self.getAwsInstanceId(mchn_name)
self.ec2.applyTagToInstance(instance_id, tags)
self.addUserToDockerGroup(mchn_name)
def getMachineCommand(self, engine_env_args):
return \
sh.docker_machine.create.bake(
driver="amazonec2",
amazonec2_instance_type="t2.micro",
*engine_env_args
)
def createSwarmNodes(self, n_workers, cluster_uid, env_args={}):
"""
I asynchrounously create and tag all swarm instances on aws
"""
pool = Semaphore(10)
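        # Cap concurrent `docker-machine create` calls at 10: launchNode acquires the semaphore and the done callback releases it.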
def done(proc, success, exit_code):
if success:
self.onSwarmNodeCreated(cluster_uid, proc, success, exit_code)
pool.release()
def launchNode(mchn_name):
### apply env arguments
engine_env_args = []
for entry in env_args.items():
engine_env_args.append('--engine-env')
engine_env_args.append('{}={}'.format(entry[0], entry[1]))
dmc = self.getMachineCommand(engine_env_args)
#### do the thing
pool.acquire()
running_cmd = dmc(mchn_name, _bg=True, _done=done)
return running_cmd
#set up the nodes
cluster_mchn_names = []
procs = []
for x in range(0, n_workers):
mchn_name = self.createMachineName()
cluster_mchn_names.append(mchn_name)
procs.append(launchNode(mchn_name))
#wait for the nodes to launch
[p.wait() for p in procs]
print("FINALLY")
return cluster_mchn_names
def initSwarm(self, n_workers=3, env_args={}):
"create a new swarm with n nodes"
cluster_uid = str(uuid.uuid4())
cluster_mchn_names = \
self.createSwarmNodes(
n_workers,
cluster_uid,
env_args=env_args
)
        self.applySecurityGroupRules()
mstr_mchn = cluster_mchn_names[0]
# >>> up to this point all nodes are created equal <<<
# set the swarm master
        join_token, mgr_ip = self.setSwarmMaster(mstr_mchn)
# set the swarm workers
for mchn_name in cluster_mchn_names:
if mchn_name != mstr_mchn:
                self.setSwarmWorker(mchn_name, join_token, mgr_ip)
_swarm = Swarm()
gsm.registerUtility(_swarm, IFWUtils, '_swarm')
if __name__ == '__main__':
pp = pprint.PrettyPrinter(indent=2)
swarm = getUtility(IFWUtils, '_swarm')
    swarm.rmSwarm()
#swarm.initSwarm(n_workers=3, env_args={"foo":"bar", "bin":"baz"})
| []
| []
| [
"hover_username",
"hover_password"
]
| [] | ["hover_username", "hover_password"] | python | 2 | 0 | |
integration/integration_linux_test.go | // Copyright 2018 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration_test
import (
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"bytes"
"io"
"net"
"regexp"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
var _ = Describe("Basic PTP using cnitool", func() {
var (
cnitoolBin string
cniPath string
)
BeforeEach(func() {
var err error
cniPath, err = filepath.Abs("../bin")
Expect(err).NotTo(HaveOccurred())
cnitoolBin, err = exec.LookPath("cnitool")
Expect(err).NotTo(HaveOccurred(), "expected to find cnitool in your PATH")
})
Context("basic cases", func() {
var (
env TestEnv
hostNS Namespace
contNS Namespace
)
BeforeEach(func() {
var err error
netConfPath, err := filepath.Abs("./testdata")
Expect(err).NotTo(HaveOccurred())
env = TestEnv([]string{
"CNI_PATH=" + cniPath,
"NETCONFPATH=" + netConfPath,
"PATH=" + os.Getenv("PATH"),
})
hostNS = Namespace(fmt.Sprintf("cni-test-host-%x", rand.Int31()))
hostNS.Add()
contNS = Namespace(fmt.Sprintf("cni-test-cont-%x", rand.Int31()))
contNS.Add()
})
AfterEach(func() {
contNS.Del()
hostNS.Del()
})
basicAssertion := func(netName, expectedIPPrefix string) {
env.runInNS(hostNS, cnitoolBin, "add", netName, contNS.LongName())
addrOutput := env.runInNS(contNS, "ip", "addr")
Expect(addrOutput).To(ContainSubstring(expectedIPPrefix))
env.runInNS(hostNS, cnitoolBin, "del", netName, contNS.LongName())
}
It("supports basic network add and del operations", func() {
basicAssertion("basic-ptp", "10.1.2.")
})
It("supports add and del with ptp + bandwidth", func() {
basicAssertion("chained-ptp-bandwidth", "10.9.2.")
})
})
Context("when the bandwidth plugin is chained with a plugin that returns multiple adapters", func() {
var (
hostNS Namespace
contNS1 Namespace
contNS2 Namespace
basicBridgeEnv TestEnv
chainedBridgeBandwidthEnv TestEnv
chainedBridgeBandwidthSession, basicBridgeSession *gexec.Session
)
BeforeEach(func() {
hostNS = Namespace(fmt.Sprintf("cni-test-host-%x", rand.Int31()))
hostNS.Add()
contNS1 = Namespace(fmt.Sprintf("cni-test-cont1-%x", rand.Int31()))
contNS1.Add()
contNS2 = Namespace(fmt.Sprintf("cni-test-cont2-%x", rand.Int31()))
contNS2.Add()
basicBridgeNetConfPath, err := filepath.Abs("./testdata/basic-bridge")
Expect(err).NotTo(HaveOccurred())
basicBridgeEnv = TestEnv([]string{
"CNI_PATH=" + cniPath,
"NETCONFPATH=" + basicBridgeNetConfPath,
"PATH=" + os.Getenv("PATH"),
})
chainedBridgeBandwidthNetConfPath, err := filepath.Abs("./testdata/chained-bridge-bandwidth")
Expect(err).NotTo(HaveOccurred())
chainedBridgeBandwidthEnv = TestEnv([]string{
"CNI_PATH=" + cniPath,
"NETCONFPATH=" + chainedBridgeBandwidthNetConfPath,
"PATH=" + os.Getenv("PATH"),
})
})
AfterEach(func() {
if chainedBridgeBandwidthSession != nil {
chainedBridgeBandwidthSession.Kill()
}
if basicBridgeSession != nil {
basicBridgeSession.Kill()
}
chainedBridgeBandwidthEnv.runInNS(hostNS, cnitoolBin, "del", "network-chain-test", contNS1.LongName())
basicBridgeEnv.runInNS(hostNS, cnitoolBin, "del", "network-chain-test", contNS2.LongName())
})
Measure("limits traffic only on the restricted bandwith veth device", func(b Benchmarker) {
ipRegexp := regexp.MustCompile("10\\.1[12]\\.2\\.\\d{1,3}")
By(fmt.Sprintf("adding %s to %s\n\n", "chained-bridge-bandwidth", contNS1.ShortName()))
chainedBridgeBandwidthEnv.runInNS(hostNS, cnitoolBin, "add", "network-chain-test", contNS1.LongName())
chainedBridgeIP := ipRegexp.FindString(chainedBridgeBandwidthEnv.runInNS(contNS1, "ip", "addr"))
Expect(chainedBridgeIP).To(ContainSubstring("10.12.2."))
By(fmt.Sprintf("adding %s to %s\n\n", "basic-bridge", contNS2.ShortName()))
basicBridgeEnv.runInNS(hostNS, cnitoolBin, "add", "network-chain-test", contNS2.LongName())
basicBridgeIP := ipRegexp.FindString(basicBridgeEnv.runInNS(contNS2, "ip", "addr"))
Expect(basicBridgeIP).To(ContainSubstring("10.11.2."))
var chainedBridgeBandwidthPort, basicBridgePort int
var err error
By(fmt.Sprintf("starting echo server in %s\n\n", contNS1.ShortName()))
chainedBridgeBandwidthPort, chainedBridgeBandwidthSession, err = startEchoServerInNamespace(contNS1)
Expect(err).ToNot(HaveOccurred())
By(fmt.Sprintf("starting echo server in %s\n\n", contNS2.ShortName()))
basicBridgePort, basicBridgeSession, err = startEchoServerInNamespace(contNS2)
Expect(err).ToNot(HaveOccurred())
packetInBytes := 20000 // The shaper needs to 'warm'. Send enough to cause it to throttle,
// balanced by run time.
By(fmt.Sprintf("sending tcp traffic to the chained, bridged, traffic shaped container on ip address '%s:%d'\n\n", chainedBridgeIP, chainedBridgeBandwidthPort))
runtimeWithLimit := b.Time("with chained bridge and bandwidth plugins", func() {
makeTcpClientInNS(hostNS.ShortName(), chainedBridgeIP, chainedBridgeBandwidthPort, packetInBytes)
})
By(fmt.Sprintf("sending tcp traffic to the basic bridged container on ip address '%s:%d'\n\n", basicBridgeIP, basicBridgePort))
runtimeWithoutLimit := b.Time("with basic bridged plugin", func() {
makeTcpClientInNS(hostNS.ShortName(), basicBridgeIP, basicBridgePort, packetInBytes)
})
Expect(runtimeWithLimit).To(BeNumerically(">", runtimeWithoutLimit+1000*time.Millisecond))
}, 1)
})
})
type TestEnv []string
func (e TestEnv) run(bin string, args ...string) string {
cmd := exec.Command(bin, args...)
cmd.Env = e
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
Eventually(session, "5s").Should(gexec.Exit(0))
return string(session.Out.Contents())
}
func (e TestEnv) runInNS(nsShortName Namespace, bin string, args ...string) string {
a := append([]string{"netns", "exec", string(nsShortName), bin}, args...)
return e.run("ip", a...)
}
type Namespace string
func (n Namespace) LongName() string {
return fmt.Sprintf("/var/run/netns/%s", n)
}
func (n Namespace) ShortName() string {
return string(n)
}
func (n Namespace) Add() {
(TestEnv{}).run("ip", "netns", "add", string(n))
}
func (n Namespace) Del() {
(TestEnv{}).run("ip", "netns", "del", string(n))
}
func makeTcpClientInNS(netns string, address string, port int, numBytes int) {
payload := bytes.Repeat([]byte{'a'}, numBytes)
message := string(payload)
var cmd *exec.Cmd
if netns != "" {
netns = filepath.Base(netns)
cmd = exec.Command("ip", "netns", "exec", netns, echoClientBinaryPath, "--target", fmt.Sprintf("%s:%d", address, port), "--message", message)
} else {
cmd = exec.Command(echoClientBinaryPath, "--target", fmt.Sprintf("%s:%d", address, port), "--message", message)
}
cmd.Stdin = bytes.NewBuffer([]byte(message))
cmd.Stderr = GinkgoWriter
out, err := cmd.Output()
Expect(err).NotTo(HaveOccurred())
Expect(string(out)).To(Equal(message))
}
func startEchoServerInNamespace(netNS Namespace) (int, *gexec.Session, error) {
session, err := startInNetNS(echoServerBinaryPath, netNS)
Expect(err).NotTo(HaveOccurred())
	// wait for it to print its address on stdout
Eventually(session.Out).Should(gbytes.Say("\n"))
_, portString, err := net.SplitHostPort(strings.TrimSpace(string(session.Out.Contents())))
Expect(err).NotTo(HaveOccurred())
port, err := strconv.Atoi(portString)
Expect(err).NotTo(HaveOccurred())
go func() {
// print out echoserver output to ginkgo to capture any errors that might be occurring.
io.Copy(GinkgoWriter, io.MultiReader(session.Out, session.Err))
}()
return port, session, nil
}
func startInNetNS(binPath string, namespace Namespace) (*gexec.Session, error) {
cmd := exec.Command("ip", "netns", "exec", namespace.ShortName(), binPath)
return gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
}
| [
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
background/background_linux.go | package background
import (
"image"
"os"
"os/exec"
"path/filepath"
"strings"
)
func setGnome3(absPath string) error {
// Darken background area
exec.Command(
"gsettings", "set", "org.gnome.desktop.background",
"primary-color", "#000000",
).Run()
// Set background mode (again, testing on gnome3)
exec.Command(
"gsettings", "set", "org.gnome.desktop.background",
"picture-options", "scaled",
).Run()
// Set the background (gnome3 only atm)
return exec.Command(
"gsettings", "set", "org.gnome.desktop.background",
"picture-uri", "file://"+absPath,
).Run()
}
func setMate(absPath string) error {
// Darken background area
exec.Command(
"gsettings", "set", "org.mate.background",
"primary-color", "#000000",
).Run()
// Enable solid background color
exec.Command(
"gsettings", "set", "org.mate.background",
"color-shading-type", "solid",
).Run()
// Set background mode
exec.Command(
"gsettings", "set", "org.mate.background",
"picture-options", "scaled",
).Run()
// Set the background
return exec.Command(
"gsettings", "set", "org.mate.background",
"picture-filename", absPath,
).Run()
}
func setFeh(absPath string) error {
// Set the background
return exec.Command(
"feh", "--bg-max", absPath,
).Run()
}
// PlatformDownload downloads the image to the preferred location for the
// platform and returns the path it downloaded to.
func PlatformDownload(img image.Image) (string, error) {
// Get the absolute path of the directory.
homePath := os.Getenv("HOME")
absPath := filepath.Join(homePath, ".local", "share", "satellite", "background.png")
// Create the file.
return absPath, DownloadOnly(img, absPath)
}
// Set the background on linux.
func Set(absPath string) error {
var useDesktop string
if CustomDesktop == "" {
useDesktop = os.Getenv("XDG_CURRENT_DESKTOP")
} else {
useDesktop = CustomDesktop
}
// Check for desktop-specific methods.
switch strings.ToLower(useDesktop) {
case "gnome", "x-cinnamon":
return setGnome3(absPath)
case "mate":
return setMate(absPath)
}
// None found, now let's try to check if feh is installed.
if _, err := exec.LookPath("feh"); err != nil {
return ErrDEUnsupported
}
return setFeh(absPath)
}
| [
"\"HOME\"",
"\"XDG_CURRENT_DESKTOP\""
]
| []
| [
"HOME",
"XDG_CURRENT_DESKTOP"
]
| [] | ["HOME", "XDG_CURRENT_DESKTOP"] | go | 2 | 0 | |
scripts/post_install.py | """Setuptools post install script."""
import errno
import getpass
import os
import shutil
import sys
def run(install):
"""Runs all post install hooks."""
_copy_sh_ext(install)
def _copy_sh_ext(install):
"""Copy shell extension to funky config directory."""
this_dir = os.path.dirname(os.path.realpath(__file__))
root = install.root if install.root else ''
if 'XDG_DATA_HOME' in os.environ:
xdg_data_dir = root + os.environ['XDG_DATA_HOME'] + "/funky"
else:
home = 'Users' if sys.platform == 'darwin' else 'home'
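        # macOS keeps home directories under /Users; Linux uses /home.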
user = getpass.getuser()
if user == 'root':
xdg_data_dir = root + "/usr/share/funky"
else:
xdg_data_dir = root + "/{}/{}/.local/share/funky".format(home, user)
_create_dir(xdg_data_dir)
src = '{}/shell/funky.sh'.format(this_dir)
dest = '{}/funky.sh'.format(xdg_data_dir)
shutil.copyfile(src, dest)
def _create_dir(directory):
"""Create directory."""
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
return
if __name__ == "__main__":
run()
| []
| []
| [
"XDG_DATA_HOME"
]
| [] | ["XDG_DATA_HOME"] | python | 1 | 0 | |
qa/rpc-tests/test_framework/test_framework.py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Syscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-syscoinrpc to module search path:
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_syscoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from .authproxy import AuthServiceProxy, JSONRPCException
class SyscoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_syscoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_syscoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave syscoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop syscoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing syscoind/syscoin-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
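        # Prepend the locally built syscoind/syscoin-cli (and Qt binaries) so the tests pick them up first.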
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
wait_syscoinds()
else:
print("Note: syscoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some syscoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(SyscoinTestFramework):
# Can override the num_nodes variable to indicate how many nodes to run.
def __init__(self):
self.num_nodes = 2
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("SYSCOIND", "syscoind"),
help="syscoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("SYSCOIND", "syscoind"),
help="syscoind binary to use for reference nodes (if any)")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| []
| []
| [
"SYSCOIND",
"PATH"
]
| [] | ["SYSCOIND", "PATH"] | python | 2 | 0 | |
letsencrypt/etc/settings.py | # -*- coding: utf-8 -*-
import os
### Celery settings ###
#broker = 'amqp://[email protected]//'
#backend = 'amqp://[email protected]//'
broker = os.environ.get('BROKER_URL')
backend = os.environ.get('BROKER_URL')
### List of NS servers that the validator may use ###
lista_soa = []
### status_json dictionary ###
schema_json = {
'status': 'iniciando',
}
### Validator settings: interval in seconds and retry attempts ###
intervalo = 30
max_retry = 20
### directory where the validator and other components save status_json ###
status_json_dir = '/var/www/html/certs'
| []
| []
| [
"BROKER_URL"
]
| [] | ["BROKER_URL"] | python | 1 | 0 | |
pkg/utils/v1alpha1/utils.go | package utils
import (
"fmt"
"net"
"os"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
apis "github.com/openebs/cstor-csi/pkg/apis/openebs.io/core/v1alpha1"
snapshotclient "github.com/openebs/cstor-csi/pkg/client/snapshot/v1alpha1"
"google.golang.org/grpc"
"k8s.io/kubernetes/pkg/util/mount"
)
const (
// TODO make VolumeWaitTimeout as env
// VolumeWaitTimeout indicates the timegap between two consecutive volume
// status check attempts
VolumeWaitTimeout = 2
// TODO make VolumeWaitRetryCount as env
// VolumeWaitRetryCount indicates the number of retries made to check the
// status of volume before erroring out
VolumeWaitRetryCount = 6
// TODO make MonitorMountRetryTimeout as env
// MonitorMountRetryTimeout indicates the time gap between two consecutive
	// monitoring attempts
MonitorMountRetryTimeout = 5
)
var (
// OpenEBSNamespace is openebs system namespace
OpenEBSNamespace string
// NodeIDENV is the NodeID of the node on which the pod is present
NodeIDENV string
// TransitionVolList contains the list of volumes under transition
// This list is protected by TransitionVolListLock
TransitionVolList map[string]apis.CSIVolumeStatus
// TransitionVolListLock is required to protect the above Volumes list
TransitionVolListLock sync.RWMutex
// ReqMountList contains the list of volumes which are required
// to be remounted. This list is secured by ReqMountListLock
ReqMountList map[string]apis.CSIVolumeStatus
)
const (
	// timeout indicates the REST call timeout
timeout = 60 * time.Second
)
func init() {
OpenEBSNamespace = os.Getenv("OPENEBS_NAMESPACE")
if OpenEBSNamespace == "" {
logrus.Fatalf("OPENEBS_NAMESPACE environment variable not set")
}
NodeIDENV = os.Getenv("OPENEBS_NODE_ID")
if NodeIDENV == "" && os.Getenv("OPENEBS_NODE_DRIVER") != "" {
logrus.Fatalf("OPENEBS_NODE_ID not set")
}
TransitionVolList = make(map[string]apis.CSIVolumeStatus)
ReqMountList = make(map[string]apis.CSIVolumeStatus)
}
// parseEndpoint should have a valid prefix(unix/tcp)
// to return a valid endpoint parts
func parseEndpoint(ep string) (string, string, error) {
if strings.HasPrefix(strings.ToLower(ep), "unix://") ||
strings.HasPrefix(strings.ToLower(ep), "tcp://") {
s := strings.SplitN(ep, "://", 2)
if s[1] != "" {
return s[0], s[1], nil
}
}
return "", "", fmt.Errorf("Invalid endpoint: %v", ep)
}
// logGRPC logs all the grpc related errors, i.e the final errors
// which are returned to the grpc clients
func logGRPC(
ctx context.Context, req interface{},
info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
) (interface{}, error) {
logrus.Infof("GRPC call: %s", info.FullMethod)
logrus.Infof("GRPC request: %s", protosanitizer.StripSecrets(req))
resp, err := handler(ctx, req)
if err != nil {
logrus.Errorf("GRPC error: %v", err)
} else {
logrus.Infof("GRPC response: %s", protosanitizer.StripSecrets(resp))
}
return resp, err
}
// ChmodMountPath removes all permission from the folder if volume is not
// mounted on it
func ChmodMountPath(mountPath string) error {
return os.Chmod(mountPath, 0000)
}
// WaitForVolumeToBeReachable keeps the mounts on hold until the volume is
// reachable
func WaitForVolumeToBeReachable(targetPortal string) error {
var (
retries int
err error
conn net.Conn
)
for {
// Create a connection to test if the iSCSI Portal is reachable,
if conn, err = net.Dial("tcp", targetPortal); err == nil {
conn.Close()
logrus.Infof("Volume is reachable to create connections")
return nil
}
// wait until the iSCSI targetPortal is reachable
		// There is no point in triggering iSCSIadm login commands
// until the portal is reachable
time.Sleep(VolumeWaitTimeout * time.Second)
retries++
if retries >= VolumeWaitRetryCount {
// Let the caller function decide further if the volume is
// not reachable even after 12 seconds ( This number was arrived at
// based on the kubelets retrying logic. Kubelet retries to publish
// volume after every 14s )
return fmt.Errorf(
"iSCSI Target not reachable, TargetPortal %v, err:%v",
targetPortal, err)
}
}
}
// WaitForVolumeToBeReady retrieves the volume info from cstorVolume CR and
// waits until consistency factor is met for connected replicas
func WaitForVolumeToBeReady(volumeID string) error {
var retries int
checkVolumeStatus:
// Status is fetched from cstorVolume CR
volStatus, err := getVolStatus(volumeID)
if err != nil {
return err
} else if volStatus == "Healthy" || volStatus == "Degraded" {
// In both healthy and degraded states the volume can serve IOs
logrus.Infof("Volume is ready to accept IOs")
} else if retries >= VolumeWaitRetryCount {
// Let the caller function decide further if the volume is still not
		// ready to accept IOs after 12 seconds ( This number was arrived at
// based on the kubelets retrying logic. Kubelet retries to publish
// volume after every 14s )
return fmt.Errorf(
"Volume is not ready: Replicas yet to connect to controller",
)
} else {
TransitionVolList[volumeID] = apis.CSIVolumeStatusWaitingForVolumeToBeReady
time.Sleep(VolumeWaitTimeout * time.Second)
retries++
goto checkVolumeStatus
}
return nil
}
/*
// GetVolumeByName fetches the volume from Volumes list based on th input name
func GetVolumeByName(volName string) (*apis.CSIVolume, error) {
for _, Vol := range Volumes {
if Vol.Spec.Volume.Name == volName {
return Vol, nil
}
}
return nil,
fmt.Errorf("volume name %s does not exit in the volumes list", volName)
}
*/
func listContains(
mountPath string, list []mount.MountPoint,
) (*mount.MountPoint, bool) {
for _, info := range list {
if info.Path == mountPath {
mntInfo := info
return &mntInfo, true
}
}
return nil, false
}
// MonitorMounts makes sure that all the volumes present in the inmemory list
// with the driver are mounted with the original mount options
// This function runs a never ending loop therefore should be run as a goroutine
// Mounted list is fetched from the OS and the state of all the volumes is
// reverified after every 5 seconds. If the mountpoint is not present in the
// list or if it has been remounted with a different mount option by the OS, the
// volume is added to the ReqMountList which is removed as soon as the remount
// operation on the volume is complete
// For each remount operation a new goroutine is created, so that if multiple
// volumes have lost their original state they can all be remounted in parallel
func MonitorMounts() {
var (
err error
csivolList *apis.CSIVolumeList
mountList []mount.MountPoint
)
mounter := mount.New("")
ticker := time.NewTicker(MonitorMountRetryTimeout * time.Second)
for {
select {
case <-ticker.C:
// Get list of mounted paths present with the node
TransitionVolListLock.Lock()
			if mountList, err = mounter.List(); err != nil {
				// Release the lock before bailing out so the next tick
				// does not deadlock trying to acquire it again.
				TransitionVolListLock.Unlock()
				break
			}
			if csivolList, err = GetVolListForNode(); err != nil {
				TransitionVolListLock.Unlock()
				break
			}
for _, vol := range csivolList.Items {
// This check is added to avoid monitoring volume if it has not
// been mounted yet. Although CSIVolume CR gets created at
// ControllerPublish step.
if (vol.Spec.Volume.StagingTargetPath == "") ||
(vol.Spec.Volume.TargetPath == "") {
continue
}
// Search the volume in the list of mounted volumes at the node
// retrieved above
stagingMountPoint, stagingPathExists := listContains(
vol.Spec.Volume.StagingTargetPath, mountList,
)
_, targetPathExists := listContains(
vol.Spec.Volume.TargetPath, mountList,
)
// If the volume is present in the list verify its state
// If stagingPath is in rw then TargetPath will also be in rw
// mode
if stagingPathExists && targetPathExists &&
verifyMountOpts(stagingMountPoint.Opts, "rw") {
// Continue with remaining volumes since this volume looks
// to be in good shape
continue
}
if _, ok := TransitionVolList[vol.Spec.Volume.Name]; !ok {
TransitionVolList[vol.Spec.Volume.Name] = vol.Status
ReqMountList[vol.Spec.Volume.Name] = vol.Status
csivol := vol
go func() {
logrus.Infof("Remounting vol: %s at %s and %s",
vol.Spec.Volume.Name, vol.Spec.Volume.StagingTargetPath,
vol.Spec.Volume.TargetPath)
defer func() {
TransitionVolListLock.Lock()
// Remove the volume from ReqMountList once the remount operation is
// complete
delete(TransitionVolList, vol.Spec.Volume.Name)
delete(ReqMountList, vol.Spec.Volume.Name)
TransitionVolListLock.Unlock()
}()
if err := RemountVolume(
stagingPathExists, targetPathExists,
&csivol,
); err != nil {
logrus.Errorf(
"Remount failed for vol: %s : err: %v",
vol.Spec.Volume.Name, err,
)
} else {
logrus.Infof(
"Remount successful for vol: %s",
vol.Spec.Volume.Name,
)
}
}()
}
}
TransitionVolListLock.Unlock()
}
}
}
// WaitForVolumeReadyAndReachable waits until the volume is ready to accept IOs
// and is reachable, this function will not come out until both the conditions
// are met. This function stops the driver from overloading the OS with iSCSI
// login commands.
func WaitForVolumeReadyAndReachable(vol *apis.CSIVolume) error {
var err error
// This function return after 12s in case the volume is not ready
if err = WaitForVolumeToBeReady(vol.Spec.Volume.Name); err != nil {
logrus.Error(err)
return err
}
// This function return after 12s in case the volume is not reachable
err = WaitForVolumeToBeReachable(vol.Spec.ISCSI.TargetPortal)
if err != nil {
logrus.Error(err)
return err
}
return nil
}
func verifyMountOpts(opts []string, desiredOpt string) bool {
for _, opt := range opts {
if opt == desiredOpt {
return true
}
}
return false
}
// RemountVolume unmounts the volume if it is already mounted in an undesired
// state and then tries to mount again. If it is not mounted the volume, first
// the disk will be attached via iSCSI login and then it will be mounted
func RemountVolume(
stagingPathExists bool, targetPathExists bool,
vol *apis.CSIVolume,
) (err error) {
mounter := mount.New("")
options := []string{"rw"}
	// Wait until it is possible to change the state of the mountpoint or when
// login to volume is possible
if err = WaitForVolumeReadyAndReachable(vol); err != nil {
return
}
if stagingPathExists {
mounter.Unmount(vol.Spec.Volume.StagingTargetPath)
}
if targetPathExists {
mounter.Unmount(vol.Spec.Volume.TargetPath)
}
// Unmount and mount operation is performed instead of just remount since
// the remount option didn't give the desired results
if err = mounter.Mount(vol.Spec.Volume.DevicePath,
vol.Spec.Volume.StagingTargetPath, "", options,
); err != nil {
return
}
options = []string{"bind"}
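	// Re-create the bind mount from the staging path onto the container's target path.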
err = mounter.Mount(vol.Spec.Volume.StagingTargetPath,
vol.Spec.Volume.TargetPath, "", options)
return
}
// GetMounts gets mountpoints for the specified volume
func GetMounts(volumeID string) ([]string, error) {
var (
currentMounts []string
err error
mountList []mount.MountPoint
)
mounter := mount.New("")
// Get list of mounted paths present with the node
if mountList, err = mounter.List(); err != nil {
return nil, err
}
for _, mntInfo := range mountList {
if strings.Contains(mntInfo.Path, volumeID) {
currentMounts = append(currentMounts, mntInfo.Path)
}
}
return currentMounts, nil
}
// CreateSnapshot creates a snapshot of cstor volume
func CreateSnapshot(volumeName, snapName string) error {
volIP, err := GetVolumeIP(volumeName)
if err != nil {
return err
}
_, err = snapshotclient.CreateSnapshot(volIP, volumeName, snapName)
// If there is no err that means call was successful
return err
}
// DeleteSnapshot deletes a snapshot of cstor volume
func DeleteSnapshot(volumeName, snapName string) error {
volIP, err := GetVolumeIP(volumeName)
if err != nil {
return err
}
_, err = snapshotclient.DestroySnapshot(volIP, volumeName, snapName)
return err
}
| [
"\"OPENEBS_NAMESPACE\"",
"\"OPENEBS_NODE_ID\"",
"\"OPENEBS_NODE_DRIVER\""
]
| []
| [
"OPENEBS_NODE_DRIVER",
"OPENEBS_NAMESPACE",
"OPENEBS_NODE_ID"
]
| [] | ["OPENEBS_NODE_DRIVER", "OPENEBS_NAMESPACE", "OPENEBS_NODE_ID"] | go | 3 | 0 | |
e2e/common/cli/global_kamelet_test.go | // +build integration
// To enable compilation of this file in Goland, go to "Settings -> Go -> Vendoring & Build Tags -> Custom Tags" and add "integration"
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"os"
"testing"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
. "github.com/apache/camel-k/e2e/support"
"github.com/apache/camel-k/pkg/util/openshift"
)
func TestRunGlobalKamelet(t *testing.T) {
forceGlobalTest := os.Getenv("CAMEL_K_FORCE_GLOBAL_TEST") == "true"
if !forceGlobalTest {
ocp, err := openshift.IsOpenShift(TestClient())
assert.Nil(t, err)
if ocp {
t.Skip("Prefer not to run on OpenShift to avoid giving more permissions to the user running tests")
return
}
}
WithNewTestNamespace(t, func(ns string) {
Expect(Kamel("install", "-n", ns, "--global").Execute()).To(Succeed())
Expect(CreateTimerKamelet(ns, "my-own-timer-source")()).To(Succeed())
// NS2: namespace without operator
WithNewTestNamespace(t, func(ns2 string) {
Expect(Kamel("install", "-n", ns2, "--skip-operator-setup", "--olm=false").Execute()).To(Succeed())
Expect(Kamel("run", "-n", ns2, "files/timer-kamelet-usage.groovy").Execute()).To(Succeed())
Eventually(IntegrationPodPhase(ns2, "timer-kamelet-usage"), TestTimeoutMedium).Should(Equal(v1.PodRunning))
Eventually(IntegrationLogs(ns2, "timer-kamelet-usage"), TestTimeoutShort).Should(ContainSubstring("Hello world"))
Expect(Kamel("delete", "--all", "-n", ns2).Execute()).To(Succeed())
})
Expect(Kamel("uninstall", "-n", ns, "--skip-cluster-roles=false", "--skip-cluster-role-bindings=false").Execute()).To(Succeed())
})
}
| [
"\"CAMEL_K_FORCE_GLOBAL_TEST\""
]
| []
| [
"CAMEL_K_FORCE_GLOBAL_TEST"
]
| [] | ["CAMEL_K_FORCE_GLOBAL_TEST"] | go | 1 | 0 | |
spyder_mod/Spyder 5.1.5/site-packages/spyder/app/utils.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Utility functions for the Spyder application."""
# Standard library imports
import glob
import logging
import os
import os.path as osp
import re
import sys
# Third-party imports
import psutil
from qtpy.QtCore import QCoreApplication, Qt
from qtpy.QtGui import QColor, QIcon, QPalette, QPixmap, QPainter, QImage
from qtpy.QtWidgets import QApplication, QSplashScreen
from qtpy.QtSvg import QSvgRenderer
# Local imports
from spyder.config.base import (
DEV, get_conf_path, get_debug_level, running_in_mac_app,
running_under_pytest)
from spyder.config.manager import CONF
from spyder.utils.external.dafsa.dafsa import DAFSA
from spyder.utils.image_path_manager import get_image_path
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import file_uri, qapplication
# For spyder-ide/spyder#7447.
try:
from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
except Exception:
QQuickWindow = QSGRendererInterface = None
root_logger = logging.getLogger()
FILTER_NAMES = os.environ.get('SPYDER_FILTER_LOG', "").split(',')
FILTER_NAMES = [f.strip() for f in FILTER_NAMES]
class Spy:
"""
This is used to inject a 'spy' object in the internal console
namespace to inspect Spyder internals.
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return (list(self.__dict__.keys()) +
[x for x in dir(self.__class__) if x[0] != '_'])
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
def set_opengl_implementation(option):
"""
Set the OpenGL implementation used by Spyder.
See spyder-ide/spyder#7447 for the details.
"""
if option == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
elif option == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
elif option == 'gles':
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
if QQuickWindow is not None:
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
def setup_logging(cli_options):
"""Setup logging with cli options defined by the user."""
if cli_options.debug_info or get_debug_level() > 0:
levels = {2: logging.INFO, 3: logging.DEBUG}
log_level = levels[get_debug_level()]
log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
console_filters = cli_options.filter_log.split(',')
console_filters = [x.strip() for x in console_filters]
console_filters = console_filters + FILTER_NAMES
console_filters = [x for x in console_filters if x != '']
handlers = [logging.StreamHandler()]
filepath = os.environ['SPYDER_DEBUG_FILE']
handlers.append(
logging.FileHandler(filename=filepath, mode='w+')
)
match_func = lambda x: True
if console_filters != [''] and len(console_filters) > 0:
dafsa = DAFSA(console_filters)
match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)
is not None)
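            # stop_on_prefix=True means a record passes when its logger name starts with one of the configured filter prefixes.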
formatter = logging.Formatter(log_format)
class ModuleFilter(logging.Filter):
"""Filter messages based on module name prefix."""
def filter(self, record):
return match_func(record.name)
filter = ModuleFilter()
root_logger.setLevel(log_level)
for handler in handlers:
handler.addFilter(filter)
handler.setFormatter(formatter)
handler.setLevel(log_level)
root_logger.addHandler(handler)
def delete_debug_log_files():
"""Delete previous debug log files."""
regex = re.compile(r'.*_.*_(\d+)[.]log')
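    # LSP log file names end with the PID of the owning process; only logs whose process has exited are removed below.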
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
match = regex.match(f)
if match is not None:
pid = int(match.group(1))
if not psutil.pid_exists(pid):
os.remove(f)
debug_file = os.environ['SPYDER_DEBUG_FILE']
if osp.exists(debug_file):
os.remove(debug_file)
def qt_message_handler(msg_type, msg_log_context, msg_string):
"""
Qt warning messages are intercepted by this handler.
On some operating systems, warning messages might be displayed
even if the actual message does not apply. This filter adds a
blacklist for messages that are being printed for no apparent
reason. Anything else will get printed in the internal console.
In DEV mode, all messages are printed.
"""
BLACKLIST = [
'QMainWidget::resizeDocks: all sizes need to be larger than 0',
]
if DEV or msg_string not in BLACKLIST:
print(msg_string) # spyder: test-skip
def create_splash_screen():
"""Create splash screen."""
if not running_under_pytest():
image = QImage(500, 220, QImage.Format_ARGB32_Premultiplied)
image.fill(0)
painter = QPainter(image)
renderer = QSvgRenderer(get_image_path('Tellurium_splash'))
renderer.render(painter)
painter.end()
pm = QPixmap.fromImage(image)
pm = pm.copy(0, 0, 500, 220)
splash = QSplashScreen(pm)
splash_font = splash.font()
splash_font.setPixelSize(14)
splash.setFont(splash_font)
else:
splash = None
return splash
def set_links_color(app):
"""
Fix color for links.
This was taken from QDarkstyle, which is MIT licensed.
"""
color = QStylePalette.COLOR_ACCENT_4
qcolor = QColor(color)
app_palette = app.palette()
app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)
app.setPalette(app_palette)
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
# ---- Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ---- Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ---- Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(WindowClass, app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
Parameters
----------
WindowClass: QMainWindow
Subclass to instantiate the Window.
app: QApplication
Instance to start the application.
splash: QSplashScreen
        Splash screen instance.
options: argparse.Namespace
Command line options passed to Spyder
args: list
List of file names passed to the Spyder executable in the
command line.
"""
# Main window
main = WindowClass(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
| []
| []
| [
"SPYDER_FILTER_LOG",
"SPYDER_DEBUG_FILE"
]
| [] | ["SPYDER_FILTER_LOG", "SPYDER_DEBUG_FILE"] | python | 2 | 0 | |
scripts/update_schemas.py | #!/usr/bin/python3
import requests
import zipfile
from io import BytesIO
import os
from collections import defaultdict
from collections import OrderedDict
from distutils.version import StrictVersion
import shutil
import json
import glob
import xml.etree.ElementTree as ET
VERSION = "DSP8010_2021.1"
# To use a new schema, add to list and rerun tool
include_list = [
'AccountService',
'ActionInfo',
'Assembly',
'AttributeRegistry',
'Bios',
'Certificate',
'CertificateCollection',
'CertificateLocations',
'CertificateService',
'Chassis',
'ChassisCollection',
'ComputerSystem',
'ComputerSystemCollection',
'Drive',
'DriveCollection',
'EthernetInterface',
'EthernetInterfaceCollection',
'Event',
'EventDestination',
'EventDestinationCollection',
'EventService',
'FabricAdapter',
'FabricAdapterCollection',
'FanCollection',
'Fan',
'IPAddresses',
'JsonSchemaFile',
'JsonSchemaFileCollection', #redfish/v1/JsonSchemas
'LogEntry',
'LogEntryCollection',
'LogService',
'LogServiceCollection',
'Manager',
'ManagerAccount',
'ManagerAccountCollection',
'ManagerCollection',
'ManagerNetworkProtocol',
'Memory',
'MemoryCollection',
'Message',
'MessageRegistry',
'MessageRegistryCollection',
'MessageRegistryFile',
'MessageRegistryFileCollection',
'MetricDefinition',
'MetricDefinitionCollection',
'MetricReport',
'MetricReportCollection',
'MetricReportDefinition',
'MetricReportDefinitionCollection',
'OperatingConfig',
'OperatingConfigCollection',
'PCIeDevice',
'PCIeDeviceCollection',
'PCIeFunction',
'PCIeFunctionCollection',
'PCIeSlots',
'Power',
'Port',
'PortCollection',
'PowerSubsystem',
'PowerSupplyCollection',
'PowerSupply',
'Privileges', #Used in Role
'Processor',
'ProcessorCollection',
'RedfishError',
'RedfishExtensions',
'Redundancy',
'Resource',
'Role',
'RoleCollection',
'Sensor',
'SensorCollection',
'ServiceRoot',
'Session',
'SessionCollection',
'SessionService',
'Settings',
'SoftwareInventory',
'SoftwareInventoryCollection',
'Storage',
'StorageCollection',
'StorageController',
'StorageControllerCollection',
'Task',
'TaskCollection',
'TaskService',
'TelemetryService',
'Thermal',
'ThermalSubsystem',
'ThermalMetrics',
'UpdateService',
'VLanNetworkInterfaceCollection',
'VLanNetworkInterface',
'VirtualMedia',
'VirtualMediaCollection',
'odata',
'odata-v4',
'redfish-error',
'redfish-payload-annotations',
'redfish-schema',
'redfish-schema-v1',
]
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
proxies = {
'https': os.environ.get("https_proxy", None)
}
r = requests.get(
'https://www.dmtf.org/sites/default/files/standards/documents/' +
VERSION +
'.zip',
proxies=proxies)
r.raise_for_status()
static_path = os.path.realpath(os.path.join(SCRIPT_DIR, "..", "static",
"redfish", "v1"))
schema_path = os.path.join(static_path, "schema")
json_schema_path = os.path.join(static_path, "JsonSchemas")
metadata_index_path = os.path.join(static_path, "$metadata", "index.xml")
zipBytesIO = BytesIO(r.content)
zip_ref = zipfile.ZipFile(zipBytesIO)
# Remove the old files
if os.path.exists(schema_path):
files = glob.glob(os.path.join(schema_path, '[!Oem]*'))
for f in files:
os.remove(f)
if os.path.exists(json_schema_path):
files = glob.glob(os.path.join(json_schema_path, '[!Oem]*'))
for f in files:
if (os.path.isfile(f)):
os.remove(f)
else:
shutil.rmtree(f)
os.remove(metadata_index_path)
if not os.path.exists(schema_path):
os.makedirs(schema_path)
if not os.path.exists(json_schema_path):
os.makedirs(json_schema_path)
with open(metadata_index_path, 'w') as metadata_index:
metadata_index.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
metadata_index.write(
"<edmx:Edmx xmlns:edmx=\"http://docs.oasis-open.org/odata/ns/edmx\" Version=\"4.0\">\n")
for zip_filepath in zip_ref.namelist():
        if zip_filepath.startswith(VERSION + '/csdl/') and \
                zip_filepath != VERSION + "/csdl/":
filename = os.path.basename(zip_filepath)
# filename looks like Zone_v1.xml
filenamesplit = filename.split("_")
if filenamesplit[0] not in include_list:
print("excluding schema: " + filename)
continue
with open(os.path.join(schema_path, filename), 'wb') as schema_file:
metadata_index.write(
" <edmx:Reference Uri=\"/redfish/v1/schema/" +
filename +
"\">\n")
content = zip_ref.read(zip_filepath)
content = content.replace(b'\r\n', b'\n')
xml_root = ET.fromstring(content)
for edmx_child in xml_root:
if edmx_child.tag == "{http://docs.oasis-open.org/odata/ns/edmx}DataServices":
for data_child in edmx_child:
if data_child.tag == "{http://docs.oasis-open.org/odata/ns/edm}Schema":
namespace = data_child.attrib["Namespace"]
if namespace.startswith("RedfishExtensions"):
metadata_index.write(
" <edmx:Include Namespace=\"" + namespace + "\" Alias=\"Redfish\"/>\n")
else:
metadata_index.write(
" <edmx:Include Namespace=\"" + namespace + "\"/>\n")
schema_file.write(content)
metadata_index.write(" </edmx:Reference>\n")
metadata_index.write(""" <edmx:DataServices>
<Schema xmlns="http://docs.oasis-open.org/odata/ns/edm" Namespace="Service">
<EntityContainer Name="Service" Extends="ServiceRoot.v1_0_0.ServiceContainer"/>
</Schema>
</edmx:DataServices>
""")
    # TODO:Issue#32 There's a bug in the script that currently deletes these
    # schemas (because they're OEM schemas). Because these are the only ones,
    # and we don't update schemas very often, we just manually fix it here.
    # Need a permanent fix to the script.
metadata_index.write(
" <edmx:Reference Uri=\"/redfish/v1/schema/OemManager_v1.xml\">\n")
metadata_index.write(" <edmx:Include Namespace=\"OemManager\"/>\n")
metadata_index.write(" </edmx:Reference>\n")
metadata_index.write(
" <edmx:Reference Uri=\"/redfish/v1/schema/OemComputerSystem_v1.xml\">\n")
metadata_index.write(" <edmx:Include Namespace=\"OemComputerSystem\"/>\n")
metadata_index.write(" </edmx:Reference>\n")
metadata_index.write(
" <edmx:Reference Uri=\"/redfish/v1/schema/OemVirtualMedia_v1.xml\">\n")
metadata_index.write(" <edmx:Include Namespace=\"OemVirtualMedia\"/>\n")
metadata_index.write(" <edmx:Include Namespace=\"OemVirtualMedia.v1_0_0\"/>\n")
metadata_index.write(" </edmx:Reference>\n")
metadata_index.write(
" <edmx:Reference Uri=\"/redfish/v1/schema/OemAccountService_v1.xml\">\n")
metadata_index.write(" <edmx:Include Namespace=\"OemAccountService\"/>\n")
metadata_index.write(" <edmx:Include Namespace=\"OemAccountService.v1_0_0\"/>\n")
metadata_index.write(" </edmx:Reference>\n")
metadata_index.write(
" <edmx:Reference Uri=\"/redfish/v1/schema/OemSession_v1.xml\">\n")
metadata_index.write(" <edmx:Include Namespace=\"OemSession\"/>\n")
metadata_index.write(" <edmx:Include Namespace=\"OemSession.v1_0_0\"/>\n")
metadata_index.write(" </edmx:Reference>\n")
metadata_index.write("</edmx:Edmx>\n")
schema_files = {}
for zip_filepath in zip_ref.namelist():
if zip_filepath.startswith(os.path.join(VERSION, 'json-schema/')):
filename = os.path.basename(zip_filepath)
filenamesplit = filename.split(".")
# exclude schemas again to save flash space
if filenamesplit[0] not in include_list:
continue
if len(filenamesplit) == 3:
thisSchemaVersion = schema_files.get(filenamesplit[0], None)
if thisSchemaVersion is None:
schema_files[filenamesplit[0]] = filenamesplit[1]
else:
# need to see if we're a newer version.
if list(map(int, filenamesplit[1][1:].split("_"))) > list(map(
int, thisSchemaVersion[1:].split("_"))):
schema_files[filenamesplit[0]] = filenamesplit[1]
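# Illustrative note (added for clarity, not in the upstream script): the
# version strings compared above look like "v1_5_0"; stripping the leading
# "v" and splitting on "_" yields lists of ints, e.g.
#
#     list(map(int, "v1_10_0"[1:].split("_")))  ->  [1, 10, 0]
#
# so "v1_10_0" correctly sorts after "v1_9_0", which a plain string
# comparison would get wrong.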
for schema, version in schema_files.items():
basename = schema + "." + version + ".json"
zip_filepath = os.path.join(VERSION, "json-schema", basename)
schemadir = os.path.join(json_schema_path, schema)
os.makedirs(schemadir)
location_json = OrderedDict()
location_json["Language"] = "en"
location_json["PublicationUri"] = (
"http://redfish.dmtf.org/schemas/v1/" + schema + ".json")
location_json["Uri"] = (
"/redfish/v1/JsonSchemas/" + schema + "/" + schema + ".json")
index_json = OrderedDict()
index_json["@odata.context"] = "/redfish/v1/$metadata#JsonSchemaFile.JsonSchemaFile"
index_json["@odata.id"] = "/redfish/v1/JsonSchemas/" + schema
index_json["@odata.type"] = "#JsonSchemaFile.v1_0_2.JsonSchemaFile"
index_json["Name"] = schema + " Schema File"
index_json["Schema"] = "#" + schema + "." + schema
index_json["Description"] = schema + " Schema File Location"
index_json["Id"] = schema
index_json["Languages"] = ["en"]
index_json["[email protected]"] = 1
index_json["Location"] = [location_json]
index_json["[email protected]"] = 1
with open(os.path.join(schemadir, "index.json"), 'w') as schema_file:
json.dump(index_json, schema_file, indent=4)
with open(os.path.join(schemadir, schema + ".json"), 'wb') as schema_file:
schema_file.write(zip_ref.read(zip_filepath).replace(b'\r\n', b'\n'))
with open(os.path.join(json_schema_path, "index.json"), 'w') as index_file:
members = [{"@odata.id": "/redfish/v1/JsonSchemas/" + schema}
for schema in schema_files]
members.sort(key=lambda x: x["@odata.id"])
indexData = OrderedDict()
indexData["@odata.id"] = "/redfish/v1/JsonSchemas"
indexData["@odata.context"] = ("/redfish/v1/$metadata"
"#JsonSchemaFileCollection."
"JsonSchemaFileCollection")
indexData["@odata.type"] = ("#JsonSchemaFileCollection."
"JsonSchemaFileCollection")
indexData["Name"] = "JsonSchemaFile Collection"
indexData["Description"] = "Collection of JsonSchemaFiles"
indexData["[email protected]"] = len(schema_files)
indexData["Members"] = members
json.dump(indexData, index_file, indent=2)
zip_ref.close()
| []
| []
| [
"https_proxy"
]
| [] | ["https_proxy"] | python | 1 | 0 | |
python_modules/dagster-airflow/dagster_airflow_tests/conftest.py | # pylint doesn't understand the way that pytest constructs fixture dependencies
# pylint: disable=redefined-outer-name
import datetime
import os
import shutil
import subprocess
import sys
import tempfile
import uuid
import airflow.plugins_manager
import docker
import pytest
from dagster import check
from dagster.core.execution import create_execution_plan
from dagster.utils import load_yaml_from_path, mkdir_p, script_relative_path
from dagster_airflow import scaffold_airflow_dag
from .test_project.dagster_airflow_demo import define_demo_execution_pipeline
from .utils import reload_module
IMAGE = 'dagster-airflow-demo'
# py2 compat
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
@pytest.fixture(scope='module')
def airflow_home():
airflow_home_dir = os.getenv('AIRFLOW_HOME')
assert airflow_home_dir, 'No AIRFLOW_HOME set -- is airflow installed?'
airflow_home_dir = os.path.abspath(os.path.expanduser(airflow_home_dir))
return airflow_home_dir
@pytest.fixture(scope='module')
def temp_dir():
dir_path = os.path.join('/tmp', str(uuid.uuid4()))
mkdir_p(dir_path)
yield dir_path
shutil.rmtree(dir_path)
@pytest.fixture(scope='module')
def docker_client():
try:
client = docker.from_env()
client.info()
except docker.errors.APIError:
check.failed('Couldn\'t find docker at {url} -- is it running?'.format(url=client._url('')))
return client
@pytest.fixture(scope='module')
def docker_image(docker_client):
try:
docker_client.images.get(IMAGE)
except docker.errors.ImageNotFound:
check.failed(
'Couldn\'t find docker image {image} required for test: please run the script at '
'{script_path}'.format(
image=IMAGE, script_path=script_relative_path('test_project/build.sh')
)
)
return IMAGE
@pytest.fixture(scope='module')
def dags_path(airflow_home):
path = os.path.join(airflow_home, 'dags', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def plugins_path(airflow_home):
path = os.path.join(airflow_home, 'plugins', '')
mkdir_p(os.path.abspath(path))
return path
@pytest.fixture(scope='module')
def host_tmp_dir():
mkdir_p('/tmp/results')
return '/tmp/results'
@pytest.fixture(scope='module')
def airflow_test(docker_image, dags_path, plugins_path, host_tmp_dir):
assert docker_image
plugin_definition_filename = 'dagster_plugin.py'
plugin_path = os.path.abspath(os.path.join(plugins_path, plugin_definition_filename))
temporary_plugin_path = None
try:
if os.path.exists(plugin_path):
temporary_plugin_file = tempfile.NamedTemporaryFile(delete=False)
temporary_plugin_file.close()
temporary_plugin_path = temporary_plugin_file.name
shutil.copyfile(plugin_path, temporary_plugin_path)
shutil.copyfile(
script_relative_path(os.path.join('..', 'dagster_airflow', plugin_definition_filename)),
plugin_path,
)
mkdir_p(os.path.abspath(dags_path))
sys.path.append(os.path.abspath(dags_path))
created_init_py = False
init_py_path = os.path.join(os.path.abspath(dags_path), '__init__.py')
if not os.path.exists(init_py_path):
with open(init_py_path, 'a'):
pass
created_init_py = True
subprocess.check_output(['airflow', 'initdb'])
# necromancy; follows airflow.operators.__init__
reload_module(airflow.plugins_manager)
for operators_module in airflow.plugins_manager.operators_modules:
sys.modules[operators_module.__name__] = operators_module
globals()[operators_module._name] = operators_module
# Test that we can now actually import the DagsterOperator
from airflow.operators.dagster_plugin import DagsterOperator
del DagsterOperator
yield (docker_image, dags_path, host_tmp_dir)
finally:
if os.path.exists(plugin_path):
os.remove(plugin_path)
if temporary_plugin_path is not None:
shutil.copyfile(temporary_plugin_path, plugin_path)
os.remove(temporary_plugin_path)
if created_init_py:
os.remove(init_py_path)
sys.path = sys.path[:-1]
@pytest.fixture(scope='module')
def scaffold_dag(airflow_test):
docker_image, dags_path, _ = airflow_test
pipeline = define_demo_execution_pipeline()
env_config = load_yaml_from_path(script_relative_path('test_project/env.yml'))
tempdir = tempfile.gettempdir()
static_path, editable_path = scaffold_airflow_dag(
pipeline=pipeline,
env_config=env_config,
image=docker_image,
output_path=tempdir,
dag_kwargs={'default_args': {'start_date': datetime.datetime(1900, 1, 1)}},
)
# Ensure that the scaffolded files parse correctly
subprocess.check_output(['python', editable_path])
shutil.copyfile(
static_path, os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)))
)
shutil.copyfile(
editable_path, os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)))
)
os.remove(static_path)
os.remove(editable_path)
execution_date = datetime.datetime.utcnow().strftime('%Y-%m-%d')
pipeline_name = pipeline.name
execution_plan = create_execution_plan(pipeline, env_config)
yield (
pipeline_name,
execution_plan,
execution_date,
os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))),
os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))),
)
os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(static_path))))
os.remove(os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path))))
try:
os.remove(
os.path.abspath(os.path.join(dags_path, os.path.basename(static_path)[:-3] + '.pyc'))
)
os.remove(
os.path.abspath(os.path.join(dags_path, os.path.basename(editable_path)[:-3] + '.pyc'))
)
except (FileNotFoundError, OSError):
pass
| []
| []
| [
"AIRFLOW_HOME"
]
| [] | ["AIRFLOW_HOME"] | python | 1 | 0 | |
shynet/shynet/settings.py | """
Django settings for Shynet.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Messages
from django.contrib.messages import constants as messages
# Increment on new releases
VERSION = "v0.4.0"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "onlyusethisindev")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv("DEBUG", "False") == "True"
ALLOWED_HOSTS = os.getenv("ALLOWED_HOSTS", "*").split(",")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"django.contrib.humanize",
"rules.apps.AutodiscoverRulesConfig",
"a17t",
"core",
"dashboard.apps.DashboardConfig",
"analytics",
"allauth",
"allauth.account",
"allauth.socialaccount",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.sites.middleware.CurrentSiteMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "shynet.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "shynet.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
if os.getenv("SQLITE", "False") == "True":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
else:
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.environ.get("DB_NAME"),
"USER": os.environ.get("DB_USER"),
"PASSWORD": os.environ.get("DB_PASSWORD"),
"HOST": os.environ.get("DB_HOST"),
"PORT": os.environ.get("DB_PORT"),
}
}
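# Illustrative example (not part of the original settings): with SQLITE unset
# or "False", the PostgreSQL backend above expects the usual connection
# variables to be provided, e.g.
#
#     DB_NAME=shynet DB_USER=shynet DB_PASSWORD=secret DB_HOST=db DB_PORT=5432
#
# The concrete values here are placeholders for illustration only.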
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "{levelname} {asctime} {module} {process:d} {thread:d} {message}",
"style": "{",
},
"simple": {"format": "{levelname} {message}", "style": "{"},
},
"filters": {"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"}},
"handlers": {
"console": {
"level": "INFO",
"filters": [],
"class": "logging.StreamHandler",
"formatter": "simple",
},
"mail_admins": {
"level": "ERROR",
"class": "django.utils.log.AdminEmailHandler",
"filters": [],
},
},
"loggers": {
"django": {"handlers": ["console"], "propagate": True},
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
},
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = os.getenv("TIME_ZONE", "America/New_York")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = "compiledstatic/"
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Redis
if not DEBUG and os.getenv("REDIS_CACHE_LOCATION") is not None:
CACHES = {
"default": {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": os.getenv("REDIS_CACHE_LOCATION"),
"KEY_PREFIX": "v1_", # Increment when migrations occur
}
}
# Auth
AUTH_USER_MODEL = "core.User"
AUTHENTICATION_BACKENDS = (
"rules.permissions.ObjectPermissionBackend",
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_EMAIL_SUBJECT_PREFIX = ""
ACCOUNT_USER_DISPLAY = lambda k: k.email
ACCOUNT_SIGNUPS_ENABLED = os.getenv("ACCOUNT_SIGNUPS_ENABLED", "False") == "True"
LOGIN_REDIRECT_URL = "/"
SITE_ID = 1
# Celery
CELERY_TASK_ALWAYS_EAGER = os.getenv("CELERY_TASK_ALWAYS_EAGER", "True") == "True"
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL")
CELERY_REDIS_SOCKET_TIMEOUT = 15
# GeoIP
MAXMIND_CITY_DB = os.getenv("MAXMIND_CITY_DB", "/etc/GeoLite2-City.mmdb")
MAXMIND_ASN_DB = os.getenv("MAXMIND_ASN_DB", "/etc/GeoLite2-ASN.mmdb")
MESSAGE_TAGS = {
messages.INFO: "~info",
messages.WARNING: "~warning",
messages.ERROR: "~critical",
messages.SUCCESS: "~positive",
}
# Email
SERVER_EMAIL = os.getenv("SERVER_EMAIL", "Shynet <[email protected]>")
DEFAULT_FROM_EMAIL = SERVER_EMAIL
if DEBUG or os.environ.get("EMAIL_HOST") is None:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
else:
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_PORT = int(os.environ.get("EMAIL_PORT", 465))
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")
EMAIL_USE_SSL = True
# Shynet
# Can everyone create services, or only superusers?
# Note that in the current version of Shynet, being able to edit a service allows
# you to see every registered user on the Shynet instance. This will be changed in
# a future version.
ONLY_SUPERUSERS_CREATE = os.getenv("ONLY_SUPERUSERS_CREATE", "True") == "True"
# Should the script use HTTPS to send the POST requests? The hostname is from
# the django SITE default. (Edit it using the admin panel.)
SCRIPT_USE_HTTPS = os.getenv("SCRIPT_USE_HTTPS", "True") == "True"
# How frequently should the tracking script "phone home" with a heartbeat, in
# milliseconds?
SCRIPT_HEARTBEAT_FREQUENCY = int(os.getenv("SCRIPT_HEARTBEAT_FREQUENCY", "5000"))
# How much time can elapse between requests from the same user before a new
# session is created, in seconds?
SESSION_MEMORY_TIMEOUT = int(os.getenv("SESSION_MEMORY_TIMEOUT", "1800"))
| []
| []
| [
"DJANGO_SECRET_KEY",
"DB_HOST",
"TIME_ZONE",
"DB_PORT",
"DB_NAME",
"MAXMIND_CITY_DB",
"EMAIL_HOST",
"CELERY_BROKER_URL",
"CELERY_TASK_ALWAYS_EAGER",
"SCRIPT_HEARTBEAT_FREQUENCY",
"SERVER_EMAIL",
"DB_USER",
"MAXMIND_ASN_DB",
"ONLY_SUPERUSERS_CREATE",
"ALLOWED_HOSTS",
"REDIS_CACHE_LOCATION",
"ACCOUNT_SIGNUPS_ENABLED",
"EMAIL_PORT",
"EMAIL_HOST_PASSWORD",
"EMAIL_HOST_USER",
"DB_PASSWORD",
"SCRIPT_USE_HTTPS",
"SQLITE",
"SESSION_MEMORY_TIMEOUT",
"DEBUG"
]
| [] | ["DJANGO_SECRET_KEY", "DB_HOST", "TIME_ZONE", "DB_PORT", "DB_NAME", "MAXMIND_CITY_DB", "EMAIL_HOST", "CELERY_BROKER_URL", "CELERY_TASK_ALWAYS_EAGER", "SCRIPT_HEARTBEAT_FREQUENCY", "SERVER_EMAIL", "DB_USER", "MAXMIND_ASN_DB", "ONLY_SUPERUSERS_CREATE", "ALLOWED_HOSTS", "REDIS_CACHE_LOCATION", "ACCOUNT_SIGNUPS_ENABLED", "EMAIL_PORT", "EMAIL_HOST_PASSWORD", "EMAIL_HOST_USER", "DB_PASSWORD", "SCRIPT_USE_HTTPS", "SQLITE", "SESSION_MEMORY_TIMEOUT", "DEBUG"] | python | 25 | 0 | |
python_modules/libraries/dagster-shell/dagster_shell/solids.py | import os
from dagster import (
Enum,
EnumValue,
Failure,
Field,
InputDefinition,
Noneable,
Nothing,
OutputDefinition,
Permissive,
check,
op,
solid,
)
from .utils import execute, execute_script_file
def shell_op_config():
return {
"env": Field(
Noneable(Permissive()),
default_value=os.environ.copy(),
is_required=False,
description="An optional dict of environment variables to pass to the subprocess. "
"Defaults to using os.environ.copy().",
),
"output_logging": Field(
Enum(
name="OutputType",
enum_values=[
EnumValue("STREAM", description="Stream script stdout/stderr."),
EnumValue(
"BUFFER",
description="Buffer shell script stdout/stderr, then log upon completion.",
),
EnumValue("NONE", description="No logging"),
],
),
is_required=False,
default_value="BUFFER",
),
"cwd": Field(
Noneable(str),
default_value=None,
is_required=False,
description="Working directory in which to execute shell script",
),
}
def core_shell(dagster_decorator, decorator_name):
@dagster_decorator(
name=f"shell_{decorator_name}",
description=(
f"This {decorator_name} executes a shell command it receives as input.\n\n"
f"This {decorator_name} is suitable for uses where the command to execute is generated dynamically by "
f"upstream {decorator_name}. If you know the command to execute at pipeline construction time, "
f"consider `shell_command_{decorator_name}` instead."
),
input_defs=[InputDefinition("shell_command", str)],
output_defs=[OutputDefinition(str, "result")],
config_schema=shell_op_config(),
)
def shell_fn(context, shell_command):
output, return_code = execute(
shell_command=shell_command, log=context.log, **context.op_config
)
if return_code:
raise Failure(
description="Shell command execution failed with output: {output}".format(
output=output
)
)
return output
return shell_fn
shell_solid = core_shell(solid, "solid")
shell_op = core_shell(op, "op")
def create_shell_command_op(
shell_command,
name,
description=None,
required_resource_keys=None,
tags=None,
):
"""This function is a factory that constructs ops to execute a shell command.
Note that you can only use `shell_command_op` if you know the command you'd like to execute
at pipeline construction time. If you'd like to construct shell commands dynamically during
pipeline execution and pass them between ops, you should use `shell_op` instead.
Examples:
.. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_command_op.py
:language: python
Args:
shell_command (str): The shell command that the constructed op will execute.
name (str): The name of the constructed op.
description (Optional[str]): Human-readable description of this op.
required_resource_keys (Optional[Set[str]]): Set of resource handles required by this op.
Setting this ensures that resource spin up for the required resources will occur before
the shell command is executed.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for the op. Frameworks may
expect and require certain metadata to be attached to a op. Users should generally
not set metadata directly. Values that are not strings will be json encoded and must meet
the criteria that `json.loads(json.dumps(value)) == value`.
Raises:
Failure: Raised when the shell command returns a non-zero exit code.
Returns:
OpDefinition: Returns the constructed op definition.
"""
return core_create_shell_command(
op,
shell_command=shell_command,
name=name,
description=description,
required_resource_keys=required_resource_keys,
tags=tags,
)
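# Minimal usage sketch (an assumption for illustration, not the literalinclude
# example referenced in the docstring above); "echo_op" and "echo_graph" are
# hypothetical names:
#
#     from dagster import graph
#
#     echo_op = create_shell_command_op('echo "hello"', name="echo_op")
#
#     @graph
#     def echo_graph():
#         echo_op()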
def create_shell_command_solid(
shell_command,
name,
description=None,
required_resource_keys=None,
tags=None,
):
"""This function is a factory that constructs solids to execute a shell command.
Note that you can only use `shell_command_solid` if you know the command you'd like to execute
at pipeline construction time. If you'd like to construct shell commands dynamically during
pipeline execution and pass them between solids, you should use `shell_solid` instead.
Examples:
.. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_command_solid.py
:language: python
Args:
shell_command (str): The shell command that the constructed solid will execute.
name (str): The name of the constructed solid.
description (Optional[str]): Human-readable description of this solid.
required_resource_keys (Optional[Set[str]]): Set of resource handles required by this solid.
Setting this ensures that resource spin up for the required resources will occur before
the shell command is executed.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for the solid. Frameworks may
expect and require certain metadata to be attached to a solid. Users should generally
not set metadata directly. Values that are not strings will be json encoded and must meet
the criteria that `json.loads(json.dumps(value)) == value`.
Raises:
Failure: Raised when the shell command returns a non-zero exit code.
Returns:
SolidDefinition: Returns the constructed solid definition.
"""
return core_create_shell_command(
solid,
shell_command=shell_command,
name=name,
description=description,
required_resource_keys=required_resource_keys,
tags=tags,
)
def core_create_shell_command(
dagster_decorator,
shell_command,
name,
description=None,
required_resource_keys=None,
tags=None,
):
check.str_param(shell_command, "shell_command")
name = check.str_param(name, "name")
@dagster_decorator(
name=name,
description=description,
input_defs=[InputDefinition("start", Nothing)],
output_defs=[OutputDefinition(str, "result")],
config_schema=shell_op_config(),
required_resource_keys=required_resource_keys,
tags=tags,
)
def _shell_fn(context):
output, return_code = execute(
shell_command=shell_command, log=context.log, **context.op_config
)
if return_code:
raise Failure(
description="Shell command execution failed with output: {output}".format(
output=output
)
)
return output
return _shell_fn
def create_shell_script_op(
shell_script_path, name="create_shell_script_op", input_defs=None, **kwargs
):
"""This function is a factory which constructs an op that will execute a shell command read
from a script file.
Any kwargs passed to this function will be passed along to the underlying :func:`@op
<dagster.op>` decorator. However, note that overriding ``config`` or ``output_defs`` is not
supported.
You might consider using :func:`@graph <dagster.graph>` to wrap this op
in the cases where you'd like to configure the shell op with different config fields.
Examples:
.. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_script_op.py
:language: python
Args:
shell_script_path (str): The script file to execute.
name (str, optional): The name of this op. Defaults to "create_shell_script_op".
input_defs (List[InputDefinition], optional): input definitions for the op. Defaults to
a single Nothing input.
Raises:
Failure: Raised when the shell command returns a non-zero exit code.
Returns:
OpDefinition: Returns the constructed op definition.
"""
return core_create_shell_script(
        dagster_decorator=op,
        decorator_name="op",
shell_script_path=shell_script_path,
name=name,
input_defs=input_defs,
**kwargs,
)
def create_shell_script_solid(
shell_script_path, name="create_shell_script_solid", input_defs=None, **kwargs
):
"""This function is a factory which constructs a solid that will execute a shell command read
from a script file.
Any kwargs passed to this function will be passed along to the underlying :func:`@solid
<dagster.solid>` decorator. However, note that overriding ``config`` or ``output_defs`` is not
supported.
You might consider using :func:`@composite_solid <dagster.composite_solid>` to wrap this solid
in the cases where you'd like to configure the shell solid with different config fields.
Examples:
.. literalinclude:: ../../../../../../python_modules/libraries/dagster-shell/dagster_shell_tests/example_shell_script_solid.py
:language: python
Args:
shell_script_path (str): The script file to execute.
name (str, optional): The name of this solid. Defaults to "create_shell_script_solid".
input_defs (List[InputDefinition], optional): input definitions for the solid. Defaults to
a single Nothing input.
Raises:
Failure: Raised when the shell command returns a non-zero exit code.
Returns:
SolidDefinition: Returns the constructed solid definition.
"""
return core_create_shell_script(
dagster_decorator=solid,
decorator_name="solid",
shell_script_path=shell_script_path,
name=name,
input_defs=input_defs,
**kwargs,
)
def core_create_shell_script(
dagster_decorator,
decorator_name,
shell_script_path,
name="create_shell_script_solid",
input_defs=None,
**kwargs,
):
check.str_param(shell_script_path, "shell_script_path")
name = check.str_param(name, "name")
check.opt_list_param(input_defs, "input_defs", of_type=InputDefinition)
if "output_defs" in kwargs:
raise TypeError(f"Overriding output_defs for shell {decorator_name} is not supported.")
if "config" in kwargs:
raise TypeError(f"Overriding config for shell {decorator_name} is not supported.")
@dagster_decorator(
name=name,
description=kwargs.pop("description", f"A {decorator_name} to invoke a shell command."),
input_defs=input_defs or [InputDefinition("start", Nothing)],
output_defs=[OutputDefinition(str, "result")],
config_schema=shell_op_config(),
**kwargs,
)
def _shell_script_fn(context):
output, return_code = execute_script_file(
shell_script_path=shell_script_path, log=context.log, **context.op_config
)
if return_code:
raise Failure(
description="Shell command execution failed with output: {output}".format(
output=output
)
)
return output
return _shell_script_fn
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/symbols/internal/symbols/ctags.go | package symbols
import (
"fmt"
"log"
"os"
"strconv"
ctags "github.com/sourcegraph/go-ctags"
"github.com/sourcegraph/sourcegraph/internal/env"
)
const debugLogs = false
var logErrors = os.Getenv("DEPLOY_TYPE") == "dev"
var ctagsCommand = env.Get("CTAGS_COMMAND", "universal-ctags", "ctags command (should point to universal-ctags executable compiled with JSON and seccomp support)")
// Increasing this value may increase the size of the symbols cache, but will also stop long lines containing symbols from
// being highlighted improperly. See https://github.com/sourcegraph/sourcegraph/issues/7668.
var rawPatternLengthLimit = env.Get("CTAGS_PATTERN_LENGTH_LIMIT", "250", "the maximum length of the patterns output by ctags")
// New runs the ctags command from the CTAGS_COMMAND environment
// variable, falling back to `universal-ctags`.
func NewParser() (ctags.Parser, error) {
patternLengthLimit, err := strconv.Atoi(rawPatternLengthLimit)
if err != nil {
return nil, fmt.Errorf("invalid pattern length limit: %s", rawPatternLengthLimit)
}
var info *log.Logger
if logErrors {
info = log.New(os.Stderr, "ctags: ", log.LstdFlags)
}
var debug *log.Logger
if debugLogs {
debug = log.New(os.Stderr, "DBUG ctags: ", log.LstdFlags)
}
return ctags.New(ctags.Options{
Bin: ctagsCommand,
PatternLengthLimit: patternLengthLimit,
Info: info,
Debug: debug,
})
}
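// Illustrative sketch (not part of the original file): callers would construct
// the parser once and reuse it, e.g.
//
//	parser, err := NewParser()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer parser.Close()
//
// Whether the go-ctags Parser exposes Close is an assumption here; only
// NewParser above is taken from this file.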
| [
"\"DEPLOY_TYPE\""
]
| []
| [
"DEPLOY_TYPE"
]
| [] | ["DEPLOY_TYPE"] | go | 1 | 0 | |
airflow/operators/python.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import os
import pickle
import sys
import types
from inspect import signature
from itertools import islice
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import Callable, Dict, Iterable, List, Optional
import dill
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, SkipMixin
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import context_to_airflow_vars
from airflow.utils.process_utils import execute_in_subprocess
from airflow.utils.python_virtualenv import prepare_virtualenv
class PythonOperator(BaseOperator):
"""
Executes a Python callable
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonOperator`
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:type op_kwargs: dict (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:type op_args: list (templated)
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied. (templated)
:type templates_dict: dict[str]
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:type templates_exts: list[str]
"""
template_fields = ('templates_dict', 'op_args', 'op_kwargs')
ui_color = '#ffefeb'
    # Since we won't mutate the arguments, we should just do the shallow copy;
    # there are some cases where we can't deepcopy the objects (e.g. protobuf).
shallow_copy_attrs = ('python_callable', 'op_kwargs',)
@apply_defaults
def __init__(
self,
python_callable: Callable,
op_args: Optional[List] = None,
op_kwargs: Optional[Dict] = None,
templates_dict: Optional[Dict] = None,
templates_exts: Optional[List[str]] = None,
*args,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
if not callable(python_callable):
raise AirflowException('`python_callable` param must be callable')
self.python_callable = python_callable
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
self.templates_dict = templates_dict
if templates_exts:
self.template_ext = templates_exts
@staticmethod
def determine_op_kwargs(python_callable: Callable,
context: Dict,
num_op_args: int = 0) -> Dict:
"""
Function that will inspect the signature of a python_callable to determine which
values need to be passed to the function.
:param python_callable: The function that you want to invoke
:param context: The context provided by the execute method of the Operator/Sensor
:param num_op_args: The number of op_args provided, so we know how many to skip
        :return: The op_kwargs dictionary which contains the values that are compatible with the Callable
"""
context_keys = context.keys()
sig = signature(python_callable).parameters.items()
op_args_names = islice(sig, num_op_args)
for name, _ in op_args_names:
# Check if it is part of the context
if name in context_keys:
# Raise an exception to let the user know that the keyword is reserved
raise ValueError(
"The key {} in the op_args is part of the context, and therefore reserved".format(name)
)
if any(str(param).startswith("**") for _, param in sig):
# If there is a ** argument then just dump everything.
op_kwargs = context
else:
# If there is only for example, an execution_date, then pass only these in :-)
op_kwargs = {
name: context[name]
for name, _ in sig
if name in context # If it isn't available on the context, then ignore
}
return op_kwargs
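    # Illustrative example (not part of the original operator): for a callable
    # declared as ``def callback(ds, **context)`` the ``**`` catch-all means
    # the whole context is passed through, while ``def callback(ds, ti)`` only
    # receives ``{"ds": ..., "ti": ...}``. "callback" is a hypothetical name
    # used here for illustration.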
def execute(self, context: Dict):
# Export context to make it available for callables to use.
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.debug("Exporting the following env vars:\n%s",
'\n'.join(["{}={}".format(k, v)
for k, v in airflow_context_vars.items()]))
os.environ.update(airflow_context_vars)
context.update(self.op_kwargs)
context['templates_dict'] = self.templates_dict
self.op_kwargs = PythonOperator.determine_op_kwargs(self.python_callable, context, len(self.op_args))
return_value = self.execute_callable()
self.log.info("Done. Returned value was: %s", return_value)
return return_value
def execute_callable(self):
return self.python_callable(*self.op_args, **self.op_kwargs)
class BranchPythonOperator(PythonOperator, SkipMixin):
"""
Allows a workflow to "branch" or follow a path following the execution
of this task.
It derives the PythonOperator and expects a Python function that returns
a single task_id or list of task_ids to follow. The task_id(s) returned
should point to a task directly downstream from {self}. All other "branches"
or directly downstream tasks are marked with a state of ``skipped`` so that
these paths can't move forward. The ``skipped`` states are propagated
downstream to allow for the DAG state to fill up and the DAG run's state
to be inferred.
"""
def execute(self, context: Dict):
branch = super().execute(context)
self.skip_all_except(context['ti'], branch)
return branch
class ShortCircuitOperator(PythonOperator, SkipMixin):
"""
Allows a workflow to continue only if a condition is met. Otherwise, the
workflow "short-circuits" and downstream tasks are skipped.
The ShortCircuitOperator is derived from the PythonOperator. It evaluates a
condition and short-circuits the workflow if the condition is False. Any
downstream tasks are marked with a state of "skipped". If the condition is
True, downstream tasks proceed as normal.
The condition is determined by the result of `python_callable`.
"""
def execute(self, context: Dict):
condition = super().execute(context)
self.log.info("Condition result is %s", condition)
if condition:
self.log.info('Proceeding with downstream tasks...')
return
self.log.info('Skipping downstream tasks...')
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)
self.log.info("Done.")
class PythonVirtualenvOperator(PythonOperator):
"""
Allows one to run a function in a virtualenv that is created and destroyed
automatically (with certain caveats).
The function must be defined using def, and not be
part of a class. All imports must happen inside the function
and no variables outside of the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
can use a return value.
Note that if your virtualenv runs in a different Python major version than Airflow,
you cannot use return values, op_args, or op_kwargs. You can use string_args though.
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtualenv
:type python_callable: function
:param requirements: A list of requirements as specified in a pip install command
:type requirements: list[str]
:param python_version: The Python version to run the virtualenv with. Note that
both 2 and 2.7 are acceptable forms.
:type python_version: str
:param use_dill: Whether to use dill to serialize
the args and result (pickle is default). This allow more complex types
but requires you to include dill in your requirements.
:type use_dill: bool
:param system_site_packages: Whether to include
system_site_packages in your virtualenv.
See virtualenv documentation for more information.
:type system_site_packages: bool
:param op_args: A list of positional arguments to pass to python_callable.
    :type op_args: list
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:type op_kwargs: dict
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:type string_args: list[str]
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied
:type templates_dict: dict of str
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:type templates_exts: list[str]
"""
@apply_defaults
def __init__(
self,
python_callable: Callable,
requirements: Optional[Iterable[str]] = None,
python_version: Optional[str] = None,
use_dill: bool = False,
system_site_packages: bool = True,
op_args: Optional[Iterable] = None,
op_kwargs: Optional[Dict] = None,
string_args: Optional[Iterable[str]] = None,
templates_dict: Optional[Dict] = None,
templates_exts: Optional[Iterable[str]] = None,
*args,
**kwargs
):
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
templates_dict=templates_dict,
templates_exts=templates_exts,
*args,
**kwargs)
self.requirements = requirements or []
self.string_args = string_args or []
self.python_version = python_version
self.use_dill = use_dill
self.system_site_packages = system_site_packages
# check that dill is present if needed
dill_in_requirements = map(lambda x: x.lower().startswith('dill'),
self.requirements)
if (not system_site_packages) and use_dill and not any(dill_in_requirements):
raise AirflowException('If using dill, dill must be in the environment ' +
'either via system_site_packages or requirements')
# check that a function is passed, and that it is not a lambda
if (not isinstance(self.python_callable,
types.FunctionType) or (self.python_callable.__name__ ==
(lambda x: 0).__name__)):
            raise AirflowException('{} only supports functions for python_callable arg'.format(
                self.__class__.__name__))
# check that args are passed iff python major version matches
if (python_version is not None and
str(python_version)[0] != str(sys.version_info[0]) and
self._pass_op_args()):
raise AirflowException("Passing op_args or op_kwargs is not supported across "
"different Python major versions "
"for PythonVirtualenvOperator. "
"Please use string_args.")
def execute_callable(self):
with TemporaryDirectory(prefix='venv') as tmp_dir:
if self.templates_dict:
self.op_kwargs['templates_dict'] = self.templates_dict
# generate filenames
input_filename = os.path.join(tmp_dir, 'script.in')
output_filename = os.path.join(tmp_dir, 'script.out')
string_args_filename = os.path.join(tmp_dir, 'string_args.txt')
script_filename = os.path.join(tmp_dir, 'script.py')
# set up virtualenv
python_bin = 'python' + str(self.python_version) if self.python_version else None
prepare_virtualenv(
venv_directory=tmp_dir,
python_bin=python_bin,
system_site_packages=self.system_site_packages,
requirements=self.requirements,
)
self._write_args(input_filename)
self._write_script(script_filename)
self._write_string_args(string_args_filename)
# execute command in virtualenv
execute_in_subprocess(
self._generate_python_cmd(tmp_dir,
script_filename,
input_filename,
output_filename,
string_args_filename))
return self._read_result(output_filename)
def _pass_op_args(self):
# we should only pass op_args if any are given to us
return len(self.op_args) + len(self.op_kwargs) > 0
def _write_string_args(self, filename):
# writes string_args to a file, which are read line by line
with open(filename, 'w') as file:
file.write('\n'.join(map(str, self.string_args)))
def _write_args(self, input_filename):
# serialize args to file
if self._pass_op_args():
with open(input_filename, 'wb') as file:
arg_dict = ({'args': self.op_args, 'kwargs': self.op_kwargs})
if self.use_dill:
dill.dump(arg_dict, file)
else:
pickle.dump(arg_dict, file)
def _read_result(self, output_filename):
if os.stat(output_filename).st_size == 0:
return None
with open(output_filename, 'rb') as file:
try:
if self.use_dill:
return dill.load(file)
else:
return pickle.load(file)
except ValueError:
self.log.error("Error deserializing result. "
"Note that result deserialization "
"is not supported across major Python versions.")
raise
def _write_script(self, script_filename):
with open(script_filename, 'w') as file:
python_code = self._generate_python_code()
            self.log.debug('Writing code to file\n%s', python_code)
file.write(python_code)
@staticmethod
def _generate_python_cmd(tmp_dir, script_filename,
input_filename, output_filename, string_args_filename):
# direct path alleviates need to activate
return ['{}/bin/python'.format(tmp_dir), script_filename,
input_filename, output_filename, string_args_filename]
def _generate_python_code(self):
if self.use_dill:
pickling_library = 'dill'
else:
pickling_library = 'pickle'
fn = self.python_callable
        # don't try to read pickle if we didn't pass anything
if self._pass_op_args():
load_args_line = 'with open(sys.argv[1], "rb") as file: arg_dict = {}.load(file)' \
.format(pickling_library)
else:
load_args_line = 'arg_dict = {"args": [], "kwargs": {}}'
# no indents in original code so we can accept
# any type of indents in the original function
# we deserialize args, call function, serialize result if necessary
return dedent("""\
import {pickling_library}
import sys
{load_args_code}
args = arg_dict["args"]
kwargs = arg_dict["kwargs"]
with open(sys.argv[3], 'r') as file:
virtualenv_string_args = list(map(lambda x: x.strip(), list(file)))
{python_callable_lines}
res = {python_callable_name}(*args, **kwargs)
with open(sys.argv[2], 'wb') as file:
res is not None and {pickling_library}.dump(res, file)
""").format(load_args_code=load_args_line,
python_callable_lines=dedent(inspect.getsource(fn)),
python_callable_name=fn.__name__,
pickling_library=pickling_library)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
_integrations/nrlambda/config.go | // Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package nrlambda
import (
"os"
"time"
newrelic "github.com/divyanshgaba/go-agent"
)
// NewConfig populates a newrelic.Config with correct default settings for a
// Lambda serverless environment. NewConfig will populate fields based on
// environment variables common to all New Relic agents that support Lambda.
// Environment variables NEW_RELIC_ACCOUNT_ID, NEW_RELIC_TRUSTED_ACCOUNT_KEY,
// and NEW_RELIC_PRIMARY_APPLICATION_ID configure fields required for
// distributed tracing. Environment variable NEW_RELIC_APDEX_T may be used to
// set a custom apdex threshold.
func NewConfig() newrelic.Config {
return newConfigInternal(os.Getenv)
}
func newConfigInternal(getenv func(string) string) newrelic.Config {
cfg := newrelic.NewConfig("", "")
cfg.ServerlessMode.Enabled = true
cfg.ServerlessMode.AccountID = getenv("NEW_RELIC_ACCOUNT_ID")
cfg.ServerlessMode.TrustedAccountKey = getenv("NEW_RELIC_TRUSTED_ACCOUNT_KEY")
cfg.ServerlessMode.PrimaryAppID = getenv("NEW_RELIC_PRIMARY_APPLICATION_ID")
cfg.DistributedTracer.Enabled = true
if s := getenv("NEW_RELIC_APDEX_T"); "" != s {
if apdex, err := time.ParseDuration(s + "s"); nil == err {
cfg.ServerlessMode.ApdexThreshold = apdex
}
}
return cfg
}
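// Illustrative sketch (not part of the original file): a Lambda handler would
// typically build its application from this config before wiring up the
// handler, roughly:
//
//	cfg := nrlambda.NewConfig()
//	app, err := newrelic.NewApplication(cfg)
//
// The variable names are assumptions for illustration; handling of err is
// left to the caller.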
| []
| []
| []
| [] | [] | go | 0 | 0 | |
nekbot/core/management.py | import os
import logging
import sys
from nekbot.utils.filesystem import copytree, copy_template
from nekbot.utils.system import execute_hook
__author__ = 'nekmo'
__dir__ = os.path.abspath(os.path.dirname(__file__))
nekbot_src_dir = os.path.dirname(os.path.dirname(__file__))
conf_src_dir = os.path.join(nekbot_src_dir, 'conf')
class Management(object):
no_configuration_required = ['createbot']
def __init__(self, settings=None, description=None, default_level=logging.INFO):
self.parser = self.argument_parser(settings, description, default_level)
def argument_parser(self, settings=None, description=None, default_level=logging.INFO):
import argparse
if settings is None:
settings = os.environ.get('NEKBOT_SETTINGS_MODULE', 'settings')
if description is None:
from __main__ import __doc__ as description
parser = argparse.ArgumentParser(prog='nekbot', description=description)
        # Logging levels
parser.add_argument('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
parser.add_argument('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
parser.add_argument('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
parser.add_argument('-s', '--settings', help='Settings file module',
default=settings)
parser.sub = parser.add_subparsers()
# Subcommand Create bot
        parser_createbot = parser.sub.add_parser('createbot', help='Create a new bot directory using a template.')
parser_createbot.set_defaults(which='createbot')
parser_createbot.add_argument('name')
parser_createbot.add_argument('dest', nargs='?', default=None)
# Subcommand Create plugin
parser_createplugin = parser.sub.add_parser('createplugin', help='Create a new plugin for distribute.')
parser_createplugin.set_defaults(which='createplugin')
parser_createplugin.add_argument('dest')
# Subcommand start
parser_start = parser.sub.add_parser('start', help='Start bot.')
parser_start.set_defaults(which='start')
return parser
def execute(self, parser=None):
from nekbot.conf import settings
if parser is None:
parser = self.parser
args = parser.parse_args()
if not args.which in self.no_configuration_required:
settings.configure(args.settings)
logging.basicConfig(level=args.loglevel)
if not hasattr(self, 'command_' + args.which):
            raise ValueError('Command %s is invalid.' % args.which)
getattr(self, 'command_' + args.which)(args)
def command_createbot(self, args):
if args.dest:
dest = args.dest
else:
dest = args.name
if not args.dest and os.path.exists(dest):
sys.stderr.write("Sorry, directory %s exists. I can't create directory.\n" % dest)
sys.exit(1)
elif not os.path.exists(dest):
os.mkdir(dest)
try:
copytree(os.path.join(conf_src_dir, 'project_template'), dest)
except Exception as e:
sys.stderr.write('Unknown error: %s\n' % e)
print('Project created as %s' % dest)
def command_createplugin(self, args):
from nekbot.conf import settings
dest = args.dest
name = os.path.split(dest)[1]
replace = {
'plugin_template': name,
'plugin_author_name': settings.PLUGIN_AUTHOR_NAME,
'plugin_author_email': settings.PLUGIN_AUTHOR_EMAIL,
'plugin_author_website': settings.PLUGIN_AUTHOR_WEBSITE,
}
if os.path.exists(dest):
sys.stderr.write("Sorry, directory %s exists. I can't create directory.\n" % dest)
exit(1)
else:
os.mkdir(dest)
if settings.HOOK_BEFORE_CREATE_PLUGIN:
execute_hook(settings.HOOK_BEFORE_CREATE_PLUGIN, dest, name, settings)
copy_template(os.path.join(conf_src_dir, 'plugin_template'), dest, replace)
print('Plugin created as %s in %s' % (name, dest))
if settings.HOOK_AFTER_CREATE_PLUGIN:
execute_hook(settings.HOOK_AFTER_CREATE_PLUGIN, dest, name, settings)
def command_start(self, args):
from nekbot import NekBot
nekbot = NekBot()
nekbot = nekbot.start()
try:
nekbot.loop()
except (KeyboardInterrupt, SystemExit):
nekbot.close() | []
| []
| [
"NEKBOT_SETTINGS_MODULE"
]
| [] | ["NEKBOT_SETTINGS_MODULE"] | python | 1 | 0 | |
harp.go | package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/DisposaBoy/JsonConfigReader"
)
// TODOs
// *** version control (harp version; go version too?)
// *** checkers in PATH
// *** tail command
// ** git status in Info?
// * tmux support for long migrations?
// PRINCIPLES
// KISS
// BC (Being Convinent: all things in one place)
// local
// pwd/.harp/files
// pwd/.harp/migration
//
// server
// $GOPATH/bin
// $GOPATH/src
// $HOME/harp/$APP/build.$num.tar.gz
// $HOME/harp/$APP/pid
// $HOME/harp/$APP/log
// $HOME/harp/$APP/migration.tar.gz
// $HOME/harp/$APP/script
func init() {
log.SetOutput(os.Stdout)
if option.debug {
log.SetFlags(log.Lshortfile)
} else {
log.SetFlags(0)
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
cleanCaches()
os.Exit(0)
}
}()
}
type Config struct {
GOOS, GOARCH string
NoRollback bool
RollbackCount int
// TODO
BuildVersionCmd string
// LogDir string `json:"log_dir"`
// TODO: multiple instances support
// TODO: multiple apps support
App App
// // TODO: migration and flag support (-after and -before)
// Hooks struct {
// Deploy struct {
// Before, After string
// }
// }
Servers map[string][]*Server
}
type App struct {
Name string
ImportPath string
NoRelMatch bool
DefaultExcludeds []string
Files []File
Args []string
Envs map[string]string
BuildCmd string
BuildArgs string
KillSig string
// Default: 1MB
FileWarningSize int64
DeployScript string
RestartScript string
MigrationScript string
// TODO
// Hooks struct{}
}
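// Illustrative example (an assumption, not taken from the original source):
// a minimal harp.json matching the Config and App structs above might look
// like the following; the JsonConfigReader import above means comments are
// tolerated in the config file.
//
//	{
//	    "GOOS": "linux",
//	    "GOARCH": "amd64",
//	    "App": {
//	        "Name": "myapp",
//	        "ImportPath": "github.com/example/myapp"
//	    },
//	    "Servers": {
//	        "prod": [] // Server entries omitted; the Server type is defined elsewhere
//	    }
//	}
//
// Field names follow the struct definitions; all values are placeholders.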
type Tasks []string
func (t Tasks) String() string { return "" }
func (t *Tasks) Set(s string) error {
migrations = append(migrations, newMigration(s))
return nil
}
type FlagStrings []string
func (t FlagStrings) String() string { return "" }
func (t *FlagStrings) Set(s string) error {
*t = append(*t, s)
return nil
}
var (
// option is a global control center, keeping flags in one place.
option = struct {
configPath string
debug bool
// verbose bool
noBuild bool
noUpload bool
noDeploy bool
noFiles bool
script string
softExclude bool
keepCache bool
toTailLog bool
tailBeginLineNum int
syncFileLimit int
// TODO: can specify a single server, instead of the whole server set
servers FlagStrings
serverSets FlagStrings
help bool
version bool
buildArgs string
all bool
deploy string
tasks Tasks
    hand bool // hand flag indicates migrations are not executed, only deployed to servers; the scripts to run them by hand are printed
// cli bool
    // transient flag allows running a go program without the presence of harp.json
transient bool
hook struct {
before, after string
}
docker bool
force bool
}{}
migrations []Migration
cfg Config
GoPaths = strings.Split(os.Getenv("GOPATH"), ":")
GoPath = GoPaths[0]
)
var tmpDir = ".harp"
func main() {
flag.StringVar(&option.configPath, "c", "harp.json", "config file path")
flag.BoolVar(&option.debug, "debug", false, "print debug info")
flag.BoolVar(&option.noBuild, "nb", false, "no build")
flag.BoolVar(&option.noBuild, "no-build", false, "no build")
flag.BoolVar(&option.noUpload, "nu", false, "no upload")
flag.BoolVar(&option.noUpload, "no-upload", false, "no upload")
flag.BoolVar(&option.noDeploy, "nd", false, "no deploy")
flag.BoolVar(&option.noDeploy, "no-deploy", false, "no deploy")
flag.BoolVar(&option.noDeploy, "nr", false, "no run (same as -no-deploy)")
flag.BoolVar(&option.noDeploy, "no-run", false, "no run (same as -no-deploy)")
flag.BoolVar(&option.noFiles, "nf", false, "no files")
flag.BoolVar(&option.noFiles, "no-files", false, "no files")
flag.BoolVar(&option.toTailLog, "log", false, "tail log after deploy")
    flag.IntVar(&option.tailBeginLineNum, "n", 32, "tail log starting line number (tail -n 32)")
flag.BoolVar(&option.help, "help", false, "print helps")
flag.BoolVar(&option.help, "h", false, "print helps")
flag.BoolVar(&option.version, "v", false, "print version num")
flag.BoolVar(&option.version, "version", false, "print version num")
flag.BoolVar(&option.softExclude, "soft-exclude", false, "use strings.Contains to exclude files")
flag.BoolVar(&option.keepCache, "cache", false, "cache data in .harp")
    flag.StringVar(&option.buildArgs, "build-args", "", "build args specified for building your programs (default: -a -v)")
flag.Var(&option.serverSets, "s", "specify server sets to deploy, multiple sets are split by comma")
flag.Var(&option.serverSets, "server-set", "specify server sets to deploy, multiple sets are split by comma")
flag.Var(&option.servers, "server", "specify servers to deploy, multiple servers are split by comma")
flag.BoolVar(&option.all, "all", false, "execute action on all server")
flag.IntVar(&option.syncFileLimit, "sync-queue-size", 5, "set file syncing queue size.")
flag.StringVar(&option.deploy, "deploy", "", "deploy app to servers/sets")
flag.Var(&option.tasks, "run", "run go scripts/packages on remote server.")
    flag.BoolVar(&option.hand, "hand", false, "print out shell scripts that could be executed by hand on remote servers")
flag.StringVar(&cfg.GOOS, "goos", "linux", "GOOS")
flag.StringVar(&cfg.GOARCH, "goarch", "amd64", "GOARCH")
flag.BoolVar(&option.transient, "t", false, "run migration in transient app")
    flag.BoolVar(&option.force, "f", false, "force harp to deploy, ignoring version checking")
flag.Parse()
if option.debug {
log.SetFlags(log.Lshortfile)
}
if option.version {
printVersion()
return
}
var action string
args := flag.Args()
switch {
case len(migrations) > 0:
action = "run"
case len(args) > 0:
action = args[0]
case len(args) == 0 || option.help:
printUsage()
return
}
switch action {
case "init":
initHarp()
return
case "clean":
option.keepCache = false
cleanCaches()
return
}
if option.transient {
cfg.App.Name = "harp"
} else {
cfg = parseCfg(option.configPath)
}
var servers []*Server
    if action != "cross-compile" && action != "xc" && !(action == "inspect" && len(args) > 1 && args[1] == "files") {
servers = retrieveServers()
}
switch action {
case "kill":
kill(servers)
case "deploy":
deploy(servers)
case "migrate", "run":
// TODO: could specify to run on all servers
if len(migrations) == 0 {
migrations = retrieveMigrations(args[1:])
}
if len(migrations) == 0 {
log.Println("please specify migration file or package import path")
log.Println("e.g. harp -s prod run file.go my/import/path/to/pkg")
os.Exit(1)
}
migrate(servers, migrations)
case "info", "status":
info(servers)
case "log":
option.toTailLog = true
case "restart":
// option.noBuild = true
// option.noUpload = true
// deploy(servers)
restart(servers)
case "inspect":
inspectScript(servers, args[1])
case "rollback":
if len(args) == 1 {
fmt.Println("please specify rollback command or version")
os.Exit(1)
}
if args[1] == "l" || args[1] == "ls" || args[1] == "list" {
lsRollbackVersions(servers, args[1] == "list")
} else {
rollback(servers, strings.TrimSpace(args[1]))
}
case "cross-compile", "xc":
initXC()
case "console", "shell", "sh":
startConsole(servers)
default:
fmt.Println("unknown command:", args[0])
os.Exit(1)
}
if option.toTailLog {
// if !option.keepCache {
// if err := os.RemoveAll(tmpDir); err != nil {
// exitf("os.RemoveAll(%s) error: %s", tmpDir, err)
// }
// }
tailLog(servers, option.tailBeginLineNum)
}
}
func initTmpDir() func() {
if err := os.RemoveAll(tmpDir); err != nil {
exitf("os.RemoveAll(%s) error: %s", tmpDir, err)
}
if err := os.MkdirAll(tmpDir, 0755); err != nil {
exitf("os.MkdirAll(%s) error: %s", tmpDir, err)
}
return cleanCaches
}
func deploy(servers []*Server) {
defer initTmpDir()()
info := getBuildLog()
if !option.noBuild {
log.Println("building")
build()
}
if !option.noUpload {
syncFiles()
}
var wg sync.WaitGroup
for _, server := range servers {
wg.Add(1)
go func(server *Server) {
defer wg.Done()
// check harp version
if err := server.checkHarpVersion(); err != nil {
                if option.force {
                    fmt.Fprintf(os.Stderr, "%s\n", err)
                } else {
                    exitf("%s", err)
                }
}
if !option.noUpload {
diff := server.diffFiles()
if diff != "" {
diff = "diff: \n" + diff
}
log.Printf("uploading: [%s] %s\n%s", server.Set, server, diff)
server.upload(info)
}
if !option.noDeploy {
log.Printf("deploying: [%s] %s\n", server.Set, server)
server.deploy()
}
}(server)
}
wg.Wait()
}
func (s *Server) checkHarpVersion() error {
output, err := s.getBuildInfo()
if err != nil {
fmt.Fprintf(os.Stderr, "[%s] getBuildInfo(): %s\n", s, err)
return nil
}
matches := regexp.MustCompile(harpVersionPrefix + "([0-9\\.]+)\n").FindStringSubmatch(output)
if len(matches) != 2 {
fmt.Fprintf(os.Stderr, "[%s] failed to retrieve harp version\n", s)
return nil
}
old := strings.Split(matches[1], ".")
cur := strings.Split(getVersion(), ".")
if cmpver(old[0], cur[0]) > 0 || cmpver(old[1], cur[1]) > 0 || cmpver(old[2], cur[2]) > 0 {
return fmt.Errorf("server %s is deployed by harp version %s; your harp version is %s, please upgrade harp or skip harp version checking by flag -f", s, matches[1], getVersion())
}
return nil
}
func cmpver(v1, v2 string) int {
i1, _ := strconv.Atoi(v1)
i2, _ := strconv.Atoi(v2)
return i1 - i2
}
func info(servers []*Server) {
var wg sync.WaitGroup
for _, serv := range servers {
wg.Add(1)
go func(serv *Server) {
defer wg.Done()
serv.initPathes()
output, err := serv.getBuildInfo()
if err != nil {
exitf("failed to cat %s.info on %s: %s(%s)", cfg.App.Name, serv, err, output)
}
fmt.Printf("=====\n%s\n%s", serv.String(), output)
}(serv)
}
wg.Wait()
}
func (s *Server) getBuildInfo() (string, error) {
session := s.getSession()
output, err := session.CombinedOutput(fmt.Sprintf(
"cat %s/src/%s/harp-build.info",
s.GoPath, cfg.App.ImportPath,
))
return string(output), err
}
func parseCfg(configPath string) (cfg Config) {
var r io.Reader
r, err := os.OpenFile(configPath, os.O_RDONLY, 0644)
if err != nil {
if os.IsNotExist(err) {
            exitf("Config %s doesn't exist or is unspecified.\nSpecify it with the -c flag (e.g. -c harp.json)", configPath)
}
exitf("failed to read config: %s", err)
}
if err := json.NewDecoder(JsonConfigReader.New(r)).Decode(&cfg); err != nil {
exitf("failed to parse config: %s", err)
}
if cfg.App.KillSig == "" {
cfg.App.KillSig = "KILL"
}
for k, set := range cfg.Servers {
for _, s := range set {
s.Set = k
}
}
if cfg.RollbackCount == 0 {
cfg.RollbackCount = 3
}
cfg.App.DefaultExcludeds = append(cfg.App.DefaultExcludeds, ".harp/")
if cfg.App.FileWarningSize == 0 {
cfg.App.FileWarningSize = 1 << 20
}
return
}
const harpVersionPrefix = "Harp Version: "
func getBuildLog() string {
var info string
info += "Go Version: " + cmd("go", "version")
if cfg.GOOS != "" {
info += "GOOS: " + cfg.GOOS + "\n"
}
if cfg.GOARCH != "" {
info += "GOARCH: " + cfg.GOARCH + "\n"
}
info += harpVersionPrefix + getVersion() + "\n"
vcs, checksum := retrieveChecksum()
info += vcs + " Checksum: " + checksum + "\n"
info += "Composer: " + retrieveAuthor() + "\n"
info += "Build At: " + time.Now().String()
return info
}
func retrieveChecksum() (vcs, checksum string) {
checksum = tryCmd("git", "rev-parse", "HEAD")
if checksum != "" {
return "Git", strings.TrimSpace(checksum)
}
checksum = tryCmd("hg", "id", "-i")
if checksum != "" {
return "Hg", strings.TrimSpace(checksum)
}
checksum = tryCmd("bzr", "version-info", "--custom", `--template="{revision_id}\n"`)
if checksum != "" {
return "Bzr", strings.TrimSpace(checksum)
}
return
}
func retrieveAuthor() string {
name, err := ioutil.ReadFile(".harp-composer")
if err == nil && len(name) > 0 {
return strings.TrimSpace(string(name))
}
if author := tryCmd("git", "config", "user.name"); author != "" {
return strings.TrimSpace(author)
}
if author := tryCmd("whoami"); author != "" {
return strings.TrimSpace(author)
}
return "anonymous"
}
func isUsingGit() bool {
_, err := os.Stat(".git")
return err == nil
}
func cmd(name string, args ...string) string {
cmd := exec.Command(name, args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "GOOS="+cfg.GOOS, "GOARCH="+cfg.GOARCH)
output, err := cmd.CombinedOutput()
if err != nil {
        exitf("failed to run: %s %s: %s\n%s\n", name, strings.Join(args, " "), err, output)
}
return string(output)
}
func tryCmd(name string, args ...string) string {
cmd := exec.Command(name, args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "GOOS="+cfg.GOOS, "GOARCH="+cfg.GOARCH)
output, err := cmd.CombinedOutput()
if err != nil && option.debug {
        log.Printf("failed to run %s %s: %s (%s)\n", name, args, err, string(output))
}
return string(output)
}
func build() {
app := cfg.App
boutput := filepath.Join(tmpDir, app.Name)
ba := cfg.App.BuildArgs
if ba == "" {
ba = "-a -v"
}
if option.buildArgs != "" {
ba = option.buildArgs
}
buildCmd := fmt.Sprintf("go build %s -o %s %s", ba, boutput, app.ImportPath)
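    // A custom app.BuildCmd, when set, is used as a format string: the first
    // %s receives the output binary path and the second %s the import path.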
if app.BuildCmd != "" {
buildCmd = fmt.Sprintf(app.BuildCmd, boutput, app.ImportPath)
}
if option.debug {
println("build cmd:", buildCmd)
}
output := cmd("sh", "-c", buildCmd)
if option.debug {
print(output)
}
}
func exitf(format string, args ...interface{}) {
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
fmt.Fprintf(os.Stderr, format, args...)
if option.debug {
debug.PrintStack()
}
os.Exit(1)
}
func printUsage() {
fmt.Println(`harp is a go application deployment tool.
usage:
harp [options] [action]
actions:
deploy Deploy your application (e.g. harp -s prod deploy).
run Run migrations on server (e.g. harp -s prod migrate path/to/my_migration.go).
kill Kill server.
info Print build info of servers (e.g. harp -s prod info). Alias: status.
log Print real time logs of application (e.g. harp -s prod log).
restart Restart application (e.g. harp -s prod restart).
init Initialize a harp.json file.
rollback
ls List all the current releases. Alias: l, list.
$version Rollback to $version.
inspect Inspect script content and others.
deploy
restart
kill
rollback
files
options:`)
flag.PrintDefaults()
fmt.Println(`
examples:
Deploy:
harp -s prod -log deploy
Compile and run a go package or file in server/Migration:
Simple:
harp -server [email protected]:49153 run migration.go
With env and arguments (behold the quotes):
harp -server [email protected]:49153 run "Env1=val Env2=val migration2.go -arg1 val1"
Multiple migrations (behold the quotes):
harp -server [email protected]:49153 run migration.go "Env1=val migration2.go -arg1 val1"`)
}
func retrieveServers() []*Server {
serverSets := option.serverSets
servers := option.servers
if option.all {
serverSets = []string{}
        for set := range cfg.Servers {
serverSets = append(serverSets, set)
}
}
if len(servers) == 0 && len(serverSets) == 0 {
println("please specify servers or server sets to deploy (-s or -server).")
println("specify -all flag to execute the action on all servers.")
os.Exit(1)
}
var targetServers []*Server
for _, set := range serverSets {
servers, ok := cfg.Servers[set]
if !ok {
var existings []string
            for s := range cfg.Servers {
existings = append(existings, s)
}
sort.Sort(sort.StringSlice(existings))
fmt.Printf("server set doesn't exist: %s (%s)\n", set, strings.Join(existings, ", "))
os.Exit(1)
}
targetServers = append(targetServers, servers...)
}
serversLoop:
for _, server := range servers {
for _, set := range cfg.Servers {
for _, s := range set {
if server == s.String() || server == s.ID {
targetServers = append(targetServers, s)
continue serversLoop
}
}
}
// one-shot servers
if s := newOneShotServer(server); s != nil {
targetServers = append(targetServers, newOneShotServer(server))
} else {
exitf("wrong url format (eg: name@host:port): %s", server)
}
}
for _, s := range targetServers {
s.init()
}
return targetServers
}
func initHarp() {
if _, err := os.Stat("harp.json"); err == nil {
println("harp.json exists")
os.Exit(1)
}
file, err := os.Create("harp.json")
if err != nil {
panic(err)
}
wd, err := os.Getwd()
if err != nil {
return
}
gopath := filepath.Join(filepath.SplitList(os.Getenv("GOPATH"))[0], "src")
importpath := strings.Replace(wd, gopath+"/", "", 1)
appName := filepath.Base(importpath)
file.WriteString(fmt.Sprintf(`{
"goos": "linux",
"goarch": "amd64",
"app": {
"name": "%s",
"importpath": "%s",
"envs": {},
"DefaultExcludeds": [".git/", "tmp/", ".DS_Store", "node_modules/", "*.swp", "*.go"],
"files": [
"%s"
]
},
"servers": {
"prod": [{
"gopath": "/home/app",
"user": "app",
"host": "",
"envs": {},
"port": ":22"
}]
}
}`, appName, importpath, importpath))
}
func inspectScript(servers []*Server, name string) {
if name == "files" {
inspectFiles()
return
}
for _, s := range servers {
fmt.Println("# ====================================")
fmt.Println("#", s.String())
switch name {
case "deploy":
fmt.Println(s.retrieveDeployScript())
case "restart":
fmt.Println(s.retrieveRestartScript(retrieveAuthor()))
case "kill":
fmt.Println(s.retrieveKillScript(retrieveAuthor()))
case "rollback":
fmt.Println(s.retrieveRollbackScript())
default:
exitf("unknown script: %s\n", name)
}
}
}
func kill(servers []*Server) {
var wg sync.WaitGroup
for _, server := range servers {
wg.Add(1)
go func(s *Server) {
defer func() { wg.Done() }()
session := s.getSession()
defer session.Close()
output, err := session.CombinedOutput(s.retrieveKillScript(retrieveAuthor()))
if err != nil {
exitf("failed to exec %s: %s %s", option.script, string(output), err)
}
log.Printf("%s killed\n", s)
}(server)
}
wg.Wait()
}
var killScriptTmpl = template.Must(template.New("").Parse(`set -e
if [[ -f {{.Home}}/harp/{{.App.Name}}/app.pid ]]; then
target=$(cat {{.Home}}/harp/{{.App.Name}}/app.pid);
if ps -p $target > /dev/null; then
        kill -KILL $target > /dev/null 2>&1;
fi
{{.GetHarpComposer}}
echo "[harp] {\"datetime\": \"$(date)\", \"user\": \"$harp_composer\", \"type\": \"kill\"}" | tee -a {{.LogPath}} {{.HistoryLogPath}} >/dev/null
fi`))
func (s *Server) retrieveKillScript(who string) string {
s.initPathes()
var buf bytes.Buffer
if err := killScriptTmpl.Execute(&buf, struct {
Config
*Server
GetHarpComposer string
}{
Config: cfg,
Server: s,
GetHarpComposer: s.GetHarpComposer(who),
}); err != nil {
        exitf("%s", err)
}
if option.debug {
fmt.Println(buf.String())
}
return buf.String()
}
func restart(servers []*Server) {
var wg sync.WaitGroup
for _, server := range servers {
wg.Add(1)
go func(s *Server) {
defer func() { wg.Done() }()
session := s.getSession()
defer session.Close()
output, err := session.CombinedOutput(s.retrieveRestartScript(retrieveAuthor()))
if err != nil {
exitf("failed to exec %s: %s %s", option.script, string(output), err)
}
log.Printf("%s restarted\n", s)
}(server)
}
wg.Wait()
}
func initXC() {
goroot := strings.TrimSpace(cmd("go", "env", "GOROOT"))
cmd := exec.Command("./make.bash")
cmd.Dir = filepath.Join(goroot, "src")
cmd.Env = append(os.Environ(), "GOOS="+cfg.GOOS, "GOARCH="+cfg.GOARCH)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
exitf("failed to init cross compilation (GOOS=%s, GOARCH=%s): %s", cfg.GOOS, cfg.GOARCH, err)
}
}
func cleanCaches() {
if option.keepCache {
return
}
if err := os.RemoveAll(tmpDir); err != nil {
exitf("os.RemoveAll(%s) error: %s", tmpDir, err)
}
}
func printVersion() { fmt.Println(getVersion()) }
func getVersion() string { return fmt.Sprintf("0.6.%d", version) }
| [
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
main.py | #!/usr/local/bin/python3
import os
from flask import Flask, request
from flask import jsonify
import sys
app = Flask(__name__)
@app.route("/")
def echo ():
return "echo ok\n"
@app.route('/ping', methods = ['POST', 'GET'])
def pong():
if request.method == 'POST':
        print(request.is_json)
        content = request.get_json()
        print(content, file=sys.stderr)
        return "POST ok"
else:
return "GET ok"
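# Example invocation (illustrative; any container or shell setup works the same way):
#   FLASK_ENV=development python3 main.py
# Debug mode is enabled only when FLASK_ENV is set to exactly "development".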
if __name__ == '__main__':
    app.run(debug=os.environ.get('FLASK_ENV') == 'development', host='0.0.0.0', port=8080)
| []
| []
| [
"FLASK_ENV"
]
| [] | ["FLASK_ENV"] | python | 1 | 0 | |
pkg/kube_config_manager/kube_config_manager.go | package kube_config_manager
import (
"context"
"encoding/json"
"fmt"
"os"
"time"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
corev1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"github.com/flant/shell-operator/pkg/kube"
"github.com/flant/addon-operator/pkg/utils"
)
type KubeConfigManager interface {
WithContext(ctx context.Context)
WithKubeClient(client kube.KubernetesClient)
WithNamespace(namespace string)
WithConfigMapName(configMap string)
WithValuesChecksumsAnnotation(annotation string)
SetKubeGlobalValues(values utils.Values) error
SetKubeModuleValues(moduleName string, values utils.Values) error
Init() error
Start()
Stop()
InitialConfig() *Config
}
type kubeConfigManager struct {
ctx context.Context
cancel context.CancelFunc
KubeClient kube.KubernetesClient
Namespace string
ConfigMapName string
ValuesChecksumsAnnotation string
initialConfig *Config
GlobalValuesChecksum string
ModulesValuesChecksum map[string]string
}
// kubeConfigManager should implement KubeConfigManager
var _ KubeConfigManager = &kubeConfigManager{}
type ModuleConfigs map[string]utils.ModuleConfig
type Config struct {
Values utils.Values
ModuleConfigs ModuleConfigs
}
func NewConfig() *Config {
return &Config{
Values: make(utils.Values),
ModuleConfigs: make(map[string]utils.ModuleConfig),
}
}
var (
VerboseDebug bool
// ConfigUpdated chan receives a new Config when global values are changed
ConfigUpdated chan Config
// ModuleConfigsUpdated chan receives a list of all ModuleConfig in configData. Updated items marked as IsUpdated.
ModuleConfigsUpdated chan ModuleConfigs
)
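// Illustration only (the consumer side lives outside this package and the
// names below are assumptions): a caller typically drains both channels in a
// select loop, e.g.
//
//	for {
//		select {
//		case newConfig := <-kube_config_manager.ConfigUpdated:
//			// reload global values from newConfig.Values
//		case moduleConfigs := <-kube_config_manager.ModuleConfigsUpdated:
//			// re-run only the modules whose ModuleConfig.IsUpdated is true
//		}
//	}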
func simpleMergeConfigMapData(data map[string]string, newData map[string]string) map[string]string {
for k, v := range newData {
data[k] = v
}
return data
}
func (kcm *kubeConfigManager) WithContext(ctx context.Context) {
kcm.ctx, kcm.cancel = context.WithCancel(ctx)
}
func (kcm *kubeConfigManager) Stop() {
if kcm.cancel != nil {
kcm.cancel()
}
}
func (kcm *kubeConfigManager) WithKubeClient(client kube.KubernetesClient) {
kcm.KubeClient = client
}
func (kcm *kubeConfigManager) saveGlobalKubeConfig(globalKubeConfig GlobalKubeConfig) error {
return kcm.changeOrCreateKubeConfig(func(obj *v1.ConfigMap) error {
checksums, err := kcm.getValuesChecksums(obj)
if err != nil {
return err
}
checksums[utils.GlobalValuesKey] = globalKubeConfig.Checksum
err = kcm.setValuesChecksums(obj, checksums)
if err != nil {
return fmt.Errorf("update global values checksum in annotation: %s", err)
}
obj.Data = simpleMergeConfigMapData(obj.Data, globalKubeConfig.ConfigData)
return nil
})
}
func (kcm *kubeConfigManager) saveModuleKubeConfig(moduleKubeConfig ModuleKubeConfig) error {
return kcm.changeOrCreateKubeConfig(func(obj *v1.ConfigMap) error {
checksums, err := kcm.getValuesChecksums(obj)
if err != nil {
return err
}
checksums[moduleKubeConfig.ModuleName] = moduleKubeConfig.Checksum
err = kcm.setValuesChecksums(obj, checksums)
if err != nil {
return fmt.Errorf("update module '%s' values checksum in annotation: %s", moduleKubeConfig.ModuleName, err)
}
obj.Data = simpleMergeConfigMapData(obj.Data, moduleKubeConfig.ConfigData)
return nil
})
}
func (kcm *kubeConfigManager) changeOrCreateKubeConfig(configChangeFunc func(*v1.ConfigMap) error) error {
var err error
obj, err := kcm.getConfigMap()
if err != nil {
        return err
}
if obj != nil {
if obj.Data == nil {
obj.Data = make(map[string]string)
}
err = configChangeFunc(obj)
if err != nil {
return err
}
_, err := kcm.KubeClient.CoreV1().ConfigMaps(kcm.Namespace).Update(obj)
if err != nil {
return err
}
return nil
} else {
obj := &v1.ConfigMap{}
obj.Name = kcm.ConfigMapName
obj.Data = make(map[string]string)
err = configChangeFunc(obj)
if err != nil {
return err
}
_, err := kcm.KubeClient.CoreV1().ConfigMaps(kcm.Namespace).Create(obj)
if err != nil {
return err
}
return nil
}
}
func (kcm *kubeConfigManager) WithNamespace(namespace string) {
kcm.Namespace = namespace
}
func (kcm *kubeConfigManager) WithConfigMapName(configMap string) {
kcm.ConfigMapName = configMap
}
func (kcm *kubeConfigManager) WithValuesChecksumsAnnotation(annotation string) {
kcm.ValuesChecksumsAnnotation = annotation
}
func (kcm *kubeConfigManager) SetKubeGlobalValues(values utils.Values) error {
globalKubeConfig, err := GetGlobalKubeConfigFromValues(values)
if err != nil {
return err
}
if globalKubeConfig != nil {
log.Debugf("Kube config manager: set kube global values:\n%s", values.DebugString())
err := kcm.saveGlobalKubeConfig(*globalKubeConfig)
if err != nil {
return err
}
}
return nil
}
func (kcm *kubeConfigManager) SetKubeModuleValues(moduleName string, values utils.Values) error {
moduleKubeConfig, err := GetModuleKubeConfigFromValues(moduleName, values)
if err != nil {
return err
}
if moduleKubeConfig != nil {
log.Debugf("Kube config manager: set kube module values:\n%s", moduleKubeConfig.ModuleConfig.String())
err := kcm.saveModuleKubeConfig(*moduleKubeConfig)
if err != nil {
return err
}
}
return nil
}
func (kcm *kubeConfigManager) getConfigMap() (*v1.ConfigMap, error) {
list, err := kcm.KubeClient.CoreV1().
ConfigMaps(kcm.Namespace).
List(metav1.ListOptions{})
if err != nil {
return nil, err
}
objExists := false
for _, obj := range list.Items {
if obj.ObjectMeta.Name == kcm.ConfigMapName {
objExists = true
break
}
}
if objExists {
obj, err := kcm.KubeClient.CoreV1().
ConfigMaps(kcm.Namespace).
Get(kcm.ConfigMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
log.Debugf("KUBE_CONFIG_MANAGER: Will use ConfigMap/%s for persistent values", kcm.ConfigMapName)
return obj, nil
} else {
log.Debugf("KUBE_CONFIG_MANAGER: ConfigMap/%s is not created", kcm.ConfigMapName)
return nil, nil
}
}
func (kcm *kubeConfigManager) InitialConfig() *Config {
return kcm.initialConfig
}
func NewKubeConfigManager() KubeConfigManager {
kcm := &kubeConfigManager{}
kcm.initialConfig = NewConfig()
return kcm
}
func (kcm *kubeConfigManager) initConfig() error {
obj, err := kcm.getConfigMap()
if err != nil {
return err
}
if obj == nil {
log.Infof("Init config from ConfigMap: cm/%s is not found", kcm.ConfigMapName)
return nil
}
initialConfig := NewConfig()
globalValuesChecksum := ""
modulesValuesChecksum := make(map[string]string)
globalKubeConfig, err := GetGlobalKubeConfigFromConfigData(obj.Data)
if err != nil {
return err
}
if globalKubeConfig != nil {
initialConfig.Values = globalKubeConfig.Values
globalValuesChecksum = globalKubeConfig.Checksum
}
for moduleName := range GetModulesNamesFromConfigData(obj.Data) {
// all GetModulesNamesFromConfigData must exist
moduleKubeConfig, err := ExtractModuleKubeConfig(moduleName, obj.Data)
if err != nil {
return err
}
initialConfig.ModuleConfigs[moduleKubeConfig.ModuleName] = moduleKubeConfig.ModuleConfig
modulesValuesChecksum[moduleKubeConfig.ModuleName] = moduleKubeConfig.Checksum
}
kcm.initialConfig = initialConfig
kcm.GlobalValuesChecksum = globalValuesChecksum
kcm.ModulesValuesChecksum = modulesValuesChecksum
return nil
}
func (kcm *kubeConfigManager) Init() error {
log.Debug("INIT: KUBE_CONFIG")
VerboseDebug = false
if os.Getenv("KUBE_CONFIG_MANAGER_DEBUG") != "" {
VerboseDebug = true
}
ConfigUpdated = make(chan Config, 1)
ModuleConfigsUpdated = make(chan ModuleConfigs, 1)
err := kcm.initConfig()
if err != nil {
return err
}
return nil
}
func (kcm *kubeConfigManager) getValuesChecksums(cm *v1.ConfigMap) (map[string]string, error) {
data, hasKey := cm.Annotations[kcm.ValuesChecksumsAnnotation]
if !hasKey {
return make(map[string]string), nil
}
var res map[string]string
err := json.Unmarshal([]byte(data), &res)
if err != nil {
return nil, fmt.Errorf("KUBE_CONFIG: cannot unmarshal json annotation '%s' in ConfigMap '%s': %s\n%s", kcm.ValuesChecksumsAnnotation, cm.Name, err, data)
}
return res, nil
}
func (kcm *kubeConfigManager) setValuesChecksums(cm *v1.ConfigMap, checksums map[string]string) error {
data, err := json.Marshal(checksums)
if err != nil {
// this should not happen
return err
}
if cm.Annotations == nil {
cm.Annotations = make(map[string]string)
}
cm.Annotations[kcm.ValuesChecksumsAnnotation] = string(data)
return nil
}
// handleNewCm determines changes in the kube config.
//
// A new Config is sent over the ConfigUpdated channel if the global section is changed.
//
// An array of the actual ModuleConfig values is sent over the ModuleConfigsUpdated
// channel if module sections are changed or deleted.
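//
// A section only counts as changed when its new checksum differs both from the
// checksum stored in the ConfigMap annotation and from the last checksum kept
// in memory, so updates written by this manager itself are not replayed.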
func (kcm *kubeConfigManager) handleNewCm(obj *v1.ConfigMap) error {
savedChecksums, err := kcm.getValuesChecksums(obj)
if err != nil {
return err
}
globalKubeConfig, err := GetGlobalKubeConfigFromConfigData(obj.Data)
if err != nil {
return err
}
// if global values are changed or deleted then new config should be sent over ConfigUpdated channel
isGlobalUpdated := globalKubeConfig != nil &&
globalKubeConfig.Checksum != savedChecksums[utils.GlobalValuesKey] &&
globalKubeConfig.Checksum != kcm.GlobalValuesChecksum
isGlobalDeleted := globalKubeConfig == nil && kcm.GlobalValuesChecksum != ""
if isGlobalUpdated || isGlobalDeleted {
log.Infof("Kube config manager: detect changes in global section")
newConfig := NewConfig()
// calculate new checksum of a global section
newGlobalValuesChecksum := ""
if globalKubeConfig != nil {
newConfig.Values = globalKubeConfig.Values
newGlobalValuesChecksum = globalKubeConfig.Checksum
}
kcm.GlobalValuesChecksum = newGlobalValuesChecksum
// calculate new checksums of a module sections
newModulesValuesChecksum := make(map[string]string)
for moduleName := range GetModulesNamesFromConfigData(obj.Data) {
// all GetModulesNamesFromConfigData must exist
moduleKubeConfig, err := ExtractModuleKubeConfig(moduleName, obj.Data)
if err != nil {
return err
}
newConfig.ModuleConfigs[moduleKubeConfig.ModuleName] = moduleKubeConfig.ModuleConfig
newModulesValuesChecksum[moduleKubeConfig.ModuleName] = moduleKubeConfig.Checksum
}
kcm.ModulesValuesChecksum = newModulesValuesChecksum
log.Debugf("Kube config manager: global section new values:\n%s",
newConfig.Values.DebugString())
for _, moduleConfig := range newConfig.ModuleConfigs {
log.Debugf("%s", moduleConfig.String())
}
ConfigUpdated <- *newConfig
} else {
actualModulesNames := GetModulesNamesFromConfigData(obj.Data)
moduleConfigsActual := make(ModuleConfigs)
updatedCount := 0
removedCount := 0
// create ModuleConfig for each module in configData
// IsUpdated flag set for updated configs
for moduleName := range actualModulesNames {
// all GetModulesNamesFromConfigData must exist
moduleKubeConfig, err := ExtractModuleKubeConfig(moduleName, obj.Data)
if err != nil {
return err
}
if moduleKubeConfig.Checksum != savedChecksums[moduleName] && moduleKubeConfig.Checksum != kcm.ModulesValuesChecksum[moduleName] {
kcm.ModulesValuesChecksum[moduleName] = moduleKubeConfig.Checksum
moduleKubeConfig.ModuleConfig.IsUpdated = true
updatedCount++
} else {
moduleKubeConfig.ModuleConfig.IsUpdated = false
}
moduleConfigsActual[moduleName] = moduleKubeConfig.ModuleConfig
}
// delete checksums for removed module sections
for module := range kcm.ModulesValuesChecksum {
if _, isActual := actualModulesNames[module]; isActual {
continue
}
delete(kcm.ModulesValuesChecksum, module)
removedCount++
}
if updatedCount > 0 || removedCount > 0 {
log.Infof("KUBE_CONFIG Detect module sections changes: %d updated, %d removed", updatedCount, removedCount)
for _, moduleConfig := range moduleConfigsActual {
log.Debugf("%s", moduleConfig.String())
}
ModuleConfigsUpdated <- moduleConfigsActual
}
}
return nil
}
func (kcm *kubeConfigManager) handleCmAdd(obj *v1.ConfigMap) error {
if VerboseDebug {
objYaml, err := yaml.Marshal(obj)
if err != nil {
return err
}
log.Debugf("Kube config manager: informer: handle ConfigMap '%s' add:\n%s", obj.Name, objYaml)
}
return kcm.handleNewCm(obj)
}
func (kcm *kubeConfigManager) handleCmUpdate(_ *v1.ConfigMap, obj *v1.ConfigMap) error {
if VerboseDebug {
objYaml, err := yaml.Marshal(obj)
if err != nil {
return err
}
log.Debugf("Kube config manager: informer: handle ConfigMap '%s' update:\n%s", obj.Name, objYaml)
}
return kcm.handleNewCm(obj)
}
func (kcm *kubeConfigManager) handleCmDelete(obj *v1.ConfigMap) error {
if VerboseDebug {
objYaml, err := yaml.Marshal(obj)
if err != nil {
return err
}
log.Debugf("Kube config manager: handle ConfigMap '%s' delete:\n%s", obj.Name, objYaml)
}
if kcm.GlobalValuesChecksum != "" {
kcm.GlobalValuesChecksum = ""
kcm.ModulesValuesChecksum = make(map[string]string)
ConfigUpdated <- Config{
Values: make(utils.Values),
ModuleConfigs: make(map[string]utils.ModuleConfig),
}
} else {
// Global values is already known to be empty.
// So check each module values change separately,
// and generate signals per-module.
// Note: Only ModuleName field is needed in ModuleConfig.
moduleConfigsUpdate := make(ModuleConfigs)
updateModulesNames := make([]string, 0)
for module := range kcm.ModulesValuesChecksum {
updateModulesNames = append(updateModulesNames, module)
}
for _, module := range updateModulesNames {
delete(kcm.ModulesValuesChecksum, module)
moduleConfigsUpdate[module] = utils.ModuleConfig{
ModuleName: module,
Values: make(utils.Values),
}
}
ModuleConfigsUpdated <- moduleConfigsUpdate
}
return nil
}
func (kcm *kubeConfigManager) Start() {
log.Debugf("Run kube config manager")
// define resyncPeriod for informer
resyncPeriod := time.Duration(5) * time.Minute
// define indexers for informer
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
// define tweakListOptions for informer
tweakListOptions := func(options *metav1.ListOptions) {
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", kcm.ConfigMapName).String()
}
cmInformer := corev1.NewFilteredConfigMapInformer(kcm.KubeClient, kcm.Namespace, resyncPeriod, indexers, tweakListOptions)
cmInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
err := kcm.handleCmAdd(obj.(*v1.ConfigMap))
if err != nil {
log.Errorf("Kube config manager: cannot handle ConfigMap add: %s", err)
}
},
UpdateFunc: func(prevObj interface{}, obj interface{}) {
err := kcm.handleCmUpdate(prevObj.(*v1.ConfigMap), obj.(*v1.ConfigMap))
if err != nil {
log.Errorf("Kube config manager: cannot handle ConfigMap update: %s", err)
}
},
DeleteFunc: func(obj interface{}) {
err := kcm.handleCmDelete(obj.(*v1.ConfigMap))
if err != nil {
log.Errorf("Kube config manager: cannot handle ConfigMap delete: %s", err)
}
},
})
cmInformer.Run(kcm.ctx.Done())
}
| [
"\"KUBE_CONFIG_MANAGER_DEBUG\""
]
| []
| [
"KUBE_CONFIG_MANAGER_DEBUG"
]
| [] | ["KUBE_CONFIG_MANAGER_DEBUG"] | go | 1 | 0 | |
tests/mobility2/run.py | #!/usr/bin/env python3
import os
import sys
import glob
import copy
import json
sys.path.append('../../')
import software
import network
import topology
import mobility
from shared import Remote
import tools
remotes = [Remote()]
tools.check_access(remotes)
software.clear(remotes)
network.clear(remotes)
prefix = os.environ.get('PREFIX', '')
# 100MBit LAN cable
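# (token bucket filter: 100 mbit/s rate, 8 KiB burst, at most 1 ms of queueing latency)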
def get_tc_command(link, ifname):
return f'tc qdisc replace dev "{ifname}" root tbf rate 100mbit burst 8192 latency 1ms'
def run(protocol, csvfile):
tools.seed_random(23)
node_count = 50
state = topology.create_nodes(node_count)
mobility.randomize_positions(state, xy_range=1000)
mobility.connect_range(state, max_links=150)
# create network and start routing software
network.apply(state=state, link_command=get_tc_command, remotes=remotes)
software.start(protocol)
tools.sleep(30)
for step_distance in [50, 100, 150, 200, 250, 300, 350, 400]:
print(f'{protocol}: step_distance {step_distance}')
traffic_beg = tools.traffic(remotes)
for n in range(0, 6):
#with open(f'graph-{step_distance}-{n}.json', 'w+') as file:
# json.dump(state, file, indent=' ')
# connect nodes range
wait_beg_ms = tools.millis()
# update network representation
mobility.move_random(state, distance=step_distance)
mobility.connect_range(state, max_links=150)
# update network
network.apply(state=state, link_command=get_tc_command, remotes=remotes)
# Wait until wait seconds are over, else error
tools.wait(wait_beg_ms, 15)
paths = tools.get_random_paths(state, 2 * 400)
paths = tools.filter_paths(state, paths, min_hops=2, path_count=200)
ping_result = tools.ping_paths(remotes=remotes, paths=paths, duration_ms=2000, verbosity='verbose')
packets_arrived_pc = 100 * (ping_result.received / ping_result.send)
traffic_end = tools.traffic(remotes)
# add data to csv file
extra = (['node_count', 'time_ms', 'step_distance_m', 'n', 'packets_arrived_pc'], [node_count, tools.millis() - wait_beg_ms, step_distance, n, packets_arrived_pc])
tools.csv_update(csvfile, '\t', extra, (traffic_end - traffic_beg).getData(), ping_result.getData())
traffic_beg = traffic_end
software.clear(remotes)
network.clear(remotes)
# remove none, after it has been verified to be 0% (also for mobility1)
for protocol in ['babel', 'batman-adv', 'bmx6', 'bmx7', 'cjdns', 'olsr1', 'olsr2', 'ospf', 'yggdrasil']:
with open(f"{prefix}mobility2-{protocol}.csv", 'w+') as csvfile:
run(protocol, csvfile)
tools.stop_all_terminals()
| []
| []
| [
"PREFIX"
]
| [] | ["PREFIX"] | python | 1 | 0 | |
setup.py | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
import sys
import os
import platform
import shutil
# We need to import setuptools before because it monkey-patches distutils
import setuptools # noqa
from distutils.command.clean import clean as Clean
from distutils.command.sdist import sdist
import traceback
import importlib
try:
import builtins
except ImportError:
# Python 2 compat: just to be able to declare that Python >=3.7 is needed.
import __builtin__ as builtins
# This is a bit (!) hackish: we are setting a global variable so that the
# main sklearn __init__ can detect if it is being loaded by the setup
# routine, to avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to
# recursively build the compiled extensions in sub-packages is based on the
# Python import machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = "scikit-learn"
DESCRIPTION = "A set of python modules for machine learning and data mining"
with open("README.rst") as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = "Andreas Mueller"
MAINTAINER_EMAIL = "[email protected]"
URL = "http://scikit-learn.org"
DOWNLOAD_URL = "https://pypi.org/project/scikit-learn/#files"
LICENSE = "new BSD"
PROJECT_URLS = {
"Bug Tracker": "https://github.com/scikit-learn/scikit-learn/issues",
"Documentation": "https://scikit-learn.org/stable/documentation.html",
"Source Code": "https://github.com/scikit-learn/scikit-learn",
}
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn # noqa
import sklearn._min_dependencies as min_deps # noqa
from sklearn.externals._packaging.version import parse as parse_version # noqa
VERSION = sklearn.__version__
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = {
"develop",
"release",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"install_egg_info",
"build_sphinx",
"egg_info",
"easy_install",
"upload",
"bdist_wheel",
"--single-version-externally-managed",
}
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
key: min_deps.tag_to_packages[key]
for key in ["examples", "docs", "tests", "benchmark"]
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, "PKG-INFO"))
if remove_c_files:
print("Will remove generated .c files")
if os.path.exists("build"):
shutil.rmtree("build")
for dirpath, dirnames, filenames in os.walk("sklearn"):
for filename in filenames:
if any(
filename.endswith(suffix)
for suffix in (".so", ".pyd", ".dll", ".pyc")
):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in [".c", ".cpp"]:
pyx_file = str.replace(filename, extension, ".pyx")
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == "__pycache__":
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {"clean": CleanCommand, "sdist": sdist}
# Custom build_ext command to set OpenMP compile flags depending on os and
# compiler. Also makes it possible to set the parallelism level via
# an environment variable (useful for the wheel building CI).
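# Example (values are illustrative):
#   SKLEARN_BUILD_PARALLEL=8 python setup.py build_ext --inplace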
# build_ext has to be imported after setuptools
try:
from numpy.distutils.command.build_ext import build_ext # noqa
class build_ext_subclass(build_ext):
def finalize_options(self):
super().finalize_options()
if self.parallel is None:
# Do not override self.parallel if already defined by
# command-line flag (--parallel or -j)
parallel = os.environ.get("SKLEARN_BUILD_PARALLEL")
if parallel:
self.parallel = int(parallel)
if self.parallel:
print("setting parallel=%d " % self.parallel)
def build_extensions(self):
from sklearn._build_utils.openmp_helpers import get_openmp_flag
if sklearn._OPENMP_SUPPORTED:
openmp_flag = get_openmp_flag(self.compiler)
for e in self.extensions:
e.extra_compile_args += openmp_flag
e.extra_link_args += openmp_flag
build_ext.build_extensions(self)
cmdclass["build_ext"] = build_ext_subclass
except ImportError:
# Numpy should not be a dependency just to be able to introspect
# that python 3.7 is required.
pass
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = {"fetch_artifacts", "upload_all"}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package="", top_path=None):
if os.path.exists("MANIFEST"):
os.remove("MANIFEST")
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import _check_cython_version
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True,
)
# Cython is required by config.add_subpackage for templated extensions
# that need the tempita sub-submodule. So check that we have the correct
# version of Cython so as to be able to raise a more informative error
# message from the start if it's not the case.
_check_cython_version()
config.add_subpackage("sklearn")
return config
def check_package_status(package, min_version):
"""
Returns a dictionary containing a boolean specifying whether given package
is up-to-date, along with the version string (empty string if
not installed).
"""
package_status = {}
try:
module = importlib.import_module(package)
package_version = module.__version__
package_status["up_to_date"] = parse_version(package_version) >= parse_version(
min_version
)
package_status["version"] = package_version
except ImportError:
traceback.print_exc()
package_status["up_to_date"] = False
package_status["version"] = ""
req_str = "scikit-learn requires {} >= {}.\n".format(package, min_version)
instructions = (
"Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n"
)
if package_status["up_to_date"] is False:
if package_status["version"]:
raise ImportError(
"Your installation of {} "
"{} is out-of-date.\n{}{}".format(
package, package_status["version"], req_str, instructions
)
)
else:
raise ImportError(
"{} is not " "installed.\n{}{}".format(package, req_str, instructions)
)
def setup_package():
metadata = dict(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
version=VERSION,
long_description=LONG_DESCRIPTION,
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Development Status :: 5 - Production/Stable",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
("Programming Language :: Python :: " "Implementation :: CPython"),
("Programming Language :: Python :: " "Implementation :: PyPy"),
],
cmdclass=cmdclass,
python_requires=">=3.7",
install_requires=min_deps.tag_to_packages["install"],
package_data={"": ["*.pxd"]},
**extra_setuptools_args,
)
commands = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
if all(
command in ("egg_info", "dist_info", "clean", "check") for command in commands
):
# These actions are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
# These commands use setup from setuptools
from setuptools import setup
metadata["version"] = VERSION
else:
        if sys.version_info < (3, 7):
raise RuntimeError(
"Scikit-learn requires Python 3.7 or later. The current"
" Python version is %s installed in %s."
% (platform.python_version(), sys.executable)
)
check_package_status("numpy", min_deps.NUMPY_MIN_VERSION)
check_package_status("scipy", min_deps.SCIPY_MIN_VERSION)
# These commands require the setup from numpy.distutils because they
# may use numpy.distutils compiler classes.
from numpy.distutils.core import setup
metadata["configuration"] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| []
| []
| [
"SKLEARN_BUILD_PARALLEL"
]
| [] | ["SKLEARN_BUILD_PARALLEL"] | python | 1 | 0 | |
src/products/src/products-service/repository.go | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT-0
package main
import (
"fmt"
"log"
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/aws/aws-sdk-go/service/dynamodb/expression"
guuuid "github.com/google/uuid"
)
// Root/base URL to use when building fully-qualified URLs to product detail view.
var webRootURL = os.Getenv("WEB_ROOT_URL")
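// e.g. with WEB_ROOT_URL=https://shop.example.com (hostname is illustrative),
// products resolve to https://shop.example.com/#/product/<id> and categories
// to https://shop.example.com/#/category/<name>.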
func setProductURL(p *Product) {
if len(webRootURL) > 0 {
p.URL = webRootURL + "/#/product/" + p.ID
}
}
func setCategoryURL(c *Category) {
if len(webRootURL) > 0 && len(c.Name) > 0 {
c.URL = webRootURL + "/#/category/" + c.Name
}
}
// RepoFindProduct Function
func RepoFindProduct(id string) Product {
var product Product
id = strings.ToLower(id)
log.Println("RepoFindProduct: ", id, ddbTableProducts)
keycond := expression.Key("id").Equal(expression.Value(id))
    expr, err := expression.NewBuilder().WithKeyCondition(keycond).Build()
    if err != nil {
        log.Println("Got error building expression:")
        log.Println(err.Error())
    }
// Build the query input parameters
params := &dynamodb.QueryInput{
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
KeyConditionExpression: expr.KeyCondition(),
ProjectionExpression: expr.Projection(),
TableName: aws.String(ddbTableProducts),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Query(params)
if err != nil {
log.Println("get item error " + string(err.Error()))
return product
}
if len(result.Items) > 0 {
err = dynamodbattribute.UnmarshalMap(result.Items[0], &product)
if err != nil {
panic(fmt.Sprintf("Failed to unmarshal Record, %v", err))
}
setProductURL(&product)
log.Println("RepoFindProduct returning: ", product.Name, product.Category)
}
return product
}
// RepoFindCategory Function
func RepoFindCategory(id string) Category {
var category Category
id = strings.ToLower(id)
log.Println("RepoFindCategory: ", id, ddbTableCategories)
keycond := expression.Key("id").Equal(expression.Value(id))
    expr, err := expression.NewBuilder().WithKeyCondition(keycond).Build()
    if err != nil {
        log.Println("Got error building expression:")
        log.Println(err.Error())
    }
// Build the query input parameters
params := &dynamodb.QueryInput{
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
KeyConditionExpression: expr.KeyCondition(),
ProjectionExpression: expr.Projection(),
TableName: aws.String(ddbTableCategories),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Query(params)
if err != nil {
log.Println("get item error " + string(err.Error()))
return category
}
if len(result.Items) > 0 {
err = dynamodbattribute.UnmarshalMap(result.Items[0], &category)
if err != nil {
panic(fmt.Sprintf("Failed to unmarshal Record, %v", err))
}
setCategoryURL(&category)
log.Println("RepoFindCategory returning: ", category.Name)
}
return category
}
// RepoFindCategoriesByName Function
func RepoFindCategoriesByName(name string) Categories {
var categories Categories
log.Println("RepoFindCategoriesByName: ", name, ddbTableCategories)
keycond := expression.Key("name").Equal(expression.Value(name))
proj := expression.NamesList(expression.Name("id"),
expression.Name("name"),
expression.Name("image"))
expr, err := expression.NewBuilder().WithKeyCondition(keycond).WithProjection(proj).Build()
if err != nil {
log.Println("Got error building expression:")
log.Println(err.Error())
}
// Build the query input parameters
params := &dynamodb.QueryInput{
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
KeyConditionExpression: expr.KeyCondition(),
ProjectionExpression: expr.Projection(),
TableName: aws.String(ddbTableCategories),
IndexName: aws.String("name-index"),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Query(params)
if err != nil {
log.Println("Got error QUERY expression:")
log.Println(err.Error())
}
log.Println("RepoFindCategoriesByName / items found = ", len(result.Items))
for _, i := range result.Items {
item := Category{}
err = dynamodbattribute.UnmarshalMap(i, &item)
if err != nil {
log.Println("Got error unmarshalling:")
log.Println(err.Error())
} else {
setCategoryURL(&item)
}
categories = append(categories, item)
}
if len(result.Items) == 0 {
categories = make([]Category, 0)
}
return categories
}
// RepoFindProductByCategory Function
func RepoFindProductByCategory(category string) Products {
log.Println("RepoFindProductByCategory: ", category)
var f Products
keycond := expression.Key("category").Equal(expression.Value(category))
proj := expression.NamesList(expression.Name("id"),
expression.Name("category"),
expression.Name("name"),
expression.Name("image"),
expression.Name("style"),
expression.Name("description"),
expression.Name("price"),
expression.Name("brewery"),
expression.Name("abv"),
expression.Name("ibu"))
expr, err := expression.NewBuilder().WithKeyCondition(keycond).WithProjection(proj).Build()
if err != nil {
log.Println("Got error building expression:")
log.Println(err.Error())
}
// Build the query input parameters
params := &dynamodb.QueryInput{
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
KeyConditionExpression: expr.KeyCondition(),
ProjectionExpression: expr.Projection(),
TableName: aws.String(ddbTableProducts),
IndexName: aws.String("category-index"),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Query(params)
if err != nil {
log.Println("Got error QUERY expression:")
log.Println(err.Error())
}
log.Println("RepoFindProductByCategory / items found = ", len(result.Items))
for _, i := range result.Items {
item := Product{}
err = dynamodbattribute.UnmarshalMap(i, &item)
if err != nil {
log.Println("Got error unmarshalling:")
log.Println(err.Error())
} else {
setProductURL(&item)
}
f = append(f, item)
}
if len(result.Items) == 0 {
f = make([]Product, 0)
}
return f
}
// RepoFindFeatured Function
func RepoFindFeatured() Products {
log.Println("RepoFindFeatured | featured=true")
var f Products
filt := expression.Name("featured").Equal(expression.Value("true"))
expr, err := expression.NewBuilder().WithFilter(filt).Build()
if err != nil {
log.Println("Got error building expression:")
log.Println(err.Error())
}
// Build the query input
// using index for performance (few items are featured)
params := &dynamodb.ScanInput{
ExpressionAttributeNames: expr.Names(),
ExpressionAttributeValues: expr.Values(),
FilterExpression: expr.Filter(),
ProjectionExpression: expr.Projection(),
TableName: aws.String(ddbTableProducts),
IndexName: aws.String("id-featured-index"),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Scan(params)
if err != nil {
log.Println("Got error scan expression:")
log.Println(err.Error())
}
log.Println("RepoFindProductFeatured / items found = ", len(result.Items))
for _, i := range result.Items {
item := Product{}
err = dynamodbattribute.UnmarshalMap(i, &item)
if err != nil {
log.Println("Got error unmarshalling:")
log.Println(err.Error())
} else {
setProductURL(&item)
}
f = append(f, item)
}
if len(result.Items) == 0 {
f = make([]Product, 0)
}
return f
}
// RepoFindALLCategories - loads all categories
func RepoFindALLCategories() Categories {
// TODO: implement some caching
log.Println("RepoFindALLCategories: ")
var f Categories
// Build the query input parameters
params := &dynamodb.ScanInput{
TableName: aws.String(ddbTableCategories),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Scan(params)
if err != nil {
log.Println("Got error scan expression:")
log.Println(err.Error())
}
log.Println("RepoFindALLCategories / items found = ", len(result.Items))
for _, i := range result.Items {
item := Category{}
err = dynamodbattribute.UnmarshalMap(i, &item)
if err != nil {
log.Println("Got error unmarshalling:")
log.Println(err.Error())
} else {
setCategoryURL(&item)
}
f = append(f, item)
}
if len(result.Items) == 0 {
f = make([]Category, 0)
}
return f
}
// RepoFindALLProducts Function
func RepoFindALLProducts() Products {
log.Println("RepoFindALLProducts")
var f Products
// Build the query input parameters
params := &dynamodb.ScanInput{
TableName: aws.String(ddbTableProducts),
}
// Make the DynamoDB Query API call
result, err := dynamoClient.Scan(params)
if err != nil {
log.Println("Got error scan expression:")
log.Println(err.Error())
}
log.Println("RepoFindALLProducts / items found = ", len(result.Items))
for _, i := range result.Items {
item := Product{}
err = dynamodbattribute.UnmarshalMap(i, &item)
if err != nil {
log.Println("Got error unmarshalling:")
log.Println(err.Error())
} else {
setProductURL(&item)
}
f = append(f, item)
}
if len(result.Items) == 0 {
f = make([]Product, 0)
}
return f
}
// RepoUpdateProduct - updates an existing product
func RepoUpdateProduct(existingProduct *Product, updatedProduct *Product) error {
updatedProduct.ID = existingProduct.ID // Ensure we're not changing product ID.
updatedProduct.URL = "" // URL is generated so ignore if specified
log.Printf("UpdateProduct from %#v to %#v", existingProduct, updatedProduct)
av, err := dynamodbattribute.MarshalMap(updatedProduct)
if err != nil {
fmt.Println("Got error calling dynamodbattribute MarshalMap:")
fmt.Println(err.Error())
return err
}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(ddbTableProducts),
}
_, err = dynamoClient.PutItem(input)
if err != nil {
fmt.Println("Got error calling PutItem:")
fmt.Println(err.Error())
}
setProductURL(updatedProduct)
return err
}
// RepoNewProduct - initializes and persists new product
func RepoNewProduct(product *Product) error {
log.Printf("RepoNewProduct --> %#v", product)
product.ID = strings.ToLower(guuuid.New().String())
av, err := dynamodbattribute.MarshalMap(product)
if err != nil {
fmt.Println("Got error calling dynamodbattribute MarshalMap:")
fmt.Println(err.Error())
return err
}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(ddbTableProducts),
}
_, err = dynamoClient.PutItem(input)
if err != nil {
fmt.Println("Got error calling PutItem:")
fmt.Println(err.Error())
}
setProductURL(product)
return err
}
// RepoDeleteProduct - deletes a single product
func RepoDeleteProduct(product *Product) error {
log.Println("Deleting product: ", product)
input := &dynamodb.DeleteItemInput{
Key: map[string]*dynamodb.AttributeValue{
"id": {
S: aws.String(product.ID),
},
"category": {
S: aws.String(product.Category),
},
},
TableName: aws.String(ddbTableProducts),
}
_, err := dynamoClient.DeleteItem(input)
if err != nil {
fmt.Println("Got error calling DeleteItem:")
fmt.Println(err.Error())
}
return err
}
| [
"\"WEB_ROOT_URL\""
]
| []
| [
"WEB_ROOT_URL"
]
| [] | ["WEB_ROOT_URL"] | go | 1 | 0 | |
selfdrive/test/process_replay/process_replay.py | #!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
PROC_REPLAY_DIR = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(PROC_REPLAY_DIR, "fakedata/")
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster', 'submaster_config'], defaults=({},))
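# pub_sub maps each replayed (subscribed) service name to the list of services
# the tested process is expected to publish in response; see the *_rcv_callback
# helpers below for how that mapping is filtered per frame.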
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
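  # Lockstep stand-in for a messaging socket (when wait=True): receive() blocks
  # until data is queued, and send() waits for the tested process to call
  # receive() first, so replayed messages are handed over one at a time.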
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services, ignore_alive=None, ignore_avg_freq=None):
super().__init__(services, ignore_alive=ignore_alive, ignore_avg_freq=ignore_avg_freq, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super().update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
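# Each ProcessConfig describes how one daemon is replayed: the sockets it
# consumes and publishes (pub_sub), message fields ignored when comparing
# output, and optional callbacks for initialization and for deciding which
# responses to expect per input message.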
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaStates": [], "peripheralState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "managerState": [], "testJoystick": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
submaster_config={'ignore_avg_freq': ['radarState', 'longitudinalPlan']}
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan", "longitudinalPlan"],
"carState": [], "controlsState": [], "radarState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime", "lateralPlan.solverExecutionTime"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
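# Configs with fake_pubsubmaster run the daemon in-process against
# FakeSubMaster/FakePubMaster; the others are launched as real subprocesses
# and driven over real messaging sockets.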
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def setup_env(simulation=False):
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("DisengageOnAccelerator", True)
params.put_bool("EnableWideCamera", False)
params.put_bool("DisableLogging", False)
os.environ["NO_RADAR_SLEEP"] = "1"
os.environ["REPLAY"] = "1"
if simulation:
os.environ["SIMULATION"] = "1"
elif "SIMULATION" in os.environ:
del os.environ["SIMULATION"]
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets, **cfg.submaster_config)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
setup_env()
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if msg.carParams.fingerprintSource == "fw" and (car_fingerprint in FW_VERSIONS):
Params().put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(msg.logMonoTime / 1e9, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg().as_builder()
m.logMonoTime = msg.logMonoTime
m = m.as_reader()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
# We need to fake SubMaster alive since we can't inject a fake clock
setup_env(simulation=True)
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=False)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
response = response.as_builder()
response.logMonoTime = msg.logMonoTime
response = response.as_reader()
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
def check_enabled(msgs):
for msg in msgs:
if msg.which() == "carParams":
if msg.carParams.notCar:
return True
elif msg.which() == "controlsState":
if msg.controlsState.active:
return True
return False
| []
| []
| [
"REPLAY",
"SKIP_FW_QUERY",
"NO_RADAR_SLEEP",
"SIMULATION",
"FINGERPRINT"
]
| [] | ["REPLAY", "SKIP_FW_QUERY", "NO_RADAR_SLEEP", "SIMULATION", "FINGERPRINT"] | python | 5 | 0 | |
src/autoscaler/scalingengine/cmd/scalingengine/scalingengine_suite_test.go | package main_test
import (
"autoscaler/cf"
"autoscaler/db"
"autoscaler/models"
"autoscaler/scalingengine/config"
"path/filepath"
"code.cloudfoundry.org/cfhttp"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/onsi/gomega/ghttp"
"gopkg.in/yaml.v2"
"database/sql"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"testing"
"time"
)
func TestScalingengine(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Scalingengine Suite")
}
var (
enginePath string
conf config.Config
port int
configFile *os.File
ccUAA *ghttp.Server
appId string
httpClient *http.Client
)
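// SynchronizedBeforeSuite compiles the scalingengine binary once on the first
// Ginkgo node and hands the path to every parallel node; each node then sets up
// its own fake CC/UAA server, TLS client, config file, and database fixtures.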
var _ = SynchronizedBeforeSuite(
func() []byte {
compiledPath, err := gexec.Build("autoscaler/scalingengine/cmd/scalingengine", "-race")
Expect(err).NotTo(HaveOccurred())
return []byte(compiledPath)
},
func(pathBytes []byte) {
enginePath = string(pathBytes)
ccUAA = ghttp.NewServer()
ccUAA.RouteToHandler("GET", "/v2/info", ghttp.RespondWithJSONEncoded(http.StatusOK,
cf.Endpoints{
AuthEndpoint: ccUAA.URL(),
DopplerEndpoint: strings.Replace(ccUAA.URL(), "http", "ws", 1),
}))
ccUAA.RouteToHandler("POST", "/oauth/token", ghttp.RespondWithJSONEncoded(http.StatusOK, cf.Tokens{}))
appId = fmt.Sprintf("%s-%d", "app-id", GinkgoParallelNode())
appState := models.AppStatusStarted
ccUAA.RouteToHandler("GET", "/v2/apps/"+appId+"/summary", ghttp.RespondWithJSONEncoded(http.StatusOK,
models.AppEntity{Instances: 2, State: &appState}))
ccUAA.RouteToHandler("PUT", "/v2/apps/"+appId, ghttp.RespondWith(http.StatusCreated, ""))
conf.Cf = cf.CfConfig{
Api: ccUAA.URL(),
GrantType: cf.GrantTypePassword,
Username: "admin",
Password: "admin",
}
port = 7000 + GinkgoParallelNode()
testCertDir := "../../../../../test-certs"
conf.Server.Port = port
conf.Server.TLS.KeyFile = filepath.Join(testCertDir, "scalingengine.key")
conf.Server.TLS.CertFile = filepath.Join(testCertDir, "scalingengine.crt")
conf.Server.TLS.CACertFile = filepath.Join(testCertDir, "autoscaler-ca.crt")
conf.Logging.Level = "info"
conf.Db.PolicyDb = db.DatabaseConfig{
Url: os.Getenv("DBURL"),
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
conf.Db.ScalingEngineDb = db.DatabaseConfig{
Url: os.Getenv("DBURL"),
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
conf.Db.SchedulerDb = db.DatabaseConfig{
Url: os.Getenv("DBURL"),
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
conf.Synchronizer.ActiveScheduleSyncInterval = 10 * time.Minute
conf.DefaultCoolDownSecs = 300
conf.LockSize = 32
conf.DBLock.LockDB = db.DatabaseConfig{
Url: os.Getenv("DBURL"),
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
conf.DBLock.LockTTL = 15 * time.Second
conf.DBLock.LockRetryInterval = 5 * time.Second
conf.EnableDBLock = true
configFile = writeConfig(&conf)
testDB, err := sql.Open(db.PostgresDriverName, os.Getenv("DBURL"))
Expect(err).NotTo(HaveOccurred())
_, err = testDB.Exec("DELETE FROM scalinghistory WHERE appid = $1", appId)
Expect(err).NotTo(HaveOccurred())
_, err = testDB.Exec("DELETE from policy_json WHERE app_id = $1", appId)
Expect(err).NotTo(HaveOccurred())
_, err = testDB.Exec("DELETE from activeschedule WHERE appid = $1", appId)
Expect(err).NotTo(HaveOccurred())
_, err = testDB.Exec("DELETE from app_scaling_active_schedule WHERE app_id = $1", appId)
Expect(err).NotTo(HaveOccurred())
policy := `
{
"instance_min_count": 1,
"instance_max_count": 5
}`
_, err = testDB.Exec("INSERT INTO policy_json(app_id, policy_json, guid) values($1, $2, $3)", appId, policy, "1234")
Expect(err).NotTo(HaveOccurred())
err = testDB.Close()
Expect(err).NotTo(HaveOccurred())
tlsConfig, err := cfhttp.NewTLSConfig(
filepath.Join(testCertDir, "eventgenerator.crt"),
filepath.Join(testCertDir, "eventgenerator.key"),
filepath.Join(testCertDir, "autoscaler-ca.crt"))
Expect(err).NotTo(HaveOccurred())
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
}
})
var _ = SynchronizedAfterSuite(
func() {
ccUAA.Close()
os.Remove(configFile.Name())
},
func() {
gexec.CleanupBuildArtifacts()
})
func writeConfig(c *config.Config) *os.File {
cfg, err := ioutil.TempFile("", "engine")
Expect(err).NotTo(HaveOccurred())
defer cfg.Close()
bytes, err := yaml.Marshal(c)
Expect(err).NotTo(HaveOccurred())
_, err = cfg.Write(bytes)
Expect(err).NotTo(HaveOccurred())
return cfg
}
type ScalingEngineRunner struct {
configPath string
startCheck string
port int
Session *gexec.Session
}
func NewScalingEngineRunner() *ScalingEngineRunner {
return &ScalingEngineRunner{
configPath: configFile.Name(),
startCheck: "scalingengine.started",
}
}
func (engine *ScalingEngineRunner) Start() {
engineSession, err := gexec.Start(
exec.Command(
enginePath,
"-c",
engine.configPath,
),
gexec.NewPrefixedWriter("\x1b[32m[o]\x1b[32m[engine]\x1b[0m ", GinkgoWriter),
gexec.NewPrefixedWriter("\x1b[91m[e]\x1b[32m[engine]\x1b[0m ", GinkgoWriter),
)
Expect(err).NotTo(HaveOccurred())
engine.Session = engineSession
}
func (engine *ScalingEngineRunner) Interrupt() {
if engine.Session != nil {
engine.Session.Interrupt().Wait(5 * time.Second)
}
}
func (engine *ScalingEngineRunner) KillWithFire() {
if engine.Session != nil {
engine.Session.Kill().Wait(5 * time.Second)
}
}
func ClearLockDatabase() {
lockDB, err := sql.Open(db.PostgresDriverName, os.Getenv("DBURL"))
Expect(err).NotTo(HaveOccurred())
_, err = lockDB.Exec("DELETE FROM scalingengine_lock")
Expect(err).NotTo(HaveOccurred())
}
| [
"\"DBURL\"",
"\"DBURL\"",
"\"DBURL\"",
"\"DBURL\"",
"\"DBURL\"",
"\"DBURL\""
]
| []
| [
"DBURL"
]
| [] | ["DBURL"] | go | 1 | 0 | |
backend/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ideapros_llc_stream_33286.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
hostinfo/hostinfo.go | // Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hostinfo answers questions about the host environment that Tailscale is
// running on.
package hostinfo
import (
"bufio"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"sync/atomic"
"time"
"go4.org/mem"
"tailscale.com/tailcfg"
"tailscale.com/util/dnsname"
"tailscale.com/util/lineread"
"tailscale.com/version"
)
// New returns a partially populated Hostinfo for the current host.
func New() *tailcfg.Hostinfo {
hostname, _ := os.Hostname()
hostname = dnsname.FirstLabel(hostname)
return &tailcfg.Hostinfo{
IPNVersion: version.Long,
Hostname: hostname,
OS: version.OS(),
OSVersion: GetOSVersion(),
Package: packageType(),
GoArch: runtime.GOARCH,
DeviceModel: deviceModel(),
}
}
var osVersion func() string // non-nil on some platforms
// GetOSVersion returns the OSVersion of current host if available.
func GetOSVersion() string {
if s, _ := osVersionAtomic.Load().(string); s != "" {
return s
}
if osVersion != nil {
return osVersion()
}
return ""
}
func packageType() string {
if v, _ := packagingType.Load().(string); v != "" {
return v
}
switch runtime.GOOS {
case "windows":
if _, err := os.Stat(`C:\ProgramData\chocolatey\lib\tailscale`); err == nil {
return "choco"
}
case "darwin":
// Using tailscaled or IPNExtension?
exe, _ := os.Executable()
return filepath.Base(exe)
case "linux":
// Report whether this is in a snap.
// See https://snapcraft.io/docs/environment-variables
// We just check two of them, somewhat arbitrarily.
if os.Getenv("SNAP_NAME") != "" && os.Getenv("SNAP") != "" {
return "snap"
}
}
return ""
}
// EnvType represents a known environment type.
// The empty string, the default, means unknown.
type EnvType string
const (
KNative = EnvType("kn")
AWSLambda = EnvType("lm")
Heroku = EnvType("hr")
AzureAppService = EnvType("az")
AWSFargate = EnvType("fg")
FlyDotIo = EnvType("fly")
Kubernetes = EnvType("k8s")
DockerDesktop = EnvType("dde")
)
var envType atomic.Value // of EnvType
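// GetEnvType returns the detected environment type, computing it once and
// caching the result in envType.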
func GetEnvType() EnvType {
if e, ok := envType.Load().(EnvType); ok {
return e
}
e := getEnvType()
envType.Store(e)
return e
}
var (
deviceModelAtomic atomic.Value // of string
osVersionAtomic atomic.Value // of string
packagingType atomic.Value // of string
)
// SetDeviceModel sets the device model for use in Hostinfo updates.
func SetDeviceModel(model string) { deviceModelAtomic.Store(model) }
// SetOSVersion sets the OS version.
func SetOSVersion(v string) { osVersionAtomic.Store(v) }
// SetPackage sets the packaging type for the app.
// This is currently (2021-10-05) only used by Android,
// set to "nogoogle" for the F-Droid build.
func SetPackage(v string) { packagingType.Store(v) }
func deviceModel() string {
s, _ := deviceModelAtomic.Load().(string)
return s
}
func getEnvType() EnvType {
if inKnative() {
return KNative
}
if inAWSLambda() {
return AWSLambda
}
if inHerokuDyno() {
return Heroku
}
if inAzureAppService() {
return AzureAppService
}
if inAWSFargate() {
return AWSFargate
}
if inFlyDotIo() {
return FlyDotIo
}
if inKubernetes() {
return Kubernetes
}
if inDockerDesktop() {
return DockerDesktop
}
return ""
}
// inContainer reports whether we're running in a container.
func inContainer() bool {
if runtime.GOOS != "linux" {
return false
}
var ret bool
lineread.File("/proc/1/cgroup", func(line []byte) error {
if mem.Contains(mem.B(line), mem.S("/docker/")) ||
mem.Contains(mem.B(line), mem.S("/lxc/")) {
ret = true
return io.EOF // arbitrary non-nil error to stop loop
}
return nil
})
lineread.File("/proc/mounts", func(line []byte) error {
if mem.Contains(mem.B(line), mem.S("fuse.lxcfs")) {
ret = true
return io.EOF
}
return nil
})
return ret
}
func inKnative() bool {
// https://cloud.google.com/run/docs/reference/container-contract#env-vars
if os.Getenv("K_REVISION") != "" && os.Getenv("K_CONFIGURATION") != "" &&
os.Getenv("K_SERVICE") != "" && os.Getenv("PORT") != "" {
return true
}
return false
}
func inAWSLambda() bool {
// https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html
if os.Getenv("AWS_LAMBDA_FUNCTION_NAME") != "" &&
os.Getenv("AWS_LAMBDA_FUNCTION_VERSION") != "" &&
os.Getenv("AWS_LAMBDA_INITIALIZATION_TYPE") != "" &&
os.Getenv("AWS_LAMBDA_RUNTIME_API") != "" {
return true
}
return false
}
func inHerokuDyno() bool {
// https://devcenter.heroku.com/articles/dynos#local-environment-variables
if os.Getenv("PORT") != "" && os.Getenv("DYNO") != "" {
return true
}
return false
}
func inAzureAppService() bool {
if os.Getenv("APPSVC_RUN_ZIP") != "" && os.Getenv("WEBSITE_STACK") != "" &&
os.Getenv("WEBSITE_AUTH_AUTO_AAD") != "" {
return true
}
return false
}
func inAWSFargate() bool {
if os.Getenv("AWS_EXECUTION_ENV") == "AWS_ECS_FARGATE" {
return true
}
return false
}
func inFlyDotIo() bool {
if os.Getenv("FLY_APP_NAME") != "" && os.Getenv("FLY_REGION") != "" {
return true
}
return false
}
func inKubernetes() bool {
if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
return true
}
return false
}
func inDockerDesktop() bool {
if os.Getenv("TS_HOST_ENV") == "dde" {
return true
}
return false
}
type etcAptSrcResult struct {
mod time.Time
disabled bool
}
var etcAptSrcCache atomic.Value // of etcAptSrcResult
// DisabledEtcAptSource reports whether Ubuntu (or similar) has disabled
// the /etc/apt/sources.list.d/tailscale.list file contents upon upgrade
// to a new release of the distro.
//
// See https://github.com/tailscale/tailscale/issues/3177
func DisabledEtcAptSource() bool {
if runtime.GOOS != "linux" {
return false
}
const path = "/etc/apt/sources.list.d/tailscale.list"
fi, err := os.Stat(path)
if err != nil || !fi.Mode().IsRegular() {
return false
}
mod := fi.ModTime()
if c, ok := etcAptSrcCache.Load().(etcAptSrcResult); ok && c.mod == mod {
return c.disabled
}
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
v := etcAptSourceFileIsDisabled(f)
etcAptSrcCache.Store(etcAptSrcResult{mod: mod, disabled: v})
return v
}
func etcAptSourceFileIsDisabled(r io.Reader) bool {
bs := bufio.NewScanner(r)
disabled := false // did we find the "disabled on upgrade" comment?
for bs.Scan() {
line := strings.TrimSpace(bs.Text())
if strings.Contains(line, "# disabled on upgrade") {
disabled = true
}
if line == "" || line[0] == '#' {
continue
}
// Well, it has some contents in it at least.
return false
}
return disabled
}
| [
"\"SNAP_NAME\"",
"\"SNAP\"",
"\"K_REVISION\"",
"\"K_CONFIGURATION\"",
"\"K_SERVICE\"",
"\"PORT\"",
"\"AWS_LAMBDA_FUNCTION_NAME\"",
"\"AWS_LAMBDA_FUNCTION_VERSION\"",
"\"AWS_LAMBDA_INITIALIZATION_TYPE\"",
"\"AWS_LAMBDA_RUNTIME_API\"",
"\"PORT\"",
"\"DYNO\"",
"\"APPSVC_RUN_ZIP\"",
"\"WEBSITE_STACK\"",
"\"WEBSITE_AUTH_AUTO_AAD\"",
"\"AWS_EXECUTION_ENV\"",
"\"FLY_APP_NAME\"",
"\"FLY_REGION\"",
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\"",
"\"TS_HOST_ENV\""
]
| []
| [
"SNAP_NAME",
"WEBSITE_STACK",
"K_SERVICE",
"AWS_LAMBDA_FUNCTION_VERSION",
"PORT",
"AWS_EXECUTION_ENV",
"AWS_LAMBDA_FUNCTION_NAME",
"APPSVC_RUN_ZIP",
"KUBERNETES_SERVICE_HOST",
"WEBSITE_AUTH_AUTO_AAD",
"FLY_APP_NAME",
"AWS_LAMBDA_INITIALIZATION_TYPE",
"K_REVISION",
"K_CONFIGURATION",
"DYNO",
"FLY_REGION",
"AWS_LAMBDA_RUNTIME_API",
"KUBERNETES_SERVICE_PORT",
"SNAP",
"TS_HOST_ENV"
]
| [] | ["SNAP_NAME", "WEBSITE_STACK", "K_SERVICE", "AWS_LAMBDA_FUNCTION_VERSION", "PORT", "AWS_EXECUTION_ENV", "AWS_LAMBDA_FUNCTION_NAME", "APPSVC_RUN_ZIP", "KUBERNETES_SERVICE_HOST", "WEBSITE_AUTH_AUTO_AAD", "FLY_APP_NAME", "AWS_LAMBDA_INITIALIZATION_TYPE", "K_REVISION", "K_CONFIGURATION", "DYNO", "FLY_REGION", "AWS_LAMBDA_RUNTIME_API", "KUBERNETES_SERVICE_PORT", "SNAP", "TS_HOST_ENV"] | go | 20 | 0 | |
libcontainer/factory_linux.go | // +build linux
package libcontainer
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"runtime/debug"
"strconv"
"syscall"
"github.com/docker/docker/pkg/mount"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/configs/validate"
"github.com/opencontainers/runc/libcontainer/utils"
)
const (
stateFilename = "state.json"
)
var (
idRegex = regexp.MustCompile(`^[\w+-\.]+$`)
maxIdLen = 1024
)
// InitArgs returns an options func to configure a LinuxFactory with the
// provided init arguments.
func InitArgs(args ...string) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
name := args[0]
if filepath.Base(name) == name {
if lp, err := exec.LookPath(name); err == nil {
name = lp
}
} else {
abs, err := filepath.Abs(name)
if err != nil {
return err
}
name = abs
}
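// The container init re-executes the current binary (/proc/self/exe); the
// resolved name is passed through as argv[0] of the init process.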
l.InitPath = "/proc/self/exe"
l.InitArgs = append([]string{name}, args[1:]...)
return nil
}
}
// InitPath returns an options func to configure a LinuxFactory with the
// provided absolute path to the init binary and arguments.
func InitPath(path string, args ...string) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
l.InitPath = path
l.InitArgs = args
return nil
}
}
// SystemdCgroups is an options func to configure a LinuxFactory to return
// containers that use systemd to create and manage cgroups.
func SystemdCgroups(l *LinuxFactory) error {
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
return &systemd.Manager{
Cgroups: config,
Paths: paths,
}
}
return nil
}
// Cgroupfs is an options func to configure a LinuxFactory to return
// containers that use the native cgroups filesystem implementation to
// create and manage cgroups.
func Cgroupfs(l *LinuxFactory) error {
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
return &fs.Manager{
Cgroups: config,
Paths: paths,
}
}
return nil
}
// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.
func TmpfsRoot(l *LinuxFactory) error {
mounted, err := mount.Mounted(l.Root)
if err != nil {
return err
}
if !mounted {
if err := syscall.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil {
return err
}
}
return nil
}
// New returns a linux based container factory based in the root directory and
// configures the factory with the provided option funcs.
func New(root string, options ...func(*LinuxFactory) error) (Factory, error) {
if root != "" {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, newGenericError(err, SystemError)
}
}
l := &LinuxFactory{
Root: root,
Validator: validate.New(),
CriuPath: "criu",
}
InitArgs(os.Args[0], "init")(l)
Cgroupfs(l)
for _, opt := range options {
if err := opt(l); err != nil {
return nil, err
}
}
return l, nil
}
// LinuxFactory implements the default factory interface for linux based systems.
type LinuxFactory struct {
// Root directory for the factory to store state.
Root string
// InitPath is the absolute path to the init binary.
InitPath string
// InitArgs are arguments for calling the init responsibilities for spawning
// a container.
InitArgs []string
// CriuPath is the path to the criu binary used for checkpoint and restore of
// containers.
CriuPath string
// Validator provides validation to container configurations.
Validator validate.Validator
// NewCgroupsManager returns an initialized cgroups manager for a single container.
NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager
}
func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {
if l.Root == "" {
return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
}
if err := l.validateID(id); err != nil {
return nil, err
}
if err := l.Validator.Validate(config); err != nil {
return nil, newGenericError(err, ConfigInvalid)
}
containerRoot := filepath.Join(l.Root, id)
if _, err := os.Stat(containerRoot); err == nil {
return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse)
} else if !os.IsNotExist(err) {
return nil, newGenericError(err, SystemError)
}
if err := os.MkdirAll(containerRoot, 0700); err != nil {
return nil, newGenericError(err, SystemError)
}
c := &linuxContainer{
id: id,
root: containerRoot,
config: config,
initPath: l.InitPath,
initArgs: l.InitArgs,
criuPath: l.CriuPath,
cgroupManager: l.NewCgroupsManager(config.Cgroups, nil),
}
c.state = &stoppedState{c: c}
return c, nil
}
func (l *LinuxFactory) Load(id string) (Container, error) {
if l.Root == "" {
return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
}
containerRoot := filepath.Join(l.Root, id)
state, err := l.loadState(containerRoot)
if err != nil {
return nil, err
}
r := &nonChildProcess{
processPid: state.InitProcessPid,
processStartTime: state.InitProcessStartTime,
fds: state.ExternalDescriptors,
}
c := &linuxContainer{
initProcess: r,
id: id,
config: &state.Config,
initPath: l.InitPath,
initArgs: l.InitArgs,
criuPath: l.CriuPath,
cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),
root: containerRoot,
created: state.Created,
}
c.state = &loadedState{c: c}
if err := c.refreshState(); err != nil {
return nil, err
}
return c, nil
}
func (l *LinuxFactory) Type() string {
return "libcontainer"
}
// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state
// This is a low level implementation detail of the reexec and should not be consumed externally
func (l *LinuxFactory) StartInitialization() (err error) {
// start the signal handler as soon as we can
s := make(chan os.Signal, 1)
signal.Notify(s, InitContinueSignal)
fdStr := os.Getenv("_LIBCONTAINER_INITPIPE")
pipefd, err := strconv.Atoi(fdStr)
if err != nil {
return fmt.Errorf("error converting env var _LIBCONTAINER_INITPIPE(%q) to an int: %s", fdStr, err)
}
var (
pipe = os.NewFile(uintptr(pipefd), "pipe")
it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
)
// clear the current process's environment to clean any libcontainer
// specific env vars.
os.Clearenv()
var i initer
defer func() {
// We have an error during the initialization of the container's init,
// send it back to the parent process in the form of an initError.
// If the container's init succeeded, syscall.Exec will not return, hence
// this defer function will never be called.
if _, ok := i.(*linuxStandardInit); ok {
// Synchronisation only necessary for standard init.
if err := utils.WriteJSON(pipe, syncT{procError}); err != nil {
panic(err)
}
}
if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil {
panic(err)
}
// ensure that this pipe is always closed
pipe.Close()
}()
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack()))
}
}()
i, err = newContainerInit(it, pipe)
if err != nil {
return err
}
return i.Init(s)
}
func (l *LinuxFactory) loadState(root string) (*State, error) {
f, err := os.Open(filepath.Join(root, stateFilename))
if err != nil {
if os.IsNotExist(err) {
return nil, newGenericError(err, ContainerNotExists)
}
return nil, newGenericError(err, SystemError)
}
defer f.Close()
var state *State
if err := json.NewDecoder(f).Decode(&state); err != nil {
return nil, newGenericError(err, SystemError)
}
return state, nil
}
func (l *LinuxFactory) validateID(id string) error {
if !idRegex.MatchString(id) {
return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
}
if len(id) > maxIdLen {
return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
}
return nil
}
| [
"\"_LIBCONTAINER_INITPIPE\"",
"\"_LIBCONTAINER_INITTYPE\""
]
| []
| [
"_LIBCONTAINER_INITTYPE",
"_LIBCONTAINER_INITPIPE"
]
| [] | ["_LIBCONTAINER_INITTYPE", "_LIBCONTAINER_INITPIPE"] | go | 2 | 0 | |
py_modules/werobot/mp/mp.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import time
import json
import hashlib
import string
import random
import requests
from .base import Map, WeixinError
__all__ = ("WeixinMPError", "WeixinMP")
DEFAULT_DIR = os.getenv("HOME", os.getcwd())
class WeixinMPError(WeixinError):
def __init__(self, msg):
super(WeixinMPError, self).__init__(msg)
class WeixinMP(object):
"""
WeChat Official Account (MP) related APIs.
When the access token needs to be shared globally, you can subclass WeixinMP and override access_token:
class WeixinMPSub(object):
def __init__(self, app_id, app_secret):
WeixinMP.__init__(app_id, app_secret)
@property
def access_token(self):
return requests.get("http://example.com").content
mp = WeixinMPSub("app_id", "app_secret")
Alternatively, pass in a callback (ac_callback here; jt_callback works the same way):
def get_access_token(mp):
return requests.get("http://example.com").content
WeixinMP("app_id", "app_secret", ac_callback=get_access_token)
"""
api_uri = "https://api.weixin.qq.com"
def __init__(self, app_id, app_secret, ac_path=None, jt_path=None, ac_callback=None, jt_callback=None):
"""
:param app_id: WeChat app id
:param app_secret: WeChat app secret
:param ac_path: path of the cached access token file
:param jt_path: path of the cached jsapi ticket file
:param ac_callback: optional callback that returns the access token
:param jt_callback: optional callback that returns the jsapi ticket
"""
self.app_id = app_id
self.app_secret = app_secret
self.session = requests.Session()
if ac_path is None:
ac_path = os.path.join(DEFAULT_DIR, ".access_token")
if jt_path is None:
jt_path = os.path.join(DEFAULT_DIR, ".jsapi_ticket")
self.ac_path = ac_path
self.jt_path = jt_path
self.ac_callback = ac_callback
self.jt_callback = jt_callback
def fetch(self, method, url, params=None, data=None, headers=None):
req = requests.Request(method, url, params=params,
data=data, headers=headers)
prepped = req.prepare()
resp = self.session.send(prepped, timeout=20)
data = Map(resp.json())
if data.errcode:
msg = "%(errcode)d %(errmsg)s" % data
raise WeixinMPError(msg)
return data
def get(self, path, params=None, token=True, prefix="/cgi-bin"):
url = "{0}{1}{2}".format(self.api_uri, prefix, path)
params = {} if not params else params
token and params.setdefault("access_token", self.access_token)
return self.fetch("GET", url, params)
def post(self, path, data, prefix="/cgi-bin", json_encode=True, token=True):
url = "{0}{1}{2}".format(self.api_uri, prefix, path)
params = {}
token and params.setdefault("access_token", self.access_token)
headers = {}
if json_encode:
# data = json.dumps(data, ensure_ascii=False)
data = json.dumps(data)
headers["Content-Type"] = "application/json;charset=UTF-8"
# print url, params, headers, data
return self.fetch("POST", url, params=params, data=data, headers=headers)
@property
def access_token(self):
"""
Fetch the server-side credential (access token).
When several servers need to share the access_token and you don't
want to subclass and implement access_token yourself, pass in ac_callback(),
which receives the WeixinMP instance as its only argument.
"""
if self.ac_callback and callable(self.ac_callback):
return self.ac_callback(self)
timestamp = time.time()
if not os.path.exists(self.ac_path) or \
int(os.path.getmtime(self.ac_path)) < timestamp:
params = dict()
params.setdefault("grant_type", "client_credential")
params.setdefault("appid", self.app_id)
params.setdefault("secret", self.app_secret)
data = self.get("/token", params, False)
with open(self.ac_path, 'wb') as fp:
fp.write(data.access_token.encode("utf-8"))
os.utime(self.ac_path, (timestamp, timestamp + data.expires_in - 600))
return open(self.ac_path).read().strip()
@property
def jsapi_ticket(self):
"""
Fetch the jsapi ticket.
When several servers need to share the js_ticket and you don't
want to subclass and implement it yourself, pass in jt_callback(),
which receives the WeixinMP instance as its only argument.
"""
if self.jt_callback and callable(self.jt_callback):
return self.jt_callback(self)
timestamp = time.time()
if not os.path.exists(self.jt_path) or \
int(os.path.getmtime(self.jt_path)) < timestamp:
params = dict()
params.setdefault("type", "jsapi")
data = self.get("/ticket/getticket", params, True)
with open(self.jt_path, 'wb') as fp:
fp.write(data.ticket.encode("utf-8"))
os.utime(self.jt_path, (timestamp, timestamp + data.expires_in - 600))
return open(self.jt_path).read()
@property
def nonce_str(self):
char = string.ascii_letters + string.digits
return "".join(random.choice(char) for _ in range(32))
def jsapi_sign(self,url):
"""
Generate the signature payload for the JS-SDK.
"""
kwargs = dict()
timestamp = str(int(time.time()))
nonce_str = self.nonce_str
kwargs.setdefault("jsapi_ticket", self.jsapi_ticket)
kwargs.setdefault("timestamp", timestamp)
kwargs.setdefault("noncestr", nonce_str)
kwargs.setdefault("url", url)
raw = [(k, kwargs[k]) for k in sorted(kwargs.keys())]
s = "&".join("=".join(kv) for kv in raw if kv[1])
sign = hashlib.sha1(s.encode("utf-8")).hexdigest().lower()
return Map(sign=sign, timestamp=timestamp, noncestr=nonce_str, appId=self.app_id)
def groups_create(self, name):
"""
Create a user group.
:param name: group name
"""
data = dict(group=dict(name=name))
return self.post("/groups/create", data)
def groups_get(self):
"""
Get all user groups.
"""
return self.get("/groups/get")
def groups_getid(self, openid):
"""
Query the group a user belongs to.
:param openid: user openid
"""
data = dict(openid=openid)
return self.post("/groups/getid", data)
def groups_update(self, id, name):
"""
Rename a group.
:param id: group id
:param name: new group name
"""
data = dict(group=dict(id=id, name=name))
return self.post("/groups/update", data)
def groups_members_update(self, to_groupid, openid):
"""
Move a user to another group.
:param to_groupid: target group id
:param openid: user openid
"""
data = dict(openid=openid, to_groupid=to_groupid)
return self.post("/groups/members/update", data)
def groups_members_batchupdate(self, to_groupid, *openid):
"""
Move users to another group in batch.
:param to_groupid: target group id
:param openid: list of user openids
"""
data = dict(openid_list=openid, to_groupid=to_groupid)
return self.post("/groups/members/batchupdate", data)
def groups_delete(self, id):
"""
Delete a group.
:param id: group id
"""
data = dict(group=dict(id=id))
return self.post("/groups/delete", data)
def user_info_updateremark(self, openid, remark):
"""
Set a remark (alias) for a user.
:param openid: user openid
:param remark: remark name
"""
data = dict(openid=openid, remark=remark)
return self.post("/user/info/updateremark", data)
def user_info(self, openid):
"""
Get user info.
The response includes a subscribe field, which tells whether the user follows the account.
:param openid: user openid
"""
args = dict(openid=openid, lang="zh_CN")
return self.get("/user/info", args)
def user_info_batchget(self, *openid):
"""
Get user info in batch.
"""
user_list = []
for id in openid:
user_list.append(dict(openid=id, lang="zh_CN"))
data = dict(user_list=user_list)
return self.post("/user/info/batchget", data)
def user_get(self, next_openid=None):
"""
Get the list of followers of the account.
At most 1000 openids are returned per call.
:param next_openid: first openid to pull; defaults to the beginning when omitted
"""
args = dict()
if next_openid:
args.setdefault("next_openid", next_openid)
return self.get("/user/get", args)
def menu_create(self, data):
data = dict(button=data)
return self.post("/menu/create", data)
def menu_get(self):
return self.get("/menu/get")
def menu_delete(self):
return self.get("/menu/delete")
def get_current_selfmenu_info(self):
return self.get("/get_current_selfmenu_info")
def shorturl(self, long_url):
"""
Convert a long URL into a short URL.
:param long_url: the long URL
"""
data = dict(action="long2short", long_url=long_url)
result = self.post("/shorturl", data)
if result.errcode == 0 :
return result.short_url
else:
return None
def qrcode_create(self, scene_id, expires=30):
"""
Create a temporary QR code.
"""
data = dict(
action_name="QR_SCENE", expire_seconds=expires,
action_info=dict(scene=dict(scene_id=scene_id)),
)
return self.post("/qrcode/create", data)
def qrcode_create_limit(self, input):
"""
Create a permanent (limit) QR code.
"""
data = dict()
if isinstance(input, int):
data["action_name"] = "QR_LIMIT_SCENE"
data["action_info"] = dict(scene=dict(
scene_id=input,
))
elif isinstance(input, str):
data["action_name"] = "QR_LIMIT_STR_SCENE"
data["action_info"] = dict(scene=dict(
scene_str=input,
))
else:
raise ValueError("invalid type")
return self.post("/qrcode/create", data)
def qrcode_show(self, ticket):
"""
Return the URL for displaying a QR code from its ticket.
"""
url = "https://mp.weixin.qq.com/cgi-bin/showqrcode"
return self.add_query(url, dict(ticket=ticket))
def shop_list(self, pageindex=1, pagesize=10):
"""
List Wi-Fi shops.
"""
data = dict(pageindex=pageindex, pagesize=pagesize)
return self.post("/shop/list", data, prefix="/bizwifi")
def shop_get(self, shop_id):
"""
Query a shop's Wi-Fi info.
"""
return self.post("/shop/get", dict(shop_id=shop_id), prefix="/bizwifi")
def shop_update(self, shop_id, old_ssid, ssid, password=None):
"""
Update a shop's network info.
"""
data = dict(shop_id=shop_id, old_ssid=old_ssid, ssid=ssid)
if password:
data.update(dict(password=password))
return self.post("/shop/update", data, prefix="/bizwifi")
def shop_clean(self, shop_id):
"""
Clear the shop's network configuration and all devices, restoring it to an empty state.
"""
return self.post("/shop/clean", dict(shop_id=shop_id), prefix="/bizwifi")
def apportal_register(self, shop_id, ssid, reset):
"""
Register a portal-type device.
"""
data = dict(shop_id=shop_id, ssid=ssid, reset=reset)
return self.post("/apportal/register", data)
def device_list(self, shop_id=None, pageindex=1, pagesize=10, prefix="/bizwifi"):
"""
List devices.
"""
data = dict(pageindex=pageindex, pagesize=pagesize)
if shop_id:
data.update(dict(shop_id=shop_id))
return self.post("/device/list", data, prefix="/bizwifi")
def device_delete(self, bssid):
"""
Delete a device.
"""
return self.post("/device/delete", dict(bssid=bssid), prefix="/bizwifi")
def qrcode_get(self, shop_id, ssid, img_id):
"""
Get the Wi-Fi material QR code.
"""
data = dict(shop_id=shop_id, ssid=ssid, img_id=img_id)
return self.post("/qrcode/get", data, prefix="/bizwifi")
def get_all_private_template(self):
"""
Get the list of all private message templates.
"""
return self.get("/template/get_all_private_template")
def del_private_template(self, template_id):
"""
Delete a private message template.
"""
return self.post("/template/del_private_template", dict(template_id=template_id))
def template_send(self, template_id, touser, data, url=None, miniprogram=None, **kwargs):
"""
Send a template message.
:param template_id: template id
:param touser: recipient openid
:param data: content and color for each template field
:param url: URL to open when the message is tapped
:param miniprogram: mini-program jump settings
"""
kwargs.setdefault("template_id", template_id)
kwargs.setdefault("touser", touser)
kwargs.setdefault("data", data)
url and kwargs.setdefault("url", url)
miniprogram and kwargs.setdefault("miniprogram", miniprogram)
# print kwargs
return self.post("/message/template/send", kwargs)
if __name__ == '__main__':
APP_ID = "wx7b27955ce810b11f"
APP_SECRET = "d2754228e5db08f3ce1a1011d59e9798"
a = WeixinMP(APP_ID,APP_SECRET).access_token
print(a)
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
net/lib/box/nms/gpu_nms/setup.py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
# /opt/anaconda3/bin/python3 setup.py build_ext --inplace
# http://martinsosic.com/development/2016/02/08/wrapping-c-library-as-python-module.html
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def find_in_path(name, path):
"Find a file in a search path"
# adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
return None
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {
'home': home,
'nvcc': nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')
}
for k, v in cudaconfig.items():
if not os.path.exists(v):
return None
return cudaconfig
CUDA = locate_cuda()
print("CUDA found:", CUDA)
##----------------------------------------------------------------------------------------
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
# tell the compiler it can process .cu files
self.src_extensions.append('.cu')
# save references to the default compiler_so and _comple methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
##----------------------------------------------------------------------------------------
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
#/usr/local/cuda-9.1/bin/nvcc -c -o gpu_nms_kernel.cu.o gpu_nms_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52
ext_modules = [
Extension(
"gpu_nms",
sources=["gpu_nms.pyx", "src/gpu_nms_kernel.cu"],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={
'gcc': [],
'nvcc': ['-arch=sm_52', '--ptxas-options=-v', '-c', '--compiler-options', "'-fPIC'"],
},
include_dirs=[numpy_include, CUDA['include'], 'src']),
]
setup(name='mask_rcnn', cmdclass={'build_ext': custom_build_ext}, ext_modules=ext_modules)
| []
| []
| [
"PATH",
"CUDAHOME"
]
| [] | ["PATH", "CUDAHOME"] | python | 2 | 0 | |
cases.py | import matplotlib.pyplot as plt
import numpy as np
import os
import get_dc_data
# Cumulative figure.
casedata = get_dc_data.retrieve(download=False)
f2 = plt.figure(figsize=(6,4))
plt.suptitle("COVID-19 Data Summary, District of Columbia ",
fontweight="bold")
plt.title("github.com/reidac/covid19-curve-dc", style="oblique")
plt.xlabel("Days since March 8, 2020")
plt.ylabel("Cases")
plt.bar(casedata.x,casedata.positive,color='y',width=1.0)
plt.bar(casedata.x,casedata.recovered,
bottom=casedata.positive-casedata.recovered,color='g',width=1.0)
plt.bar(casedata.x,casedata.deaths,color='r',width=1.0)
plt.legend(labels=['Positives','Recovered positives','Deaths'])
if "FIG_PATH" in os.environ:
fig_path = os.environ['FIG_PATH']
else:
fig_path = "."
plt.savefig("{0}/us_dc_cases.png".format(fig_path),dpi=300,bbox_inches="tight")
print("Bar graph of cumulative Covid-19 cases reported by DC, broken out into positives, recoveries, and deaths.")
| []
| []
| [
"FIG_PATH"
]
| [] | ["FIG_PATH"] | python | 1 | 0 | |
stream_alert/rule_processor/__init__.py | """Initialize logging for the rule processor."""
import logging
import os
from stream_alert.shared import RULE_PROCESSOR_NAME as FUNCTION_NAME
# Create a package level logger to import
LEVEL = os.environ.get('LOGGER_LEVEL', 'INFO').upper()
# Cast integer levels to avoid a ValueError
if LEVEL.isdigit():
LEVEL = int(LEVEL)
logging.basicConfig(format='%(name)s [%(levelname)s]: [%(module)s.%(funcName)s] %(message)s')
LOGGER = logging.getLogger('StreamAlert')
try:
LOGGER.setLevel(LEVEL)
except (TypeError, ValueError) as err:
LOGGER.setLevel('INFO')
LOGGER.error('Defaulting to INFO logging: %s', err)
LOGGER_DEBUG_ENABLED = LOGGER.isEnabledFor(logging.DEBUG)
| []
| []
| [
"LOGGER_LEVEL"
]
| [] | ["LOGGER_LEVEL"] | python | 1 | 0 | |
examples/ResNet/imagenet-resnet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: imagenet-resnet.py
import argparse
import os
from tensorpack import QueueInput, TFDatasetInput, logger
from tensorpack.callbacks import *
from tensorpack.dataflow import FakeData
from tensorpack.models import *
from tensorpack.tfutils import argscope, get_model_loader
from tensorpack.train import SyncMultiGPUTrainerReplicated, TrainConfig, launch_train_with_config
from tensorpack.utils.gpu import get_num_gpu
from imagenet_utils import ImageNetModel, eval_on_ILSVRC12, get_imagenet_dataflow, get_imagenet_tfdata
from resnet_model import (
preresnet_basicblock, preresnet_bottleneck, preresnet_group, resnet_backbone, resnet_basicblock, resnet_bottleneck,
resnet_group, se_resnet_bottleneck)
class Model(ImageNetModel):
def __init__(self, depth, mode='resnet'):
if mode == 'se':
assert depth >= 50
self.mode = mode
basicblock = preresnet_basicblock if mode == 'preact' else resnet_basicblock
bottleneck = {
'resnet': resnet_bottleneck,
'preact': preresnet_bottleneck,
'se': se_resnet_bottleneck}[mode]
self.num_blocks, self.block_func = {
18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}[depth]
def get_logits(self, image):
with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
return resnet_backbone(
image, self.num_blocks,
preresnet_group if self.mode == 'preact' else resnet_group, self.block_func)
def get_config(model):
nr_tower = max(get_num_gpu(), 1)
assert args.batch % nr_tower == 0
batch = args.batch // nr_tower
logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))
if batch < 32 or batch > 64:
logger.warn("Batch size per tower not in [32, 64]. This probably will lead to worse accuracy than reported.")
if args.fake:
data = QueueInput(FakeData(
[[batch, 224, 224, 3], [batch]], 1000, random=False, dtype='uint8'))
callbacks = []
else:
if args.symbolic:
data = TFDatasetInput(get_imagenet_tfdata(args.data, 'train', batch))
else:
data = QueueInput(get_imagenet_dataflow(args.data, 'train', batch))
START_LR = 0.1
BASE_LR = START_LR * (args.batch / 256.0)
callbacks = [
ModelSaver(),
EstimatedTimeLeft(),
ScheduledHyperParamSetter(
'learning_rate', [
(0, min(START_LR, BASE_LR)), (30, BASE_LR * 1e-1), (60, BASE_LR * 1e-2),
(90, BASE_LR * 1e-3), (100, BASE_LR * 1e-4)]),
]
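# When the scaled base LR exceeds the starting LR (total batch > 256), warm the
# learning rate up linearly over the first 5 epochs, as in the linear scaling rule.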
if BASE_LR > START_LR:
callbacks.append(
ScheduledHyperParamSetter(
'learning_rate', [(0, START_LR), (5, BASE_LR)], interp='linear'))
infs = [ClassificationError('wrong-top1', 'val-error-top1'),
ClassificationError('wrong-top5', 'val-error-top5')]
dataset_val = get_imagenet_dataflow(args.data, 'val', batch)
if nr_tower == 1:
# single-GPU inference with queue prefetch
callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
else:
# multi-GPU inference (with mandatory queue prefetch)
callbacks.append(DataParallelInferenceRunner(
dataset_val, infs, list(range(nr_tower))))
if get_num_gpu() > 0:
callbacks.append(GPUUtilizationTracker())
return TrainConfig(
model=model,
data=data,
callbacks=callbacks,
steps_per_epoch=100 if args.fake else 1281167 // args.batch,
max_epoch=105,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# generic:
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use. Default to use all available ones')
parser.add_argument('--eval', action='store_true', help='run offline evaluation instead of training')
parser.add_argument('--load', help='load a model for training or evaluation')
# data:
parser.add_argument('--data', help='ILSVRC dataset dir')
parser.add_argument('--fake', help='use FakeData to debug or benchmark this model', action='store_true')
parser.add_argument('--symbolic', help='use symbolic data loader', action='store_true')
# model:
parser.add_argument('--data-format', help='the image data layout used by the model',
default='NCHW', choices=['NCHW', 'NHWC'])
parser.add_argument('-d', '--depth', help='ResNet depth',
type=int, default=50, choices=[18, 34, 50, 101, 152])
parser.add_argument('--weight-decay-norm', action='store_true',
help="apply weight decay on normalization layers (gamma & beta)."
"This is used in torch/pytorch, and slightly "
"improves validation accuracy of large models.")
parser.add_argument('--batch', default=256, type=int,
help="total batch size. "
"Note that it's best to keep per-GPU batch size in [32, 64] to obtain the best accuracy."
"Pretrained models listed in README were trained with batch=32x8.")
parser.add_argument('--mode', choices=['resnet', 'preact', 'se'],
help='variants of resnet to use', default='resnet')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
model = Model(args.depth, args.mode)
model.data_format = args.data_format
if model.weight_decay_norm:
model.weight_decay_pattern = ".*/W|.*/gamma|.*/beta"
if args.eval:
batch = 128 # something that can run on one gpu
ds = get_imagenet_dataflow(args.data, 'val', batch)
eval_on_ILSVRC12(model, get_model_loader(args.load), ds)
else:
if args.fake:
logger.set_logger_dir(os.path.join('train_log', 'tmp'), 'd')
else:
logger.set_logger_dir(
os.path.join('train_log',
'imagenet-{}-d{}-batch{}'.format(
args.mode, args.depth, args.batch)))
config = get_config(model)
if args.load:
config.session_init = get_model_loader(args.load)
trainer = SyncMultiGPUTrainerReplicated(max(get_num_gpu(), 1))
launch_train_with_config(config, trainer)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/prom/instance/instance.go | // Package instance provides a mini Prometheus scraper and remote_writer.
package instance
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/grafana/agent/pkg/build"
"github.com/grafana/agent/pkg/prom/wal"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/pkg/relabel"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/storage/remote"
"gopkg.in/yaml.v2"
)
func init() {
remote.UserAgent = fmt.Sprintf("GrafanaCloudAgent/%s", build.Version)
}
var (
remoteWriteMetricName = "queue_highest_sent_timestamp_seconds"
managerMtx sync.Mutex
)
// Default configuration values
var (
DefaultConfig = Config{
HostFilter: false,
WALTruncateFrequency: 1 * time.Minute,
MinWALTime: 5 * time.Minute,
MaxWALTime: 4 * time.Hour,
RemoteFlushDeadline: 1 * time.Minute,
WriteStaleOnShutdown: false,
}
)
// Config is a specific agent that runs within the overall Prometheus
// agent. It has its own set of scrape_configs and remote_write rules.
type Config struct {
Name string `yaml:"name" json:"name"`
HostFilter bool `yaml:"host_filter" json:"host_filter"`
HostFilterRelabelConfigs []*relabel.Config `yaml:"host_filter_relabel_configs,omitempty"`
ScrapeConfigs []*config.ScrapeConfig `yaml:"scrape_configs,omitempty" json:"scrape_configs,omitempty"`
RemoteWrite []*config.RemoteWriteConfig `yaml:"remote_write,omitempty" json:"remote_write,omitempty"`
// How frequently the WAL should be truncated.
WALTruncateFrequency time.Duration `yaml:"wal_truncate_frequency,omitempty" json:"wal_truncate_frequency,omitempty"`
// Minimum and maximum time series should exist in the WAL for.
MinWALTime time.Duration `yaml:"min_wal_time,omitempty" json:"min_wal_time,omitempty"`
MaxWALTime time.Duration `yaml:"max_wal_time,omitempty" json:"max_wal_time,omitempty"`
RemoteFlushDeadline time.Duration `yaml:"remote_flush_deadline,omitempty" json:"remote_flush_deadline,omitempty"`
WriteStaleOnShutdown bool `yaml:"write_stale_on_shutdown,omitempty" json:"write_stale_on_shutdown,omitempty"`
}
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultConfig
type plain Config
return unmarshal((*plain)(c))
}
func (c Config) MarshalYAML() (interface{}, error) {
// We want users to be able to marshal instance.Configs directly without
// *needing* to call instance.MarshalConfig, so we call it internally
// here and return a map.
bb, err := MarshalConfig(&c, false)
if err != nil {
return nil, err
}
// Use a yaml.MapSlice rather than a map[string]interface{} so
// order of keys is retained compared to just calling MarshalConfig.
var m yaml.MapSlice
if err := yaml.Unmarshal(bb, &m); err != nil {
return nil, err
}
return m, nil
}
// ApplyDefaults applies default configurations to the configuration to all
// values that have not been changed to their non-zero value. ApplyDefaults
// also validates the config.
func (c *Config) ApplyDefaults(global *config.GlobalConfig) error {
switch {
case c.Name == "":
return errors.New("missing instance name")
case c.WALTruncateFrequency <= 0:
return errors.New("wal_truncate_frequency must be greater than 0s")
case c.RemoteFlushDeadline <= 0:
return errors.New("remote_flush_deadline must be greater than 0s")
case c.MinWALTime > c.MaxWALTime:
return errors.New("min_wal_time must be less than max_wal_time")
}
jobNames := map[string]struct{}{}
for _, sc := range c.ScrapeConfigs {
if sc == nil {
return fmt.Errorf("empty or null scrape config section")
}
// First set the correct scrape interval, then check that the timeout
// (inferred or explicit) is not greater than that.
if sc.ScrapeInterval == 0 {
sc.ScrapeInterval = global.ScrapeInterval
}
if sc.ScrapeTimeout > sc.ScrapeInterval {
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", sc.JobName)
}
if time.Duration(sc.ScrapeInterval) > c.WALTruncateFrequency {
return fmt.Errorf("scrape interval greater than wal_truncate_frequency for scrape config with job name %q", sc.JobName)
}
if sc.ScrapeTimeout == 0 {
if global.ScrapeTimeout > sc.ScrapeInterval {
sc.ScrapeTimeout = sc.ScrapeInterval
} else {
sc.ScrapeTimeout = global.ScrapeTimeout
}
}
if _, exists := jobNames[sc.JobName]; exists {
return fmt.Errorf("found multiple scrape configs with job name %q", sc.JobName)
}
jobNames[sc.JobName] = struct{}{}
}
rwNames := map[string]struct{}{}
for _, cfg := range c.RemoteWrite {
if cfg == nil {
return fmt.Errorf("empty or null remote write config section")
}
// Typically Prometheus ignores empty names here, but we need to assign a
// unique name to the config so we can pull metrics from it when running
// an instance.
var generatedName bool
if cfg.Name == "" {
hash, err := getHash(cfg)
if err != nil {
return err
}
// We have to add the name of the instance to ensure that generated metrics
// are unique across multiple agent instances. The remote write queues currently
// globally register their metrics so we can't inject labels here.
cfg.Name = c.Name + "-" + hash[:6]
generatedName = true
}
if _, exists := rwNames[cfg.Name]; exists {
if generatedName {
return fmt.Errorf("found two identical remote_write configs")
}
return fmt.Errorf("found duplicate remote write configs with name %q", cfg.Name)
}
rwNames[cfg.Name] = struct{}{}
}
return nil
}
type walStorageFactory func(reg prometheus.Registerer) (walStorage, error)
// Instance is an individual metrics collector and remote_writer.
type Instance struct {
// All fields in the following block may be accessed and modified by
// concurrently running goroutines.
//
// Note that all Prometheus components listed here may be nil at any
// given time; methods reading them should take care to do nil checks.
mut sync.Mutex
cfg Config
wal walStorage
discovery *discoveryService
readyScrapeManager *scrape.ReadyScrapeManager
remoteStore *remote.Storage
storage storage.Storage
globalCfg config.GlobalConfig
logger log.Logger
reg prometheus.Registerer
newWal walStorageFactory
vc *MetricValueCollector
}
// New creates a new Instance with a directory for storing the WAL. The instance
// will not start until Run is called on the instance.
func New(reg prometheus.Registerer, globalCfg config.GlobalConfig, cfg Config, walDir string, logger log.Logger) (*Instance, error) {
logger = log.With(logger, "instance", cfg.Name)
instWALDir := filepath.Join(walDir, cfg.Name)
newWal := func(reg prometheus.Registerer) (walStorage, error) {
return wal.NewStorage(logger, reg, instWALDir)
}
return newInstance(globalCfg, cfg, reg, logger, newWal)
}
func newInstance(globalCfg config.GlobalConfig, cfg Config, reg prometheus.Registerer, logger log.Logger, newWal walStorageFactory) (*Instance, error) {
vc := NewMetricValueCollector(prometheus.DefaultGatherer, remoteWriteMetricName)
i := &Instance{
cfg: cfg,
globalCfg: globalCfg,
logger: logger,
vc: vc,
reg: reg,
newWal: newWal,
readyScrapeManager: &scrape.ReadyScrapeManager{},
}
return i, nil
}
// Run starts the instance, initializing Prometheus components, and will
// continue to run until an error happens during execution or the provided
// context is cancelled.
//
// Run may be re-called after exiting, as components will be reinitialized each
// time Run is called.
func (i *Instance) Run(ctx context.Context) error {
// i.cfg may change at any point in the middle of this method but not in a way
// that affects any of the code below; rather than grabbing a mutex every time
// we want to read the config, we'll simplify the access and just grab a copy
// now.
i.mut.Lock()
cfg := i.cfg
i.mut.Unlock()
level.Debug(i.logger).Log("msg", "initializing instance", "name", cfg.Name)
// trackingReg wraps the register for the instance to make sure that if Run
// exits, any metrics Prometheus registers are removed and can be
// re-registered if Run is called again.
trackingReg := unregisterAllRegisterer{wrap: i.reg}
defer trackingReg.UnregisterAll()
if err := i.initialize(ctx, &trackingReg, &cfg); err != nil {
level.Error(i.logger).Log("msg", "failed to initialize instance", "err", err)
return fmt.Errorf("failed to initialize instance: %w", err)
}
// The actors defined here are defined in the order we want them to shut down.
// Primarily, we want to ensure that the following shutdown order is
// maintained:
// 1. The scrape manager stops
// 2. WAL storage is closed
// 3. Remote write storage is closed
// This is done to allow the instance to write stale markers for all active
// series.
rg := runGroupWithContext(ctx)
{
// Target Discovery
rg.Add(i.discovery.Run, i.discovery.Stop)
}
{
// Truncation loop
ctx, contextCancel := context.WithCancel(context.Background())
defer contextCancel()
rg.Add(
func() error {
i.truncateLoop(ctx, i.wal, &cfg)
level.Info(i.logger).Log("msg", "truncation loop stopped")
return nil
},
func(err error) {
level.Info(i.logger).Log("msg", "stopping truncation loop...")
contextCancel()
},
)
}
{
sm, err := i.readyScrapeManager.Get()
if err != nil {
level.Error(i.logger).Log("msg", "failed to get scrape manager")
return err
}
// Scrape manager
rg.Add(
func() error {
err := sm.Run(i.discovery.SyncCh())
level.Info(i.logger).Log("msg", "scrape manager stopped")
return err
},
func(err error) {
// The scrape manager is closed first to allow us to write staleness
// markers without receiving new samples from scraping in the meantime.
level.Info(i.logger).Log("msg", "stopping scrape manager...")
sm.Stop()
// On a graceful shutdown, write staleness markers. If something went
// wrong, then the instance will be relaunched.
if err == nil && cfg.WriteStaleOnShutdown {
level.Info(i.logger).Log("msg", "writing staleness markers...")
err := i.wal.WriteStalenessMarkers(i.getRemoteWriteTimestamp)
if err != nil {
level.Error(i.logger).Log("msg", "error writing staleness markers", "err", err)
}
}
level.Info(i.logger).Log("msg", "closing storage...")
if err := i.storage.Close(); err != nil {
level.Error(i.logger).Log("msg", "error stopping storage", "err", err)
}
},
)
}
level.Debug(i.logger).Log("msg", "running instance", "name", cfg.Name)
err := rg.Run()
if err != nil {
level.Error(i.logger).Log("msg", "agent instance stopped with error", "err", err)
}
return err
}
// initialize sets up the various Prometheus components with their initial
// settings. initialize will be called each time the Instance is run. Prometheus
// components cannot be reused after they are stopped so we need to recreate them
// each run.
func (i *Instance) initialize(ctx context.Context, reg prometheus.Registerer, cfg *Config) error {
i.mut.Lock()
defer i.mut.Unlock()
var err error
i.wal, err = i.newWal(reg)
if err != nil {
return fmt.Errorf("error creating WAL: %w", err)
}
i.discovery, err = i.newDiscoveryManager(ctx, cfg)
if err != nil {
return fmt.Errorf("error creating discovery manager: %w", err)
}
i.readyScrapeManager = &scrape.ReadyScrapeManager{}
// Setup the remote storage
remoteLogger := log.With(i.logger, "component", "remote")
i.remoteStore = remote.NewStorage(remoteLogger, reg, i.wal.StartTime, i.wal.Directory(), cfg.RemoteFlushDeadline, i.readyScrapeManager)
err = i.remoteStore.ApplyConfig(&config.Config{
GlobalConfig: i.globalCfg,
RemoteWriteConfigs: cfg.RemoteWrite,
})
if err != nil {
return fmt.Errorf("failed applying config to remote storage: %w", err)
}
i.storage = storage.NewFanout(i.logger, i.wal, i.remoteStore)
scrapeManager := newScrapeManager(log.With(i.logger, "component", "scrape manager"), i.storage)
err = scrapeManager.ApplyConfig(&config.Config{
GlobalConfig: i.globalCfg,
ScrapeConfigs: cfg.ScrapeConfigs,
})
if err != nil {
return fmt.Errorf("failed applying config to scrape manager: %w", err)
}
i.readyScrapeManager.Set(scrapeManager)
return nil
}
// Update accepts a new Config for the Instance and will dynamically update any
// running Prometheus components with the new values from Config. Update will
// return an ErrInvalidUpdate if the Update could not be applied.
func (i *Instance) Update(c Config) error {
i.mut.Lock()
defer i.mut.Unlock()
// It's only (currently) valid to update scrape_configs and remote_write, so
// if any other field has changed here, return the error.
var err error
switch {
// This first case should never happen in practice but it's included here for
	// completeness' sake.
case i.cfg.Name != c.Name:
err = errImmutableField{Field: "name"}
case i.cfg.HostFilter != c.HostFilter:
err = errImmutableField{Field: "host_filter"}
case i.cfg.WALTruncateFrequency != c.WALTruncateFrequency:
err = errImmutableField{Field: "wal_truncate_frequency"}
case i.cfg.RemoteFlushDeadline != c.RemoteFlushDeadline:
err = errImmutableField{Field: "remote_flush_deadline"}
case i.cfg.WriteStaleOnShutdown != c.WriteStaleOnShutdown:
err = errImmutableField{Field: "write_stale_on_shutdown"}
}
if err != nil {
return ErrInvalidUpdate{Inner: err}
}
// Check to see if the components exist yet.
if i.discovery == nil || i.remoteStore == nil || i.readyScrapeManager == nil {
return ErrInvalidUpdate{
Inner: fmt.Errorf("cannot dynamically update because instance is not running"),
}
}
// NOTE(rfratto): Prometheus applies configs in a specific order to ensure
// flow from service discovery down to the WAL continues working properly.
//
// Keep the following order below:
//
// 1. Remote Store
// 2. Scrape Manager
// 3. Discovery Manager
err = i.remoteStore.ApplyConfig(&config.Config{
GlobalConfig: i.globalCfg,
RemoteWriteConfigs: c.RemoteWrite,
})
if err != nil {
return fmt.Errorf("error applying new remote_write configs: %w", err)
}
sm, err := i.readyScrapeManager.Get()
if err != nil {
return fmt.Errorf("couldn't get scrape manager to apply new scrape configs: %w", err)
}
err = sm.ApplyConfig(&config.Config{
GlobalConfig: i.globalCfg,
ScrapeConfigs: c.ScrapeConfigs,
})
if err != nil {
return fmt.Errorf("error applying updated configs to scrape manager: %w", err)
}
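	// Rebuild the service discovery configs, keyed by job name, and apply them last per the ordering noted above.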
sdConfigs := map[string]discovery.Configs{}
for _, v := range c.ScrapeConfigs {
sdConfigs[v.JobName] = v.ServiceDiscoveryConfigs
}
err = i.discovery.Manager.ApplyConfig(sdConfigs)
if err != nil {
return fmt.Errorf("failed applying configs to discovery manager: %w", err)
}
i.cfg = c
return nil
}
// TargetsActive returns the set of active targets from the scrape manager. Returns nil
// if the scrape manager is not ready yet.
func (i *Instance) TargetsActive() map[string][]*scrape.Target {
i.mut.Lock()
defer i.mut.Unlock()
if i.readyScrapeManager == nil {
return nil
}
mgr, err := i.readyScrapeManager.Get()
if err == scrape.ErrNotReady {
return nil
} else if err != nil {
level.Error(i.logger).Log("msg", "failed to get scrape manager when collecting active targets", "err", err)
return nil
}
return mgr.TargetsActive()
}
type discoveryService struct {
Manager *discovery.Manager
RunFunc func() error
StopFunc func(err error)
SyncChFunc func() GroupChannel
}
func (s *discoveryService) Run() error { return s.RunFunc() }
func (s *discoveryService) Stop(err error) { s.StopFunc(err) }
func (s *discoveryService) SyncCh() GroupChannel { return s.SyncChFunc() }
// newDiscoveryManager returns an implementation of a runnable service
// that outputs discovered targets to a channel. The implementation
// uses the Prometheus Discovery Manager. Targets will be filtered
// if the instance is configured to perform host filtering.
func (i *Instance) newDiscoveryManager(ctx context.Context, cfg *Config) (*discoveryService, error) {
ctx, cancel := context.WithCancel(ctx)
logger := log.With(i.logger, "component", "discovery manager")
manager := discovery.NewManager(ctx, logger, discovery.Name("scrape"))
// TODO(rfratto): refactor this to a function?
	// TODO(rfratto): ensure job name is unique
c := map[string]discovery.Configs{}
for _, v := range cfg.ScrapeConfigs {
c[v.JobName] = v.ServiceDiscoveryConfigs
}
err := manager.ApplyConfig(c)
if err != nil {
cancel()
level.Error(i.logger).Log("msg", "failed applying config to discovery manager", "err", err)
return nil, fmt.Errorf("failed applying config to discovery manager: %w", err)
}
rg := runGroupWithContext(ctx)
// Run the manager
rg.Add(func() error {
err := manager.Run()
level.Info(i.logger).Log("msg", "discovery manager stopped")
return err
}, func(err error) {
level.Info(i.logger).Log("msg", "stopping discovery manager...")
cancel()
})
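	// By default, expose the discovery manager's own sync channel; host filtering below may replace it.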
syncChFunc := manager.SyncCh
// If host filtering is enabled, run it and use its channel for discovered
// targets.
if cfg.HostFilter {
hostname, err := Hostname()
if err != nil {
cancel()
return nil, fmt.Errorf("failed to create host filterer: %w", err)
}
level.Debug(i.logger).Log("msg", "creating host filterer", "for_host", hostname)
filterer := NewHostFilter(hostname, cfg.HostFilterRelabelConfigs)
rg.Add(func() error {
filterer.Run(manager.SyncCh())
level.Info(i.logger).Log("msg", "host filterer stopped")
return nil
}, func(_ error) {
level.Info(i.logger).Log("msg", "stopping host filterer...")
filterer.Stop()
})
syncChFunc = filterer.SyncCh
}
return &discoveryService{
Manager: manager,
RunFunc: rg.Run,
StopFunc: rg.Stop,
SyncChFunc: syncChFunc,
}, nil
}
func (i *Instance) truncateLoop(ctx context.Context, wal walStorage, cfg *Config) {
for {
select {
case <-ctx.Done():
return
case <-time.After(cfg.WALTruncateFrequency):
// The timestamp ts is used to determine which series are not receiving
// samples and may be deleted from the WAL. Their most recent append
			// timestamp is compared to ts, and if that timestamp is older than ts,
// they are considered inactive and may be deleted.
//
// Subtracting a duration from ts will delay when it will be considered
// inactive and scheduled for deletion.
ts := i.getRemoteWriteTimestamp()
if ts == 0 {
level.Debug(i.logger).Log("msg", "can't truncate the WAL yet")
continue
}
ts -= i.cfg.MinWALTime.Milliseconds()
// Network issues can prevent the result of getRemoteWriteTimestamp from
// changing. We don't want data in the WAL to grow forever, so we set a cap
// on the maximum age data can be. If our ts is older than this cutoff point,
// we'll shift it forward to start deleting very stale data.
if maxTS := timestamp.FromTime(time.Now().Add(-i.cfg.MaxWALTime)); ts < maxTS {
ts = maxTS
}
level.Debug(i.logger).Log("msg", "truncating the WAL", "ts", ts)
err := wal.Truncate(ts)
if err != nil {
// The only issue here is larger disk usage and a greater replay time,
// so we'll only log this as a warning.
level.Warn(i.logger).Log("msg", "could not truncate WAL", "err", err)
}
}
}
}
// getRemoteWriteTimestamp looks up the last successful remote write timestamp.
// This is passed to wal.Storage for its truncation. If no remote write sections
// are configured, getRemoteWriteTimestamp returns the current time.
func (i *Instance) getRemoteWriteTimestamp() int64 {
i.mut.Lock()
defer i.mut.Unlock()
if len(i.cfg.RemoteWrite) == 0 {
return timestamp.FromTime(time.Now())
}
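	// Collect the remote_write queue names so we can look up each queue's highest-sent-timestamp metric.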
lbls := make([]string, len(i.cfg.RemoteWrite))
for idx := 0; idx < len(lbls); idx++ {
lbls[idx] = i.cfg.RemoteWrite[idx].Name
}
vals, err := i.vc.GetValues("remote_name", lbls...)
if err != nil {
level.Error(i.logger).Log("msg", "could not get remote write timestamps", "err", err)
return 0
}
if len(vals) == 0 {
return 0
}
// We use the lowest value from the metric since we don't want to delete any
// segments from the WAL until they've been written by all of the remote_write
// configurations.
ts := int64(math.MaxInt64)
for _, val := range vals {
ival := int64(val)
if ival < ts {
ts = ival
}
}
// Convert to the millisecond precision which is used by the WAL
return ts * 1000
}
// walStorage is an interface satisfied by wal.Storage, and created for testing.
type walStorage interface {
// walStorage implements Queryable/ChunkQueryable for compatibility, but is unused.
storage.Queryable
storage.ChunkQueryable
Directory() string
StartTime() (int64, error)
WriteStalenessMarkers(remoteTsFunc func() int64) error
Appender(context.Context) storage.Appender
Truncate(mint int64) error
Close() error
}
type unregisterAllRegisterer struct {
wrap prometheus.Registerer
cs map[prometheus.Collector]struct{}
}
// Register implements prometheus.Registerer.
func (u *unregisterAllRegisterer) Register(c prometheus.Collector) error {
if u.wrap == nil {
return nil
}
err := u.wrap.Register(c)
if err != nil {
return err
}
if u.cs == nil {
u.cs = make(map[prometheus.Collector]struct{})
}
u.cs[c] = struct{}{}
return nil
}
// MustRegister implements prometheus.Registerer.
func (u *unregisterAllRegisterer) MustRegister(cs ...prometheus.Collector) {
for _, c := range cs {
if err := u.Register(c); err != nil {
panic(err)
}
}
}
// Unregister implements prometheus.Registerer.
func (u *unregisterAllRegisterer) Unregister(c prometheus.Collector) bool {
if u.wrap == nil {
return false
}
ok := u.wrap.Unregister(c)
if ok && u.cs != nil {
delete(u.cs, c)
}
return ok
}
// UnregisterAll unregisters all collectors that were registered through the
// Registerer.
func (u *unregisterAllRegisterer) UnregisterAll() {
if u.cs == nil {
return
}
for c := range u.cs {
u.Unregister(c)
}
}
// Hostname retrieves the hostname identifying the machine the process is
// running on. It will return the value of $HOSTNAME, if defined, and fall
// back to Go's os.Hostname.
func Hostname() (string, error) {
hostname := os.Getenv("HOSTNAME")
if hostname != "" {
return hostname, nil
}
hostname, err := os.Hostname()
if err != nil {
return "", fmt.Errorf("failed to get hostname: %w", err)
}
return hostname, nil
}
func getHash(data interface{}) (string, error) {
bytes, err := json.Marshal(data)
if err != nil {
return "", err
}
hash := md5.Sum(bytes)
return hex.EncodeToString(hash[:]), nil
}
// MetricValueCollector wraps around a Gatherer and provides utilities for
// pulling metric values from a given metric name and label matchers.
//
// This is used by the agent instances to find the most recent timestamp
// successfully remote_written to for purposes of safely truncating the WAL.
//
// MetricValueCollector is only intended for use with Gauges and Counters.
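//
// A hypothetical usage sketch (mirroring how getRemoteWriteTimestamp uses it below):
//
//	vc := NewMetricValueCollector(prometheus.DefaultGatherer, "queue_highest_sent_timestamp_seconds")
//	vals, err := vc.GetValues("remote_name", "instance-a-1b2c3d")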
type MetricValueCollector struct {
g prometheus.Gatherer
match string
}
// NewMetricValueCollector creates a new MetricValueCollector.
func NewMetricValueCollector(g prometheus.Gatherer, match string) *MetricValueCollector {
return &MetricValueCollector{
g: g,
match: match,
}
}
// GetValues looks through all the tracked metrics and returns all values
// for metrics that match some key value pair.
func (vc *MetricValueCollector) GetValues(label string, labelValues ...string) ([]float64, error) {
vals := []float64{}
families, err := vc.g.Gather()
if err != nil {
return nil, err
}
for _, family := range families {
if !strings.Contains(family.GetName(), vc.match) {
continue
}
for _, m := range family.GetMetric() {
matches := false
for _, l := range m.GetLabel() {
if l.GetName() != label {
continue
}
v := l.GetValue()
for _, match := range labelValues {
if match == v {
matches = true
break
}
}
break
}
if !matches {
continue
}
var value float64
if m.Gauge != nil {
value = m.Gauge.GetValue()
} else if m.Counter != nil {
value = m.Counter.GetValue()
} else if m.Untyped != nil {
value = m.Untyped.GetValue()
} else {
return nil, errors.New("tracking unexpected metric type")
}
vals = append(vals, value)
}
}
return vals, nil
}
func newScrapeManager(logger log.Logger, app storage.Appendable) *scrape.Manager {
// scrape.NewManager modifies a global variable in Prometheus. To avoid a
// data race of modifying that global, we lock a mutex here briefly.
managerMtx.Lock()
defer managerMtx.Unlock()
return scrape.NewManager(logger, app)
}
type runGroupContext struct {
cancel context.CancelFunc
g *run.Group
}
// runGroupWithContext creates a new run.Group that will be stopped if the
// context gets canceled in addition to the normal behavior of stopping
// when any of the actors stop.
func runGroupWithContext(ctx context.Context) *runGroupContext {
ctx, cancel := context.WithCancel(ctx)
var g run.Group
g.Add(func() error {
<-ctx.Done()
return nil
}, func(_ error) {
cancel()
})
return &runGroupContext{cancel: cancel, g: &g}
}
func (rg *runGroupContext) Add(execute func() error, interrupt func(error)) {
rg.g.Add(execute, interrupt)
}
func (rg *runGroupContext) Run() error { return rg.g.Run() }
func (rg *runGroupContext) Stop(_ error) { rg.cancel() }
| [
"\"HOSTNAME\""
]
| []
| [
"HOSTNAME"
]
| [] | ["HOSTNAME"] | go | 1 | 0 | |
img2lambda/extract/repack_image.go | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: MIT-0
package extract
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"github.com/awslabs/aws-lambda-container-image-converter/img2lambda/types"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/pkg/blobinfocache"
"github.com/containers/image/v5/transports/alltransports"
imgtypes "github.com/containers/image/v5/types"
zglob "github.com/mattn/go-zglob"
"github.com/mholt/archiver"
"github.com/pkg/errors"
)
// Converts container image to Lambda layer and function deployment package archive files
func RepackImage(imageName string, layerOutputDir string) (layers []types.LambdaLayer, function *types.LambdaDeploymentPackage, retErr error) {
log.Printf("Parsing the image %s", imageName)
// Get image's layer data from image name
ref, err := alltransports.ParseImageName(imageName)
if err != nil {
return nil, nil, err
}
sys := &imgtypes.SystemContext{}
dockerHost := os.Getenv("DOCKER_HOST")
// Support communicating with Docker for Windows over local plain-text TCP socket
if dockerHost == "tcp://localhost:2375" || dockerHost == "tcp://127.0.0.1:2375" {
sys.DockerDaemonHost = strings.Replace(dockerHost, "tcp://", "http://", -1)
}
// Support communicating with Docker Toolbox over encrypted socket
if strings.HasPrefix(dockerHost, "tcp://192.168.") && strings.HasSuffix(dockerHost, ":2376") {
sys.DockerDaemonHost = strings.Replace(dockerHost, "tcp://", "https://", -1)
}
ctx := context.Background()
cache := blobinfocache.DefaultCache(sys)
rawSource, err := ref.NewImageSource(ctx, sys)
if err != nil {
return nil, nil, err
}
src, err := image.FromSource(ctx, sys, rawSource)
if err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
return nil, nil, errors.Wrapf(err, " (close error: %v)", closeErr)
}
return nil, nil, err
}
defer func() {
if err := src.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
}
}()
return repackImage(&repackOptions{
ctx: ctx,
cache: cache,
imageSource: src,
rawImageSource: rawSource,
imageName: imageName,
layerOutputDir: layerOutputDir,
})
}
type repackOptions struct {
ctx context.Context
cache imgtypes.BlobInfoCache
imageSource imgtypes.ImageCloser
rawImageSource imgtypes.ImageSource
imageName string
layerOutputDir string
}
func repackImage(opts *repackOptions) (layers []types.LambdaLayer, function *types.LambdaDeploymentPackage, retErr error) {
layerInfos := opts.imageSource.LayerInfos()
log.Printf("Image %s has %d layers", opts.imageName, len(layerInfos))
// Unpack and inspect each image layer, copy relevant files to new Lambda layer or to a Lambda deployment package
if err := os.MkdirAll(opts.layerOutputDir, 0777); err != nil {
return nil, nil, err
}
function = &types.LambdaDeploymentPackage{FileCount: 0, File: filepath.Join(opts.layerOutputDir, "function.zip")}
functionZip, functionFile, err := startZipFile(function.File)
if err != nil {
return nil, nil, fmt.Errorf("starting zip file: %v", err)
}
defer func() {
if err := functionZip.Close(); err != nil {
retErr = errors.Wrapf(err, " (zip close error: %v)", err)
}
if err := functionFile.Close(); err != nil {
retErr = errors.Wrapf(err, " (file close error: %v)", err)
}
}()
lambdaLayerNum := 1
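	// Each image layer yields at most one Lambda layer zip (only if it contains relevant files);
	// matching function files from every layer are appended to the single function.zip.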
for _, layerInfo := range layerInfos {
lambdaLayerFilename := filepath.Join(opts.layerOutputDir, fmt.Sprintf("layer-%d.zip", lambdaLayerNum))
layerStream, _, err := opts.rawImageSource.GetBlob(opts.ctx, layerInfo, opts.cache)
if err != nil {
return nil, function, err
}
defer layerStream.Close()
layerFileCreated, layerFunctionFileCount, err := repackLayer(lambdaLayerFilename, functionZip, layerStream, false)
if err != nil {
tarErr := err
// tar extraction failed, try tar.gz
layerStream, _, err = opts.rawImageSource.GetBlob(opts.ctx, layerInfo, opts.cache)
if err != nil {
return nil, function, err
}
defer layerStream.Close()
layerFileCreated, layerFunctionFileCount, err = repackLayer(lambdaLayerFilename, functionZip, layerStream, true)
if err != nil {
return nil, function, fmt.Errorf("could not read layer with tar nor tar.gz: %v, %v", err, tarErr)
}
}
function.FileCount += layerFunctionFileCount
if layerFunctionFileCount == 0 {
log.Printf("Did not extract any Lambda function files from image layer %s (no relevant files found)", string(layerInfo.Digest))
}
if layerFileCreated {
log.Printf("Created Lambda layer file %s from image layer %s", lambdaLayerFilename, string(layerInfo.Digest))
lambdaLayerNum++
layers = append(layers, types.LambdaLayer{Digest: string(layerInfo.Digest), File: lambdaLayerFilename})
} else {
log.Printf("Did not create a Lambda layer file from image layer %s (no relevant files found)", string(layerInfo.Digest))
}
}
log.Printf("Extracted %d Lambda function files for image %s", function.FileCount, opts.imageName)
if function.FileCount > 0 {
log.Printf("Created Lambda function deployment package %s", function.File)
}
log.Printf("Created %d Lambda layer files for image %s", len(layers), opts.imageName)
return layers, function, retErr
}
// Converts container image layer archive (tar) to Lambda layer archive (zip).
// Filters files from the source and only writes a new archive if at least
// one file in the source matches the filter (i.e. does not create empty archives).
func repackLayer(outputFilename string, functionZip *archiver.Zip, layerContents io.Reader, isGzip bool) (lambdaLayerCreated bool, functionFileCount int, retError error) {
t := archiver.NewTar()
contentsReader := layerContents
var err error
if isGzip {
gzr, err := gzip.NewReader(layerContents)
if err != nil {
return false, 0, fmt.Errorf("could not create gzip reader for layer: %v", err)
}
defer gzr.Close()
contentsReader = gzr
}
err = t.Open(contentsReader, 0)
if err != nil {
return false, 0, fmt.Errorf("opening layer tar: %v", err)
}
defer t.Close()
// Walk the files in the tar
var z *archiver.Zip
var out *os.File
defer func() {
if z != nil {
if err := z.Close(); err != nil {
retError = errors.Wrapf(err, " (zip close error: %v)", err)
}
}
if out != nil {
if err := out.Close(); err != nil {
retError = errors.Wrapf(err, " (file close error: %v)", err)
}
}
}()
for {
// Get next file in tar
f, err := t.Read()
if err == io.EOF {
break
}
if err != nil {
return false, 0, fmt.Errorf("opening next file in layer tar: %v", err)
}
// Determine if this file should be repacked into a Lambda layer
repack, err := shouldRepackLayerFileToLambdaLayer(f)
if err != nil {
return false, 0, fmt.Errorf("filtering file in layer tar: %v", err)
}
if repack {
if z == nil {
z, out, err = startZipFile(outputFilename)
if err != nil {
return false, 0, fmt.Errorf("starting zip file: %v", err)
}
}
err = repackLayerFile(f, z)
}
if err != nil {
return false, 0, fmt.Errorf("walking %s in layer tar: %v", f.Name(), err)
}
// Determine if this file should be repacked into a Lambda function package
repack, err = shouldRepackLayerFileToLambdaFunction(f)
if err != nil {
return false, 0, fmt.Errorf("filtering file in layer tar: %v", err)
}
if repack {
err = repackLayerFile(f, functionZip)
functionFileCount++
}
if err != nil {
return false, 0, fmt.Errorf("walking %s in layer tar: %v", f.Name(), err)
}
}
return (z != nil), functionFileCount, nil
}
func startZipFile(destination string) (zip *archiver.Zip, zipFile *os.File, err error) {
z := archiver.NewZip()
out, err := os.Create(destination)
if err != nil {
return nil, nil, fmt.Errorf("creating %s: %v", destination, err)
}
err = z.Create(out)
if err != nil {
return nil, nil, fmt.Errorf("creating zip: %v", err)
}
return z, out, nil
}
func getLayerFileName(f archiver.File) (name string, err error) {
header, ok := f.Header.(*tar.Header)
if !ok {
return "", fmt.Errorf("expected header to be *tar.Header but was %T", f.Header)
}
if f.IsDir() || header.Typeflag == tar.TypeDir {
return "", nil
}
// Ignore whiteout files
if strings.HasPrefix(f.Name(), ".wh.") {
return "", nil
}
return header.Name, nil
}
func shouldRepackLayerFileToLambdaLayer(f archiver.File) (should bool, err error) {
filename, err := getLayerFileName(f)
if err != nil {
return false, err
}
if filename == "" {
return false, nil
}
// Only extract files that can be used for Lambda custom runtimes
return zglob.Match("opt/**/**", filename)
}
func shouldRepackLayerFileToLambdaFunction(f archiver.File) (should bool, err error) {
filename, err := getLayerFileName(f)
if err != nil {
return false, err
}
if filename == "" {
return false, nil
}
// Only extract files that can be used for Lambda deployment packages
return zglob.Match("var/task/**/**", filename)
}
func repackLayerFile(f archiver.File, z *archiver.Zip) error {
hdr, ok := f.Header.(*tar.Header)
if !ok {
return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header)
}
filename := strings.TrimPrefix(filepath.ToSlash(hdr.Name), "opt/")
filename = strings.TrimPrefix(filename, "var/task/")
switch hdr.Typeflag {
case tar.TypeReg, tar.TypeRegA, tar.TypeChar, tar.TypeBlock, tar.TypeFifo, tar.TypeSymlink, tar.TypeLink:
return z.Write(archiver.File{
FileInfo: archiver.FileInfo{
FileInfo: f.FileInfo,
CustomName: filename,
},
ReadCloser: f,
})
case tar.TypeXGlobalHeader:
return nil // ignore
default:
return fmt.Errorf("%s: unknown type flag: %c", hdr.Name, hdr.Typeflag)
}
}
| [
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
pkg/local/local.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
log "github.com/sirupsen/logrus"
"os"
)
// Local the Local struct
type Local struct {
driver *csicommon.CSIDriver
endpoint string
idServer *identityServer
nodeServer csi.NodeServer
controllerServer *controllerServer
cap []*csi.VolumeCapability_AccessMode
cscap []*csi.ControllerServiceCapability
}
const (
defaultDriverName = "localplugin.csi.alibabacloud.com"
csiVersion = "1.0.0"
)
// initDriver checks for the persistent volume file and loads all found volumes
// into a memory structure
func initDriver() {
}
// NewDriver create the identity/node/controller server and disk driver
func NewDriver(nodeID, endpoint string) *Local {
initDriver()
tmplvm := &Local{}
tmplvm.endpoint = endpoint
if nodeID == "" {
nodeID = GetMetaData(InstanceID)
log.Infof("Use node id : %s", nodeID)
}
	// Set the driver name; allow it to be overridden via the DRIVER_VENDOR environment variable.
driverName := defaultDriverName
tmpValue := os.Getenv("DRIVER_VENDOR")
if tmpValue != "" {
driverName = tmpValue
}
csiDriver := csicommon.NewCSIDriver(driverName, csiVersion, nodeID)
tmplvm.driver = csiDriver
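	// Advertise the controller capabilities and volume access modes this plugin supports.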
tmplvm.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
})
tmplvm.driver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
// Create GRPC servers
tmplvm.idServer = newIdentityServer(tmplvm.driver)
tmplvm.nodeServer = NewNodeServer(tmplvm.driver, driverName, nodeID)
tmplvm.controllerServer = newControllerServer(tmplvm.driver)
return tmplvm
}
// Run start a new server
func (lvm *Local) Run() {
server := csicommon.NewNonBlockingGRPCServer()
server.Start(lvm.endpoint, lvm.idServer, lvm.controllerServer, lvm.nodeServer)
server.Wait()
}
| [
"\"DRIVER_VENDOR\""
]
| []
| [
"DRIVER_VENDOR"
]
| [] | ["DRIVER_VENDOR"] | go | 1 | 0 | |
internal/config/config.go | package config
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
var (
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
// Default output plugins
outputDefaults = []string{"influxdb"}
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
Tags map[string]string
InputFilters []string
OutputFilters []string
Path string
Pathdir string
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
Processors models.RunningProcessors
}
func NewConfig() *Config {
c := &Config{
// Agent defaults:
Agent: &AgentConfig{
Interval: internal.Duration{Duration: 10 * time.Second},
RoundInterval: true,
FlushInterval: internal.Duration{Duration: 10 * time.Second},
},
Path: "",
Pathdir: "",
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
return c
}
type AgentConfig struct {
// Interval at which to gather information
Interval internal.Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
// service input to set the timestamp at the appropriate precision.
Precision internal.Duration
// CollectionJitter is used to jitter the collection by a random amount.
// Each plugin will sleep for a random time within jitter before collecting.
// This can be used to avoid many plugins querying things like sysfs at the
// same time, which can have a measurable effect on the system.
CollectionJitter internal.Duration
// FlushInterval is the Interval at which to flush data
FlushInterval internal.Duration
// FlushJitter Jitters the flush interval by a random amount.
// This is primarily to avoid large write spikes for users running a large
// number of telegraf instances.
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter internal.Duration
	// MetricBatchSize is the maximum number of metrics that is written to an
// output plugin in one call.
MetricBatchSize int
// MetricBufferLimit is the max number of metrics that each output plugin
// will cache. The buffer is cleared when a successful write occurs. When
// full, the oldest metrics will be overwritten. This number should be a
// multiple of MetricBatchSize. Due to current implementation, this could
// not be less than 2 times MetricBatchSize.
MetricBufferLimit int
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval.
FlushBufferWhenFull bool
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
Debug bool
// Logfile specifies the file to send logs to
Logfile string
// Quiet is the option for running in quiet mode
Quiet bool
Hostname string
OmitHostname bool
}
// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Name())
}
return name
}
// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
var name []string
for _, aggregator := range c.Aggregators {
name = append(name, aggregator.Name())
}
return name
}
// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
var name []string
for _, processor := range c.Processors {
name = append(name, processor.Name)
}
return name
}
// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Name)
}
return name
}
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
`
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
var inputHeader = `
###############################################################################
# INPUT PLUGINS #
###############################################################################
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
fmt.Printf(header)
// print output plugins
if len(outputFilters) != 0 {
printFilteredOutputs(outputFilters, false)
} else {
printFilteredOutputs(outputDefaults, false)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !sliceContains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredOutputs(pnames, true)
}
// print processor plugins
fmt.Printf(processorHeader)
if len(processorFilters) != 0 {
printFilteredProcessors(processorFilters, false)
} else {
pnames := []string{}
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredProcessors(pnames, true)
}
	// print aggregator plugins
fmt.Printf(aggregatorHeader)
if len(aggregatorFilters) != 0 {
printFilteredAggregators(aggregatorFilters, false)
} else {
pnames := []string{}
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredAggregators(pnames, true)
}
// print input plugins
fmt.Printf(inputHeader)
if len(inputFilters) != 0 {
printFilteredInputs(inputFilters, false)
} else {
printFilteredInputs(inputDefaults, false)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !sliceContains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredInputs(pnames, true)
}
}
func printFilteredProcessors(processorFilters []string, commented bool) {
// Filter processors
var pnames []string
for pname := range processors.Processors {
if sliceContains(pname, processorFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Outputs
for _, pname := range pnames {
creator := processors.Processors[pname]
output := creator()
printConfig(pname, output, "processors", commented)
}
}
func printFilteredAggregators(aggregatorFilters []string, commented bool) {
// Filter outputs
var anames []string
for aname := range aggregators.Aggregators {
if sliceContains(aname, aggregatorFilters) {
anames = append(anames, aname)
}
}
sort.Strings(anames)
// Print Outputs
for _, aname := range anames {
creator := aggregators.Aggregators[aname]
output := creator()
printConfig(aname, output, "aggregators", commented)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if sliceContains(pname, inputFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
// Print Inputs
for _, pname := range pnames {
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
}
printConfig(pname, input, "inputs", commented)
}
// Print Service Inputs
if len(servInputs) == 0 {
return
}
sort.Strings(servInputNames)
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
}
}
func printFilteredOutputs(outputFilters []string, commented bool) {
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs", commented)
}
}
type printer interface {
Description() string
SampleConfig() string
}
func printConfig(name string, p printer, op string, commented bool) {
comment := ""
if commented {
comment = "# "
}
fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n%s # no configuration\n\n", comment)
} else {
lines := strings.Split(config, "\n")
for i, line := range lines {
if i == 0 || i == len(lines)-1 {
fmt.Print("\n")
continue
}
fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
}
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs", false)
} else {
return errors.New(fmt.Sprintf("Input %s not found", name))
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs", false)
} else {
return errors.New(fmt.Sprintf("Output %s not found", name))
}
return nil
}
func (c *Config) LoadDirectory(path string) error {
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info == nil {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
				// skip Kubernetes mounts, preventing the same config from being loaded twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
return nil
}
err := c.LoadConfig(thispath)
if err != nil {
return err
}
return nil
}
return filepath.Walk(path, walkfn)
}
// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf
//
func getDefaultConfigPath() (string, error) {
envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
etcfile = `C:\Program Files\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil {
log.Printf("I! Using config file: %s", path)
return path, nil
}
}
// if we got here, we didn't find a file in a default location
return "", fmt.Errorf("No config file specified, and could not find one"+
" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
var err error
if path == "" {
if path, err = getDefaultConfigPath(); err != nil {
return err
}
}
data, err := loadConfig(path)
if err != nil {
return fmt.Errorf("Error loading %s, %s", path, err)
}
tbl, err := parseConfig(data)
if err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
// Parse tags tables first:
for _, tableName := range []string{"tags", "global_tags"} {
if val, ok := tbl.Fields[tableName]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("E! Could not parse [global_tags] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
}
// Parse agent table:
if val, ok := tbl.Fields["agent"]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("E! Could not parse [agent] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
if !c.Agent.OmitHostname {
if c.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return err
}
c.Agent.Hostname = hostname
}
c.Tags["host"] = c.Agent.Hostname
}
// Parse all the rest of the plugins:
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
switch name {
case "agent", "global_tags", "tags":
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "processors":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
case "aggregators":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s, file %s",
pluginName, path)
}
}
		// Assume it's an input plugin for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", path, err)
}
}
}
if len(c.Processors) > 1 {
sort.Sort(c.Processors)
}
return nil
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
func loadConfig(config string) ([]byte, error) {
u, err := url.Parse(config)
if err != nil {
return nil, err
}
switch u.Scheme {
case "https", "http":
return fetchConfig(u)
default:
// If it isn't an http(s) scheme, try it as a file.
}
return ioutil.ReadFile(config)
}
func fetchConfig(u *url.URL) ([]byte, error) {
v := os.Getenv("INFLUX_TOKEN")
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", "Token "+v)
req.Header.Add("Accept", "application/toml")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
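// Illustrative usage (a sketch, not part of the original file; the URL and token
// value are assumptions): loadConfig accepts either a local path or an http(s)
// URL, and fetchConfig authenticates remote requests with the INFLUX_TOKEN
// environment variable.
//
//	os.Setenv("INFLUX_TOKEN", "example-token")
//	contents, err := loadConfig("https://example.com/telegraf.toml")
//	if err != nil {
//		log.Fatalf("E! %s", err)
//	}
//	tbl, err := parseConfig(contents)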
// parseConfig loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
func parseConfig(contents []byte) (*ast.Table, error) {
contents = trimBOM(contents)
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
return toml.Parse(contents)
}
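// Example of the substitution performed above (illustrative; the variable name
// MYHOST is an assumption): with MYHOST=localhost set in the environment, a line
// such as
//
//	urls = ["http://$MYHOST:8086"]
//
// is rewritten to
//
//	urls = ["http://localhost:8086"]
//
// before toml.Parse runs. Variables that are not set are left untouched.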
func (c *Config) addAggregator(name string, table *ast.Table) error {
creator, ok := aggregators.Aggregators[name]
if !ok {
return fmt.Errorf("Undefined but requested aggregator: %s", name)
}
aggregator := creator()
conf, err := buildAggregator(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, aggregator); err != nil {
return err
}
c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
return nil
}
func (c *Config) addProcessor(name string, table *ast.Table) error {
creator, ok := processors.Processors[name]
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
processor := creator()
processorConfig, err := buildProcessor(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, processor); err != nil {
return err
}
rf := &models.RunningProcessor{
Name: name,
Processor: processor,
Config: processorConfig,
}
c.Processors = append(c.Processors, rf)
return nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
// If the output has a SetSerializer function, then this means it can write
// arbitrary types of output, so build the serializer and set it.
switch t := output.(type) {
case serializers.SerializerOutput:
serializer, err := buildSerializer(name, table)
if err != nil {
return err
}
t.SetSerializer(serializer)
}
outputConfig, err := buildOutput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, output); err != nil {
return err
}
ro := models.NewRunningOutput(name, output, outputConfig,
c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested input: %s", name)
}
input := creator()
// If the input has a SetParser function, then this means it can accept
// arbitrary types of input, so build the parser and set it.
switch t := input.(type) {
case parsers.ParserInput:
parser, err := buildParser(name, table)
if err != nil {
return err
}
t.SetParser(parser)
}
switch t := input.(type) {
case parsers.ParserFuncInput:
config, err := getParserConfig(name, table)
if err != nil {
return err
}
t.SetParserFunc(func() (parsers.Parser, error) {
return parsers.NewParser(config)
})
}
pluginConfig, err := buildInput(name, table)
if err != nil {
return err
}
if err := toml.UnmarshalTable(table, input); err != nil {
return err
}
rp := models.NewRunningInput(input, pluginConfig)
rp.SetDefaultTags(c.Tags)
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
conf := &models.AggregatorConfig{
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
}
if node, ok := tbl.Fields["period"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Period = dur
}
}
}
if node, ok := tbl.Fields["delay"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
conf.Delay = dur
}
}
}
if node, ok := tbl.Fields["drop_original"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
conf.DropOriginal, err = strconv.ParseBool(b.Value)
if err != nil {
log.Printf("Error parsing boolean value for %s: %s\n", name, err)
}
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
conf.NameOverride = str.Value
}
}
}
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
}
}
}
delete(tbl.Fields, "period")
delete(tbl.Fields, "delay")
delete(tbl.Fields, "drop_original")
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "tags")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
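// For reference, a minimal aggregator table exercising the fields parsed above
// could look like this (plugin name and values are illustrative assumptions):
//
//	[[aggregators.minmax]]
//	  period = "30s"
//	  delay = "100ms"
//	  drop_original = false
//	  name_prefix = "agg_"
//
// The consumed keys are deleted from the table so toml.UnmarshalTable later sees
// only plugin-specific settings.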
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
conf := &models.ProcessorConfig{Name: name}
if node, ok := tbl.Fields["order"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Integer); ok {
var err error
conf.Order, err = strconv.ParseInt(b.Value, 10, 64)
if err != nil {
log.Printf("Error parsing int value for %s: %s\n", name, err)
}
}
}
}
delete(tbl.Fields, "order")
var err error
conf.Filter, err = buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func buildFilter(tbl *ast.Table) (models.Filter, error) {
f := models.Filter{}
if node, ok := tbl.Fields["namepass"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NamePass = append(f.NamePass, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["namedrop"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.NameDrop = append(f.NameDrop, str.Value)
}
}
}
}
}
fields := []string{"pass", "fieldpass"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldPass = append(f.FieldPass, str.Value)
}
}
}
}
}
}
fields = []string{"drop", "fielddrop"}
for _, field := range fields {
if node, ok := tbl.Fields[field]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.FieldDrop = append(f.FieldDrop, str.Value)
}
}
}
}
}
}
if node, ok := tbl.Fields["tagpass"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagPass = append(f.TagPass, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagdrop"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := &models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
}
f.TagDrop = append(f.TagDrop, *tagfilter)
}
}
}
}
if node, ok := tbl.Fields["tagexclude"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.TagExclude = append(f.TagExclude, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["taginclude"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
f.TagInclude = append(f.TagInclude, str.Value)
}
}
}
}
}
if err := f.Compile(); err != nil {
return f, err
}
delete(tbl.Fields, "namedrop")
delete(tbl.Fields, "namepass")
delete(tbl.Fields, "fielddrop")
delete(tbl.Fields, "fieldpass")
delete(tbl.Fields, "drop")
delete(tbl.Fields, "pass")
delete(tbl.Fields, "tagdrop")
delete(tbl.Fields, "tagpass")
delete(tbl.Fields, "tagexclude")
delete(tbl.Fields, "taginclude")
return f, nil
}
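// A filter section handled by buildFilter typically looks like this (values are
// illustrative assumptions): namepass/namedrop match measurement names,
// fieldpass/fielddrop match field keys, tagpass/tagdrop match tag values, and
// taginclude/tagexclude keep or strip tag keys.
//
//	namepass = ["cpu", "mem"]
//	fieldpass = ["usage_*"]
//	[inputs.cpu.tagpass]
//	  host = ["web-*"]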
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput
func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &models.InputConfig{Name: name}
if node, ok := tbl.Fields["interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
cp.Interval = dur
}
}
}
if node, ok := tbl.Fields["name_prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementPrefix = str.Value
}
}
}
if node, ok := tbl.Fields["name_suffix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.MeasurementSuffix = str.Value
}
}
}
if node, ok := tbl.Fields["name_override"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
cp.NameOverride = str.Value
}
}
}
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("E! Could not parse tags for input %s\n", name)
}
}
}
delete(tbl.Fields, "name_prefix")
delete(tbl.Fields, "name_suffix")
delete(tbl.Fields, "name_override")
delete(tbl.Fields, "interval")
delete(tbl.Fields, "tags")
var err error
cp.Filter, err = buildFilter(tbl)
if err != nil {
return cp, err
}
return cp, nil
}
// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
config, err := getParserConfig(name, tbl)
if err != nil {
return nil, err
}
return parsers.NewParser(config)
}
func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
c := &parsers.Config{}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataFormat = str.Value
}
}
}
// Legacy support, exec plugin originally parsed JSON by default.
if name == "exec" && c.DataFormat == "" {
c.DataFormat = "json"
} else if c.DataFormat == "" {
c.DataFormat = "influx"
}
if node, ok := tbl.Fields["separator"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Separator = str.Value
}
}
}
if node, ok := tbl.Fields["templates"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.Templates = append(c.Templates, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["tag_keys"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.TagKeys = append(c.TagKeys, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["json_string_fields"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.JSONStringFields = append(c.JSONStringFields, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["json_name_key"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONNameKey = str.Value
}
}
}
if node, ok := tbl.Fields["json_query"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONQuery = str.Value
}
}
}
if node, ok := tbl.Fields["json_time_key"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONTimeKey = str.Value
}
}
}
if node, ok := tbl.Fields["json_time_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.JSONTimeFormat = str.Value
}
}
}
if node, ok := tbl.Fields["data_type"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataType = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_auth_file"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdAuthFile = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_security_level"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSecurityLevel = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSplit = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_typesdb"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardMetricRegistryPath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimePath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimeFormat = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagsPath = str.Value
}
}
}
c.DropwizardTagPathsMap = make(map[string]string)
if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagPathsMap[name] = str.Value
}
}
}
}
}
//for grok data_format
if node, ok := tbl.Fields["grok_named_patterns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["grok_patterns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.GrokPatterns = append(c.GrokPatterns, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["grok_custom_patterns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.GrokCustomPatterns = str.Value
}
}
}
if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["grok_timezone"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.GrokTimezone = str.Value
}
}
}
//for csv parser
if node, ok := tbl.Fields["csv_column_names"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CSVColumnNames = append(c.CSVColumnNames, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["csv_column_types"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["csv_tag_columns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CSVTagColumns = append(c.CSVTagColumns, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["csv_delimiter"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVDelimiter = str.Value
}
}
}
if node, ok := tbl.Fields["csv_comment"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVComment = str.Value
}
}
}
if node, ok := tbl.Fields["csv_measurement_column"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVMeasurementColumn = str.Value
}
}
}
if node, ok := tbl.Fields["csv_timestamp_column"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVTimestampColumn = str.Value
}
}
}
if node, ok := tbl.Fields["csv_timestamp_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CSVTimestampFormat = str.Value
}
}
}
if node, ok := tbl.Fields["csv_header_row_count"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.CSVHeaderRowCount = int(v)
}
}
}
if node, ok := tbl.Fields["csv_skip_rows"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.CSVHeaderRowCount = int(v)
}
}
}
if node, ok := tbl.Fields["csv_skip_columns"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.CSVHeaderRowCount = int(v)
}
}
}
if node, ok := tbl.Fields["csv_trim_space"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.Boolean); ok {
//for config with no quotes
val, err := strconv.ParseBool(str.Value)
c.CSVTrimSpace = val
if err != nil {
return nil, fmt.Errorf("E! parsing to bool: %v", err)
}
}
}
}
c.MetricName = name
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "separator")
delete(tbl.Fields, "templates")
delete(tbl.Fields, "tag_keys")
delete(tbl.Fields, "json_name_key")
delete(tbl.Fields, "json_query")
delete(tbl.Fields, "json_string_fields")
delete(tbl.Fields, "json_time_format")
delete(tbl.Fields, "json_time_key")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")
delete(tbl.Fields, "collectd_typesdb")
delete(tbl.Fields, "collectd_parse_multivalue")
delete(tbl.Fields, "dropwizard_metric_registry_path")
delete(tbl.Fields, "dropwizard_time_path")
delete(tbl.Fields, "dropwizard_time_format")
delete(tbl.Fields, "dropwizard_tags_path")
delete(tbl.Fields, "dropwizard_tag_paths")
delete(tbl.Fields, "grok_named_patterns")
delete(tbl.Fields, "grok_patterns")
delete(tbl.Fields, "grok_custom_patterns")
delete(tbl.Fields, "grok_custom_pattern_files")
delete(tbl.Fields, "grok_timezone")
delete(tbl.Fields, "csv_column_names")
delete(tbl.Fields, "csv_column_types")
delete(tbl.Fields, "csv_comment")
delete(tbl.Fields, "csv_delimiter")
delete(tbl.Fields, "csv_field_columns")
delete(tbl.Fields, "csv_header_row_count")
delete(tbl.Fields, "csv_measurement_column")
delete(tbl.Fields, "csv_skip_columns")
delete(tbl.Fields, "csv_skip_rows")
delete(tbl.Fields, "csv_tag_columns")
delete(tbl.Fields, "csv_timestamp_column")
delete(tbl.Fields, "csv_timestamp_format")
delete(tbl.Fields, "csv_trim_space")
return c, nil
}
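// A CSV input using the options parsed above could be configured like this
// (file name and column names are assumptions for illustration only):
//
//	[[inputs.file]]
//	  files = ["example.csv"]
//	  data_format = "csv"
//	  csv_header_row_count = 1
//	  csv_timestamp_column = "time"
//	  csv_timestamp_format = "unix"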
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DataFormat = str.Value
}
}
}
if c.DataFormat == "" {
c.DataFormat = "influx"
}
if node, ok := tbl.Fields["prefix"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Prefix = str.Value
}
}
}
if node, ok := tbl.Fields["template"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.Template = str.Value
}
}
}
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.InfluxMaxLineBytes = int(v)
}
}
}
if node, ok := tbl.Fields["influx_sort_fields"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxSortFields, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["influx_uint_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxUintSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["graphite_tag_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.GraphiteTagSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
timestampVal, err := time.ParseDuration(str.Value)
if err != nil {
return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
}
// now that we have a duration, truncate it to the nearest
// power of ten (just in case)
nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
c.TimestampUnits = time.Duration(new_nanoseconds)
}
}
}
if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.HecRouting, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
delete(tbl.Fields, "influx_max_line_bytes")
delete(tbl.Fields, "influx_sort_fields")
delete(tbl.Fields, "influx_uint_support")
delete(tbl.Fields, "graphite_tag_support")
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")
delete(tbl.Fields, "json_timestamp_units")
delete(tbl.Fields, "splunkmetric_hec_routing")
return serializers.NewSerializer(c)
}
// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns a
// models.OutputConfig to be inserted into models.RunningOutput
// Note: error exists in the return for future calls that might require error
func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
filter, err := buildFilter(tbl)
if err != nil {
return nil, err
}
oc := &models.OutputConfig{
Name: name,
Filter: filter,
}
// TODO
// Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass
if len(oc.Filter.FieldDrop) > 0 {
oc.Filter.NameDrop = oc.Filter.FieldDrop
}
if len(oc.Filter.FieldPass) > 0 {
oc.Filter.NamePass = oc.Filter.FieldPass
}
if node, ok := tbl.Fields["flush_interval"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
dur, err := time.ParseDuration(str.Value)
if err != nil {
return nil, err
}
oc.FlushInterval = dur
}
}
}
if node, ok := tbl.Fields["metric_buffer_limit"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
oc.MetricBufferLimit = int(v)
}
}
}
if node, ok := tbl.Fields["metric_batch_size"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
oc.MetricBatchSize = int(v)
}
}
}
delete(tbl.Fields, "flush_interval")
delete(tbl.Fields, "metric_buffer_limit")
delete(tbl.Fields, "metric_batch_size")
return oc, nil
}
| ["\"TELEGRAF_CONFIG_PATH\"", "\"INFLUX_TOKEN\""] | [] | ["TELEGRAF_CONFIG_PATH", "INFLUX_TOKEN"] | [] | ["TELEGRAF_CONFIG_PATH", "INFLUX_TOKEN"] | go | 2 | 0 |
ssh/ssh.go | package ssh
import (
"fmt"
"net"
"os"
"os/exec"
"strings"
log "github.com/Sirupsen/logrus"
)
func GetSSHCommand(host string, port int, user string, sshKey string, args ...string) *exec.Cmd {
defaultSSHArgs := []string{
"-o", "IdentitiesOnly=yes",
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "LogLevel=quiet", // suppress "Warning: Permanently added '[localhost]:2022' (ECDSA) to the list of known hosts."
"-p", fmt.Sprintf("%d", port),
"-i", sshKey,
fmt.Sprintf("%s@%s", user, host),
}
sshArgs := append(defaultSSHArgs, args...)
cmd := exec.Command("ssh", sshArgs...)
cmd.Stderr = os.Stderr
if os.Getenv("DEBUG") != "" {
cmd.Stdout = os.Stdout
}
log.Debugf("executing: %v", strings.Join(cmd.Args, " "))
return cmd
}
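// Illustrative usage (not part of the original file; host, port, user and key
// path are assumptions). The returned *exec.Cmd inherits stderr, and stdout as
// well when the DEBUG environment variable is set.
//
//	cmd := GetSSHCommand("127.0.0.1", 2022, "docker", "/tmp/id_rsa", "uname", "-a")
//	if err := cmd.Run(); err != nil {
//		log.Error(err)
//	}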
func GenerateSSHKey(path string) error {
if _, err := os.Stat(path); err != nil {
if !os.IsNotExist(err) {
return err
}
cmd := exec.Command("ssh-keygen", "-t", "rsa", "-N", "", "-f", path)
if os.Getenv("DEBUG") != "" {
cmd.Stdout = os.Stdout
}
cmd.Stderr = os.Stderr
log.Debugf("executing: %v %v\n", cmd.Path, strings.Join(cmd.Args, " "))
if err := cmd.Run(); err != nil {
return err
}
}
return nil
}
func WaitForTCP(addr string) error {
for {
conn, err := net.Dial("tcp", addr)
if err != nil {
continue
}
defer conn.Close()
if _, err = conn.Read(make([]byte, 1)); err != nil {
continue
}
break
}
return nil
}
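// Note: WaitForTCP spins until the address accepts a connection and sends at
// least one byte (for SSH, the banner), so callers typically invoke it right
// before GetSSHCommand, e.g. WaitForTCP("127.0.0.1:2022") with an assumed address.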
| ["\"DEBUG\"", "\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
src/virtualenv/create/creator.py | from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import os
import shutil
import sys
from abc import ABCMeta, abstractmethod
from argparse import ArgumentTypeError
from ast import literal_eval
from collections import OrderedDict
from stat import S_IWUSR
from six import add_metaclass
from virtualenv.discovery.cached_py_info import LogCmd
from virtualenv.info import WIN_CPYTHON_2
from virtualenv.pyenv_cfg import PyEnvCfg
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_str, ensure_text
from virtualenv.util.subprocess import run_cmd
from virtualenv.util.zipapp import ensure_file_on_disk
from virtualenv.version import __version__
HERE = Path(os.path.abspath(__file__)).parent
DEBUG_SCRIPT = HERE / "debug.py"
@add_metaclass(ABCMeta)
class Creator(object):
"""A class that given a python Interpreter creates a virtual environment"""
def __init__(self, options, interpreter):
"""Construct a new virtual environment creator.
:param options: the CLI option as parsed from :meth:`add_parser_arguments`
:param interpreter: the interpreter to create virtual environment from
"""
self.interpreter = interpreter
self._debug = None
self.dest = Path(options.dest)
self.clear = options.clear
self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)
def __repr__(self):
return ensure_str(self.__unicode__())
def __unicode__(self):
return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args()))
def _args(self):
return [
("dest", ensure_text(str(self.dest))),
("clear", self.clear),
]
@classmethod
def can_create(cls, interpreter):
"""Determine if we can create a virtual environment.
:param interpreter: the interpreter in question
:return: ``None`` if we can't create, any other object otherwise that will be forwarded to \
:meth:`add_parser_arguments`
"""
return True
@classmethod
def add_parser_arguments(cls, parser, interpreter, meta):
"""Add CLI arguments for the creator.
:param parser: the CLI parser
:param interpreter: the interpreter we're asked to create virtual environment for
:param meta: value as returned by :meth:`can_create`
"""
parser.add_argument(
"dest", help="directory to create virtualenv at", type=cls.validate_dest,
)
parser.add_argument(
"--clear",
dest="clear",
action="store_true",
help="remove the destination directory if exist before starting (will overwrite files otherwise)",
default=False,
)
@abstractmethod
def create(self):
"""Perform the virtual environment creation."""
raise NotImplementedError
@classmethod
def validate_dest(cls, raw_value):
"""No path separator in the path, valid chars and must be write-able"""
def non_write_able(dest, value):
common = Path(*os.path.commonprefix([value.parts, dest.parts]))
raise ArgumentTypeError(
"the destination {} is not write-able at {}".format(dest.relative_to(common), common)
)
# the file system must be able to encode
# note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/
encoding = sys.getfilesystemencoding()
refused = OrderedDict()
kwargs = {"errors": "ignore"} if encoding != "mbcs" else {}
for char in ensure_text(raw_value):
try:
trip = char.encode(encoding, **kwargs).decode(encoding)
if trip == char:
continue
raise ValueError(trip)
except ValueError:
refused[char] = None
if refused:
raise ArgumentTypeError(
"the file system codec ({}) cannot handle characters {!r} within {!r}".format(
encoding, "".join(refused.keys()), raw_value
)
)
if os.pathsep in raw_value:
raise ArgumentTypeError(
"destination {!r} must not contain the path separator ({}) as this would break "
"the activation scripts".format(raw_value, os.pathsep)
)
value = Path(raw_value)
if value.exists() and value.is_file():
raise ArgumentTypeError("the destination {} already exists and is a file".format(value))
if (3, 3) <= sys.version_info <= (3, 6):
# pre 3.6 resolve is always strict, aka must exist, sidestep by using os.path operation
dest = Path(os.path.realpath(raw_value))
else:
dest = Path(os.path.abspath(str(value))).resolve() # on Windows absolute does not imply resolve so use both
value = dest
while dest:
if dest.exists():
if os.access(ensure_text(str(dest)), os.W_OK):
break
else:
non_write_able(dest, value)
base, _ = dest.parent, dest.name
if base == dest:
non_write_able(dest, value) # pragma: no cover
dest = base
return str(value)
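# Illustrative behaviour (a sketch, not part of the original module):
# Creator.validate_dest("venv") returns the resolved destination as a string when
# it is writable, while a value containing os.pathsep (for example "a:b" on POSIX)
# raises ArgumentTypeError because it would break the generated activation scripts.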
def run(self):
if self.dest.exists() and self.clear:
logging.debug("delete %s", self.dest)
def onerror(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, S_IWUSR)
func(path)
else:
raise
shutil.rmtree(str(self.dest), ignore_errors=True, onerror=onerror)
self.create()
self.set_pyenv_cfg()
def set_pyenv_cfg(self):
self.pyenv_cfg.content = OrderedDict()
self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix
self.pyenv_cfg["implementation"] = self.interpreter.implementation
self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info)
self.pyenv_cfg["virtualenv"] = __version__
@property
def debug(self):
"""
:return: debug information about the virtual environment (only valid after :meth:`create` has run)
"""
if self._debug is None and self.exe is not None:
self._debug = get_env_debug_info(self.exe, self.debug_script())
return self._debug
# noinspection PyMethodMayBeStatic
def debug_script(self):
return DEBUG_SCRIPT
def get_env_debug_info(env_exe, debug_script):
env = os.environ.copy()
env.pop(str("PYTHONPATH"), None)
with ensure_file_on_disk(debug_script) as debug_script:
cmd = [str(env_exe), str(debug_script)]
if WIN_CPYTHON_2:
cmd = [ensure_text(i) for i in cmd]
logging.debug(str("debug via %r"), LogCmd(cmd))
code, out, err = run_cmd(cmd)
# noinspection PyBroadException
try:
if code != 0:
result = literal_eval(out)
else:
result = json.loads(out)
if err:
result["err"] = err
except Exception as exception:
return {"out": out, "err": err, "returncode": code, "exception": repr(exception)}
if "sys" in result and "path" in result["sys"]:
del result["sys"]["path"][0]
return result
| [] | [] | [] | [] | [] | python | 0 | 0 |
cmd/server-manager/main.go | package main
import (
"crypto/tls"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
servermanager "github.com/JustaPenguin/assetto-server-manager"
"github.com/JustaPenguin/assetto-server-manager/cmd/server-manager/static"
"github.com/JustaPenguin/assetto-server-manager/cmd/server-manager/views"
"github.com/JustaPenguin/assetto-server-manager/internal/changelog"
"github.com/JustaPenguin/assetto-server-manager/pkg/udp"
"github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/lorenzosaino/go-sysctl"
_ "github.com/mjibson/esc/embed"
"github.com/pkg/browser"
"github.com/sirupsen/logrus"
lua "github.com/yuin/gopher-lua"
)
var defaultAddress = "0.0.0.0:8772"
const (
udpRealtimePosRefreshIntervalMin = 100
)
func init() {
runtime.LockOSThread()
servermanager.InitLogging()
}
func main() {
config, err := servermanager.ReadConfig("config.yml")
if err != nil {
ServeHTTPWithError(defaultAddress, "Read configuration file (config.yml)", err)
return
}
if config.Monitoring.Enabled {
servermanager.InitMonitoring()
}
store, err := config.Store.BuildStore()
if err != nil {
ServeHTTPWithError(config.HTTP.Hostname, "Open server manager storage (bolt or json)", err)
return
}
changes, err := changelog.LoadChangelog()
if err != nil {
ServeHTTPWithError(config.HTTP.Hostname, "Load changelog (internal error)", err)
return
}
servermanager.Changelog = changes
var templateLoader servermanager.TemplateLoader
var filesystem http.FileSystem
if os.Getenv("FILESYSTEM_HTML") == "true" {
templateLoader = servermanager.NewFilesystemTemplateLoader("views")
filesystem = http.Dir("static")
} else {
templateLoader = &views.TemplateLoader{}
filesystem = static.FS(false)
}
resolver, err := servermanager.NewResolver(templateLoader, os.Getenv("FILESYSTEM_HTML") == "true", store)
if err != nil {
ServeHTTPWithError(config.HTTP.Hostname, "Initialise resolver (internal error)", err)
return
}
servermanager.SetAssettoInstallPath(config.Steam.InstallPath)
err = servermanager.InstallAssettoCorsaServer(config.Steam.Username, config.Steam.Password, config.Steam.ForceUpdate)
if err != nil {
ServeHTTPWithError(defaultAddress, "Install assetto corsa server with steamcmd. Likely you do not have steamcmd installed correctly.", err)
return
}
if config.LiveMap.IsEnabled() {
if config.LiveMap.IntervalMs < udpRealtimePosRefreshIntervalMin {
udp.RealtimePosIntervalMs = udpRealtimePosRefreshIntervalMin
} else {
udp.RealtimePosIntervalMs = config.LiveMap.IntervalMs
}
if runtime.GOOS == "linux" {
// check known kernel net memory restrictions. if they're lower than the recommended
// values, then print out explaining how to increase them
memValues := []string{"net.core.rmem_max", "net.core.rmem_default", "net.core.wmem_max", "net.core.wmem_default"}
for _, val := range memValues {
checkMemValue(val)
}
}
}
if config.Lua.Enabled && servermanager.Premium() {
luaPath := os.Getenv("LUA_PATH")
newPath, err := filepath.Abs("./plugins/?.lua")
if err != nil {
logrus.WithError(err).Error("Couldn't get absolute path for /plugins folder")
} else {
if luaPath != "" {
luaPath = luaPath + ";" + newPath
} else {
luaPath = newPath
}
err = os.Setenv("LUA_PATH", luaPath)
if err != nil {
logrus.WithError(err).Error("Couldn't automatically set Lua path, lua will not run! Try setting the environment variable LUA_PATH manually.")
}
}
servermanager.Lua = lua.NewState()
defer servermanager.Lua.Close()
servermanager.InitLua(resolver.ResolveRaceControl())
}
err = servermanager.InitWithResolver(resolver)
if err != nil {
ServeHTTPWithError(config.HTTP.Hostname, "Initialise server manager (internal error)", err)
return
}
listener, err := net.Listen("tcp", config.HTTP.Hostname)
if err != nil {
ServeHTTPWithError(defaultAddress, "Listen on hostname "+config.HTTP.Hostname+". Likely the port has already been taken by another application", err)
return
}
logrus.Infof("starting assetto server manager on: %s", config.HTTP.Hostname)
if !config.Server.DisableWindowsBrowserOpen && runtime.GOOS == "windows" {
_ = browser.OpenURL("http://" + strings.Replace(config.HTTP.Hostname, "0.0.0.0", "127.0.0.1", 1))
}
router := resolver.ResolveRouter(filesystem)
srv := &http.Server{
Handler: router,
}
if config.HTTP.TLS.Enabled {
srv.TLSConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
},
}
srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
if err := srv.ServeTLS(listener, config.HTTP.TLS.CertPath, config.HTTP.TLS.KeyPath); err != nil {
logrus.WithError(err).Fatal("Could not start TLS server")
}
} else {
if err := srv.Serve(listener); err != nil {
logrus.WithError(err).Fatal("Could not start server")
}
}
}
const udpBufferRecommendedSize = uint64(2e6) // 2MB
func checkMemValue(key string) {
val, err := sysctlAsUint64(key)
if err != nil {
logrus.WithError(err).Errorf("Could not check sysctl val: %s", key)
return
}
if val < udpBufferRecommendedSize {
d := color.New(color.FgRed)
red := d.PrintfFunc()
redln := d.PrintlnFunc()
redln()
redln("-------------------------------------------------------------------")
redln(" W A R N I N G")
redln("-------------------------------------------------------------------")
red("System %s value is too small! UDP messages are \n", key)
redln("more likely to be lost and the stability of various Server Manager")
redln("systems will be greatly affected.")
redln()
red("Your current value is %s. We recommend a value of %s for a \n", humanize.Bytes(val), humanize.Bytes(udpBufferRecommendedSize))
redln("more consistent operation.")
redln()
red("You can do this with the command:\n\t sysctl -w %s=%d\n", key, udpBufferRecommendedSize)
redln()
redln("More information can be found on sysctl variables here:\n\t https://www.cyberciti.biz/faq/howto-set-sysctl-variables/")
}
}
func sysctlAsUint64(val string) (uint64, error) {
val, err := sysctl.Get(val)
if err != nil {
return 0, err
}
return strconv.ParseUint(val, 10, 0)
}
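// Example (the key is an assumed input): sysctlAsUint64("net.core.rmem_max")
// returns the current receive-buffer ceiling in bytes, which checkMemValue
// compares against the 2MB recommendation above.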
| ["\"FILESYSTEM_HTML\"", "\"FILESYSTEM_HTML\"", "\"LUA_PATH\""] | [] | ["FILESYSTEM_HTML", "LUA_PATH"] | [] | ["FILESYSTEM_HTML", "LUA_PATH"] | go | 2 | 0 |
src/terminal.go | package fzf
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"os/signal"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/guoshimin/fzf/src/tui"
"github.com/guoshimin/fzf/src/util"
)
// import "github.com/pkg/profile"
var placeholder *regexp.Regexp
func init() {
placeholder = regexp.MustCompile("\\\\?(?:{\\+?[0-9,-.]*}|{q})")
}
type jumpMode int
const (
jumpDisabled jumpMode = iota
jumpEnabled
jumpAcceptEnabled
)
type previewer struct {
text string
lines int
offset int
enabled bool
}
type itemLine struct {
current bool
selected bool
label string
queryLen int
width int
result Result
}
var emptyLine = itemLine{}
// Terminal represents terminal input/output
type Terminal struct {
initDelay time.Duration
inlineInfo bool
prompt string
promptLen int
reverse bool
fullscreen bool
hscroll bool
hscrollOff int
wordRubout string
wordNext string
cx int
cy int
offset int
yanked []rune
input []rune
multi bool
sort bool
toggleSort bool
delimiter Delimiter
expect map[int]string
keymap map[int][]action
pressed string
printQuery bool
history *History
cycle bool
header []string
header0 []string
ansi bool
tabstop int
margin [4]sizeSpec
strong tui.Attr
bordered bool
cleanExit bool
border tui.Window
window tui.Window
pborder tui.Window
pwindow tui.Window
count int
progress int
reading bool
success bool
jumping jumpMode
jumpLabels string
printer func(string)
merger *Merger
selected map[int32]selectedItem
version int64
reqBox *util.EventBox
preview previewOpts
previewer previewer
previewBox *util.EventBox
eventBox *util.EventBox
mutex sync.Mutex
initFunc func()
prevLines []itemLine
suppress bool
startChan chan bool
slab *util.Slab
theme *tui.ColorTheme
tui tui.Renderer
}
type selectedItem struct {
at time.Time
item *Item
}
type byTimeOrder []selectedItem
func (a byTimeOrder) Len() int {
return len(a)
}
func (a byTimeOrder) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a byTimeOrder) Less(i, j int) bool {
return a[i].at.Before(a[j].at)
}
var _spinner = []string{`-`, `\`, `|`, `/`, `-`, `\`, `|`, `/`}
const (
reqPrompt util.EventType = iota
reqInfo
reqHeader
reqList
reqJump
reqRefresh
reqReinit
reqRedraw
reqClose
reqPrintQuery
reqPreviewEnqueue
reqPreviewDisplay
reqPreviewRefresh
reqQuit
)
type action struct {
t actionType
a string
}
type actionType int
const (
actIgnore actionType = iota
actInvalid
actRune
actMouse
actBeginningOfLine
actAbort
actAccept
actAcceptNonEmpty
actBackwardChar
actBackwardDeleteChar
actBackwardWord
actCancel
actClearScreen
actDeleteChar
actDeleteCharEOF
actEndOfLine
actForwardChar
actForwardWord
actKillLine
actKillWord
actUnixLineDiscard
actUnixWordRubout
actYank
actBackwardKillWord
actSelectAll
actDeselectAll
actToggle
actToggleAll
actToggleDown
actToggleUp
actToggleIn
actToggleOut
actDown
actUp
actPageUp
actPageDown
actHalfPageUp
actHalfPageDown
actJump
actJumpAccept
actPrintQuery
actReplaceQuery
actToggleSort
actTogglePreview
actTogglePreviewWrap
actPreviewUp
actPreviewDown
actPreviewPageUp
actPreviewPageDown
actPreviousHistory
actNextHistory
actExecute
actExecuteSilent
actExecuteMulti // Deprecated
actSigStop
actTop
)
func toActions(types ...actionType) []action {
actions := make([]action, len(types))
for idx, t := range types {
actions[idx] = action{t: t, a: ""}
}
return actions
}
func defaultKeymap() map[int][]action {
keymap := make(map[int][]action)
keymap[tui.Invalid] = toActions(actInvalid)
keymap[tui.Resize] = toActions(actClearScreen)
keymap[tui.CtrlA] = toActions(actBeginningOfLine)
keymap[tui.CtrlB] = toActions(actBackwardChar)
keymap[tui.CtrlC] = toActions(actAbort)
keymap[tui.CtrlG] = toActions(actAbort)
keymap[tui.CtrlQ] = toActions(actAbort)
keymap[tui.ESC] = toActions(actAbort)
keymap[tui.CtrlD] = toActions(actDeleteCharEOF)
keymap[tui.CtrlE] = toActions(actEndOfLine)
keymap[tui.CtrlF] = toActions(actForwardChar)
keymap[tui.CtrlH] = toActions(actBackwardDeleteChar)
keymap[tui.BSpace] = toActions(actBackwardDeleteChar)
keymap[tui.Tab] = toActions(actToggleDown)
keymap[tui.BTab] = toActions(actToggleUp)
keymap[tui.CtrlJ] = toActions(actDown)
keymap[tui.CtrlK] = toActions(actUp)
keymap[tui.CtrlL] = toActions(actClearScreen)
keymap[tui.CtrlM] = toActions(actAccept)
keymap[tui.CtrlN] = toActions(actDown)
keymap[tui.CtrlP] = toActions(actUp)
keymap[tui.CtrlU] = toActions(actUnixLineDiscard)
keymap[tui.CtrlW] = toActions(actUnixWordRubout)
keymap[tui.CtrlY] = toActions(actYank)
if !util.IsWindows() {
keymap[tui.CtrlZ] = toActions(actSigStop)
}
keymap[tui.AltB] = toActions(actBackwardWord)
keymap[tui.SLeft] = toActions(actBackwardWord)
keymap[tui.AltF] = toActions(actForwardWord)
keymap[tui.SRight] = toActions(actForwardWord)
keymap[tui.AltD] = toActions(actKillWord)
keymap[tui.AltBS] = toActions(actBackwardKillWord)
keymap[tui.Up] = toActions(actUp)
keymap[tui.Down] = toActions(actDown)
keymap[tui.Left] = toActions(actBackwardChar)
keymap[tui.Right] = toActions(actForwardChar)
keymap[tui.Home] = toActions(actBeginningOfLine)
keymap[tui.End] = toActions(actEndOfLine)
keymap[tui.Del] = toActions(actDeleteChar)
keymap[tui.PgUp] = toActions(actPageUp)
keymap[tui.PgDn] = toActions(actPageDown)
keymap[tui.SUp] = toActions(actPreviewUp)
keymap[tui.SDown] = toActions(actPreviewDown)
keymap[tui.Rune] = toActions(actRune)
keymap[tui.Mouse] = toActions(actMouse)
keymap[tui.DoubleClick] = toActions(actAccept)
keymap[tui.LeftClick] = toActions(actIgnore)
keymap[tui.RightClick] = toActions(actToggle)
return keymap
}
func trimQuery(query string) []rune {
return []rune(strings.Replace(query, "\t", " ", -1))
}
// NewTerminal returns new Terminal object
func NewTerminal(opts *Options, eventBox *util.EventBox) *Terminal {
input := trimQuery(opts.Query)
var header []string
if opts.Reverse {
header = opts.Header
} else {
header = reverseStringArray(opts.Header)
}
var delay time.Duration
if opts.Tac {
delay = initialDelayTac
} else {
delay = initialDelay
}
var previewBox *util.EventBox
if len(opts.Preview.command) > 0 {
previewBox = util.NewEventBox()
}
strongAttr := tui.Bold
if !opts.Bold {
strongAttr = tui.AttrRegular
}
var renderer tui.Renderer
fullscreen := opts.Height.size == 0 || opts.Height.percent && opts.Height.size == 100
if fullscreen {
if tui.HasFullscreenRenderer() {
renderer = tui.NewFullscreenRenderer(opts.Theme, opts.Black, opts.Mouse)
} else {
renderer = tui.NewLightRenderer(opts.Theme, opts.Black, opts.Mouse, opts.Tabstop, opts.ClearOnExit,
true, func(h int) int { return h })
}
} else {
maxHeightFunc := func(termHeight int) int {
var maxHeight int
if opts.Height.percent {
maxHeight = util.Max(int(opts.Height.size*float64(termHeight)/100.0), opts.MinHeight)
} else {
maxHeight = int(opts.Height.size)
}
effectiveMinHeight := minHeight
if previewBox != nil && (opts.Preview.position == posUp || opts.Preview.position == posDown) {
effectiveMinHeight *= 2
}
if opts.InlineInfo {
effectiveMinHeight -= 1
}
if opts.Bordered {
effectiveMinHeight += 2
}
return util.Min(termHeight, util.Max(maxHeight, effectiveMinHeight))
}
renderer = tui.NewLightRenderer(opts.Theme, opts.Black, opts.Mouse, opts.Tabstop, opts.ClearOnExit, false, maxHeightFunc)
}
wordRubout := "[^[:alnum:]][[:alnum:]]"
wordNext := "[[:alnum:]][^[:alnum:]]|(.$)"
if opts.FileWord {
sep := regexp.QuoteMeta(string(os.PathSeparator))
wordRubout = fmt.Sprintf("%s[^%s]", sep, sep)
wordNext = fmt.Sprintf("[^%s]%s|(.$)", sep, sep)
}
t := Terminal{
initDelay: delay,
inlineInfo: opts.InlineInfo,
reverse: opts.Reverse,
fullscreen: fullscreen,
hscroll: opts.Hscroll,
hscrollOff: opts.HscrollOff,
wordRubout: wordRubout,
wordNext: wordNext,
cx: len(input),
cy: 0,
offset: 0,
yanked: []rune{},
input: input,
multi: opts.Multi,
sort: opts.Sort > 0,
toggleSort: opts.ToggleSort,
delimiter: opts.Delimiter,
expect: opts.Expect,
keymap: opts.Keymap,
pressed: "",
printQuery: opts.PrintQuery,
history: opts.History,
margin: opts.Margin,
bordered: opts.Bordered,
cleanExit: opts.ClearOnExit,
strong: strongAttr,
cycle: opts.Cycle,
header: header,
header0: header,
ansi: opts.Ansi,
tabstop: opts.Tabstop,
reading: true,
success: true,
jumping: jumpDisabled,
jumpLabels: opts.JumpLabels,
printer: opts.Printer,
merger: EmptyMerger,
selected: make(map[int32]selectedItem),
reqBox: util.NewEventBox(),
preview: opts.Preview,
previewer: previewer{"", 0, 0, previewBox != nil && !opts.Preview.hidden},
previewBox: previewBox,
eventBox: eventBox,
mutex: sync.Mutex{},
suppress: true,
slab: util.MakeSlab(slab16Size, slab32Size),
theme: opts.Theme,
startChan: make(chan bool, 1),
tui: renderer,
initFunc: func() { renderer.Init() }}
t.prompt, t.promptLen = t.processTabs([]rune(opts.Prompt), 0)
return &t
}
// Input returns current query string
func (t *Terminal) Input() []rune {
t.mutex.Lock()
defer t.mutex.Unlock()
return copySlice(t.input)
}
// UpdateCount updates the count information
func (t *Terminal) UpdateCount(cnt int, final bool, success bool) {
t.mutex.Lock()
t.count = cnt
t.reading = !final
t.success = success
t.mutex.Unlock()
t.reqBox.Set(reqInfo, nil)
if final {
t.reqBox.Set(reqRefresh, nil)
}
}
func reverseStringArray(input []string) []string {
size := len(input)
reversed := make([]string, size)
for idx, str := range input {
reversed[size-idx-1] = str
}
return reversed
}
// UpdateHeader updates the header
func (t *Terminal) UpdateHeader(header []string) {
t.mutex.Lock()
t.header = append(append([]string{}, t.header0...), header...)
t.mutex.Unlock()
t.reqBox.Set(reqHeader, nil)
}
// UpdateProgress updates the search progress
func (t *Terminal) UpdateProgress(progress float32) {
t.mutex.Lock()
newProgress := int(progress * 100)
changed := t.progress != newProgress
t.progress = newProgress
t.mutex.Unlock()
if changed {
t.reqBox.Set(reqInfo, nil)
}
}
// UpdateList updates Merger to display the list
func (t *Terminal) UpdateList(merger *Merger) {
t.mutex.Lock()
t.progress = 100
t.merger = merger
t.mutex.Unlock()
t.reqBox.Set(reqInfo, nil)
t.reqBox.Set(reqList, nil)
}
func (t *Terminal) output() bool {
if t.printQuery {
t.printer(string(t.input))
}
if len(t.expect) > 0 {
t.printer(t.pressed)
}
found := len(t.selected) > 0
if !found {
current := t.currentItem()
if current != nil {
t.printer(current.AsString(t.ansi))
found = true
}
} else {
for _, sel := range t.sortSelected() {
t.printer(sel.item.AsString(t.ansi))
}
}
return found
}
func (t *Terminal) sortSelected() []selectedItem {
sels := make([]selectedItem, 0, len(t.selected))
for _, sel := range t.selected {
sels = append(sels, sel)
}
sort.Sort(byTimeOrder(sels))
return sels
}
func (t *Terminal) displayWidth(runes []rune) int {
l := 0
for _, r := range runes {
l += util.RuneWidth(r, l, t.tabstop)
}
return l
}
const (
minWidth = 16
minHeight = 4
maxDisplayWidthCalc = 1024
)
func calculateSize(base int, size sizeSpec, margin int, minSize int) int {
max := base - margin
if size.percent {
return util.Constrain(int(float64(base)*0.01*size.size), minSize, max)
}
return util.Constrain(int(size.size), minSize, max)
}
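// For instance (values are assumptions): calculateSize(100, sizeSpec{size: 50, percent: true}, 10, 5)
// returns 50, while a fixed size larger than base-margin is clamped down to base-margin.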
func (t *Terminal) resizeWindows() {
screenWidth := t.tui.MaxX()
screenHeight := t.tui.MaxY()
marginInt := [4]int{}
t.prevLines = make([]itemLine, screenHeight)
for idx, sizeSpec := range t.margin {
if sizeSpec.percent {
var max float64
if idx%2 == 0 {
max = float64(screenHeight)
} else {
max = float64(screenWidth)
}
marginInt[idx] = int(max * sizeSpec.size * 0.01)
} else {
marginInt[idx] = int(sizeSpec.size)
}
if t.bordered && idx%2 == 0 {
marginInt[idx] += 1
}
}
adjust := func(idx1 int, idx2 int, max int, min int) {
if max >= min {
margin := marginInt[idx1] + marginInt[idx2]
if max-margin < min {
desired := max - min
marginInt[idx1] = desired * marginInt[idx1] / margin
marginInt[idx2] = desired * marginInt[idx2] / margin
}
}
}
previewVisible := t.isPreviewEnabled() && t.preview.size.size > 0
minAreaWidth := minWidth
minAreaHeight := minHeight
if previewVisible {
switch t.preview.position {
case posUp, posDown:
minAreaHeight *= 2
case posLeft, posRight:
minAreaWidth *= 2
}
}
adjust(1, 3, screenWidth, minAreaWidth)
adjust(0, 2, screenHeight, minAreaHeight)
if t.border != nil {
t.border.Close()
}
if t.window != nil {
t.window.Close()
}
if t.pborder != nil {
t.pborder.Close()
t.pwindow.Close()
}
width := screenWidth - marginInt[1] - marginInt[3]
height := screenHeight - marginInt[0] - marginInt[2]
if t.bordered {
t.border = t.tui.NewWindow(
marginInt[0]-1,
marginInt[3],
width,
height+2, tui.BorderHorizontal)
}
if previewVisible {
createPreviewWindow := func(y int, x int, w int, h int) {
t.pborder = t.tui.NewWindow(y, x, w, h, tui.BorderAround)
pwidth := w - 4
// ncurses auto-wraps the line when the cursor reaches the right-end of
// the window. To prevent unintended line-wraps, we use the width one
// column larger than the desired value.
if !t.preview.wrap && t.tui.DoesAutoWrap() {
pwidth += 1
}
t.pwindow = t.tui.NewWindow(y+1, x+2, pwidth, h-2, tui.BorderNone)
os.Setenv("FZF_PREVIEW_HEIGHT", strconv.Itoa(h-2))
}
switch t.preview.position {
case posUp:
pheight := calculateSize(height, t.preview.size, minHeight, 3)
t.window = t.tui.NewWindow(
marginInt[0]+pheight, marginInt[3], width, height-pheight, tui.BorderNone)
createPreviewWindow(marginInt[0], marginInt[3], width, pheight)
case posDown:
pheight := calculateSize(height, t.preview.size, minHeight, 3)
t.window = t.tui.NewWindow(
marginInt[0], marginInt[3], width, height-pheight, tui.BorderNone)
createPreviewWindow(marginInt[0]+height-pheight, marginInt[3], width, pheight)
case posLeft:
pwidth := calculateSize(width, t.preview.size, minWidth, 5)
t.window = t.tui.NewWindow(
marginInt[0], marginInt[3]+pwidth, width-pwidth, height, tui.BorderNone)
createPreviewWindow(marginInt[0], marginInt[3], pwidth, height)
case posRight:
pwidth := calculateSize(width, t.preview.size, minWidth, 5)
t.window = t.tui.NewWindow(
marginInt[0], marginInt[3], width-pwidth, height, tui.BorderNone)
createPreviewWindow(marginInt[0], marginInt[3]+width-pwidth, pwidth, height)
}
} else {
t.window = t.tui.NewWindow(
marginInt[0],
marginInt[3],
width,
height, tui.BorderNone)
}
for i := 0; i < t.window.Height(); i++ {
t.window.MoveAndClear(i, 0)
}
t.truncateQuery()
}
func (t *Terminal) move(y int, x int, clear bool) {
if !t.reverse {
y = t.window.Height() - y - 1
}
if clear {
t.window.MoveAndClear(y, x)
} else {
t.window.Move(y, x)
}
}
func (t *Terminal) placeCursor() {
t.move(0, t.promptLen+t.displayWidth(t.input[:t.cx]), false)
}
func (t *Terminal) printPrompt() {
t.move(0, 0, true)
t.window.CPrint(tui.ColPrompt, t.strong, t.prompt)
t.window.CPrint(tui.ColNormal, t.strong, string(t.input))
}
func (t *Terminal) printInfo() {
pos := 0
if t.inlineInfo {
pos = t.promptLen + t.displayWidth(t.input) + 1
if pos+len(" < ") > t.window.Width() {
return
}
t.move(0, pos, true)
if t.reading {
t.window.CPrint(tui.ColSpinner, t.strong, " < ")
} else {
t.window.CPrint(tui.ColPrompt, t.strong, " < ")
}
pos += len(" < ")
} else {
t.move(1, 0, true)
if t.reading {
duration := int64(spinnerDuration)
idx := (time.Now().UnixNano() % (duration * int64(len(_spinner)))) / duration
t.window.CPrint(tui.ColSpinner, t.strong, _spinner[idx])
}
t.move(1, 2, false)
pos = 2
}
output := fmt.Sprintf("%d/%d", t.merger.Length(), t.count)
if t.toggleSort {
if t.sort {
output += " +S"
} else {
output += " -S"
}
}
if t.multi && len(t.selected) > 0 {
output += fmt.Sprintf(" (%d)", len(t.selected))
}
if t.progress > 0 && t.progress < 100 {
output += fmt.Sprintf(" (%d%%)", t.progress)
}
if !t.success && t.count == 0 {
if len(os.Getenv("FZF_DEFAULT_COMMAND")) > 0 {
output = "[$FZF_DEFAULT_COMMAND failed]"
} else {
output = "[default command failed - $FZF_DEFAULT_COMMAND required]"
}
}
if pos+len(output) <= t.window.Width() {
t.window.CPrint(tui.ColInfo, 0, output)
}
}
func (t *Terminal) printHeader() {
if len(t.header) == 0 {
return
}
max := t.window.Height()
var state *ansiState
for idx, lineStr := range t.header {
line := idx + 2
if t.inlineInfo {
line--
}
if line >= max {
continue
}
trimmed, colors, newState := extractColor(lineStr, state, nil)
state = newState
item := &Item{
text: util.ToChars([]byte(trimmed)),
colors: colors}
t.move(line, 2, true)
t.printHighlighted(Result{item: item},
tui.AttrRegular, tui.ColHeader, tui.ColHeader, false, false)
}
}
func (t *Terminal) printList() {
t.constrain()
maxy := t.maxItems()
count := t.merger.Length() - t.offset
for j := 0; j < maxy; j++ {
i := j
if !t.reverse {
i = maxy - 1 - j
}
line := i + 2 + len(t.header)
if t.inlineInfo {
line--
}
if i < count {
t.printItem(t.merger.Get(i+t.offset), line, i, i == t.cy-t.offset)
} else if t.prevLines[i] != emptyLine {
t.prevLines[i] = emptyLine
t.move(line, 0, true)
}
}
}
func (t *Terminal) printItem(result Result, line int, i int, current bool) {
item := result.item
_, selected := t.selected[item.Index()]
label := " "
if t.jumping != jumpDisabled {
if i < len(t.jumpLabels) {
// Striped
current = i%2 == 0
label = t.jumpLabels[i : i+1]
}
} else if current {
label = ">"
}
// Avoid unnecessary redraw
newLine := itemLine{current: current, selected: selected, label: label,
result: result, queryLen: len(t.input), width: 0}
prevLine := t.prevLines[i]
if prevLine.current == newLine.current &&
prevLine.selected == newLine.selected &&
prevLine.label == newLine.label &&
prevLine.queryLen == newLine.queryLen &&
prevLine.result == newLine.result {
return
}
t.move(line, 0, false)
t.window.CPrint(tui.ColCursor, t.strong, label)
if current {
if selected {
t.window.CPrint(tui.ColSelected, t.strong, ">")
} else {
t.window.CPrint(tui.ColCurrent, t.strong, " ")
}
newLine.width = t.printHighlighted(result, t.strong, tui.ColCurrent, tui.ColCurrentMatch, true, true)
} else {
if selected {
t.window.CPrint(tui.ColSelected, t.strong, ">")
} else {
t.window.Print(" ")
}
newLine.width = t.printHighlighted(result, 0, tui.ColNormal, tui.ColMatch, false, true)
}
fillSpaces := prevLine.width - newLine.width
if fillSpaces > 0 {
t.window.Print(strings.Repeat(" ", fillSpaces))
}
t.prevLines[i] = newLine
}
func (t *Terminal) trimRight(runes []rune, width int) ([]rune, int) {
// We start from the beginning to handle tab characters
l := 0
for idx, r := range runes {
l += util.RuneWidth(r, l, t.tabstop)
if l > width {
return runes[:idx], len(runes) - idx
}
}
return runes, 0
}
func (t *Terminal) displayWidthWithLimit(runes []rune, prefixWidth int, limit int) int {
l := 0
for _, r := range runes {
l += util.RuneWidth(r, l+prefixWidth, t.tabstop)
if l > limit {
// Early exit
return l
}
}
return l
}
func (t *Terminal) trimLeft(runes []rune, width int) ([]rune, int32) {
if len(runes) > maxDisplayWidthCalc && len(runes) > width {
trimmed := len(runes) - width
return runes[trimmed:], int32(trimmed)
}
currentWidth := t.displayWidth(runes)
var trimmed int32
for currentWidth > width && len(runes) > 0 {
runes = runes[1:]
trimmed++
currentWidth = t.displayWidthWithLimit(runes, 2, width)
}
return runes, trimmed
}
func (t *Terminal) overflow(runes []rune, max int) bool {
return t.displayWidthWithLimit(runes, 0, max) > max
}
func (t *Terminal) printHighlighted(result Result, attr tui.Attr, col1 tui.ColorPair, col2 tui.ColorPair, current bool, match bool) int {
item := result.item
// Overflow
text := make([]rune, item.text.Length())
copy(text, item.text.ToRunes())
matchOffsets := []Offset{}
var pos *[]int
if match && t.merger.pattern != nil {
_, matchOffsets, pos = t.merger.pattern.MatchItem(item, true, t.slab)
}
charOffsets := matchOffsets
if pos != nil {
charOffsets = make([]Offset, len(*pos))
for idx, p := range *pos {
offset := Offset{int32(p), int32(p + 1)}
charOffsets[idx] = offset
}
sort.Sort(ByOrder(charOffsets))
}
var maxe int
for _, offset := range charOffsets {
maxe = util.Max(maxe, int(offset[1]))
}
offsets := result.colorOffsets(charOffsets, t.theme, col2, attr, current)
maxWidth := t.window.Width() - 3
maxe = util.Constrain(maxe+util.Min(maxWidth/2-2, t.hscrollOff), 0, len(text))
displayWidth := t.displayWidthWithLimit(text, 0, maxWidth)
if displayWidth > maxWidth {
if t.hscroll {
// Stri..
if !t.overflow(text[:maxe], maxWidth-2) {
text, _ = t.trimRight(text, maxWidth-2)
text = append(text, []rune("..")...)
} else {
// Stri..
if t.overflow(text[maxe:], 2) {
text = append(text[:maxe], []rune("..")...)
}
// ..ri..
var diff int32
text, diff = t.trimLeft(text, maxWidth-2)
// Transform offsets
for idx, offset := range offsets {
b, e := offset.offset[0], offset.offset[1]
b += 2 - diff
e += 2 - diff
b = util.Max32(b, 2)
offsets[idx].offset[0] = b
offsets[idx].offset[1] = util.Max32(b, e)
}
text = append([]rune(".."), text...)
}
} else {
text, _ = t.trimRight(text, maxWidth-2)
text = append(text, []rune("..")...)
for idx, offset := range offsets {
offsets[idx].offset[0] = util.Min32(offset.offset[0], int32(maxWidth-2))
offsets[idx].offset[1] = util.Min32(offset.offset[1], int32(maxWidth))
}
}
displayWidth = t.displayWidthWithLimit(text, 0, displayWidth)
}
var index int32
var substr string
var prefixWidth int
maxOffset := int32(len(text))
for _, offset := range offsets {
b := util.Constrain32(offset.offset[0], index, maxOffset)
e := util.Constrain32(offset.offset[1], index, maxOffset)
substr, prefixWidth = t.processTabs(text[index:b], prefixWidth)
t.window.CPrint(col1, attr, substr)
if b < e {
substr, prefixWidth = t.processTabs(text[b:e], prefixWidth)
t.window.CPrint(offset.color, offset.attr, substr)
}
index = e
if index >= maxOffset {
break
}
}
if index < maxOffset {
substr, _ = t.processTabs(text[index:], prefixWidth)
t.window.CPrint(col1, attr, substr)
}
return displayWidth
}
func numLinesMax(str string, max int) int {
lines := 0
for lines < max {
idx := strings.Index(str, "\n")
if idx < 0 {
break
}
str = str[idx+1:]
lines++
}
return lines
}
func (t *Terminal) printPreview() {
if !t.hasPreviewWindow() {
return
}
t.pwindow.Erase()
maxWidth := t.pwindow.Width()
if t.tui.DoesAutoWrap() {
maxWidth -= 1
}
reader := bufio.NewReader(strings.NewReader(t.previewer.text))
lineNo := -t.previewer.offset
height := t.pwindow.Height()
var ansi *ansiState
for {
line, err := reader.ReadString('\n')
eof := err == io.EOF
if !eof {
line = line[:len(line)-1]
}
lineNo++
if lineNo > height ||
t.pwindow.Y() == height-1 && t.pwindow.X() > 0 {
break
} else if lineNo > 0 {
var fillRet tui.FillReturn
_, _, ansi = extractColor(line, ansi, func(str string, ansi *ansiState) bool {
trimmed := []rune(str)
if !t.preview.wrap {
trimmed, _ = t.trimRight(trimmed, maxWidth-t.pwindow.X())
}
str, _ = t.processTabs(trimmed, 0)
if t.theme != nil && ansi != nil && ansi.colored() {
fillRet = t.pwindow.CFill(ansi.fg, ansi.bg, ansi.attr, str)
} else {
fillRet = t.pwindow.CFill(tui.ColNormal.Fg(), tui.ColNormal.Bg(), tui.AttrRegular, str)
}
return fillRet == tui.FillContinue
})
switch fillRet {
case tui.FillNextLine:
continue
case tui.FillSuspend:
break
}
t.pwindow.Fill("\n")
}
if eof {
break
}
}
t.pwindow.FinishFill()
if t.previewer.lines > height {
offset := fmt.Sprintf("%d/%d", t.previewer.offset+1, t.previewer.lines)
pos := t.pwindow.Width() - len(offset)
if t.tui.DoesAutoWrap() {
pos -= 1
}
t.pwindow.Move(0, pos)
t.pwindow.CPrint(tui.ColInfo, tui.Reverse, offset)
}
}
func (t *Terminal) processTabs(runes []rune, prefixWidth int) (string, int) {
var strbuf bytes.Buffer
l := prefixWidth
for _, r := range runes {
w := util.RuneWidth(r, l, t.tabstop)
l += w
if r == '\t' {
strbuf.WriteString(strings.Repeat(" ", w))
} else {
strbuf.WriteRune(r)
}
}
return strbuf.String(), l
}
func (t *Terminal) printAll() {
t.resizeWindows()
t.printList()
t.printPrompt()
t.printInfo()
t.printHeader()
t.printPreview()
}
func (t *Terminal) refresh() {
if !t.suppress {
windows := make([]tui.Window, 0, 4)
if t.bordered {
windows = append(windows, t.border)
}
if t.hasPreviewWindow() {
windows = append(windows, t.pborder, t.pwindow)
}
windows = append(windows, t.window)
t.tui.RefreshWindows(windows)
}
}
func (t *Terminal) delChar() bool {
if len(t.input) > 0 && t.cx < len(t.input) {
t.input = append(t.input[:t.cx], t.input[t.cx+1:]...)
return true
}
return false
}
func findLastMatch(pattern string, str string) int {
rx, err := regexp.Compile(pattern)
if err != nil {
return -1
}
locs := rx.FindAllStringIndex(str, -1)
if locs == nil {
return -1
}
return locs[len(locs)-1][0]
}
func findFirstMatch(pattern string, str string) int {
rx, err := regexp.Compile(pattern)
if err != nil {
return -1
}
loc := rx.FindStringIndex(str)
if loc == nil {
return -1
}
return loc[0]
}
func copySlice(slice []rune) []rune {
ret := make([]rune, len(slice))
copy(ret, slice)
return ret
}
func (t *Terminal) rubout(pattern string) {
pcx := t.cx
after := t.input[t.cx:]
t.cx = findLastMatch(pattern, string(t.input[:t.cx])) + 1
t.yanked = copySlice(t.input[t.cx:pcx])
t.input = append(t.input[:t.cx], after...)
}
func keyMatch(key int, event tui.Event) bool {
return event.Type == key ||
event.Type == tui.Rune && int(event.Char) == key-tui.AltZ ||
event.Type == tui.Mouse && key == tui.DoubleClick && event.MouseEvent.Double
}
func quoteEntryCmd(entry string) string {
escaped := strings.Replace(entry, `\`, `\\`, -1)
escaped = `"` + strings.Replace(escaped, `"`, `\"`, -1) + `"`
r, _ := regexp.Compile(`[&|<>()@^%!"]`)
return r.ReplaceAllStringFunc(escaped, func(match string) string {
return "^" + match
})
}
func quoteEntry(entry string) string {
if util.IsWindows() {
return quoteEntryCmd(entry)
}
return "'" + strings.Replace(entry, "'", "'\\''", -1) + "'"
}
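// Illustrative behaviour of the two quoting helpers above (these examples are
// not part of the original source):
//
//   quoteEntry("it's")     // POSIX shells     -> 'it'\''s'
//   quoteEntry("foo&bar")  // on Windows (cmd) -> ^"foo^&bar^"
//
// quoteEntryCmd doubles backslashes, wraps the value in double quotes, and then
// caret-escapes cmd.exe metacharacters (& | < > ( ) @ ^ % ! and the quote itself).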
func hasPlusFlag(template string) bool {
for _, match := range placeholder.FindAllString(template, -1) {
if match[0] == '\\' {
continue
}
if match[1] == '+' {
return true
}
}
return false
}
func replacePlaceholder(template string, stripAnsi bool, delimiter Delimiter, forcePlus bool, query string, allItems []*Item) string {
current := allItems[:1]
selected := allItems[1:]
if current[0] == nil {
current = []*Item{}
}
if selected[0] == nil {
selected = []*Item{}
}
return placeholder.ReplaceAllStringFunc(template, func(match string) string {
// Escaped pattern
if match[0] == '\\' {
return match[1:]
}
// Current query
if match == "{q}" {
return quoteEntry(query)
}
plusFlag := forcePlus
if match[1] == '+' {
match = "{" + match[2:]
plusFlag = true
}
items := current
if plusFlag {
items = selected
}
replacements := make([]string, len(items))
if match == "{}" {
for idx, item := range items {
replacements[idx] = quoteEntry(item.AsString(stripAnsi))
}
return strings.Join(replacements, " ")
}
tokens := strings.Split(match[1:len(match)-1], ",")
ranges := make([]Range, len(tokens))
for idx, s := range tokens {
r, ok := ParseRange(&s)
if !ok {
// Invalid expression, just return the original string in the template
return match
}
ranges[idx] = r
}
for idx, item := range items {
tokens := Tokenize(item.AsString(stripAnsi), delimiter)
trans := Transform(tokens, ranges)
str := string(joinTokens(trans))
if delimiter.str != nil {
str = strings.TrimSuffix(str, *delimiter.str)
} else if delimiter.regex != nil {
delims := delimiter.regex.FindAllStringIndex(str, -1)
if len(delims) > 0 && delims[len(delims)-1][1] == len(str) {
str = str[:delims[len(delims)-1][0]]
}
}
str = strings.TrimSpace(str)
replacements[idx] = quoteEntry(str)
}
return strings.Join(replacements, " ")
})
}
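// Rough sketch of what replacePlaceholder produces (illustrative only, assuming
// the current line is "foo bar baz" and the query is "ba"):
//
//   "echo {}"   -> echo 'foo bar baz'   // whole entry, quoted
//   "echo {q}"  -> echo 'ba'            // current query
//   "echo {1}"  -> echo 'foo'           // first token per the delimiter
//   "echo {+1}" -> one token per selected entry when a selection exists
//
// An escaped placeholder such as \{} is passed through with the backslash removed.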
func (t *Terminal) redraw() {
t.tui.Clear()
t.tui.Refresh()
t.printAll()
}
func (t *Terminal) executeCommand(template string, forcePlus bool, background bool) {
valid, list := t.buildPlusList(template, forcePlus)
if !valid {
return
}
command := replacePlaceholder(template, t.ansi, t.delimiter, forcePlus, string(t.input), list)
cmd := util.ExecCommand(command)
if !background {
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
t.tui.Pause(true)
cmd.Run()
t.tui.Resume(true)
t.redraw()
t.refresh()
} else {
cmd.Run()
}
}
func (t *Terminal) hasPreviewer() bool {
return t.previewBox != nil
}
func (t *Terminal) isPreviewEnabled() bool {
return t.hasPreviewer() && t.previewer.enabled
}
func (t *Terminal) hasPreviewWindow() bool {
return t.pwindow != nil && t.isPreviewEnabled()
}
func (t *Terminal) currentItem() *Item {
cnt := t.merger.Length()
if cnt > 0 && cnt > t.cy {
return t.merger.Get(t.cy).item
}
return nil
}
func (t *Terminal) buildPlusList(template string, forcePlus bool) (bool, []*Item) {
current := t.currentItem()
if !forcePlus && !hasPlusFlag(template) || len(t.selected) == 0 {
return current != nil, []*Item{current, current}
}
sels := make([]*Item, len(t.selected)+1)
sels[0] = current
for i, sel := range t.sortSelected() {
sels[i+1] = sel.item
}
return true, sels
}
func (t *Terminal) truncateQuery() {
maxPatternLength := util.Max(1, t.window.Width()-t.promptLen-1)
t.input, _ = t.trimRight(t.input, maxPatternLength)
t.cx = util.Constrain(t.cx, 0, len(t.input))
}
func (t *Terminal) selectItem(item *Item) {
t.selected[item.Index()] = selectedItem{time.Now(), item}
t.version++
}
func (t *Terminal) deselectItem(item *Item) {
delete(t.selected, item.Index())
t.version++
}
func (t *Terminal) toggleItem(item *Item) {
if _, found := t.selected[item.Index()]; !found {
t.selectItem(item)
} else {
t.deselectItem(item)
}
}
// Loop is called to start Terminal I/O
func (t *Terminal) Loop() {
// prof := profile.Start(profile.ProfilePath("/tmp/"))
<-t.startChan
{ // Late initialization
intChan := make(chan os.Signal, 1)
signal.Notify(intChan, os.Interrupt, os.Kill, syscall.SIGTERM)
go func() {
<-intChan
t.reqBox.Set(reqQuit, nil)
}()
contChan := make(chan os.Signal, 1)
notifyOnCont(contChan)
go func() {
for {
<-contChan
t.reqBox.Set(reqReinit, nil)
}
}()
resizeChan := make(chan os.Signal, 1)
notifyOnResize(resizeChan) // Non-portable
go func() {
for {
<-resizeChan
t.reqBox.Set(reqRedraw, nil)
}
}()
t.mutex.Lock()
t.initFunc()
t.resizeWindows()
t.printPrompt()
t.placeCursor()
t.refresh()
t.printInfo()
t.printHeader()
t.mutex.Unlock()
go func() {
timer := time.NewTimer(t.initDelay)
<-timer.C
t.reqBox.Set(reqRefresh, nil)
}()
// Keep the spinner spinning
go func() {
for {
t.mutex.Lock()
reading := t.reading
t.mutex.Unlock()
if !reading {
break
}
time.Sleep(spinnerDuration)
t.reqBox.Set(reqInfo, nil)
}
}()
}
if t.hasPreviewer() {
go func() {
for {
var request []*Item
t.previewBox.Wait(func(events *util.Events) {
for req, value := range *events {
switch req {
case reqPreviewEnqueue:
request = value.([]*Item)
}
}
events.Clear()
})
// We don't display preview window if no match
if request[0] != nil {
command := replacePlaceholder(t.preview.command,
t.ansi, t.delimiter, false, string(t.input), request)
cmd := util.ExecCommand(command)
if t.pwindow != nil {
env := os.Environ()
env = append(env, fmt.Sprintf("LINES=%d", t.pwindow.Height()))
env = append(env, fmt.Sprintf("COLUMNS=%d", t.pwindow.Width()))
cmd.Env = env
}
out, _ := cmd.CombinedOutput()
t.reqBox.Set(reqPreviewDisplay, string(out))
} else {
t.reqBox.Set(reqPreviewDisplay, "")
}
}
}()
}
exit := func(getCode func() int) {
if !t.cleanExit && t.fullscreen && t.inlineInfo {
t.placeCursor()
}
t.tui.Close()
code := getCode()
if code <= exitNoMatch && t.history != nil {
t.history.append(string(t.input))
}
// prof.Stop()
os.Exit(code)
}
go func() {
var focused *Item
var version int64
for {
t.reqBox.Wait(func(events *util.Events) {
defer events.Clear()
t.mutex.Lock()
for req, value := range *events {
switch req {
case reqPrompt:
t.printPrompt()
if t.inlineInfo {
t.printInfo()
}
case reqInfo:
t.printInfo()
case reqList:
t.printList()
currentFocus := t.currentItem()
if currentFocus != focused || version != t.version {
version = t.version
focused = currentFocus
if t.isPreviewEnabled() {
_, list := t.buildPlusList(t.preview.command, false)
t.previewBox.Set(reqPreviewEnqueue, list)
}
}
case reqJump:
if t.merger.Length() == 0 {
t.jumping = jumpDisabled
}
t.printList()
case reqHeader:
t.printHeader()
case reqRefresh:
t.suppress = false
case reqReinit:
t.tui.Resume(t.fullscreen)
t.redraw()
case reqRedraw:
t.redraw()
case reqClose:
exit(func() int {
if t.output() {
return exitOk
}
return exitNoMatch
})
case reqPreviewDisplay:
t.previewer.text = value.(string)
t.previewer.lines = strings.Count(t.previewer.text, "\n")
t.previewer.offset = 0
t.printPreview()
case reqPreviewRefresh:
t.printPreview()
case reqPrintQuery:
exit(func() int {
t.printer(string(t.input))
return exitOk
})
case reqQuit:
exit(func() int { return exitInterrupt })
}
}
t.placeCursor()
t.mutex.Unlock()
})
t.refresh()
}
}()
looping := true
for looping {
event := t.tui.GetChar()
t.mutex.Lock()
previousInput := t.input
events := []util.EventType{reqPrompt}
req := func(evts ...util.EventType) {
for _, event := range evts {
events = append(events, event)
if event == reqClose || event == reqQuit {
looping = false
}
}
}
toggle := func() {
if t.cy < t.merger.Length() {
t.toggleItem(t.merger.Get(t.cy).item)
req(reqInfo)
}
}
scrollPreview := func(amount int) {
t.previewer.offset = util.Constrain(
t.previewer.offset+amount, 0, t.previewer.lines-1)
req(reqPreviewRefresh)
}
for key, ret := range t.expect {
if keyMatch(key, event) {
t.pressed = ret
t.reqBox.Set(reqClose, nil)
t.mutex.Unlock()
return
}
}
var doAction func(action, int) bool
doActions := func(actions []action, mapkey int) bool {
for _, action := range actions {
if !doAction(action, mapkey) {
return false
}
}
return true
}
doAction = func(a action, mapkey int) bool {
switch a.t {
case actIgnore:
case actExecute, actExecuteSilent:
t.executeCommand(a.a, false, a.t == actExecuteSilent)
case actExecuteMulti:
t.executeCommand(a.a, true, false)
case actInvalid:
t.mutex.Unlock()
return false
case actTogglePreview:
if t.hasPreviewer() {
t.previewer.enabled = !t.previewer.enabled
t.tui.Clear()
t.resizeWindows()
if t.previewer.enabled {
valid, list := t.buildPlusList(t.preview.command, false)
if valid {
t.previewBox.Set(reqPreviewEnqueue, list)
}
}
req(reqList, reqInfo, reqHeader)
}
case actTogglePreviewWrap:
if t.hasPreviewWindow() {
t.preview.wrap = !t.preview.wrap
req(reqPreviewRefresh)
}
case actToggleSort:
t.sort = !t.sort
t.eventBox.Set(EvtSearchNew, t.sort)
t.mutex.Unlock()
return false
case actPreviewUp:
if t.hasPreviewWindow() {
scrollPreview(-1)
}
case actPreviewDown:
if t.hasPreviewWindow() {
scrollPreview(1)
}
case actPreviewPageUp:
if t.hasPreviewWindow() {
scrollPreview(-t.pwindow.Height())
}
case actPreviewPageDown:
if t.hasPreviewWindow() {
scrollPreview(t.pwindow.Height())
}
case actBeginningOfLine:
t.cx = 0
case actBackwardChar:
if t.cx > 0 {
t.cx--
}
case actPrintQuery:
req(reqPrintQuery)
case actReplaceQuery:
if t.cy >= 0 && t.cy < t.merger.Length() {
t.input = t.merger.Get(t.cy).item.text.ToRunes()
t.cx = len(t.input)
}
case actAbort:
req(reqQuit)
case actDeleteChar:
t.delChar()
case actDeleteCharEOF:
if !t.delChar() && t.cx == 0 {
req(reqQuit)
}
case actEndOfLine:
t.cx = len(t.input)
case actCancel:
if len(t.input) == 0 {
req(reqQuit)
} else {
t.yanked = t.input
t.input = []rune{}
t.cx = 0
}
case actForwardChar:
if t.cx < len(t.input) {
t.cx++
}
case actBackwardDeleteChar:
if t.cx > 0 {
t.input = append(t.input[:t.cx-1], t.input[t.cx:]...)
t.cx--
}
case actSelectAll:
if t.multi {
for i := 0; i < t.merger.Length(); i++ {
t.selectItem(t.merger.Get(i).item)
}
req(reqList, reqInfo)
}
case actDeselectAll:
if t.multi {
t.selected = make(map[int32]selectedItem)
t.version++
req(reqList, reqInfo)
}
case actToggle:
if t.multi && t.merger.Length() > 0 {
toggle()
req(reqList)
}
case actToggleAll:
if t.multi {
for i := 0; i < t.merger.Length(); i++ {
t.toggleItem(t.merger.Get(i).item)
}
req(reqList, reqInfo)
}
case actToggleIn:
if t.reverse {
return doAction(action{t: actToggleUp}, mapkey)
}
return doAction(action{t: actToggleDown}, mapkey)
case actToggleOut:
if t.reverse {
return doAction(action{t: actToggleDown}, mapkey)
}
return doAction(action{t: actToggleUp}, mapkey)
case actToggleDown:
if t.multi && t.merger.Length() > 0 {
toggle()
t.vmove(-1, true)
req(reqList)
}
case actToggleUp:
if t.multi && t.merger.Length() > 0 {
toggle()
t.vmove(1, true)
req(reqList)
}
case actDown:
t.vmove(-1, true)
req(reqList)
case actUp:
t.vmove(1, true)
req(reqList)
case actAccept:
req(reqClose)
case actAcceptNonEmpty:
if len(t.selected) > 0 || t.merger.Length() > 0 || !t.reading && t.count == 0 {
req(reqClose)
}
case actClearScreen:
req(reqRedraw)
case actTop:
t.vset(0)
req(reqList)
case actUnixLineDiscard:
if t.cx > 0 {
t.yanked = copySlice(t.input[:t.cx])
t.input = t.input[t.cx:]
t.cx = 0
}
case actUnixWordRubout:
if t.cx > 0 {
t.rubout("\\s\\S")
}
case actBackwardKillWord:
if t.cx > 0 {
t.rubout(t.wordRubout)
}
case actYank:
suffix := copySlice(t.input[t.cx:])
t.input = append(append(t.input[:t.cx], t.yanked...), suffix...)
t.cx += len(t.yanked)
case actPageUp:
t.vmove(t.maxItems()-1, false)
req(reqList)
case actPageDown:
t.vmove(-(t.maxItems() - 1), false)
req(reqList)
case actHalfPageUp:
t.vmove(t.maxItems()/2, false)
req(reqList)
case actHalfPageDown:
t.vmove(-(t.maxItems() / 2), false)
req(reqList)
case actJump:
t.jumping = jumpEnabled
req(reqJump)
case actJumpAccept:
t.jumping = jumpAcceptEnabled
req(reqJump)
case actBackwardWord:
t.cx = findLastMatch(t.wordRubout, string(t.input[:t.cx])) + 1
case actForwardWord:
t.cx += findFirstMatch(t.wordNext, string(t.input[t.cx:])) + 1
case actKillWord:
ncx := t.cx +
findFirstMatch(t.wordNext, string(t.input[t.cx:])) + 1
if ncx > t.cx {
t.yanked = copySlice(t.input[t.cx:ncx])
t.input = append(t.input[:t.cx], t.input[ncx:]...)
}
case actKillLine:
if t.cx < len(t.input) {
t.yanked = copySlice(t.input[t.cx:])
t.input = t.input[:t.cx]
}
case actRune:
prefix := copySlice(t.input[:t.cx])
t.input = append(append(prefix, event.Char), t.input[t.cx:]...)
t.cx++
case actPreviousHistory:
if t.history != nil {
t.history.override(string(t.input))
t.input = trimQuery(t.history.previous())
t.cx = len(t.input)
}
case actNextHistory:
if t.history != nil {
t.history.override(string(t.input))
t.input = trimQuery(t.history.next())
t.cx = len(t.input)
}
case actSigStop:
p, err := os.FindProcess(os.Getpid())
if err == nil {
t.tui.Clear()
t.tui.Pause(t.fullscreen)
notifyStop(p)
t.mutex.Unlock()
return false
}
case actMouse:
me := event.MouseEvent
mx, my := me.X, me.Y
if me.S != 0 {
// Scroll
if t.window.Enclose(my, mx) && t.merger.Length() > 0 {
if t.multi && me.Mod {
toggle()
}
t.vmove(me.S, true)
req(reqList)
} else if t.hasPreviewWindow() && t.pwindow.Enclose(my, mx) {
scrollPreview(-me.S)
}
} else if t.window.Enclose(my, mx) {
mx -= t.window.Left()
my -= t.window.Top()
mx = util.Constrain(mx-t.promptLen, 0, len(t.input))
if !t.reverse {
my = t.window.Height() - my - 1
}
min := 2 + len(t.header)
if t.inlineInfo {
min--
}
if me.Double {
// Double-click
if my >= min {
if t.vset(t.offset+my-min) && t.cy < t.merger.Length() {
return doActions(t.keymap[tui.DoubleClick], tui.DoubleClick)
}
}
} else if me.Down {
if my == 0 && mx >= 0 {
// Prompt
t.cx = mx
} else if my >= min {
// List
if t.vset(t.offset+my-min) && t.multi && me.Mod {
toggle()
}
req(reqList)
if me.Left {
return doActions(t.keymap[tui.LeftClick], tui.LeftClick)
}
return doActions(t.keymap[tui.RightClick], tui.RightClick)
}
}
}
}
return true
}
changed := false
mapkey := event.Type
if t.jumping == jumpDisabled {
actions := t.keymap[mapkey]
if mapkey == tui.Rune {
mapkey = int(event.Char) + int(tui.AltZ)
if act, prs := t.keymap[mapkey]; prs {
actions = act
}
}
if !doActions(actions, mapkey) {
continue
}
t.truncateQuery()
changed = string(previousInput) != string(t.input)
if onChanges, prs := t.keymap[tui.Change]; changed && prs {
if !doActions(onChanges, tui.Change) {
continue
}
}
} else {
if mapkey == tui.Rune {
if idx := strings.IndexRune(t.jumpLabels, event.Char); idx >= 0 && idx < t.maxItems() && idx < t.merger.Length() {
t.cy = idx + t.offset
if t.jumping == jumpAcceptEnabled {
req(reqClose)
}
}
}
t.jumping = jumpDisabled
req(reqList)
}
t.mutex.Unlock() // Must be unlocked before touching reqBox
if changed {
t.eventBox.Set(EvtSearchNew, t.sort)
}
for _, event := range events {
t.reqBox.Set(event, nil)
}
}
}
func (t *Terminal) constrain() {
count := t.merger.Length()
height := t.maxItems()
diffpos := t.cy - t.offset
t.cy = util.Constrain(t.cy, 0, count-1)
t.offset = util.Constrain(t.offset, t.cy-height+1, t.cy)
// Adjustment
if count-t.offset < height {
t.offset = util.Max(0, count-height)
t.cy = util.Constrain(t.offset+diffpos, 0, count-1)
}
t.offset = util.Max(0, t.offset)
}
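// Worked example for constrain (illustrative only): with 10 matches, a visible
// height of 5 and cy == 7, the offset is clamped into [cy-height+1, cy] = [3, 7],
// so the cursor row always remains inside the visible window.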
func (t *Terminal) vmove(o int, allowCycle bool) {
if t.reverse {
o *= -1
}
dest := t.cy + o
if t.cycle && allowCycle {
max := t.merger.Length() - 1
if dest > max {
if t.cy == max {
dest = 0
}
} else if dest < 0 {
if t.cy == 0 {
dest = max
}
}
}
t.vset(dest)
}
func (t *Terminal) vset(o int) bool {
t.cy = util.Constrain(o, 0, t.merger.Length()-1)
return t.cy == o
}
func (t *Terminal) maxItems() int {
max := t.window.Height() - 2 - len(t.header)
if t.inlineInfo {
max++
}
return util.Max(max, 0)
}
| [
"\"FZF_DEFAULT_COMMAND\""
]
| []
| [
"FZF_DEFAULT_COMMAND"
]
| [] | ["FZF_DEFAULT_COMMAND"] | go | 1 | 0 | |
etl.py | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, from_unixtime
from pyspark.sql.functions import monotonically_increasing_id
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config['AWS_CREDENTIALS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS_CREDENTIALS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def process_song_data(spark, input_data, output_data):
# get filepath to song data file
song_data = input_data + "song_data/*/*/*/*.json"
# read song data file
df = spark.read.json(song_data)
df.createOrReplaceTempView("song_data_table")
# extract columns to create songs table
songs_select_sql = """
SELECT DISTINCT song_id, title, artist_id, year, duration
FROM song_data_table
WHERE song_id IS NOT NULL
"""
songs_table = spark.sql(songs_select_sql)
# write songs table to parquet files partitioned by year and artist
songs_table.write.partitionBy("year", "artist_id").parquet(output_data + "songs")
# extract columns to create artists table
artists_select_sql = """
SELECT DISTINCT artist_id, artist_name as name, artist_location as location,
artist_latitude as latitude, artist_longitude as longitude
FROM song_data_table
WHERE artist_id IS NOT NULL
"""
artists_table = spark.sql(artists_select_sql)
# write artists table to parquet files
artists_table.write.parquet(output_data + "artists")
def process_log_data(spark, input_data, output_data):
# get filepath to log data file
log_data = input_data + "log_data/*/*/*.json"
# read log data file
df = spark.read.json(log_data)
# filter by actions for song plays
df = df.filter("page == 'NextSong'")
    # create timestamp column (epoch seconds) from the original epoch-millisecond ts column
get_timestamp = udf(lambda x : x // 1000)
df = df.withColumn("timestamp", get_timestamp(df.ts))
# create datetime column from original timestamp column
df = df.withColumn("datetime", from_unixtime(df.timestamp))
df.createOrReplaceTempView("log_data_table")
# extract columns for users table
users_select_sql = """
SELECT DISTINCT userId AS user_id, firstName AS first_name, lastName AS last_name, gender, level
FROM log_data_table
WHERE userId IS NOT NULL
"""
users_table = spark.sql(users_select_sql)
# write users table to parquet files
users_table.write.parquet(output_data + "users")
# extract columns to create time table
time_table_sql = """
SELECT DISTINCT timestamp as start_time, hour(datetime) as hour, dayofmonth(datetime) as day, weekofyear(datetime) as week,
month(datetime) as month, year(datetime) as year, dayofweek(datetime) as weekday
FROM log_data_table
WHERE timestamp IS NOT NULL
"""
time_table = spark.sql(time_table_sql)
# write time table to parquet files partitioned by year and month
time_table.write.partitionBy("year", "month").parquet(output_data + "time")
# read in song data to use for songplays table
song_df = spark.read.parquet(output_data + "songs")
song_df.createOrReplaceTempView("songs")
# extract columns from joined song and log datasets to create songplays table
songplays_select_sql = """
SELECT DISTINCT se.timestamp AS start_time, se.userId AS user_id, se.level, ss.song_id, ss.artist_id,
se.sessionId as session_id, se.location, se.userAgent AS user_agent, month(se.datetime) as month, year(se.datetime) as year
FROM log_data_table AS se
JOIN songs AS ss
ON se.song = ss.title AND se.length = ss.duration
"""
songplays_table = spark.sql(songplays_select_sql)
    # withColumn returns a new DataFrame, so the result must be reassigned
    songplays_table = songplays_table.withColumn("songplay_id", monotonically_increasing_id())
# write songplays table to parquet files partitioned by year and month
songplays_table.write.partitionBy("year", "month").parquet(output_data + "songplays")
def main():
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://dend-spark-data-lake/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
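# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script). Assuming a dl.cfg file next
# to this script with the section it reads:
#
#   [AWS_CREDENTIALS]
#   AWS_ACCESS_KEY_ID = <key id>
#   AWS_SECRET_ACCESS_KEY = <secret key>
#
# the job can then be submitted with, for example:
#
#   spark-submit --packages org.apache.hadoop:hadoop-aws:2.7.0 etl.py
# ---------------------------------------------------------------------------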
| []
| []
| [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
]
| [] | ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | python | 2 | 0 | |
insecureinc/src/main/java/insecure/inc/Util.java | /**
* Copyright 2017-2018 Trend Micro Incorporated
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this work except in compliance with the License. You may obtain a copy of the License at
* https://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
package insecure.inc;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.StringBufferInputStream;
import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;
import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
public class Util {
/**
* Checks the admin pass against a hard-coded hash
* @param pwd
* @return
* @throws UnsupportedEncodingException
* @throws NoSuchAlgorithmException
*/
public static boolean isAdminPassOk(String pwd) throws NoSuchAlgorithmException, UnsupportedEncodingException{
if(pwd==null) return false;
byte [] digest = Crypto.getInstance().getHash(pwd+"PucMfDDfkG7jVOaaK51AjQ","SHA-256");
String passHashString = Base64.getEncoder().encodeToString(digest);
return passHashString.equals("6lvOg9Sb1U8XIo2pNifNw+S3+Kk82+vX0E7CcqttkYU=");
}
/**
* Executes a command and returns the output
* @param command
* @return
* @throws IOException
* @throws InterruptedException
*/
public static String exec(String ... commandArgs) throws IOException, InterruptedException{
Process p = Runtime.getRuntime().exec(commandArgs);
p.waitFor();
StringBuilder sb = new StringBuilder();
BufferedReader reader = null;
if(p.exitValue() == 0){
reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
}
else{
reader = new BufferedReader(new InputStreamReader(p.getErrorStream()));
}
String line = String.format("Executing command:");
for(String s : commandArgs){
line+=s+" ";
}
sb.append(line+"\n");
while ((line = reader.readLine())!= null) {
sb.append(line + "\n");
}
return sb.toString();
}
public static String bytesToHex(byte[] in) {
final StringBuilder builder = new StringBuilder();
for(byte b : in) {
builder.append(String.format("%02x", b));
}
return builder.toString();
}
public static boolean hasScriptTagOrEvent(String value){
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("(?i)(<script|['\"]\\son\\w+|\";\\w+\\()");
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
return false;
}
public static boolean isMatch(String value, String pattern){
if(value==null || value.equals("")) return false;
Pattern p = Pattern.compile(pattern);
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
return false;
}
public static boolean hasImgTag(String value){
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("(?i)<img");
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
return false;
}
public static boolean hasImgTagAndEvent(String value){
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("(?i)<img\\s*[^>]*on(error|load)\\s*=\\s*['\"]?\\s*(alert|prompt)");
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
return false;
}
public static boolean hasXSS(String value){
boolean result=false;
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("(?i)<img\\s*[^>]*on(error|load)\\s*=\\s*['\"]?\\s*(alert|prompt)\\(['\"]FIRE!?['\"]\\)[^>]*>");
Matcher m = p.matcher(value);
if(m.find()){
String imgTag = m.group(0);
//should have a source
p = Pattern.compile("(?i)\\s+src\\s*=\\s*([^>\\s]+)[\\s>/]+");
m = p.matcher(imgTag);
if(m.find()){
return true;
}
}
return result;
}
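	/*
	 * Illustrative inputs for hasXSS above (not part of the original source):
	 * it returns true only for an <img> tag that has a src attribute and an
	 * onerror/onload handler calling alert/prompt with "FIRE", e.g.
	 *   <img src=x onerror=alert('FIRE')>
	 * and false when the src attribute is missing, e.g.
	 *   <img onerror=alert('FIRE')>
	 */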
public static boolean hasCSRF(String value){
boolean result=false;
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("(?i)(https?://[^/]+/)?cwe352loggedin.jsp\\?displayName=Banjo");
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
p = Pattern.compile("(?i)(https?://[^/]+/)?cwe352loggedin.jsp\\?.*&displayName=Banjo");
m = p.matcher(value);
if(m.find()){
return true;
}
return result;
}
public static boolean isExternalProtocol(String value){
boolean result=false;
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("(?i)system\\s*[\"']?(http|ftp|jar)");
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
return result;
}
public static boolean isValidServerName(String value){
boolean result=false;
if(value==null || value.equals("")) return false;
//extract the image tag
Pattern p = Pattern.compile("^[\\w\\.]+(\\s*;\\s*(cat|less)\\s*\\/etc\\/passwd)?$");
Matcher m = p.matcher(value);
if(m.find()){
return true;
}
return result;
}
public static String getUnlockCode(){
String unlockCode = System.getenv("UNLOCK_CODE");
if(unlockCode==null){
//try to get the key pairs with System.getProperty
unlockCode = System.getProperty("UNLOCK_CODE");
}
return unlockCode;
}
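	/*
	 * Example (assumption, not from the original source): the unlock code can be
	 * supplied either as an environment variable or, as a fallback, as a JVM
	 * system property:
	 *   export UNLOCK_CODE=some-value
	 *   java -DUNLOCK_CODE=some-value ...
	 */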
public static String executeMasterPwd(String userPwd,String sourceFile) throws IOException, InterruptedException{
StringBuilder consoleOutput = new StringBuilder();
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
InputStream is = classLoader.getResourceAsStream(sourceFile);
OutputStream os = null;
Random r = new Random();
int val = r.nextInt(1000000);
String tmpFileName = "/tmp/"+val;
try {
os = new FileOutputStream(tmpFileName+".c");
byte[] buffer = new byte[1024];
int length;
while ((length = is.read(buffer)) > 0) {
os.write(buffer, 0, length);
}
} finally {
if(is!=null) is.close();
if(os!=null) os.close();
}
String [] cmdArgs = {"/usr/bin/gcc","-fno-stack-protector",tmpFileName+".c","-o",tmpFileName+".exe"};
String out = exec(cmdArgs);
consoleOutput.append(out);
String cmd = String.format("%s.exe",tmpFileName);
Process p = Runtime.getRuntime().exec(cmd);
OutputStream stdIn = p.getOutputStream();
BufferedReader reader = null;
reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
stdIn.write(userPwd.getBytes());
stdIn.flush();
stdIn.close();
p.waitFor();
StringBuilder sb = new StringBuilder();
if(p.exitValue() == 0){
reader = new BufferedReader(new InputStreamReader(p.getInputStream()));
}
else{
reader = new BufferedReader(new InputStreamReader(p.getErrorStream()));
}
String line = String.format("Executing command: %s",cmd);
consoleOutput.append(line + "\n");
while ((line = reader.readLine())!= null) {
consoleOutput.append(line + "\n");
}
File f = new File(tmpFileName+".c");
f.delete();
f= new File(tmpFileName+".exe");
f.delete();
return consoleOutput.toString();
}
public static boolean isAlphanum(String val, char ... exceptions){
boolean result = true;
int count = val.length();
for(int i=0;i<count;i++){
char c = val.charAt(i);
boolean isOk = false;
//if it's alphabetic turns true
isOk = isOk | Character.isAlphabetic(c);
//if it's a digit turns true
isOk = isOk | Character.isDigit(c);
//if it's in the list of exception turns true
for(char ex : exceptions){
isOk = isOk | ex==c;
}
if(isOk == false){ //if the character didn't meet the requirements return false
return false;
}
}
return result;
}
public static String xmlToString(Document doc) {
try {
StringWriter sw = new StringWriter();
TransformerFactory tf = TransformerFactory.newInstance();
Transformer transformer = tf.newTransformer();
transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
transformer.setOutputProperty(OutputKeys.METHOD, "xml");
transformer.setOutputProperty(OutputKeys.INDENT, "yes");
transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
transformer.transform(new DOMSource(doc), new StreamResult(sw));
return sw.toString();
} catch (Exception ex) {
throw new RuntimeException("Error converting to String", ex);
}
}
public static Document parseXML(String xml) throws SAXException, IOException, ParserConfigurationException {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
Document doc = builder.parse(new StringBufferInputStream(xml));
doc.getDocumentElement().normalize();
return doc;
}
public static String getStringFromInputStream(InputStream input) throws IOException{
String contents = "";
BufferedReader reader = null;
StringBuilder sb = new StringBuilder();
try {
reader = new BufferedReader(new InputStreamReader(input,"UTF-8"));
String line = reader.readLine();
while (line != null) {
sb.append(line);
sb.append("\n");
line = reader.readLine();
if(sb.length()>1024 * 10) throw new Exception("Invalid file size");
}
}
catch(Exception ex){
contents = ex.getMessage();
} finally {
if(reader!=null) reader.close();
}
contents = sb.toString();
return contents;
}
}
| [
"\"UNLOCK_CODE\""
]
| []
| [
"UNLOCK_CODE"
]
| [] | ["UNLOCK_CODE"] | java | 1 | 0 | |
rexart/_app.py | #!/usr/bin/env python3
"""
Script to make matplotlib plots from TRExFitter output.
"""
# fmt: off
import matplotlib
matplotlib.use("pdf")
import matplotlib.font_manager as font_manager
#import os
#if os.environ.get("HELVETICA_MPL"):
# curdir = os.path.dirname(os.path.abspath(__file__))
# print(curdir)
# fontprop_reg = font_manager.FontProperties(fname="{}/Helvetica/Regular.ttf".format(curdir))
# fontprop_atl = font_manager.FontProperties(fname="{}/Helvetica/Bold_Italic.ttf".format(curdir))
# matplotlib.rcParams["font.family"] = fontprop_reg.get_name()
# print(fontprop_reg.get_name())
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
#matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams["axes.labelsize"] = 14
matplotlib.rcParams["font.size"] = 12
matplotlib.rcParams["xtick.top"] = True
matplotlib.rcParams["ytick.right"] = True
matplotlib.rcParams["xtick.direction"] = "in"
matplotlib.rcParams["ytick.direction"] = "in"
matplotlib.rcParams["xtick.labelsize"] = 12
matplotlib.rcParams["ytick.labelsize"] = 12
matplotlib.rcParams["xtick.minor.visible"] = True
matplotlib.rcParams["ytick.minor.visible"] = True
matplotlib.rcParams["xtick.major.width"] = 0.8
matplotlib.rcParams["xtick.minor.width"] = 0.8
matplotlib.rcParams["xtick.major.size"] = 7.0
matplotlib.rcParams["xtick.minor.size"] = 4.0
matplotlib.rcParams["xtick.major.pad"] = 1.5
matplotlib.rcParams["xtick.minor.pad"] = 1.4
matplotlib.rcParams["ytick.major.width"] = 0.8
matplotlib.rcParams["ytick.minor.width"] = 0.8
matplotlib.rcParams["ytick.major.size"] = 7.0
matplotlib.rcParams["ytick.minor.size"] = 4.0
matplotlib.rcParams["ytick.major.pad"] = 1.5
matplotlib.rcParams["ytick.minor.pad"] = 1.4
matplotlib.rcParams["legend.frameon"] = False
matplotlib.rcParams["legend.numpoints"] = 1
matplotlib.rcParams["legend.fontsize"] = 11
matplotlib.rcParams["legend.handlelength"] = 1.5
matplotlib.rcParams["axes.formatter.limits"] = [-4, 4]
matplotlib.rcParams["axes.formatter.use_mathtext"] = True
# fmt: on
import argparse
from rexart.pulls import run_pulls
from rexart.stacks import run_stacks
import logging
def get_args():
# fmt: off
parser = argparse.ArgumentParser()
subcommands = parser.add_subparsers(dest="action", help="main action")
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument("--shrink", action="store_true", help="shrink output PDFs via ghostscript")
common_parser.add_argument("--debug", action="store_true", help="set logging level to debug")
stacks = subcommands.add_parser("stacks", help="Generate stack plots", parents=[common_parser])
stacks.add_argument("workspace", type=str, help="TRExFitter workspace")
stacks.add_argument("-o", "--out-dir", type=str, help="output directory for plots")
stacks.add_argument("--lumi", type=str, default="139", help="Integrated lumi. for text")
stacks.add_argument("--do-postfit", action="store_true", help="produce post fit plots as well")
stacks.add_argument("--skip-regions", type=str, default=None, help="skip regions based on regex")
stacks.add_argument("--band-style", type=str, choices=["hatch", "shade"], default="hatch", help="band art")
stacks.add_argument("--legend-ncol", type=int, choices=[1, 2], default=1, help="number of legend columns")
stacks.set_defaults(func=run_stacks)
pulls = subcommands.add_parser("pulls", help="pull plots", parents=[common_parser])
pulls.add_argument("workspace", type=str, help="TRExFitter workspace")
pulls.add_argument("config", type=str, help="TRExFitter config")
pulls.add_argument("-o", "--out-dir", type=str, help="output directory")
pulls.add_argument("--no-text", action="store_true", help="don't print values on plots")
pulls.set_defaults(func=run_pulls)
# fmt: on
return (parser.parse_args(), parser)
def cli():
args, parser = get_args()
if args.action is None:
parser.print_help()
return 0
default_level = logging.INFO
if args.debug:
default_level = logging.DEBUG
logging.getLogger("matplotlib.backends.backend_pdf").setLevel(logging.INFO)
logging.getLogger("matplotlib.font_manager").setLevel(logging.INFO)
# fmt: off
logging.basicConfig(level=default_level, format="{:20} %(levelname)s %(message)s".format("[%(name)s]"))
logging.addLevelName(logging.WARNING, "\033[1;31m{:8}\033[1;0m".format(logging.getLevelName(logging.WARNING)))
logging.addLevelName(logging.ERROR, "\033[1;35m{:8}\033[1;0m".format(logging.getLevelName(logging.ERROR)))
logging.addLevelName(logging.INFO, "\033[1;32m{:8}\033[1;0m".format(logging.getLevelName(logging.INFO)))
logging.addLevelName(logging.DEBUG, "\033[1;34m{:8}\033[1;0m".format(logging.getLevelName(logging.DEBUG)))
# fmt: on
args.func(args)
return 0
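# Example invocations (illustrative; the console-script name "rexart" is an
# assumption about how cli() is exposed in the package metadata):
#
#   rexart stacks path/to/trex_workspace -o plots --lumi 139 --do-postfit
#   rexart pulls  path/to/trex_workspace fit.config -o pulls --no-text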
| []
| []
| [
"HELVETICA_MPL"
]
| [] | ["HELVETICA_MPL"] | python | 1 | 0 | |
server/router.go | package server
import (
"giligili/api"
"giligili/middleware"
"os"
"github.com/gin-gonic/gin"
)
// NewRouter sets up the route configuration
func NewRouter() *gin.Engine {
r := gin.Default()
	// Middleware; the registration order must not be changed
r.Use(middleware.Session(os.Getenv("SESSION_SECRET")))
r.Use(middleware.Cors())
r.Use(middleware.CurrentUser())
	// Routes
v1 := r.Group("/api/v1")
{
v1.GET("ping", api.Ping)
		// User registration
v1.POST("user/register", api.UserRegister)
		// User login
v1.POST("user/login", api.UserLogin)
		// Routes below require login
auth := v1.Group("")
auth.Use(middleware.AuthRequired())
{
// User Routing
auth.GET("user/me", api.UserMe)
auth.DELETE("user/logout", api.UserLogout)
}
		// Video endpoints
v1.POST("videos", api.CreateVideo)
v1.GET("video/:id", api.ShowVideo)
v1.GET("videos", api.ListVideo)
v1.PUT("video/:id", api.UpdateVideo)
v1.DELETE("video/:id", api.DeleteVideo)
}
return r
}
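// Example requests (illustrative only; host, port and request bodies depend on
// how the server is started and on the api package, which is not shown here):
//
//   curl -X POST http://localhost:3000/api/v1/user/register
//   curl -X POST http://localhost:3000/api/v1/user/login
//   curl http://localhost:3000/api/v1/user/me       (requires a logged-in session)
//   curl http://localhost:3000/api/v1/videos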
| [
"\"SESSION_SECRET\""
]
| []
| [
"SESSION_SECRET"
]
| [] | ["SESSION_SECRET"] | go | 1 | 0 | |
src/modules/faq.py | # Module faq.py v1
#///---- Imports ----///
import re
import os
import logging
from discord.ext import commands
from faunadb import query as q
from faunadb.objects import Ref
from faunadb.client import FaunaClient
#///---- Log ----///
log = logging.getLogger(__name__)
#///---- Class ----///
class FAQ(commands.Cog):
'''
    FAQ lookup and editing
'''
def __init__(self, bot):
'''
        Bot __init__
'''
self.bot = bot
self.db = database(bot)
    #! faq command
@commands.group()
async def faq(self, ctx):
'''
        faq command group
'''
PREFIX = os.getenv("DISCORD_PREFIX")
if ctx.invoked_subcommand is None:
await ctx.send(f"Este comando no existe! Tipea `{PREFIX}faq help` para ver los comandos disponibles :D")
    #! help subcommand
@faq.command()
async def help(self, ctx):
'''
        Description: FAQ help
        Precondition: Type {PREFIX}faq help in a channel
        Postcondition: The bot posts the list of commands with their descriptions
'''
PREFIX = os.getenv("DISCORD_PREFIX")
lines = f'''
```
{PREFIX}faq help: Ayuda del FAQ
{PREFIX}faq all: Por DM recibís el FAQ completo
{PREFIX}faq general: Preguntas generales sobre el uso de Discord y el servidor
{PREFIX}faq english: Preguntas relacionadas a los eventos para charlar en inglés
{PREFIX}faq mentoring: Dudas sobre el sistema de mentorías
{PREFIX}faq coworking: ¿Qué es el Coworking en FEC?
{PREFIX}faq roles: Que són y cómo se obtienen los roles
{PREFIX}faq projects: Consulta sobre los proyectos grupales de desarrollo
{PREFIX}faq studygroup: Consulta sobre los grupos de estudio
```
'''
await ctx.send(lines)
    #! all subcommand
@faq.command()
async def all(self, ctx):
'''
        Description: Full FAQ via DM
        Precondition: Type {PREFIX}faq all in a channel
        Postcondition: The bot sends the complete FAQ via DM
'''
dataPrint = [""] * 4
dataFAQ = self.db.load()
if len(dataFAQ) != 0:
for data in dataFAQ:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
elif len(dataPrint[1]) < 1500:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
elif len(dataPrint[2]) < 1500:
dataPrint[2] = dataPrint[2] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[3] = dataPrint[3] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["FAQ completo:\n```diff\n" + dataPrint[0] + "```",
"```diff\n" + dataPrint[1] + "```",
"```diff\n" + dataPrint[2] + "```",
"```diff\n" + dataPrint[3] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
if len(dataPrint[2]) != 0:
await ctx.author.send(message[2])
if len(dataPrint[3]) != 0:
await ctx.author.send(message[3])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! general subcommand
@faq.command()
async def general(self, ctx):
'''
        Description: DB query for the General category
        Precondition: Type {PREFIX}faq general in a channel
        Postcondition: The bot sends the General FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'General']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["General:\n```diff\n" + dataPrint[0] + "```", "General (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! english subcommand
@faq.command()
async def english(self, ctx):
'''
        Description: DB query for the English category
        Precondition: Type {PREFIX}faq english in a channel
        Postcondition: The bot sends the English FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'English']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["English:\n```diff\n" + dataPrint[0] + "```", "English (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! mentoring subcommand
@faq.command()
async def mentoring(self, ctx):
'''
        Description: DB query for the Mentoring category
        Precondition: Type {PREFIX}faq mentoring in a channel
        Postcondition: The bot sends the Mentoring FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'Mentoring']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["Mentoring:\n```diff\n" + dataPrint[0] + "```", "Mentoring (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! coworking subcommand
@faq.command()
async def coworking(self, ctx):
'''
        Description: DB query for the Coworking category
        Precondition: Type {PREFIX}faq coworking in a channel
        Postcondition: The bot sends the Coworking FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'Coworking']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["Coworking:\n```diff\n" + dataPrint[0] + "```", "Coworking (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! roles subcommand
@faq.command()
async def roles(self, ctx):
'''
        Description: DB query for the Roles category
        Precondition: Type {PREFIX}faq roles in a channel
        Postcondition: The bot sends the Roles FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'Roles']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["Roles:\n```diff\n" + dataPrint[0] + "```", "Roles (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! projects subcommand
@faq.command()
async def projects(self, ctx):
'''
        Description: DB query for the Projects category
        Precondition: Type {PREFIX}faq projects in a channel
        Postcondition: The bot sends the Projects FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'Projects']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["Projects:\n```diff\n" + dataPrint[0] + "```", "Projects (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
    #! studygroup subcommand
@faq.command()
async def studygroup(self, ctx):
'''
        Description: DB query for the Study-Group category
        Precondition: Type {PREFIX}faq studygroup in a channel
        Postcondition: The bot sends the Study Group FAQ via DM
'''
dataGen = []
dataPrint = [""] * 2
dataFAQ = self.db.load()
dataGen = [data for data in dataFAQ if data['Category'] == 'Study-Group']
if len(dataGen) != 0:
for data in dataGen:
if len(dataPrint[0]) < 1500:
dataPrint[0] = dataPrint[0] + f"+{data['Question']}\n{data['Answer']}\n\n"
else:
dataPrint[1] = dataPrint[1] + f"+{data['Question']}\n{data['Answer']}\n\n"
message = ["Study Group:\n```diff\n" + dataPrint[0] + "```", "Study Group (continuación):\n```diff\n" + dataPrint[1] + "```"]
await ctx.author.send(message[0])
if len(dataPrint[1]) != 0:
await ctx.author.send(message[1])
else:
await ctx.author.send('No hay datos para esta consulta. Contactar con los administradores!')
class database:
'''
    database class: queries FaunaDB for every document in the FAQs collection
'''
def __init__(self, bot):
'''
__init__
'''
self.bot = bot
DB_KEY = os.getenv("FAUNADB_SECRET_KEY")
self.client = FaunaClient(secret = DB_KEY)
def load(self):
'''
        Description: Load every document (as a dict) into a list
'''
listFAQ = []
        # Fetch the refs of all FAQ documents through the all_faqs index
allfaqs = self.client.query(
q.paginate(
q.match(q.index('all_faqs'))
)
)
allfaqslist = [allfaqs['data']]
result = re.findall('\\d+', str(allfaqslist))
        # Build the list of dictionaries, one query per ref
for i in range(0, len(result), 1):
faqdetails = self.client.query(q.get(q.ref(q.collection('FAQs'), result[i])))
listFAQ += [faqdetails['data']]
return listFAQ
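    # Shape of each returned dict, as assumed by the FAQ cog above (illustrative,
    # inferred from the fields this module accesses, not from the live database):
    #
    #   {
    #       'Question': '...',
    #       'Answer': '...',
    #       'Category': 'General'  # or English, Mentoring, Coworking, Roles,
    #                              # Projects, Study-Group
    #   }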
| []
| []
| [
"DISCORD_PREFIX",
"FAUNADB_SECRET_KEY"
]
| [] | ["DISCORD_PREFIX", "FAUNADB_SECRET_KEY"] | python | 2 | 0 | |
Validation/CaloTowers/test/runNoise_valid_simhits_digis_rechits_calotowers_ZS_cfg.py | import os
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
process = cms.Process("TEST")
### RANDOM setting (change last digit(s) to make runs different !)
process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
#process.RandomNumberGeneratorService.generator.initialSeed = 12345XXXX
process.load("Configuration.StandardSequences.Simulation_cff")
process.load("SimGeneral.MixingModule.mixNoPU_cfi")
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load('Configuration/StandardSequences/DigiToRaw_cff')
process.load('Configuration/StandardSequences/RawToDigi_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['mc']
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load("Configuration.StandardSequences.GeometryECALHCAL_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.g4SimHits.UseMagneticField = False
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.load("DQMServices.Core.DQM_cfg")
process.DQM.collectorHost = ''
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound')
)
# Input source
process.source = cms.Source("PoolSource",
firstEvent = cms.untracked.uint32(1),
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(
'file:/afs/cern.ch/cms/data/CMSSW/Validation/HcalHits/data/3_1_X/mc_nue.root'
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.FEVT = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *', 'keep *_MEtoEDMConverter_*_*'),
fileName = cms.untracked.string("HcalValHarvestingEDM.root")
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
process.hcalDigiAnalyzer = DQMEDAnalyzer('HcalDigiTester',
digiLabel = cms.InputTag("hcalDigis"),
outputFile = cms.untracked.string('HcalDigisValidation_ZS.root'),
hcalselector = cms.untracked.string('noise'),
zside = cms.untracked.string('*')
)
process.hcalRecoAnalyzer = DQMEDAnalyzer('HcalRecHitsValidation',
outputFile = cms.untracked.string('HcalRecHitsValidation_ZS.root'),
HBHERecHitCollectionLabel = cms.untracked.InputTag("hbhereco"),
HFRecHitCollectionLabel = cms.untracked.InputTag("hfreco"),
HORecHitCollectionLabel = cms.untracked.InputTag("horeco"),
eventype = cms.untracked.string('single'),
mc = cms.untracked.string('yes'),
sign = cms.untracked.string('*'),
hcalselector = cms.untracked.string('noise'),
ecalselector = cms.untracked.string('no'),
)
process.hcalTowerAnalyzer = DQMEDAnalyzer('CaloTowersValidation',
outputFile = cms.untracked.string('CaloTowersValidation.root'),
CaloTowerCollectionLabel = cms.untracked.InputTag('towerMaker'),
hcalselector = cms.untracked.string('all'),
useAllHistos = cms.untracked.bool(True)
)
#------------------------------------
#process.simHcalDigis.HBlevel = -1000
#process.simHcalDigis.HElevel = -1000
#process.simHcalDigis.HOlevel = -1000
#process.simHcalDigis.HFlevel = -1000
#process.simHcalDigis.useConfigZSvalues = 1
process.VtxSmeared.SigmaX = 0.00001
process.VtxSmeared.SigmaY = 0.00001
process.VtxSmeared.SigmaZ = 0.00001
### Special - CaloOnly ------------------------------------
#--- comes from DigiToRaw_cff.py
process.ecalPacker.Label = 'simEcalDigis'
process.ecalPacker.InstanceEB = 'ebDigis'
process.ecalPacker.InstanceEE = 'eeDigis'
process.ecalPacker.labelEBSRFlags = "simEcalDigis:ebSrFlags"
process.ecalPacker.labelEESRFlags = "simEcalDigis:eeSrFlags"
#
#- hcalRawData (EventFilter/HcalRawToDigi/python/HcalDigiToRaw_cfi.py
# uses simHcalDigis by default...
#--- to force RAW->Digi
process.ecalDigis.InputLabel = 'rawDataCollector'
process.hcalDigis.InputLabel = 'rawDataCollector'
process.ecalPreshowerDigis.sourceTag = 'rawDataCollector'
#--- calolocalreco = cms.Sequence(ecalLocalRecoSequence+hcalLocalRecoSequence)
# RecoLocalCalo.Configuration.ecalLocalRecoSequence_cff
# RecoLocalCalo.Configuration.hcalLocalReco_cff
#--- To cope with JP Chou pre-reco introduction to bring back hbhe RecHits collection to CaloTowers
delattr(process,"hbhereco")
process.hbhereco = process.hbheprereco.clone()
process.hcalLocalRecoSequence.replace(process.hbheprereco,process.hbhereco)
#------------------------------------------------ processing
process.load('Configuration/StandardSequences/EDMtoMEAtRunEnd_cff')
process.dqmSaver.referenceHandling = cms.untracked.string('all')
cmssw_version = os.environ.get('CMSSW_VERSION','CMSSW_X_Y_Z')
Workflow = '/HcalValidation/'+'Harvesting/'+str(cmssw_version)
process.dqmSaver.workflow = Workflow
process.calotowersClient = DQMEDHarvester("CaloTowersClient",
outputFile = cms.untracked.string('CaloTowersHarvestingME.root'),
DQMDirName = cms.string("/") # root directory
)
process.hcalrechitsClient = DQMEDHarvester("HcalRecHitsClient",
outputFile = cms.untracked.string('HcalRecHitsHarvestingME_ZS.root'),
DQMDirName = cms.string("/") # root directory
)
process.g4SimHits.Generator.HepMCProductLabel = 'VtxSmeared'
process.p = cms.Path(
process.VtxSmeared *
process.g4SimHits *
process.mix *
process.calDigi *
process.ecalPacker *
process.esDigiToRaw *
process.hcalRawData *
process.rawDataCollector *
process.ecalDigis *
process.ecalPreshowerDigis *
process.hcalDigis *
process.calolocalreco *
process.caloTowersRec *
process.hcalDigiAnalyzer *
process.hcalTowerAnalyzer *
process.hcalRecoAnalyzer *
process.calotowersClient *
process.hcalrechitsClient *
process.dqmSaver)
| []
| []
| [
"CMSSW_VERSION"
]
| [] | ["CMSSW_VERSION"] | python | 1 | 0 | |
Supermicro/benchmarks/bert/implementations/pytorch_SYS-420GP-TNAR/function.py | # Copyright (c) 2019-2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python function.py [--graph-after-ddp] [--graph-before-ddp]
# python -m torch.distributed.launch --nproc_per_node=2 function.py [--graph-after-ddp] [--graph-before-ddp]
import torch
import types
from itertools import chain
import argparse
import os
# questions:
# is a custom autograd function or graphing around a backward call better?
# how to allow double backward?
# lazily capture as part of live backward, or not?
# capture all the way down to AccumulateGrad functions, or not?
# If yes, need to deal with params used in graphs and non-graphed regions,
# and DDP bucket-slot-ready flags. To help, user could supply a list of params
# known to be exclusive to the graphed region.
# Current limitation: Assumes all args are Tensors.
# Arg tensors may or may not require grad.
# Any temporaries created in func_or_module must not be used
# outside func_or_module unless they are among func_or_module's
# explicit return values.
def graph(func_or_module,
sample_args,
sample_args_eval=None,
graph_stream=None,
warmup_iters=2,
warmup_only=False):
assert isinstance(sample_args, tuple)
# To run a module's forward method as a torch.autograd.Function,
# and ensure gradients of all used tensors are returned by the Function's backward
# so the autograd engine takes care of final accumulation (which includes DDP hooks)
# we need to "functionalize" module.forward:
    #   create a wrapper function where module attributes
# and user args all enter through the arglist.
was_module = isinstance(func_or_module, torch.nn.Module)
if was_module:
if isinstance(func_or_module, torch.nn.parallel.DistributedDataParallel):
func_or_module = func_or_module.module
module_params = tuple(func_or_module.parameters())
functional_args = sample_args + module_params
stream = torch.cuda.Stream() if graph_stream is None else graph_stream
ambient_stream = torch.cuda.current_stream()
stream.wait_stream(ambient_stream)
# Most of the spaghetti here comes from handling args that may not require grad.
with torch.cuda.stream(stream):
# warmup iters before capture
for _ in range(warmup_iters):
# Warmup iters should warm up the same memory pool capture will use. If they don't,
# and we use the capture pool for the first time during capture, we'll almost
# certainly capture some cudaMallocs.
outputs = func_or_module(*sample_args)
outputs_was_tensor = isinstance(outputs, torch.Tensor)
outputs = (outputs,) if outputs_was_tensor else outputs
outputs_require_grad = tuple(o for o in outputs if o.requires_grad)
args_require_grad = tuple(i for i in functional_args if i.requires_grad)
buffer_incoming_grads = tuple(torch.empty_like(o) if o.requires_grad else None for o in outputs)
needed_incoming_grads = tuple(b for b in buffer_incoming_grads if b is not None)
torch.cuda.nvtx.range_push("autograd.grad")
grad_inputs = torch.autograd.grad(outputs_require_grad,
args_require_grad,
needed_incoming_grads,
only_inputs=True,
allow_unused=False)
torch.cuda.nvtx.range_pop()
if warmup_iters > 0:
del outputs, outputs_require_grad, args_require_grad, buffer_incoming_grads, needed_incoming_grads, grad_inputs
if warmup_only:
ambient_stream.wait_stream(stream)
return func_or_module
print("Graphing\n", flush=True)
# Capture forward pass
fwd_graph = torch.cuda._Graph()
fwd_graph.capture_begin()
outputs = func_or_module(*sample_args)
fwd_graph.capture_end()
outputs_was_tensor = isinstance(outputs, torch.Tensor)
outputs = (outputs,) if outputs_was_tensor else outputs
outputs_require_grad = tuple(o for o in outputs if o.requires_grad)
args_require_grad = tuple(i for i in functional_args if i.requires_grad)
buffer_incoming_grads = tuple(torch.empty_like(o) if o.requires_grad else None for o in outputs)
needed_incoming_grads = tuple(b for b in buffer_incoming_grads if b is not None)
# Capture gradient creation
bwd_graph = torch.cuda._Graph()
bwd_graph.capture_begin(pool=fwd_graph.pool())
torch.cuda.nvtx.range_push("capturing autograd.grad")
grad_inputs = torch.autograd.grad(outputs_require_grad,
args_require_grad,
needed_incoming_grads,
only_inputs=True,
allow_unused=False)
torch.cuda.nvtx.range_pop()
bwd_graph.capture_end()
buffer_inputs = tuple(i.detach() for i in functional_args)
buffer_outputs = tuple(o.detach().requires_grad_(o.requires_grad) for o in outputs)
# Constructs a list suitable for returning from Graphed.backward:
# Inserts Nones in gradient slots for inputs that don't expect a grad.
buffer_grad_inputs = []
grad_idx = 0
for arg in functional_args:
if arg.requires_grad:
buffer_grad_inputs.append(grad_inputs[grad_idx])
grad_idx += 1
else:
buffer_grad_inputs.append(None)
buffer_grad_inputs = tuple(buffer_grad_inputs)
# Capture eval
capture_eval = (sample_args_eval is not None)
if capture_eval:
assert isinstance(sample_args_eval, tuple)
assert len(sample_args_eval) == len(sample_args)
with torch.no_grad():
func_or_module.eval()
# warmup iters before capture
for _ in range(warmup_iters):
eval_outputs = func_or_module(*sample_args_eval)
eval_outputs_was_tensor = isinstance(eval_outputs, torch.Tensor)
eval_outputs = (eval_outputs,) if eval_outputs_was_tensor else eval_outputs
if warmup_iters > 0:
del eval_outputs
print("Eval-Graphing\n", flush=True)
eval_graph = torch.cuda._Graph()
eval_graph.capture_begin(pool=fwd_graph.pool())
eval_outputs = func_or_module(*sample_args_eval)
eval_graph.capture_end()
eval_outputs_was_tensor = isinstance(eval_outputs, torch.Tensor)
eval_outputs = (eval_outputs,) if eval_outputs_was_tensor else eval_outputs
func_or_module.train()
ambient_stream.wait_stream(stream)
class Graphed(torch.autograd.Function):
@staticmethod
def forward(ctx, *inputs):
if func_or_module.training:
with torch.no_grad():
for i, arg in zip(buffer_inputs, inputs):
if i.data_ptr() != arg.data_ptr():
i.copy_(arg)
fwd_graph.replay()
return buffer_outputs
else: # eval
with torch.no_grad():
if capture_eval:
for i, arg in zip(sample_args_eval, inputs[0:len(sample_args)]):
assert i.shape == arg.shape, "eval capture shape doesn't match run input shape"
if i.data_ptr() != arg.data_ptr():
i.copy_(arg)
eval_graph.replay()
return eval_outputs
else: # execute eval eagerly
outputs = func_or_module.forward_eager(*inputs[0:len(sample_args)])
if not isinstance(outputs, tuple):
outputs = (outputs,)
return outputs
@staticmethod
def backward(ctx, *grads):
with torch.no_grad():
for g, grad in zip(buffer_incoming_grads, grads):
if g is not None:
g.copy_(grad)
bwd_graph.replay()
return tuple(b.detach() if b is not None else b for b in buffer_grad_inputs)
if was_module:
def functionalized(self, *user_args):
out = Graphed.apply(*(user_args + module_params))
return out[0] if outputs_was_tensor else out
func_or_module.forward_eager = func_or_module.forward
func_or_module.forward = types.MethodType(functionalized, func_or_module)
return func_or_module
else:
return Graphed.apply
def main():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--graph-before-ddp", action="store_true")
parser.add_argument("--graph-after-ddp", action="store_true")
args = parser.parse_args()
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
torch.backends.cudnn.benchmark = False
torch.manual_seed(args.local_rank + 1)
torch.cuda.manual_seed(args.local_rank + 1)
print("{} graph_before_ddp {} graph_after_ddp {}\n".format(args.local_rank,
args.graph_before_ddp,
args.graph_after_ddp),
flush=True)
N, D_in, H, D_out = 640, 4096, 2048, 1024
stream = torch.cuda.Stream()
model_segment1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.2),
torch.nn.Dropout(p=0.4)).cuda()
model_segment2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.3),
torch.nn.Dropout(p=0.1)).cuda()
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(chain(model_segment1.parameters(),
model_segment2.parameters()),
lr = 0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda')
y = torch.randn(N, D_out, device='cuda')
x_eval = torch.randn(2*N, D_in, device='cuda')
h_eval = torch.randn(2*N, H, device='cuda')
y_eval = torch.randn(2*N, D_out, device='cuda')
pure_eager = not (args.graph_before_ddp or args.graph_after_ddp)
if args.graph_before_ddp or pure_eager:
print("Calling graph() before ddp\n")
model_segment1 = graph(model_segment1,
(x.clone(),),
(x_eval.clone(),),
graph_stream=stream,
warmup_only=pure_eager)
model_segment2 = graph(model_segment2,
(h.clone().requires_grad_(),),
(h_eval.clone().requires_grad_(),),
graph_stream=stream,
warmup_only=pure_eager)
model = torch.nn.Sequential(model_segment1, model_segment2)
if args.distributed:
# Small bucket cap to stress DDP
torch.cuda.nvtx.range_push("DDP")
model = torch.nn.parallel.DistributedDataParallel(model,
bucket_cap_mb=1,
device_ids=[args.local_rank],
gradient_as_bucket_view=True)
torch.cuda.nvtx.range_pop()
if args.graph_after_ddp:
if args.distributed:
print("Calling graph() after ddp\n")
model.module[0] = graph(model.module[0], (x.clone(),), stream)
else:
model[0] = graph(model_segment1, (x.clone(),), stream)
for e in range(2):
model.train()
for i in range(10):
torch.cuda.nvtx.range_push("{}".format(i))
optimizer.zero_grad(set_to_none=True)
y_pred = model(x)
loss = loss_fn(y_pred, y)
torch.cuda.nvtx.range_push("backward")
loss.backward()
torch.cuda.nvtx.range_pop()
# possibly needed if post-backward sync is commented out in pytorch
# torch.cuda.synchronize()
torch.cuda.nvtx.range_push("step")
optimizer.step()
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_pop()
print("train: {} {} {} {}".format(args.local_rank,
loss.item(),
tuple(p.grad.sum().item() for p in model_segment1.parameters()),
tuple(p.grad.sum().item() for p in model_segment2.parameters())),
flush=True)
# do eval end of epoch
with torch.no_grad():
model.eval()
y_pred = model(x_eval)
loss = loss_fn(y_pred, y_eval)
print("eval: {} {}".format(args.local_rank,
loss.item()),
flush=True)
if __name__ == "__main__":
main()
| []
| []
| [
"WORLD_SIZE"
]
| [] | ["WORLD_SIZE"] | python | 1 | 0 | |
middleware/token/parsetokenuserid.go | package tokenmiddleware
import (
"context"
"fmt"
"log"
"os"
"strings"
commonmodels "github.com/alubhorta/goth/models/common"
"github.com/gofiber/fiber/v2"
"github.com/golang-jwt/jwt/v4"
)
func ParseTokenUserId(c *fiber.Ctx) error {
authHeader := c.Request().Header.Peek("Authorization")
authHeaderCopy := make([]byte, len(authHeader))
copy(authHeaderCopy, authHeader)
authHeaderStr := string(authHeaderCopy)
splitted := strings.Split(authHeaderStr, " ")
if len(splitted) != 2 {
msg := "invalid token provided."
log.Println(msg)
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"message": msg, "payload": nil})
}
accessToken := splitted[1]
// NOTE: possible to refactor token parsing from header into tokenutils func
token, err := jwt.Parse(accessToken, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
signingKey := os.Getenv("ACCESS_TOKEN_SIGNING_KEY")
return []byte(signingKey), nil
})
if err != nil {
msg := "failed to parse or validate token."
log.Println(msg, err)
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"message": msg, "payload": nil})
}
claims, ok := token.Claims.(jwt.MapClaims)
if !ok || !token.Valid {
msg := "invalid token or claim typecast error."
log.Println(msg, err)
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"message": msg, "payload": nil})
}
userId, ok := claims["userId"].(string)
if !ok || len(userId) <= 0 {
msg := "invalid user id provided in claim."
log.Println(msg, err)
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"message": msg, "payload": nil})
}
prevCtx := c.UserContext().Value(commonmodels.CommonCtx{}).(*commonmodels.CommonCtx)
newCtx := context.WithValue(
context.Background(),
commonmodels.CommonCtx{},
&commonmodels.CommonCtx{
Clients: prevCtx.Clients,
UserId: userId,
},
)
c.SetUserContext(newCtx)
return c.Next()
}
| [
"\"ACCESS_TOKEN_SIGNING_KEY\""
]
| []
| [
"ACCESS_TOKEN_SIGNING_KEY"
]
| [] | ["ACCESS_TOKEN_SIGNING_KEY"] | go | 1 | 0 | |
example/object/get.go | package main
import (
"context"
"fmt"
"net/url"
"os"
"io"
"io/ioutil"
"net/http"
"github.com/tencentyun/cos-go-sdk-v5"
"github.com/tencentyun/cos-go-sdk-v5/debug"
)
func main() {
u, _ := url.Parse("https://test-1253846586.cos.ap-guangzhou.myqcloud.com")
b := &cos.BaseURL{BucketURL: u}
c := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: os.Getenv("COS_SECRETID"),
SecretKey: os.Getenv("COS_SECRETKEY"),
Transport: &debug.DebugRequestTransport{
RequestHeader: true,
RequestBody: true,
ResponseHeader: true,
ResponseBody: false,
},
},
})
	// Case1 Download object into a ReadCloser(). The body needs to be closed.
name := "test/hello.txt"
resp, err := c.Object.Get(context.Background(), name, nil)
if err != nil {
panic(err)
}
bs, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
fmt.Printf("%s\n", string(bs))
	// Case2 Download object to a local file. The body needs to be closed.
fd, err := os.OpenFile("hello.txt", os.O_WRONLY|os.O_CREATE, 0660)
if err != nil {
panic(err)
}
defer fd.Close()
resp, err = c.Object.Get(context.Background(), name, nil)
if err != nil {
panic(err)
}
io.Copy(fd, resp.Body)
resp.Body.Close()
// Case3 Download object to local file path
err = c.Object.GetToFile(context.Background(), name, "hello_1.txt", nil)
if err != nil {
panic(err)
}
	// Case4 Download object with a range header; can be used for concurrent download.
opt := &cos.ObjectGetOptions{
ResponseContentType: "text/html",
Range: "bytes=0-3",
}
resp, err = c.Object.Get(context.Background(), name, opt)
if err != nil {
panic(err)
}
bs, _ = ioutil.ReadAll(resp.Body)
resp.Body.Close()
fmt.Printf("%s\n", string(bs))
}
| [
"\"COS_SECRETID\"",
"\"COS_SECRETKEY\""
]
| []
| [
"COS_SECRETKEY",
"COS_SECRETID"
]
| [] | ["COS_SECRETKEY", "COS_SECRETID"] | go | 2 | 0 | |
pkg/term/term_windows.go | package term // import "github.com/ellcrys/docker/pkg/term"
import (
"io"
"os"
"os/signal"
"syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
"github.com/Azure/go-ansiterm/winterm"
"github.com/ellcrys/docker/pkg/term/windows"
)
// State holds the console mode for the terminal.
type State struct {
mode uint32
}
// Winsize is used for window size.
type Winsize struct {
Height uint16
Width uint16
}
// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
var vtInputSupported bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// Turn on VT handling on all std handles, if possible. This might
// fail, in which case we will fall back to terminal emulation.
var emulateStdin, emulateStdout, emulateStderr bool
fd := os.Stdin.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
emulateStdin = true
} else {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
winterm.SetConsoleMode(fd, mode)
}
fd = os.Stdout.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
emulateStdout = true
} else {
winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}
}
fd = os.Stderr.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
emulateStderr = true
} else {
winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
}
}
if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
// The ConEmu and ConsoleZ terminals emulate ANSI on output streams well.
emulateStdin = true
emulateStdout = false
emulateStderr = false
}
// Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
// STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
// go-ansiterm hasn't switch to x/sys/windows.
// TODO: switch back to x/sys/windows once go-ansiterm has switched
if emulateStdin {
stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
} else {
stdIn = os.Stdin
}
if emulateStdout {
stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
} else {
stdOut = os.Stdout
}
if emulateStderr {
stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
} else {
stdErr = os.Stderr
}
return
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
return windowsconsole.GetHandleInfo(in)
}
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil, err
}
winsize := &Winsize{
Width: uint16(info.Window.Right - info.Window.Left + 1),
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
}
return winsize, nil
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
return windowsconsole.IsConsole(fd)
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
return winterm.SetConsoleMode(fd, state.mode)
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
mode, e := winterm.GetConsoleMode(fd)
if e != nil {
return nil, e
}
return &State{mode: mode}, nil
}
// DisableEcho disables echo for the terminal connected to the given file descriptor.
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
func DisableEcho(fd uintptr, state *State) error {
mode := state.mode
mode &^= winterm.ENABLE_ECHO_INPUT
mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
err := winterm.SetConsoleMode(fd, mode)
if err != nil {
return err
}
// Register an interrupt handler to catch and restore prior state
restoreAtInterrupt(fd, state)
return nil
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
state, err := MakeRaw(fd)
if err != nil {
return nil, err
}
// Register an interrupt handler to catch and restore prior state
restoreAtInterrupt(fd, state)
return state, err
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
state, err := SaveState(fd)
if err != nil {
return nil, err
}
// Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
// version of Windows.
winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
return state, err
}
// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be restored.
func MakeRaw(fd uintptr) (*State, error) {
state, err := SaveState(fd)
if err != nil {
return nil, err
}
mode := state.mode
// See
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
// Disable these modes
mode &^= winterm.ENABLE_ECHO_INPUT
mode &^= winterm.ENABLE_LINE_INPUT
mode &^= winterm.ENABLE_MOUSE_INPUT
mode &^= winterm.ENABLE_WINDOW_INPUT
mode &^= winterm.ENABLE_PROCESSED_INPUT
// Enable these modes
mode |= winterm.ENABLE_EXTENDED_FLAGS
mode |= winterm.ENABLE_INSERT_MODE
mode |= winterm.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
}
err = winterm.SetConsoleMode(fd, mode)
if err != nil {
return nil, err
}
return state, nil
}
func restoreAtInterrupt(fd uintptr, state *State) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, os.Interrupt)
go func() {
_ = <-sigchan
RestoreTerminal(fd, state)
os.Exit(0)
}()
}
| [
"\"ConEmuANSI\"",
"\"ConsoleZVersion\""
]
| []
| [
"ConsoleZVersion",
"ConEmuANSI"
]
| [] | ["ConsoleZVersion", "ConEmuANSI"] | go | 2 | 0 | |
config/config.go | package config
import "os"
func IsDebug() bool {
return os.Getenv("ENV") == "debug"
}
func IsTest() bool {
return os.Getenv("ENV") == "test"
}
| [
"\"ENV\"",
"\"ENV\""
]
| []
| [
"ENV"
]
| [] | ["ENV"] | go | 1 | 0 | |
python/paddle/fluid/__init__.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
# import all class inside framework into fluid module
from . import framework
from .framework import *
# import all class inside executor into fluid module
from . import executor
from .executor import *
from . import trainer
from . import inferencer
from . import io
from . import evaluator
from . import initializer
from . import layers
from . import contrib
from . import nets
from . import optimizer
from . import backward
from . import regularizer
from . import average
from . import metrics
from . import transpiler
from . import distribute_lookup_table
from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
from .transpiler import DistributeTranspiler, \
memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
from . import unique_name
from . import recordio_writer
from . import parallel_executor
from .parallel_executor import *
from paddle.fluid.layers.math_op_patch import monkey_patch_variable
Tensor = LoDTensor
__all__ = framework.__all__ + executor.__all__ + \
trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \
parallel_executor.__all__ + lod_tensor.__all__ + [
'io',
'initializer',
'layers',
'contrib',
'transpiler',
'nets',
'optimizer',
'learning_rate_decay',
'backward',
'regularizer',
'LoDTensor',
'LoDTensorArray',
'CPUPlace',
'CUDAPlace',
'CUDAPinnedPlace',
'Tensor',
'ParamAttr',
'WeightNormParamAttr',
'DataFeeder',
'clip',
'profiler',
'unique_name',
'recordio_writer',
'Scope',
]
def __bootstrap__():
"""
Enable reading gflags from environment variables.
Returns:
None
"""
import sys
import os
import platform
from . import core
in_test = 'unittest' in sys.modules
try:
num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
except ValueError:
num_threads = 1
if num_threads > 1:
print(
'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
'speed will not be optimized if you use data parallel. It will '
'fail if this PaddlePaddle binary is compiled with OpenBlas since'
' OpenBlas does not support multi-threads.'.format(num_threads),
file=sys.stderr)
print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)
os.environ['OMP_NUM_THREADS'] = str(num_threads)
sysstr = platform.system()
read_env_flags = [
'check_nan_inf', 'benchmark', 'eager_delete_scope', 'use_mkldnn',
'use_ngraph', 'initial_cpu_memory_in_mb', 'init_allocated_mem',
'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size",
'eager_delete_tensor_gb', 'allocator_strategy',
'reader_queue_speed_test_mode', 'print_sub_graph_dir'
]
if 'Darwin' not in sysstr:
read_env_flags.append('use_pinned_memory')
if os.name != 'nt':
read_env_flags.append('warpctc_dir')
read_env_flags.append('cpu_deterministic')
if core.is_compiled_with_dist():
read_env_flags.append('rpc_deadline')
read_env_flags.append('rpc_server_profile_path')
read_env_flags.append('enable_rpc_profiler')
read_env_flags.append('rpc_send_thread_num')
read_env_flags.append('rpc_get_thread_num')
read_env_flags.append('rpc_prefetch_thread_num')
read_env_flags.append('rpc_disable_reuse_port')
if core.is_compiled_with_cuda():
read_env_flags += [
'fraction_of_gpu_memory_to_use', 'cudnn_deterministic',
'enable_cublas_tensor_op_math', 'conv_workspace_size_limit',
'cudnn_exhaustive_search'
]
core.init_gflags([sys.argv[0]] +
["--tryfromenv=" + ",".join(read_env_flags)])
core.init_glog(sys.argv[0])
# don't init_p2p when in unittest to save time.
core.init_devices(not in_test)
# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
monkey_patch_variable()
__bootstrap__()
| []
| []
| [
"OMP_NUM_THREADS"
]
| [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
services/users/tests/conftest.py | """Prepare test environment and provide useful fixtures."""
import os
from typing import Any
import pytest
from users.models import Base
os.environ["ENV_FOR_DYNACONF"] = "unittest"
@pytest.fixture(scope='session')
def model_base() -> Any:
"""Return database model Base."""
return Base
| []
| []
| [
"ENV_FOR_DYNACONF"
]
| [] | ["ENV_FOR_DYNACONF"] | python | 1 | 0 | |
models/auth.go | package models
import (
"fmt"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/Massad/gin-boilerplate/db"
"github.com/golang-jwt/jwt"
uuid "github.com/twinj/uuid"
)
//TokenDetails ...
type TokenDetails struct {
AccessToken string
RefreshToken string
AccessUUID string
RefreshUUID string
AtExpires int64
RtExpires int64
}
//AccessDetails ...
type AccessDetails struct {
AccessUUID string
UserID int64
}
//Token ...
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
}
//AuthModel ...
type AuthModel struct{}
//CreateToken ...
func (m AuthModel) CreateToken(userID int64) (*TokenDetails, error) {
td := &TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUUID = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUUID = uuid.NewV4().String()
var err error
//Creating Access Token
atClaims := jwt.MapClaims{}
atClaims["authorized"] = true
atClaims["access_uuid"] = td.AccessUUID
atClaims["user_id"] = userID
atClaims["exp"] = td.AtExpires
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return nil, err
}
//Creating Refresh Token
rtClaims := jwt.MapClaims{}
rtClaims["refresh_uuid"] = td.RefreshUUID
rtClaims["user_id"] = userID
rtClaims["exp"] = td.RtExpires
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return nil, err
}
return td, nil
}
//CreateAuth ...
func (m AuthModel) CreateAuth(userid int64, td *TokenDetails) error {
at := time.Unix(td.AtExpires, 0) //converting Unix to UTC(to Time object)
rt := time.Unix(td.RtExpires, 0)
now := time.Now()
errAccess := db.GetRedis().Set(td.AccessUUID, strconv.Itoa(int(userid)), at.Sub(now)).Err()
if errAccess != nil {
return errAccess
}
errRefresh := db.GetRedis().Set(td.RefreshUUID, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
if errRefresh != nil {
return errRefresh
}
return nil
}
//ExtractToken ...
func (m AuthModel) ExtractToken(r *http.Request) string {
bearToken := r.Header.Get("Authorization")
//normally Authorization the_token_xxx
strArr := strings.Split(bearToken, " ")
if len(strArr) == 2 {
return strArr[1]
}
return ""
}
//VerifyToken ...
func (m AuthModel) VerifyToken(r *http.Request) (*jwt.Token, error) {
tokenString := m.ExtractToken(r)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
//Make sure that the token method conform to "SigningMethodHMAC"
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
//TokenValid ...
func (m AuthModel) TokenValid(r *http.Request) error {
token, err := m.VerifyToken(r)
if err != nil {
return err
}
if _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {
return err
}
return nil
}
//ExtractTokenMetadata ...
func (m AuthModel) ExtractTokenMetadata(r *http.Request) (*AccessDetails, error) {
token, err := m.VerifyToken(r)
if err != nil {
return nil, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
accessUUID, ok := claims["access_uuid"].(string)
if !ok {
return nil, err
}
userID, err := strconv.ParseInt(fmt.Sprintf("%.f", claims["user_id"]), 10, 64)
if err != nil {
return nil, err
}
return &AccessDetails{
AccessUUID: accessUUID,
UserID: userID,
}, nil
}
return nil, err
}
//FetchAuth ...
func (m AuthModel) FetchAuth(authD *AccessDetails) (int64, error) {
userid, err := db.GetRedis().Get(authD.AccessUUID).Result()
if err != nil {
return 0, err
}
userID, _ := strconv.ParseInt(userid, 10, 64)
return userID, nil
}
//DeleteAuth ...
func (m AuthModel) DeleteAuth(givenUUID string) (int64, error) {
deleted, err := db.GetRedis().Del(givenUUID).Result()
if err != nil {
return 0, err
}
return deleted, nil
}
| [
"\"ACCESS_SECRET\"",
"\"REFRESH_SECRET\"",
"\"ACCESS_SECRET\""
]
| []
| [
"ACCESS_SECRET",
"REFRESH_SECRET"
]
| [] | ["ACCESS_SECRET", "REFRESH_SECRET"] | go | 2 | 0 | |
vendor/github.com/rancher/wrangler/pkg/leader/leader.go | package leader
import (
"context"
"os"
"time"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)
type Callback func(cb context.Context)
func RunOrDie(ctx context.Context, namespace, name string, client kubernetes.Interface, cb Callback) {
if namespace == "" {
namespace = "kube-system"
}
err := run(ctx, namespace, name, client, cb)
if err != nil {
logrus.Fatalf("Failed to start leader election for %s", name)
}
panic("Failed to start leader election for " + name)
}
func run(ctx context.Context, namespace, name string, client kubernetes.Interface, cb Callback) error {
id, err := os.Hostname()
if err != nil {
return err
}
rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
namespace,
name,
client.CoreV1(),
client.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
})
if err != nil {
logrus.Fatalf("error creating leader lock for %s: %v", name, err)
}
t := time.Second
if dl := os.Getenv("DEV_LEADERELECTION"); dl != "" {
t = time.Hour
}
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: 45 * t,
RenewDeadline: 30 * t,
RetryPeriod: 2 * t,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
go cb(ctx)
},
OnStoppedLeading: func() {
logrus.Fatalf("leaderelection lost for %s", name)
},
},
ReleaseOnCancel: true,
})
panic("unreachable")
}
| [
"\"DEV_LEADERELECTION\""
]
| []
| [
"DEV_LEADERELECTION"
]
| [] | ["DEV_LEADERELECTION"] | go | 1 | 0 | |
openshift/installer/vendored/openshift-ansible-3.11.0-0.10.0/roles/lib_vendored_deps/library/oo_azure_rm_publish_image_facts.py | #!/usr/bin/env python
# pylint: disable=missing-docstring
# Copyright 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function # noqa: F401
# import httplib
import json
import os
import requests
from ansible.module_utils.basic import AnsibleModule
class AzurePublisherException(Exception):
'''Exception class for AzurePublisher'''
pass
class AzurePublisher(object):
'''Python class to represent the Azure Publishing portal https://cloudpartner.azure.com'''
# pylint: disable=too-many-arguments
def __init__(self,
publisher_id,
client_info,
ssl_verify=True,
api_version='2017-10-31',
debug=False):
'''
:publisher_id: string of the publisher id
:client_info: a dict containing the client_id, client_secret to get an access_token
'''
self._azure_server = 'https://cloudpartner.azure.com/api/publishers/{}'.format(publisher_id)
self.client_info = client_info
self.ssl_verify = ssl_verify
self.api_version = 'api-version={}'.format(api_version)
self.debug = debug
# if self.debug:
# httplib.HTTPSConnection.debuglevel = 1
# httplib.HTTPConnection.debuglevel = 1
self._access_token = None
@property
def server(self):
'''property for server url'''
return self._azure_server
@property
def token(self):
'''property for the access_token
curl --data-urlencode "client_id=$AZURE_CLIENT_ID" \
--data-urlencode "client_secret=$AZURE_CLIENT_SECRET" \
--data-urlencode "grant_type=client_credentials" \
--data-urlencode "resource=https://cloudpartner.azure.com" \
https://login.microsoftonline.com/$AZURE_TENANT_ID/oauth2/token
'''
if self._access_token is None:
url = 'https://login.microsoftonline.com/{}/oauth2/token'.format(self.client_info['tenant_id'])
data = {
'client_id': {self.client_info['client_id']},
'client_secret': self.client_info['client_secret'],
'grant_type': 'client_credentials',
'resource': 'https://cloudpartner.azure.com'
}
results = AzurePublisher.request('POST', url, data, {})
jres = results.json()
self._access_token = jres['access_token']
return self._access_token
def get_offers(self, offer=None, version=None, slot=''):
''' fetch all offers by publisherid '''
url = '/offers'
if offer is not None:
url += '/{}'.format(offer)
if version is not None:
url += '/versions/{}'.format(version)
if slot != '':
url += '/slot/{}'.format(slot)
url += '?{}'.format(self.api_version)
return self.prepare_action(url)
def get_operations(self, offer, operation=None, status=None):
''' create or modify an offer '''
url = '/offers/{0}/submissions'.format(offer)
if operation is not None:
url += '/operations/{0}'.format(operation)
if not url.endswith('/'):
url += '/'
url += '?{0}'.format(self.api_version)
if status is not None:
url += '&status={0}'.format(status)
return self.prepare_action(url, 'GET')
def cancel_operation(self, offer):
''' create or modify an offer '''
url = '/offers/{0}/cancel?{1}'.format(offer, self.api_version)
return self.prepare_action(url, 'POST')
def go_live(self, offer):
''' create or modify an offer '''
url = '/offers/{0}/golive?{1}'.format(offer, self.api_version)
return self.prepare_action(url, 'POST')
def create_or_modify_offer(self, offer, data=None, modify=False):
''' create or modify an offer '''
url = '/offers/{0}?{1}'.format(offer, self.api_version)
headers = None
if modify:
headers = {
'If-Match': '*',
}
return self.prepare_action(url, 'PUT', data=data, add_headers=headers)
def prepare_action(self, url, action='GET', data=None, add_headers=None):
'''perform the http request
:action: string of either GET|POST
'''
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer {}'.format(self.token)
}
if add_headers is not None:
headers.update(add_headers)
if data is None:
data = ''
else:
data = json.dumps(data)
return AzurePublisher.request(action.upper(), self.server + url, data, headers)
def manage_offer(self, params):
''' handle creating or modifying offers'''
# fetch the offer to verify it exists:
results = self.get_offers(offer=params['offer'])
if results.status_code == 200 and params['force']:
return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'], modify=True)
return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'])
@staticmethod
def request(action, url, data=None, headers=None, ssl_verify=True):
req = requests.Request(action.upper(), url, data=data, headers=headers)
session = requests.Session()
req_prep = session.prepare_request(req)
response = session.send(req_prep, verify=ssl_verify)
return response
@staticmethod
def run_ansible(params):
'''perform the ansible operations'''
client_info = {
'tenant_id': params['tenant_id'],
'client_id': params['client_id'],
'client_secret': params['client_secret']}
apc = AzurePublisher(params['publisher'],
client_info,
debug=params['debug'])
if params['query'] == 'offer':
results = apc.get_offers(offer=params['offer'])
elif params['query'] == 'operation':
results = apc.get_operations(offer=params['offer'], operation=params['operation'], status=params['status'])
else:
raise AzurePublisherException('Unsupported query type: {}'.format(params['query']))
return {'data': results.json(), 'status_code': results.status_code}
def main():
''' ansible oc module for secrets '''
module = AnsibleModule(
argument_spec=dict(
query=dict(default='offer', choices=['offer', 'operation']),
publisher=dict(default='redhat', type='str'),
debug=dict(default=False, type='bool'),
tenant_id=dict(default=os.environ.get('AZURE_TENANT_ID'), type='str'),
client_id=dict(default=os.environ.get('AZURE_CLIENT_ID'), type='str'),
client_secret=dict(default=os.environ.get('AZURE_CLIENT_SECRET'), type='str'),
offer=dict(default=None, type='str'),
operation=dict(default=None, type='str'),
status=dict(default=None, type='str'),
),
)
    # Verify we received either a valid key or edits with valid keys when receiving a src file.
# A valid key being not None or not ''.
if (module.params['tenant_id'] is None or module.params['client_id'] is None or
module.params['client_secret'] is None):
return module.fail_json(**{'failed': True,
'msg': 'Please specify tenant_id, client_id, and client_secret'})
rval = AzurePublisher.run_ansible(module.params)
if int(rval['status_code']) == 404:
rval['msg'] = 'Offer does not exist.'
elif int(rval['status_code']) >= 300:
rval['msg'] = 'Error.'
return module.fail_json(**rval)
return module.exit_json(**rval)
if __name__ == '__main__':
main()
| []
| []
| [
"AZURE_CLIENT_ID",
"AZURE_CLIENT_SECRET",
"AZURE_TENANT_ID"
]
| [] | ["AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID"] | python | 3 | 0 | |
test/terraform_aws_encrypted_test.go | // =================================================================
//
// Work of the U.S. Department of Defense, Defense Digital Service.
// Released as open source under the MIT License. See LICENSE file.
//
// =================================================================
package test
import (
"fmt"
"os"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/gruntwork-io/terratest/modules/terraform"
)
func TestTerraformEncryptedExample(t *testing.T) {
// Allow test to run in parallel with other tests
t.Parallel()
region := os.Getenv("AWS_DEFAULT_REGION")
// If AWS_DEFAULT_REGION environment variable is not set, then fail the test.
require.NotEmpty(t, region, "missing environment variable AWS_DEFAULT_REGION")
// Append a random suffix to the test name, so individual test runs are unique.
// When the test runs again, it will use the existing terraform state,
// so it should override the existing infrastructure.
testName := fmt.Sprintf("terratest-ecs-task-execution-role-encrypted-%s", strings.ToLower(random.UniqueId()))
terraformOptions := terraform.WithDefaultRetryableErrors(t, &terraform.Options{
// TerraformDir is where the terraform state is found.
TerraformDir: "../examples/encrypted",
// Set the variables passed to terraform
Vars: map[string]interface{}{
"test_name": testName,
"tags": map[string]interface{}{
"Automation": "Terraform",
"Terratest": "yes",
"Test": "TestTerraformEncryptedExample",
},
},
// Set the environment variables passed to terraform.
// AWS_DEFAULT_REGION is the only environment variable strictly required,
// when using the AWS provider.
EnvVars: map[string]string{
"AWS_DEFAULT_REGION": region,
},
})
	// If TT_SKIP_DESTROY is set to "1" then do not destroy the infrastructure,
// at the end of the test run
if os.Getenv("TT_SKIP_DESTROY") != "1" {
defer terraform.Destroy(t, terraformOptions)
}
// InitAndApply runs "terraform init" and then "terraform apply"
terraform.InitAndApply(t, terraformOptions)
ecsTaskExecutionRoleName := terraform.Output(t, terraformOptions, "ecs_task_execution_role_name")
require.Equal(t, ecsTaskExecutionRoleName, testName)
}
| [
"\"AWS_DEFAULT_REGION\"",
"\"TT_SKIP_DESTROY\""
]
| []
| [
"TT_SKIP_DESTROY",
"AWS_DEFAULT_REGION"
]
| [] | ["TT_SKIP_DESTROY", "AWS_DEFAULT_REGION"] | go | 2 | 0 | |
issuers/vault/vault_suite_test.go | package vault_test
import (
"archive/tar"
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/pem"
"fmt"
"log"
"math/big"
"net"
"net/url"
"os"
"testing"
"time"
"github.com/hashicorp/vault/api"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/ory/dockertest"
"github.com/ory/dockertest/docker"
)
func TestVault(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Vault Suite")
}
type vaultConfig struct {
Role string
RoleURISANs string
Mount string
Token string
URL *url.URL
CA *x509.Certificate
CertPool *x509.CertPool
}
var (
pool *dockertest.Pool
resource *dockertest.Resource
waiter docker.CloseWaiter
vaultConf, vaultTLSConf vaultConfig
altMount = "mount-test-pki"
defaultTTL, maxTTL time.Duration
)
var _ = BeforeSuite(func() {
host := "localhost"
if os.Getenv("DOCKER_HOST") != "" {
u, err := url.Parse(os.Getenv("DOCKER_HOST"))
Expect(err).To(Succeed())
host, _, err = net.SplitHostPort(u.Host)
Expect(err).To(Succeed())
}
log.SetOutput(GinkgoWriter)
cert, key, err := generateCertAndKey(host, net.IPv4(127, 0, 0, 1))
Expect(err).To(Succeed())
pool, err = dockertest.NewPool("")
Expect(err).To(Succeed())
pool.MaxWait = time.Second * 10
By("Starting the Vault container", func() {
cp := x509.NewCertPool()
Expect(cp.AppendCertsFromPEM(cert)).To(BeTrue())
token := "mysecrettoken"
role := "test"
roleURISANs := "test_uri_sans"
repo := "vault"
version := "1.1.3"
img := repo + ":" + version
_, err = pool.Client.InspectImage(img)
if err != nil {
// Pull image
Expect(pool.Client.PullImage(docker.PullImageOptions{
Repository: repo,
Tag: version,
OutputStream: GinkgoWriter,
}, docker.AuthConfiguration{})).To(Succeed())
}
defaultTTL = 168 * time.Hour
maxTTL = 720 * time.Hour
c, err := pool.Client.CreateContainer(docker.CreateContainerOptions{
Name: "vault",
Config: &docker.Config{
Image: img,
Env: []string{
"VAULT_DEV_ROOT_TOKEN_ID=" + token,
fmt.Sprintf(`VAULT_LOCAL_CONFIG={
"default_lease_ttl": "%s",
"max_lease_ttl": "%s",
"disable_mlock": true,
"listener": [{
"tcp" :{
"address": "0.0.0.0:8201",
"tls_cert_file": "/vault/file/cert.pem",
"tls_key_file": "/vault/file/key.pem"
}
}]
}`, defaultTTL, maxTTL),
},
ExposedPorts: map[docker.Port]struct{}{
docker.Port("8200"): struct{}{},
docker.Port("8201"): struct{}{},
},
},
HostConfig: &docker.HostConfig{
PublishAllPorts: true,
PortBindings: map[docker.Port][]docker.PortBinding{
"8200": []docker.PortBinding{{HostPort: "8200"}},
"8201": []docker.PortBinding{{HostPort: "8201"}},
},
},
})
Expect(err).To(Succeed())
b := &bytes.Buffer{}
archive := tar.NewWriter(b)
Expect(archive.WriteHeader(&tar.Header{
Name: "/cert.pem",
Mode: 0644,
Size: int64(len(cert)),
})).To(Succeed())
Expect(archive.Write(cert)).To(Equal(len(cert)))
Expect(archive.WriteHeader(&tar.Header{
Name: "/key.pem",
Mode: 0644,
Size: int64(len(key)),
})).To(Succeed())
Expect(archive.Write(key)).To(Equal(len(key)))
Expect(archive.Close()).To(Succeed())
Expect(pool.Client.UploadToContainer(c.ID, docker.UploadToContainerOptions{
InputStream: b,
Path: "/vault/file/",
})).To(Succeed())
Expect(pool.Client.StartContainer(c.ID, nil)).To(Succeed())
c, err = pool.Client.InspectContainer(c.ID)
Expect(err).To(Succeed())
waiter, err = pool.Client.AttachToContainerNonBlocking(docker.AttachToContainerOptions{
Container: c.ID,
OutputStream: GinkgoWriter,
ErrorStream: GinkgoWriter,
Stderr: true,
Stdout: true,
Stream: true,
})
Expect(err).To(Succeed())
resource = &dockertest.Resource{Container: c}
conf := api.DefaultConfig()
conf.Address = "http://" + net.JoinHostPort(host, "8200")
cli, err := api.NewClient(conf)
Expect(err).To(Succeed())
cli.SetToken(token)
// Wait for container to start. This is a no op.
Expect(pool.Retry(func() error {
_, err := cli.Logical().Read("pki/certs")
return err
})).To(Succeed())
var vaultCA *x509.Certificate
// Mount PKI at /pki and /altMount
for _, mountPoint := range []string{"pki", altMount} {
Expect(cli.Sys().Mount(mountPoint, &api.MountInput{
Type: "pki",
Config: api.MountConfigInput{
MaxLeaseTTL: "87600h",
},
})).To(Succeed())
_, err = cli.Logical().Write(mountPoint+"/roles/"+role, map[string]interface{}{
"allowed_domains": "myserver.com",
"allow_subdomains": true,
"allow_any_name": true,
"key_type": "any",
"allowed_other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:*",
})
Expect(err).To(Succeed())
_, err = cli.Logical().Write(mountPoint+"/roles/"+roleURISANs, map[string]interface{}{
"allowed_domains": "myserver.com",
"allow_subdomains": true,
"allow_any_name": true,
"use_csr_sans": false,
"key_type": "any",
"allowed_uri_sans": "spiffe://hostname/*",
})
Expect(err).To(Succeed())
resp, err := cli.Logical().Write(mountPoint+"/root/generate/internal", map[string]interface{}{
"ttl": "87600h",
"common_name": "my_vault",
"ip_sans": c.NetworkSettings.IPAddress,
"format": "der",
})
Expect(err).To(Succeed())
if mountPoint == "pki" {
// Parse the generated CA for the TLS connection
caCertDER, err := base64.StdEncoding.DecodeString(resp.Data["certificate"].(string))
Expect(err).To(Succeed())
vaultCA, err = x509.ParseCertificate(caCertDER)
Expect(err).To(Succeed())
}
}
vaultConf = vaultConfig{
Token: token,
Role: role,
RoleURISANs: roleURISANs,
URL: &url.URL{
Scheme: "http",
Host: net.JoinHostPort(host, "8200"),
},
}
vaultTLSConf = vaultConfig{
Token: token,
Role: role,
RoleURISANs: roleURISANs,
CertPool: cp,
CA: vaultCA,
URL: &url.URL{
Scheme: "https",
Host: net.JoinHostPort(host, "8201"),
},
}
})
})
var _ = AfterSuite(func() {
if waiter != nil {
Expect(waiter.Close()).To(Succeed())
Expect(waiter.Wait()).To(Succeed())
}
if pool != nil {
Expect(pool.Purge(resource)).To(Succeed())
}
})
func generateCertAndKey(SAN string, IPSAN net.IP) ([]byte, []byte, error) {
priv, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, nil, err
}
notBefore := time.Now()
notAfter := notBefore.Add(time.Hour)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, err
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: "Certify Test Cert",
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: []string{SAN},
IPAddresses: []net.IP{IPSAN},
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv)
if err != nil {
return nil, nil, err
}
certOut := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: derBytes,
})
keyOut := pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(priv),
})
return certOut, keyOut, nil
}
| [
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 |