filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129 or ⌀) | variableargcount (float64, 0 or ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
plugins/registry/istiov2/registry.go
|
package istiov2
import (
"fmt"
"log"
"os"
"strconv"
"strings"
apiv2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
apiv2endpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
istioinfra "github.com/go-mesh/mesher/pkg/infras/istio"
"github.com/go-chassis/go-chassis/core/common"
"github.com/go-chassis/go-chassis/core/metadata"
"github.com/go-chassis/go-chassis/core/registry"
"github.com/go-chassis/go-chassis/pkg/util/iputil"
"github.com/go-chassis/go-chassis/pkg/util/tags"
"github.com/go-mesh/openlogging"
)
var (
//PodName is the name of the pod that mesher runs in
PodName string
//PodNamespace is the namespace which the pod belongs to
PodNamespace string
//InstanceIP is the IP of the pod (the IP of the first network adaptor)
InstanceIP string
)
const (
PilotV2Registry = "pilotv2"
)
//ServiceDiscovery is the discovery service for istio pilot with xDS v2 API
type ServiceDiscovery struct {
Name string
client *istioinfra.XdsClient
options registry.Options
}
//GetMicroServiceID returns the id of the micro service
func (discovery *ServiceDiscovery) GetMicroServiceID(appID, microServiceName, version, env string) (string, error) {
return microServiceName, nil
}
//GetAllMicroServices returns all the micro services, which are mapped from xDS clusters
func (discovery *ServiceDiscovery) GetAllMicroServices() ([]*registry.MicroService, error) {
clusters, err := discovery.client.CDS()
if err != nil {
return nil, err
}
microServices := []*registry.MicroService{}
for _, cluster := range clusters {
microServices = append(microServices, toMicroService(&cluster))
}
return microServices, nil
}
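//toMicroService maps an xDS cluster onto a go-chassis MicroService with default version/app metadata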
func toMicroService(cluster *apiv2.Cluster) *registry.MicroService {
svc := &registry.MicroService{}
svc.ServiceID = cluster.Name
svc.ServiceName = cluster.Name
svc.Version = common.DefaultVersion
svc.AppID = common.DefaultApp
svc.Level = "BACK"
svc.Status = "UP"
svc.Framework = &registry.Framework{
Name: "Istio",
Version: common.LatestVersion,
}
svc.RegisterBy = metadata.PlatformRegistrationComponent
return svc
}
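//toMicroServiceInstance converts an xDS LbEndpoint into a MicroServiceInstance keyed by "address_port"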
func toMicroServiceInstance(clusterName string, lbendpoint *apiv2endpoint.LbEndpoint, tags map[string]string) *registry.MicroServiceInstance {
socketAddress := lbendpoint.Endpoint.Address.GetSocketAddress()
addr := socketAddress.Address
port := socketAddress.GetPortValue()
portStr := strconv.FormatUint(uint64(port), 10)
msi := &registry.MicroServiceInstance{}
msi.InstanceID = addr + "_" + portStr
msi.HostName = clusterName
msi.DefaultEndpoint = addr + ":" + portStr
msi.EndpointsMap = map[string]string{
common.ProtocolRest: msi.DefaultEndpoint,
}
msi.DefaultProtocol = common.ProtocolRest
msi.Metadata = tags
return msi
}
//GetMicroService returns the micro service info
func (discovery *ServiceDiscovery) GetMicroService(microServiceID string) (*registry.MicroService, error) {
// If the service is among the clusters, return it; otherwise return nil
clusters, err := discovery.client.CDS()
if err != nil {
return nil, err
}
var targetCluster apiv2.Cluster
found := false
for _, cluster := range clusters {
parts := strings.Split(cluster.Name, "|")
if len(parts) < 4 {
openlogging.GetLogger().Warnf("Invalid cluster name: %s", cluster.Name)
continue
}
svcName := parts[3]
if strings.HasPrefix(svcName, microServiceID+".") {
targetCluster = cluster
found = true
break
}
}
if !found {
return nil, nil
}
return toMicroService(&targetCluster), nil
}
//GetMicroServiceInstances returns the instances of the micro service
func (discovery *ServiceDiscovery) GetMicroServiceInstances(consumerID, providerID string) ([]*registry.MicroServiceInstance, error) {
// TODO Handle the registry.MicroserviceIndex cache
// TODO Handle the microServiceName
service, err := discovery.GetMicroService(providerID)
if err != nil {
return nil, err
}
loadAssignment, err := discovery.client.EDS(service.ServiceName)
if err != nil {
return nil, err
}
instances := []*registry.MicroServiceInstance{}
endpoints := loadAssignment.Endpoints
for _, item := range endpoints {
for _, lbendpoint := range item.LbEndpoints {
msi := toMicroServiceInstance(loadAssignment.ClusterName, &lbendpoint, nil) // The cluster without subset doesn't have tags
instances = append(instances, msi)
}
}
return instances, nil
}
//FindMicroServiceInstances returns the micro service's instances filtered with tags
func (discovery *ServiceDiscovery) FindMicroServiceInstances(consumerID, microServiceName string, tags utiltags.Tags) ([]*registry.MicroServiceInstance, error) {
if tags.KV == nil || tags.Label == "" { // Chassis might pass empty tags
return discovery.GetMicroServiceInstances(consumerID, microServiceName)
}
instances := simpleCache.GetWithTags(microServiceName, tags.KV)
if len(instances) == 0 {
var lbendpoints []apiv2endpoint.LbEndpoint
var err error
lbendpoints, clusterName, err := discovery.client.GetEndpointsByTags(microServiceName, tags.KV)
if err != nil {
return nil, err
}
updateInstanceIndexCache(lbendpoints, clusterName, tags.KV)
instances = simpleCache.GetWithTags(microServiceName, tags.KV)
if instances == nil {
return nil, fmt.Errorf("Failed to find microservice instances of %s from cache", microServiceName)
}
}
return instances, nil
}
var cacheManager *CacheManager
//AutoSync updates the services' info periodically in the background
func (discovery *ServiceDiscovery) AutoSync() {
var err error
cacheManager, err = NewCacheManager(discovery.client)
if err != nil {
openlogging.GetLogger().Errorf("Failed to create cache manager, indexing will not work: %s", err.Error())
} else {
cacheManager.AutoSync()
}
}
//Close closes the discovery service
func (discovery *ServiceDiscovery) Close() error {
return nil
}
//NewDiscoveryService creates the new ServiceDiscovery instance
func NewDiscoveryService(options registry.Options) registry.ServiceDiscovery {
if len(options.Addrs) == 0 {
panic("Failed to create discovery service: Address not specified")
}
pilotAddr := options.Addrs[0]
nodeInfo := &istioinfra.NodeInfo{
PodName: PodName,
Namespace: PodNamespace,
InstanceIP: InstanceIP,
}
xdsClient, err := istioinfra.NewXdsClient(pilotAddr, options.TLSConfig, nodeInfo, options.ConfigPath)
if err != nil {
panic("Failed to create XDS client: " + err.Error())
}
discovery := &ServiceDiscovery{
client: xdsClient,
Name: PilotV2Registry,
options: options,
}
return discovery
}
func init() {
// Init the node info
PodName = os.Getenv("POD_NAME")
PodNamespace = os.Getenv("POD_NAMESPACE")
InstanceIP = os.Getenv("INSTANCE_IP")
// TODO Handle the default value
if PodName == "" {
PodName = "pod_name_default"
}
if PodNamespace == "" {
PodNamespace = "default"
}
if InstanceIP == "" {
log.Println("[WARN] Env var INSTANCE_IP not set, try to get instance ip from local network, the service might not work properly.")
InstanceIP = iputil.GetLocalIP()
if InstanceIP == "" {
// Won't work without instance ip
panic("Failed to get instance ip")
}
}
registry.InstallServiceDiscovery(PilotV2Registry, NewDiscoveryService)
}
|
[
"\"POD_NAME\"",
"\"POD_NAMESPACE\"",
"\"INSTANCE_IP\""
] |
[] |
[
"POD_NAMESPACE",
"INSTANCE_IP",
"POD_NAME"
] |
[]
|
["POD_NAMESPACE", "INSTANCE_IP", "POD_NAME"]
|
go
| 3 | 0 | |
bookmarks/commands.go
|
/*
* This file is part of fishamnium. Copyright (C) 2013 and above Shogun <[email protected]>.
* Licensed under the MIT license, which can be found at https://choosealicense.com/licenses/mit.
*/
package bookmarks
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"regexp"
"sort"
"strings"
"github.com/ShogunPanda/fishamnium/console"
"github.com/ShogunPanda/tempera"
"github.com/apcera/termtables"
"github.com/spf13/cobra"
)
// Bookmark represents a saved bookmark
type Bookmark struct {
Name string `json:"name"`
Bookmark string `json:"bookmark"`
RootPath string `json:"rootPath"`
Paths []string `json:"paths"`
Group string `json:"group"`
}
var bookmarkValidator, _ = regexp.Compile("(?i)(?:^(?:[a-z0-9-_.:@]+)$)")
var rootFormatter, _ = regexp.Compile(fmt.Sprintf("^(?:%s)", regexp.QuoteMeta(os.Getenv("HOME"))))
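// nameToRichName converts a bookmark identifier (with dashes/underscores) into a title-cased display name.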
func nameToRichName(name string) string {
return strings.Title(strings.Replace(strings.Replace(name, "-", " ", -1), "_", " ", -1))
}
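// replaceDestination replaces the leading $HOME prefix of a path with the $home placeholder used in the bookmarks file.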
func replaceDestination(destination string) string {
return rootFormatter.ReplaceAllString(destination, "$$home")
}
func resolveDestination(bookmark Bookmark) string {
return strings.Replace(bookmark.RootPath, "$home", os.Getenv("HOME"), 1)
}
func humanizeDestination(bookmark Bookmark) string {
return strings.Replace(resolveDestination(bookmark), os.Getenv("HOME"), tempera.ColorizeTemplate("{yellow}$HOME{-}"), 1)
}
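// loadBookmarks reads and parses the bookmarks JSON file into a map keyed by the bookmark identifier.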
func loadBookmarks(filePath string) (bookmarks map[string]Bookmark) {
bookmarksList := make([]Bookmark, 0)
var rawBookmarksList []byte
// Read the file
rawBookmarksList, err := ioutil.ReadFile(filePath)
if err != nil {
console.Fatal("Cannot load file %s", filePath)
return
}
// Parse JSON
if err = json.Unmarshal(rawBookmarksList, &bookmarksList); err != nil {
console.Fatal("Cannot parse JSON file %s", filePath)
return
}
// Convert the list to a map keyed by bookmark identifier
bookmarks = make(map[string]Bookmark)
for _, b := range bookmarksList {
bookmarks[b.Bookmark] = b
}
return
}
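// storeBookmarks serializes the bookmarks map back to disk as a JSON list, sorted by bookmark identifier.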
func storeBookmarks(filePath string, bookmarks map[string]Bookmark) {
// Convert back to a list
var list []Bookmark
var keys []string
for k := range bookmarks {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
list = append(list, bookmarks[k])
}
// Serialize and write file
serialized, _ := json.MarshalIndent(list, "", " ")
if err := ioutil.WriteFile(filePath, []byte(serialized), 0755); err != nil {
console.Fatal("Cannot save file %s.", filePath)
}
}
// ReadBookmark shows a bookmark
func ReadBookmark(cmd *cobra.Command, args []string) {
bookmarks := loadBookmarks(getBookmarksFilePath(cmd))
bookmark, present := bookmarks[args[0]]
if !present {
console.Fatal("The bookmark {yellow|bold}%s{-} does not exists.", args[0])
return
}
fmt.Println(resolveDestination(bookmark))
}
// WriteBookmark saves a bookmark
func WriteBookmark(cmd *cobra.Command, args []string) {
bookmarksFilePath := getBookmarksFilePath(cmd)
bookmarks := loadBookmarks(bookmarksFilePath)
bookmarkName := args[0]
bookmark, present := bookmarks[bookmarkName]
if present {
console.Fatal("The bookmark {white}%s{-} already exists and points to {white}%s{-}.", bookmarkName, humanizeDestination(bookmark))
return
} else if !bookmarkValidator.MatchString(bookmarkName) {
console.Fatal(`Use only {white}letters{-}, {white}numbers{-}, and {white}-{-}, {white}_{-}, {white}.{-}, {white}:{-} and {white}@{-} for the name.`)
return
}
// Parse the name
name := ""
if len(args) > 1 {
name = args[1]
} else {
name = nameToRichName(bookmarkName)
}
// Format destination
pwd, _ := os.Getwd()
bookmarks[bookmarkName] = Bookmark{Name: name, Bookmark: bookmarkName, RootPath: replaceDestination(pwd), Paths: make([]string, 0), Group: ""}
storeBookmarks(bookmarksFilePath, bookmarks)
}
// DeleteBookmark deletes a bookmark
func DeleteBookmark(cmd *cobra.Command, args []string) {
bookmarksFilePath := getBookmarksFilePath(cmd)
bookmarks := loadBookmarks(bookmarksFilePath)
_, present := bookmarks[args[0]]
if !present {
console.Fatal("The bookmark {white}%s{-} does not exists.", args[0])
return
}
delete(bookmarks, args[0])
storeBookmarks(bookmarksFilePath, bookmarks)
}
// ListBookmarks lists all bookmarks
func ListBookmarks(cmd *cobra.Command, args []string) {
bookmarks := loadBookmarks(getBookmarksFilePath(cmd))
// Parse arguments
namesOnly, _ := cmd.Flags().GetBool("names-only")
autocomplete, _ := cmd.Flags().GetBool("autocomplete")
var query string
if len(args) > 0 {
query = args[0]
}
// Sort bookmarks by name
var keys []string
for k := range bookmarks {
if query != "" && !strings.Contains(k, query) {
continue
}
keys = append(keys, k)
}
sort.Sort(sort.StringSlice(keys))
// Check if we just want names
if namesOnly {
fmt.Print(strings.Join(keys, "\n"))
return
}
// Print bookmarks, either in human or autocomplete mode
table := termtables.CreateTable()
table.AddHeaders("ID", "Destination", "Name")
for _, k := range keys {
bookmark := bookmarks[k]
if autocomplete {
fmt.Printf("%s\t%s\n", bookmark.Bookmark, bookmark.Name)
} else {
table.AddRow(
tempera.ColorizeTemplate("{green}"+bookmark.Bookmark+"{-}"),
humanizeDestination(bookmark),
tempera.ColorizeTemplate("{blue}"+bookmark.Name+"{-}"),
)
}
}
if !autocomplete {
fmt.Print(table.Render())
}
}
// ConvertBookmarks converts an old bookmarks file to the new format.
func ConvertBookmarks(cmd *cobra.Command, args []string) {
// Parse arguments
destination := getBookmarksFilePath(cmd)
source := destination
if len(args) > 0 {
source = args[0]
}
// Read the file
rawOldBookmarks, err := ioutil.ReadFile(source)
if err != nil {
console.Fatal("Cannot load source file %s", source)
return
}
// Parse JSON
var oldBookmarks map[string]string
if err = json.Unmarshal(rawOldBookmarks, &oldBookmarks); err != nil {
console.Fatal("Cannot parse source JSON file %s", source)
return
}
// Create the new bookmarks
bookmarks := map[string]Bookmark{}
for name, path := range oldBookmarks {
bookmarks[name] = Bookmark{Name: nameToRichName(name), Bookmark: name, RootPath: replaceDestination(path), Paths: make([]string, 0), Group: ""}
}
fmt.Println(oldBookmarks)
fmt.Println(bookmarks)
storeBookmarks(destination, bookmarks)
}
|
[
"\"HOME\"",
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
server/model_server/feature_extract_model_server.py
|
import json
import os
import time
from concurrent import futures
import cv2
# rpc imports
import grpc
import numpy as np
import tensorflow as tf
from PIL import Image
from hysia.dataset.srt_handler import extract_srt
from hysia.models.nlp.sentence import TF_Sentence
from hysia.models.object.audioset_feature_extractor import AudiosetFeatureExtractor
from hysia.models.scene.detector import scene_visual
from hysia.utils.logger import Logger
from hysia.utils.perf import StreamSuppressor
from protos import api2msl_pb2, api2msl_pb2_grpc
# Time constant
_ONE_DAY_IN_SECONDS = 24 * 60 * 60
SERVER_ROOT = os.path.dirname(os.path.abspath(__file__)) + '/'
logger = Logger(
name='feature_extract_model_server',
severity_levels={'StreamHandler': 'ERROR'}
)
sentence_model_path = os.path.join(SERVER_ROOT,
'../../weights/sentence/96e8f1d3d4d90ce86b2db128249eb8143a91db73')
vggish_fr_path = os.path.join(SERVER_ROOT, '../../weights/audioset/vggish_fr.pb')
vggish_pca_path = os.path.join(SERVER_ROOT, '../../weights/audioset/vggish_pca_params.npz')
resnet_places365_path = os.path.join(SERVER_ROOT, '../../weights/places365/{}.pth')
place365_category_path = os.path.join(SERVER_ROOT, '../../weights/places365/categories.txt')
def load_sentence_model():
# Instantiate sentence feature extractor
return TF_Sentence(sentence_model_path)
def load_audio_model():
# Instantiate audio feature extractor
with StreamSuppressor():
vgg_graph = tf.Graph()
with vgg_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(vggish_fr_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
audio_model = AudiosetFeatureExtractor(vgg_graph, vggish_pca_path)
return audio_model
def load_image_model():
# Instantiate scene feature extractor
return scene_visual('resnet50', resnet_places365_path, place365_category_path, 'cuda:0')
# Custom request servicer
class Api2MslServicer(api2msl_pb2_grpc.Api2MslServicer):
def __init__(self):
super().__init__()
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
logger.info('Using GPU:' + os.environ['CUDA_VISIBLE_DEVICES'])
self.sentence_model = load_sentence_model()
self.audio_model = load_audio_model()
self.image_model = load_image_model()
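# GetJson dispatches on the comma-separated request.meta field: 'subtitle' -> sentence embedding,
# 'audio' -> VGGish audio feature, 'scene' -> Places365 scene feature.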
def GetJson(self, request, context):
res = {}
meta = request.meta
meta = meta.split(',')
# Extract NLP feature from subtitle
if 'subtitle' in meta:
subtitle_path = request.buf.decode()
logger.info('Extracting from subtitle: ' + subtitle_path)
start_time = int(meta[1])
end_time = int(meta[2])
sentences = extract_srt(start_time, end_time, subtitle_path)
if len(sentences) == 0:
sentences_feature = 'unknown_feature'
sentences = 'unknown_subtitle'
else:
# TODO: which data types does TEXT support? (BLOB only supports numpy)
sentences = ' '.join(sentences)
sentences_feature = self.sentence_model.encode(sentences)
res['features'] = sentences_feature
return api2msl_pb2.JsonReply(json=json.dumps(res), meta=sentences)
# Extract audio feature
if 'audio' in meta:
audio_path = request.buf.decode()
logger.info('Extracting from audio: ' + audio_path)
start_time = int(meta[1])
end_time = int(meta[2])
audio_feature = self.audio_model.extract(audio_path, start_time, end_time)[0]
res['features'] = audio_feature.tolist()
return api2msl_pb2.JsonReply(json=json.dumps(res), meta='')
if 'scene' in meta:
img = cv2.imdecode(np.frombuffer(request.buf, dtype=np.uint8), -1)
logger.info('Extracting from image of shape ' + str(img.shape))
img_pil = Image.fromarray(img)
scene_feature = self.image_model.extract_vec(img_pil, True)
scene_name = self.image_model.detect(img_pil, True)
res['features'] = scene_feature.tolist()
return api2msl_pb2.JsonReply(json=json.dumps(res), meta=scene_name['scene'][0])
return api2msl_pb2.JsonReply(json=json.dumps(res), meta='')
def main():
# gRPC server configurations
server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))
api2msl_pb2_grpc.add_Api2MslServicer_to_server(Api2MslServicer(), server)
server.add_insecure_port('[::]:50055')
server.start()
logger.info('Listening on port 50055')
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
logger.info('Shutting down feature extract model server')
server.stop(0)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_model_server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import json
import os
import shlex
import socket
import subprocess
import time
# This is a placeholder for a Google-internal import.
import grpc
from six.moves import range
from six.moves import urllib
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.platform import flags
from tensorflow.python.saved_model import signature_constants
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
FLAGS = flags.FLAGS
RPC_TIMEOUT = 5.0
HTTP_REST_TIMEOUT_MS = 5000
CHANNEL_WAIT_TIMEOUT = 5.0
WAIT_FOR_SERVER_READY_INT_SECS = 60
GRPC_SOCKET_PATH = '/tmp/tf-serving.sock'
def SetVirtualCpus(num_virtual_cpus):
"""Create virtual CPU devices if they haven't yet been created."""
if num_virtual_cpus < 1:
raise ValueError('`num_virtual_cpus` must be at least 1 not %r' %
(num_virtual_cpus,))
physical_devices = tf.config.experimental.list_physical_devices('CPU')
if not physical_devices:
raise RuntimeError('No CPUs found')
configs = tf.config.experimental.get_virtual_device_configuration(
physical_devices[0])
if configs is None:
virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration()
for _ in range(num_virtual_cpus)]
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0], virtual_devices)
else:
if len(configs) < num_virtual_cpus:
raise RuntimeError('Already configured with %d < %d virtual CPUs' %
(len(configs), num_virtual_cpus))
def PickUnusedPort():
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
def WaitForServerReady(port):
"""Waits for a server on the localhost to become ready."""
for _ in range(0, WAIT_FOR_SERVER_READY_INT_SECS):
time.sleep(1)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'intentionally_missing_model'
try:
# Send empty request to missing model
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
stub.Predict(request, RPC_TIMEOUT)
except grpc.RpcError as error:
# Missing model error will have details containing 'Servable'
if 'Servable' in error.details():
print('Server is ready')
break
def CallREST(url, req, max_attempts=60):
"""Returns HTTP response body from a REST API call."""
for attempt in range(max_attempts):
try:
print('Attempt {}: Sending request to {} with data:\n{}'.format(
attempt, url, req))
json_data = json.dumps(req).encode('utf-8') if req is not None else None
resp = urllib.request.urlopen(urllib.request.Request(url, data=json_data))
resp_data = resp.read()
print('Received response:\n{}'.format(resp_data))
resp.close()
return resp_data
except Exception as e: # pylint: disable=broad-except
print('Failed attempt {}. Error: {}'.format(attempt, e))
if attempt == max_attempts - 1:
raise
print('Retrying...')
time.sleep(1)
def SortedObject(obj):
"""Returns sorted object (with nested list/dictionaries)."""
if isinstance(obj, dict):
return sorted((k, SortedObject(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(SortedObject(x) for x in obj)
if isinstance(obj, tuple):
return list(sorted(SortedObject(x) for x in obj))
else:
return obj
class TensorflowModelServerTestBase(tf.test.TestCase):
"""This class defines integration test cases for tensorflow_model_server."""
@staticmethod
def __TestSrcDirPath(relative_path=''):
return os.path.join(os.environ['TEST_SRCDIR'],
'tf_serving/tensorflow_serving', relative_path)
@staticmethod
def GetArgsKey(*args, **kwargs):
return args + tuple(sorted(kwargs.items()))
# Maps string key -> 3-tuple (Popen object, grpc 'host:port', rest 'host:port').
model_servers_dict = {}
@staticmethod
def RunServer(model_name,
model_path,
model_type='tf',
model_config_file=None,
monitoring_config_file=None,
batching_parameters_file=None,
grpc_channel_arguments='',
wait_for_server_ready=True,
pipe=None,
model_config_file_poll_period=None):
"""Run tensorflow_model_server using test config.
A unique instance of server is started for each set of arguments.
If called with same arguments, handle to an existing server is
returned.
Args:
model_name: Name of model.
model_path: Path to model.
model_type: Type of model TensorFlow ('tf') or TF Lite ('tflite').
model_config_file: Path to model config file.
monitoring_config_file: Path to the monitoring config file.
batching_parameters_file: Path to batching parameters.
grpc_channel_arguments: Custom gRPC args for server.
wait_for_server_ready: Wait for gRPC port to be ready.
pipe: subprocess.PIPE object to read stderr from server.
model_config_file_poll_period: Period for polling the
filesystem to discover new model configs.
Returns:
3-tuple (<Popen object>, <grpc host:port>, <rest host:port>).
Raises:
ValueError: when both model_path and config_file are empty.
"""
args_key = TensorflowModelServerTestBase.GetArgsKey(**locals())
if args_key in TensorflowModelServerTestBase.model_servers_dict:
return TensorflowModelServerTestBase.model_servers_dict[args_key]
port = PickUnusedPort()
rest_api_port = PickUnusedPort()
print(('Starting test server on port: {} for model_name: '
'{}/model_config_file: {}'.format(port, model_name,
model_config_file)))
command = os.path.join(
TensorflowModelServerTestBase.__TestSrcDirPath('model_servers'),
'tensorflow_model_server')
command += ' --port=' + str(port)
command += ' --rest_api_port=' + str(rest_api_port)
command += ' --rest_api_timeout_in_ms=' + str(HTTP_REST_TIMEOUT_MS)
command += ' --grpc_socket_path=' + GRPC_SOCKET_PATH
if model_config_file:
command += ' --model_config_file=' + model_config_file
elif model_path:
command += ' --model_name=' + model_name
command += ' --model_base_path=' + model_path
else:
raise ValueError('Both model_config_file and model_path cannot be empty!')
if model_type == 'tflite':
command += ' --prefer_tflite_model=true'
if monitoring_config_file:
command += ' --monitoring_config_file=' + monitoring_config_file
if model_config_file_poll_period is not None:
command += ' --model_config_file_poll_wait_seconds=' + str(
model_config_file_poll_period)
if batching_parameters_file:
command += ' --enable_batching'
command += ' --batching_parameters_file=' + batching_parameters_file
if grpc_channel_arguments:
command += ' --grpc_channel_arguments=' + grpc_channel_arguments
print(command)
proc = subprocess.Popen(shlex.split(command), stderr=pipe)
atexit.register(proc.kill)
print('Server started')
if wait_for_server_ready:
WaitForServerReady(port)
hostports = (
proc,
'localhost:' + str(port),
'localhost:' + str(rest_api_port),
)
TensorflowModelServerTestBase.model_servers_dict[args_key] = hostports
return hostports
def VerifyPredictRequest(
self,
model_server_address,
expected_output,
expected_version,
model_name='default',
specify_output=True,
batch_input=False,
signature_name=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
rpc_timeout=RPC_TIMEOUT):
"""Send PredictionService.Predict request and verify output."""
print('Sending Predict request...')
# Prepare request
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = signature_name
request.inputs['x'].dtype = types_pb2.DT_FLOAT
request.inputs['x'].float_val.append(2.0)
dim = request.inputs['x'].tensor_shape.dim.add()
dim.size = 1
if batch_input:
request.inputs['x'].tensor_shape.dim.add().size = 1
if specify_output:
request.output_filter.append('y')
# Send request
channel = grpc.insecure_channel(model_server_address)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
result = stub.Predict(request, rpc_timeout) # 5 secs timeout
# Verify response
self.assertTrue('y' in result.outputs)
self.assertEqual(types_pb2.DT_FLOAT, result.outputs['y'].dtype)
self.assertEqual(1, len(result.outputs['y'].float_val))
self.assertEqual(expected_output, result.outputs['y'].float_val[0])
self._VerifyModelSpec(result.model_spec, request.model_spec.name,
signature_name, expected_version)
def _GetSavedModelBundlePath(self):
"""Returns a path to a model in SavedModel format."""
return os.path.join(self.testdata_dir, 'saved_model_half_plus_two_cpu')
def _GetModelVersion(self, model_path):
"""Returns version of SavedModel/SessionBundle in given path.
This method assumes there is exactly one directory with an 'int' valued
directory name under `model_path`.
Args:
model_path: A string representing path to the SavedModel/SessionBundle.
Returns:
version of SavedModel/SessionBundle in given path.
"""
return int(os.listdir(model_path)[0])
def _GetSavedModelHalfPlusTwoTf2(self):
"""Returns a path to a TF2 half_plus_two model in SavedModel format."""
return os.path.join(self.testdata_dir, 'saved_model_half_plus_two_tf2_cpu')
def _GetSavedModelHalfPlusThreePath(self):
"""Returns a path to a half_plus_three model in SavedModel format."""
return os.path.join(self.testdata_dir, 'saved_model_half_plus_three')
def _GetTfLiteModelPath(self):
"""Returns a path to a model in TF Lite format."""
return os.path.join(self.testdata_dir, 'saved_model_half_plus_two_tflite')
def _GetTfLiteModelWithSigDefPath(self):
"""Returns a path to a model in TF Lite format."""
return os.path.join(self.testdata_dir,
'saved_model_half_plus_two_tflite_with_sigdef')
def _GetSessionBundlePath(self):
"""Returns a path to a model in SessionBundle format."""
return os.path.join(self.session_bundle_testdata_dir, 'half_plus_two')
def _GetGoodModelConfigTemplate(self):
"""Returns a path to a working configuration file template."""
return os.path.join(self.testdata_dir, 'good_model_config.txt')
def _GetGoodModelConfigFile(self):
"""Returns a path to a working configuration file."""
return os.path.join(self.temp_dir, 'good_model_config.conf')
def _GetBadModelConfigFile(self):
"""Returns a path to a improperly formatted configuration file."""
return os.path.join(self.testdata_dir, 'bad_model_config.txt')
def _GetBatchingParametersFile(self):
"""Returns a path to a batching configuration file."""
return os.path.join(self.testdata_dir, 'batching_config.txt')
def _GetModelMetadataFile(self):
"""Returns a path to a sample model metadata file."""
return os.path.join(self.testdata_dir, 'half_plus_two_model_metadata.json')
def _GetMonitoringConfigFile(self):
"""Returns a path to a monitoring configuration file."""
return os.path.join(self.testdata_dir, 'monitoring_config.txt')
def _VerifyModelSpec(self,
actual_model_spec,
exp_model_name,
exp_signature_name,
exp_version):
"""Verifies model_spec matches expected model name, signature, version.
Args:
actual_model_spec: An instance of ModelSpec proto.
exp_model_name: A string that represents expected model name.
exp_signature_name: A string that represents expected signature.
exp_version: An integer that represents expected version.
Returns:
None.
"""
self.assertEqual(actual_model_spec.name, exp_model_name)
self.assertEqual(actual_model_spec.signature_name, exp_signature_name)
self.assertEqual(actual_model_spec.version.value, exp_version)
def _TestPredict(
self,
model_path,
batching_parameters_file=None,
signature_name=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Helper method to test prediction.
Args:
model_path: Path to the model on disk.
batching_parameters_file: Batching parameters file to use (if None
batching is not enabled).
signature_name: Signature name to expect in the PredictResponse.
"""
model_server_address = TensorflowModelServerTestBase.RunServer(
'default',
model_path,
batching_parameters_file=batching_parameters_file)[1]
expected_version = self._GetModelVersion(model_path)
self.VerifyPredictRequest(model_server_address, expected_output=3.0,
expected_version=expected_version,
signature_name=signature_name)
self.VerifyPredictRequest(
model_server_address, expected_output=3.0, specify_output=False,
expected_version=expected_version, signature_name=signature_name)
|
[] |
[] |
[
"TEST_SRCDIR"
] |
[]
|
["TEST_SRCDIR"]
|
python
| 1 | 0 | |
main.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
import torch
import logging
logging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def get_settings():
import argparse
parser = argparse.ArgumentParser(description='Deep Stereo Matching by pytorch')
parser.add_argument('--mode', default='train',
help='mode of execution [train/finetune/val/submission]')
# arguments of datasets
parser.add_argument('--datas_train', default='k2015-tr, k2012-tr',
help='datasets for training')
parser.add_argument('--datas_val', default='k2015-val, k2012-val',
help='datasets for validation')
parser.add_argument('--dir_datas_train', default='/media/qjc/D/data/kitti/',
help='dirpath of datasets for training')
parser.add_argument('--dir_datas_val', default='/media/qjc/D/data/kitti/',
help='dirpath of datasets for validation')
parser.add_argument('--bn', type=int, default=4,
help='batch size')
parser.add_argument('--crop_width', type=int, default=768,
help='width of crop_size')
parser.add_argument('--crop_height', type=int, default=384,
help='height of crop_size')
# arguments of model
parser.add_argument('--arch', default='DispNetC',
help='select arch of model')
parser.add_argument('--maxdisp', type=int, default=192,
help='maximum disparity')
parser.add_argument('--loadmodel', default=None,
help='path of pretrained weight')
# arguments of lossfun
parser.add_argument('--loss_name', default='SV-SL1',
help='name of lossfun, supported as follow: \
SV-(SL1/CE/SL1+CE), \
DUSV-(A[S(1/2/3)]C(1/2)[-AD][-M], \
LUSV-(A[S(1/2/3)][-AD])/(AS(1/2/3)-EC)')
parser.add_argument('--flag_FC', action='store_true', default=False,
help='enables feature consistency')
parser.add_argument('--flag_FCTF', action='store_true', default=False,
help='enables the mode of training from coarse to fine')
parser.add_argument('--mode_down_disp', type=str ,default='avg',
help='mode of downsample disparity for training with multi-scale [avg/max]')
parser.add_argument('--mode_down_img', type=str ,default='Simple',
help='mode of downsample image for training with multi-scale [Simple/Gaussion/DoG]')
parser.add_argument('--nedge', type=int, default=64,
help='margin of image for learning disparity of regions with occlusion')
# arguments of optimizer
parser.add_argument('--freq_optim', type=int, default=1,
help='frequency of weight optimization')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--lr_epoch0', type=int, default=10,
help='the first epoch at which to adjust the learning rate')
parser.add_argument('--lr_stride', type=int, default=3,
help='epoch stride for adjusting the learning rate')
parser.add_argument('--lr_decay', type=float, default=0.5,
help='decay factor for adjusting the learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0001,
help='decay factor of weight')
parser.add_argument('--beta1', type=float, default=0.9,
help='beta1 of Adam')
parser.add_argument('--beta2', type=float, default=0.999,
help='beta2 of Adam')
# arguments for training
parser.add_argument('--epochs', type=int, default=20,
help='number of epochs to train')
parser.add_argument('--nloop', type=int, default=1,
help='number of loops over the dataset in an epoch')
parser.add_argument('--epochs_warmup', type=int, default=0,
help='number of epochs to warmup weight')
parser.add_argument('--freq_save', type=int, default=1,
help='frequency of saving weights')
parser.add_argument('--freq_print', type=int, default=20,
help='frequency of printing information')
# other arguments
parser.add_argument('--dir_save', default='./results/',
help='dirpath of save result( weight/submission )')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# parser arguments
args = parser.parse_args()
# add arguments
args.cuda = (not args.no_cuda) and torch.cuda.is_available()
args.beta = (args.beta1, args.beta2)
args.crop_size = (args.crop_width, args.crop_height)
# log arguments
items = sorted(args.__dict__.items())
msg = 'The configured arguments are as follows: \n'
msg += '\n'.join([' [%s]: %s' % (k, str(v)) for k, v in items])
logger.info(msg + '\n')
return args
# program entry
if __name__ == '__main__':
# get setting
args = get_settings()
# set gpu id used
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# set manual seed
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# create the save directory if needed
if(not os.path.isdir(args.dir_save)):
os.mkdir(args.dir_save)
# execute stereo program
import stereo
if(args.mode.lower() in ['train', 'finetune']):
stereo.train_val(args)
elif(args.mode.lower() in ['val', 'validation']):
stereo.val(args)
elif(args.mode.lower() in ['sub', 'submission']):
stereo.submission(args)
else:
logger.error('unsupported mode [%s]' % args.mode)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
model_training.py
|
#Import all Necessary Libraries
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.datasets import fetch_openml
from tensorflow.keras.callbacks import EarlyStopping
import pickle
import numpy as np
import pandas as pd
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# batch size and number of epochs
BATCH_SIZE = 32
EPOCHS = 5
#importing dataset, 28x28 images of digits
mnist = fetch_openml('mnist_784')
#unpacking data
X , y = mnist.data, mnist.target
# converting string into int
y = y.astype(np.short)
# Reshape image in 3 dimensions
# channel = 1 for gray scale
X = X.reshape(-1,28,28,1)
# Scaling numbers [0,1], normalization
X = tf.keras.utils.normalize(X, axis = 1)
# Split the train and the test set
X_train, X_test, y_train, y_test = train_test_split(X,y ,test_size=0.3, random_state = 42)
early_stopping_monitor = EarlyStopping(
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
# Sequential Model
model =tf.keras.models.Sequential()
model.add(Conv2D(filters=64, kernel_size=3, input_shape = (28,28,1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(filters=32, kernel_size = 3, activation='relu'))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(128,activation="relu"))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
# Training model (callbacks are passed to fit, not compile)
history = model.fit(X_train, y_train, batch_size=BATCH_SIZE,
epochs=EPOCHS, validation_split=0.2,
callbacks=[early_stopping_monitor])
# Saving training history via pickle
with open('model.h5', 'wb') as f:
pickle.dump(history.history, f)
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Rewards.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tools/apifrontend/main.go
|
// Copyright (c) 2021 Michael Andersen
// Copyright (c) 2021 Regents of the University Of California
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
package main
import (
"context"
"crypto/tls"
"encoding/base64"
"fmt"
"io"
"mime"
"net"
"net/http"
"sync"
"os"
"strings"
"time"
etcd "github.com/coreos/etcd/clientv3"
btrdb "gopkg.in/BTrDB/btrdb.v4"
"github.com/BTrDB/smartgridstore/acl"
"github.com/BTrDB/smartgridstore/tools"
"github.com/BTrDB/smartgridstore/tools/certutils"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/grpc-ecosystem/go-grpc-middleware/auth"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
logging "github.com/op/go-logging"
"github.com/pborman/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
pb "gopkg.in/BTrDB/btrdb.v4/grpcinterface"
)
const MajorVersion = tools.VersionMajor
const MinorVersion = tools.VersionMinor
type CK string
const UserKey CK = "user"
type apiProvider struct {
s *grpc.Server
downstream *btrdb.BTrDB
ae *acl.ACLEngine
colUUcache map[[16]byte]string
colUUmu sync.Mutex
secure bool
}
var logger *logging.Logger
func init() {
logger = logging.MustGetLogger("log")
}
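// authfunc authenticates gRPC calls with a bearer API key and falls back to the public user when no credentials are supplied.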
func (a *apiProvider) authfunc(ctx context.Context) (context.Context, error) {
var userObj *acl.User
auth, err := grpc_auth.AuthFromMD(ctx, "bearer")
if err != nil {
if grpc.Code(err) == codes.Unauthenticated {
userObj, err = a.ae.GetPublicUser()
if err != nil {
panic(err)
}
} else {
return nil, err
}
}
if auth != "" {
//Returns false, nil, nil if password is incorrect or user does not exist
var ok bool
ok, userObj, err = a.ae.AuthenticateUserByKey(auth)
if !ok {
return nil, grpc.Errorf(codes.Unauthenticated, "invalid api key")
}
}
newCtx := context.WithValue(ctx, UserKey, userObj)
return newCtx, nil
}
//go:generate ./genswag.py
//go:generate go-bindata -pkg main swag/...
func serveSwagger(mux *http.ServeMux) {
mime.AddExtensionType(".svg", "image/svg+xml")
// Expose files in third_party/swagger-ui/ on <host>/swagger-ui
fileServer := http.FileServer(&assetfs.AssetFS{
Asset: Asset,
AssetDir: AssetDir,
AssetInfo: AssetInfo,
Prefix: "swag",
})
prefix := "/swag/"
mux.Handle(prefix, http.StripPrefix(prefix, fileServer))
}
type GRPCInterface interface {
InitiateShutdown() chan struct{}
}
//Copied verbatim from golang HTTP package
func parseBasicAuth(auth string) (username, password string, ok bool) {
const prefix = "Basic "
if !strings.HasPrefix(auth, prefix) {
return
}
c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
if err != nil {
return
}
cs := string(c)
s := strings.IndexByte(cs, ':')
if s < 0 {
return
}
return cs[:s], cs[s+1:], true
}
func (a *apiProvider) writeEndpoint(ctx context.Context, uu uuid.UUID) (*btrdb.Endpoint, error) {
return a.downstream.EndpointFor(ctx, uu)
}
func (a *apiProvider) readEndpoint(ctx context.Context, uu uuid.UUID) (*btrdb.Endpoint, error) {
return a.downstream.ReadEndpointFor(ctx, uu)
}
func (a *apiProvider) anyEndpoint(ctx context.Context) (*btrdb.Endpoint, error) {
return a.downstream.GetAnyEndpoint(ctx)
}
// func (a *apiProvider) getUser(ctx context.Context) (*User, error) {
// return nil, nil
// }
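// checkPermissionsByUUID resolves the stream's collection (with a small cache) and delegates to checkPermissionsByCollection.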
func (a *apiProvider) checkPermissionsByUUID(ctx context.Context, uu uuid.UUID, cap ...string) error {
a.colUUmu.Lock()
col, ok := a.colUUcache[uu.Array()]
a.colUUmu.Unlock()
if !ok {
var err error
s := a.downstream.StreamFromUUID(uu)
col, err = s.Collection(ctx)
if err != nil {
if e := btrdb.ToCodedError(err); e != nil && e.Code == 404 {
return grpc.Errorf(codes.PermissionDenied, "user does not have permission on this stream")
}
return err
}
a.colUUmu.Lock()
a.colUUcache[uu.Array()] = col
a.colUUmu.Unlock()
}
return a.checkPermissionsByCollection(ctx, col, cap...)
}
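// checkPermissionsByCollection verifies that the authenticated user holds every requested capability on the collection prefix.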
func (a *apiProvider) checkPermissionsByCollection(ctx context.Context, collection string, cap ...string) error {
u, ok := ctx.Value(UserKey).(*acl.User)
if !ok {
return grpc.Errorf(codes.PermissionDenied, "could not resolve user")
}
for _, cp := range cap {
if !u.HasCapabilityOnPrefix(cp, collection) {
return grpc.Errorf(codes.PermissionDenied, "user does not have permission %q on %q", cp, collection)
}
}
return nil
}
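// ProxyGRPCSecure starts a TLS gRPC proxy on laddr using the API certificates stored in etcd and returns the TLS config (nil if the config is incomplete).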
func ProxyGRPCSecure(laddr string) *tls.Config {
etcdEndpoint := os.Getenv("ETCD_ENDPOINT")
if len(etcdEndpoint) == 0 {
etcdEndpoint = "http://etcd:2379"
}
etcdClient, err := etcd.New(etcd.Config{
Endpoints: []string{etcdEndpoint},
DialTimeout: 5 * time.Second})
if err != nil {
fmt.Printf("Could not connect to etcd: %v\n", err)
os.Exit(1)
}
cfg, err := certutils.GetAPIConfig(etcdClient)
if cfg == nil {
fmt.Printf("TLS config is incomplete (%v), disabling secure endpoints\n", err)
return nil
}
creds := credentials.NewTLS(cfg)
l, err := net.Listen("tcp", laddr)
if err != nil {
panic(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
downstream, err := btrdb.Connect(ctx, btrdb.EndpointsFromEnv()...)
cancel()
if err != nil {
panic(err)
}
api := &apiProvider{downstream: downstream, colUUcache: make(map[[16]byte]string)}
api.secure = true
ae := acl.NewACLEngine(acl.DefaultPrefix, etcdClient)
api.ae = ae
//--
grpcServer := grpc.NewServer(grpc.Creds(creds),
grpc.StreamInterceptor(grpc_auth.StreamServerInterceptor(api.authfunc)),
grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(api.authfunc)))
//--
api.s = grpcServer
pb.RegisterBTrDBServer(grpcServer, api)
go grpcServer.Serve(l)
return cfg
}
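// ProxyGRPC starts the plaintext gRPC proxy on laddr and returns a handle that supports graceful shutdown.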
func ProxyGRPC(laddr string) GRPCInterface {
etcdEndpoint := os.Getenv("ETCD_ENDPOINT")
if len(etcdEndpoint) == 0 {
etcdEndpoint = "http://etcd:2379"
}
etcdClient, err := etcd.New(etcd.Config{
Endpoints: []string{etcdEndpoint},
DialTimeout: 5 * time.Second})
if err != nil {
fmt.Printf("Could not connect to etcd: %v\n", err)
os.Exit(1)
}
l, err := net.Listen("tcp", laddr)
if err != nil {
panic(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
downstream, err := btrdb.Connect(ctx, btrdb.EndpointsFromEnv()...)
cancel()
if err != nil {
panic(err)
}
api := &apiProvider{downstream: downstream, colUUcache: make(map[[16]byte]string)}
api.secure = false
ae := acl.NewACLEngine(acl.DefaultPrefix, etcdClient)
api.ae = ae
//--
grpcServer := grpc.NewServer(
grpc.StreamInterceptor(grpc_auth.StreamServerInterceptor(api.authfunc)),
grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(api.authfunc)))
//--
api.s = grpcServer
pb.RegisterBTrDBServer(grpcServer, api)
go grpcServer.Serve(l)
return api
}
func main() {
if len(os.Args) == 2 && os.Args[1] == "-version" {
fmt.Printf("%d.%d.%d\n", tools.VersionMajor, tools.VersionMinor, tools.VersionPatch)
os.Exit(0)
}
disable_insecure := strings.ToLower(os.Getenv("DISABLE_INSECURE")) == "yes"
insecure_listen := "0.0.0.0:4410"
if disable_insecure {
insecure_listen = "127.0.0.1:4410"
}
ProxyGRPC(insecure_listen)
tlsconfig := ProxyGRPCSecure("0.0.0.0:4411")
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
mux := http.NewServeMux()
mux.HandleFunc("/v4/swagger.json", func(w http.ResponseWriter, req *http.Request) {
io.Copy(w, strings.NewReader(SwaggerJSON))
})
mux.HandleFunc("/v4/query", queryhandler)
gwmux := runtime.NewServeMux()
opts := []grpc.DialOption{grpc.WithInsecure()}
err := pb.RegisterBTrDBHandlerFromEndpoint(ctx, gwmux, "127.0.0.1:4410", opts)
if err != nil {
panic(err)
}
mux.Handle("/", gwmux)
serveSwagger(mux)
if !disable_insecure {
go func() {
err := http.ListenAndServe(":9000", mux)
if err != nil {
panic(err)
}
}()
}
if tlsconfig != nil {
fmt.Printf("starting secure http\n")
go func() {
server := &http.Server{Addr: ":9001", Handler: mux, TLSConfig: tlsconfig}
err := server.ListenAndServeTLS("", "")
if err != nil {
panic(err)
}
}()
} else {
fmt.Printf("skipping secure http\n")
}
for {
time.Sleep(10 * time.Second)
}
}
func (a *apiProvider) InitiateShutdown() chan struct{} {
done := make(chan struct{})
go func() {
a.s.GracefulStop()
close(done)
}()
return done
}
func (a *apiProvider) RawValues(p *pb.RawValuesParams, r pb.BTrDB_RawValuesServer) error {
ctx := r.Context()
uu := p.Uuid
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return err
}
var ep *btrdb.Endpoint
for a.downstream.TestEpError(ep, err) {
ep, err = a.readEndpoint(ctx, uu)
if err != nil {
continue
}
client, err := ep.GetGRPC().RawValues(ctx, p)
if err != nil {
continue
}
for {
resp, err := client.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
err = r.Send(resp)
if err != nil {
return err
}
}
}
return err
}
func (a *apiProvider) AlignedWindows(p *pb.AlignedWindowsParams, r pb.BTrDB_AlignedWindowsServer) error {
ctx := r.Context()
uu := p.Uuid
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return err
}
var ep *btrdb.Endpoint
for a.downstream.TestEpError(ep, err) {
ep, err = a.readEndpoint(ctx, uu)
if err != nil {
continue
}
client, err := ep.GetGRPC().AlignedWindows(ctx, p)
if err != nil {
continue
}
for {
resp, err := client.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
err = r.Send(resp)
if err != nil {
return err
}
}
}
return err
}
func (a *apiProvider) Windows(p *pb.WindowsParams, r pb.BTrDB_WindowsServer) error {
ctx := r.Context()
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return err
}
var ep *btrdb.Endpoint
for a.downstream.TestEpError(ep, err) {
ep, err = a.readEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
client, err := ep.GetGRPC().Windows(ctx, p)
if err != nil {
continue
}
for {
resp, err := client.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
err = r.Send(resp)
if err != nil {
return err
}
}
}
return err
}
func (a *apiProvider) GenerateCSV(p *pb.GenerateCSVParams, r pb.BTrDB_GenerateCSVServer) error {
ctx := r.Context()
for _, s := range p.Streams {
err := a.checkPermissionsByUUID(ctx, s.Uuid, "api", "read")
if err != nil {
return err
}
}
var ep *btrdb.Endpoint
var err error
for a.downstream.TestEpError(ep, err) {
ep, err = a.anyEndpoint(ctx)
if err != nil {
continue
}
client, err := ep.GetGRPC().GenerateCSV(ctx, p)
if err != nil {
continue
}
for {
resp, err := client.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
err = r.Send(resp)
if err != nil {
return err
}
}
}
return err
}
func (a *apiProvider) StreamInfo(ctx context.Context, p *pb.StreamInfoParams) (*pb.StreamInfoResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.StreamInfoResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.readEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
rv, err = ep.GetGRPC().StreamInfo(ctx, p)
}
return rv, err
}
func (a *apiProvider) GetMetadataUsage(ctx context.Context, p *pb.MetadataUsageParams) (*pb.MetadataUsageResponse, error) {
err := a.checkPermissionsByCollection(ctx, p.Prefix, "api", "read")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.MetadataUsageResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.anyEndpoint(ctx)
if err != nil {
continue
}
rv, err = ep.GetGRPC().GetMetadataUsage(ctx, p)
}
return rv, err
}
func (a *apiProvider) SetStreamAnnotations(ctx context.Context, p *pb.SetStreamAnnotationsParams) (*pb.SetStreamAnnotationsResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.SetStreamAnnotationsResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.writeEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
rv, err = ep.GetGRPC().SetStreamAnnotations(ctx, p)
}
return rv, err
}
func (a *apiProvider) Changes(p *pb.ChangesParams, r pb.BTrDB_ChangesServer) error {
ctx := r.Context()
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return err
}
var ep *btrdb.Endpoint
for a.downstream.TestEpError(ep, err) {
ep, err = a.readEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
client, err := ep.GetGRPC().Changes(ctx, p)
if err != nil {
continue
}
for {
resp, err := client.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
err = r.Send(resp)
if err != nil {
return err
}
}
}
return err
}
func (a *apiProvider) Create(ctx context.Context, p *pb.CreateParams) (*pb.CreateResponse, error) {
err := a.checkPermissionsByCollection(ctx, p.Collection, "api", "insert")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.CreateResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.writeEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
rv, err = ep.GetGRPC().Create(ctx, p)
}
return rv, err
}
func (a *apiProvider) ListCollections(ctx context.Context, p *pb.ListCollectionsParams) (*pb.ListCollectionsResponse, error) {
var ep *btrdb.Endpoint
var rv *pb.ListCollectionsResponse
var err error
for a.downstream.TestEpError(ep, err) {
ep, err = a.anyEndpoint(ctx)
if err != nil {
continue
}
rv, err = ep.GetGRPC().ListCollections(ctx, p)
}
if err != nil {
return nil, err
}
filtCollections := make([]string, 0, len(rv.Collections))
for _, col := range rv.Collections {
cerr := a.checkPermissionsByCollection(ctx, col, "api", "read")
if cerr != nil {
continue
}
filtCollections = append(filtCollections, col)
}
rv.Collections = filtCollections
return rv, err
}
func (a *apiProvider) LookupStreams(p *pb.LookupStreamsParams, r pb.BTrDB_LookupStreamsServer) error {
ctx := r.Context()
var ep *btrdb.Endpoint
var err error
for a.downstream.TestEpError(ep, err) {
ep, err = a.anyEndpoint(ctx)
if err != nil {
continue
}
client, err := ep.GetGRPC().LookupStreams(ctx, p)
if err != nil {
continue
}
for {
resp, err := client.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
if resp.Stat != nil {
cerr := r.Send(resp)
if cerr != nil {
return cerr
}
}
//Filter the results by permitted ones
nr := make([]*pb.StreamDescriptor, 0, len(resp.Results))
for _, res := range resp.Results {
sterr := a.checkPermissionsByCollection(ctx, res.Collection, "api", "read")
if sterr != nil {
continue
}
nr = append(nr, res)
}
resp.Results = nr
err = r.Send(resp)
if err != nil {
return err
}
}
}
return err
}
func (a *apiProvider) Nearest(ctx context.Context, p *pb.NearestParams) (*pb.NearestResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "read")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.NearestResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.readEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
rv, err = ep.GetGRPC().Nearest(ctx, p)
}
return rv, err
}
func (a *apiProvider) Insert(ctx context.Context, p *pb.InsertParams) (*pb.InsertResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "insert")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.InsertResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.writeEndpoint(ctx, p.Uuid)
if err != nil {
continue
}
rv, err = ep.GetGRPC().Insert(ctx, p)
}
return rv, err
}
func (a *apiProvider) Delete(ctx context.Context, p *pb.DeleteParams) (*pb.DeleteResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "delete")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.DeleteResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.writeEndpoint(ctx, p.GetUuid())
if err != nil {
continue
}
rv, err = ep.GetGRPC().Delete(ctx, p)
}
return rv, err
}
func (a *apiProvider) Flush(ctx context.Context, p *pb.FlushParams) (*pb.FlushResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "insert")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.FlushResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.writeEndpoint(ctx, p.GetUuid())
if err != nil {
continue
}
rv, err = ep.GetGRPC().Flush(ctx, p)
}
return rv, err
}
func (a *apiProvider) Obliterate(ctx context.Context, p *pb.ObliterateParams) (*pb.ObliterateResponse, error) {
err := a.checkPermissionsByUUID(ctx, p.GetUuid(), "api", "obliterate")
if err != nil {
return nil, err
}
var ep *btrdb.Endpoint
var rv *pb.ObliterateResponse
for a.downstream.TestEpError(ep, err) {
ep, err = a.writeEndpoint(ctx, p.GetUuid())
if err != nil {
continue
}
rv, err = ep.GetGRPC().Obliterate(ctx, p)
}
return rv, err
}
func (a *apiProvider) FaultInject(ctx context.Context, p *pb.FaultInjectParams) (*pb.FaultInjectResponse, error) {
err := a.checkPermissionsByUUID(ctx, uuid.NewRandom(), "api", "admin")
if err != nil {
return nil, err
}
ds, err := a.writeEndpoint(ctx, uuid.NewRandom())
if err != nil {
return nil, err
}
rv, e := ds.GetGRPC().FaultInject(ctx, p)
return rv, e
}
func (a *apiProvider) Info(ctx context.Context, params *pb.InfoParams) (*pb.InfoResponse, error) {
//We do not forward the info call, as we want the client to always contact us
ourip := "localhost"
if ex := os.Getenv("EXTERNAL_ADDRESS"); ex != "" {
ourip = ex
}
parts := strings.SplitN(ourip, ":", 2)
ourip = parts[0]
suffix := ":4410"
if a.secure {
suffix = ":4411"
}
ProxyInfo := &pb.ProxyInfo{
ProxyEndpoints: []string{ourip + suffix},
}
return &pb.InfoResponse{
MajorVersion: MajorVersion,
MinorVersion: MinorVersion,
Build: fmt.Sprintf("%d.%d.%d", tools.VersionMajor, tools.VersionMinor, tools.VersionPatch),
Proxy: ProxyInfo,
}, nil
}
|
[
"\"ETCD_ENDPOINT\"",
"\"ETCD_ENDPOINT\"",
"\"DISABLE_INSECURE\"",
"\"EXTERNAL_ADDRESS\""
] |
[] |
[
"DISABLE_INSECURE",
"EXTERNAL_ADDRESS",
"ETCD_ENDPOINT"
] |
[]
|
["DISABLE_INSECURE", "EXTERNAL_ADDRESS", "ETCD_ENDPOINT"]
|
go
| 3 | 0 | |
notify_test.go
|
package pq
import (
"github.com/ivansukach/super-sql"
"github.com/ivansukach/super-sql/driver"
"errors"
"fmt"
"io"
"os"
"runtime"
"sync"
"testing"
"time"
)
var errNilNotification = errors.New("nil notification")
func expectNotification(t *testing.T, ch <-chan *Notification, relname string, extra string) error {
select {
case n := <-ch:
if n == nil {
return errNilNotification
}
if n.Channel != relname || n.Extra != extra {
return fmt.Errorf("unexpected notification %v", n)
}
return nil
case <-time.After(1500 * time.Millisecond):
return fmt.Errorf("timeout")
}
}
func expectNoNotification(t *testing.T, ch <-chan *Notification) error {
select {
case n := <-ch:
return fmt.Errorf("unexpected notification %v", n)
case <-time.After(100 * time.Millisecond):
return nil
}
}
func expectEvent(t *testing.T, eventch <-chan ListenerEventType, et ListenerEventType) error {
select {
case e := <-eventch:
if e != et {
return fmt.Errorf("unexpected event %v", e)
}
return nil
case <-time.After(1500 * time.Millisecond):
panic("expectEvent timeout")
}
}
func expectNoEvent(t *testing.T, eventch <-chan ListenerEventType) error {
select {
case e := <-eventch:
return fmt.Errorf("unexpected event %v", e)
case <-time.After(100 * time.Millisecond):
return nil
}
}
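// newTestListenerConn opens a ListenerConn against the test database, defaulting
// PGDATABASE and PGSSLMODE when they are not set in the environment.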
func newTestListenerConn(t *testing.T) (*ListenerConn, <-chan *Notification) {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
notificationChan := make(chan *Notification)
l, err := NewListenerConn("", notificationChan)
if err != nil {
t.Fatal(err)
}
return l, notificationChan
}
func TestNewListenerConn(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
}
func TestConnListen(t *testing.T) {
l, channel := newTestListenerConn(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestConnUnlisten(t *testing.T) {
l, channel := newTestListenerConn(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "")
if err != nil {
t.Fatal(err)
}
ok, err = l.Unlisten("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, channel)
if err != nil {
t.Fatal(err)
}
}
func TestConnUnlistenAll(t *testing.T) {
l, channel := newTestListenerConn(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "")
if err != nil {
t.Fatal(err)
}
ok, err = l.UnlistenAll()
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, channel)
if err != nil {
t.Fatal(err)
}
}
func TestConnClose(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
err := l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != errListenerConnClosed {
t.Fatalf("expected errListenerConnClosed; got %v", err)
}
}
func TestConnPing(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
err := l.Ping()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Ping()
if err != errListenerConnClosed {
t.Fatalf("expected errListenerConnClosed; got %v", err)
}
}
// Test for deadlock where a query fails while another one is queued
func TestConnExecDeadlock(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
var wg sync.WaitGroup
wg.Add(2)
go func() {
l.ExecSimpleQuery("SELECT pg_sleep(60)")
wg.Done()
}()
runtime.Gosched()
go func() {
l.ExecSimpleQuery("SELECT 1")
wg.Done()
}()
// give the two goroutines some time to get into position
runtime.Gosched()
// calls Close on the net.Conn; equivalent to a network failure
l.Close()
defer time.AfterFunc(10*time.Second, func() {
panic("timed out")
}).Stop()
wg.Wait()
}
// Test for ListenerConn being closed while a slow query is executing
func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
l, _ := newTestListenerConn(t)
defer l.Close()
var wg sync.WaitGroup
wg.Add(1)
go func() {
sent, err := l.ExecSimpleQuery("SELECT pg_sleep(60)")
if sent {
panic("expected sent=false")
}
// could be any of a number of errors
if err == nil {
panic("expected error")
}
wg.Done()
}()
// give the above goroutine some time to get into position
runtime.Gosched()
err := l.Close()
if err != nil {
t.Fatal(err)
}
defer time.AfterFunc(10*time.Second, func() {
panic("timed out")
}).Stop()
wg.Wait()
}
func TestNotifyExtra(t *testing.T) {
db := openTestConn(t)
defer db.Close()
if getServerVersion(t, db) < 90000 {
t.Skip("skipping NOTIFY payload test since the server does not appear to support it")
}
l, channel := newTestListenerConn(t)
defer l.Close()
ok, err := l.Listen("notify_test")
if !ok || err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_test, 'something'")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, channel, "notify_test", "something")
if err != nil {
t.Fatal(err)
}
}
// create a new test listener and also set the timeouts
func newTestListenerTimeout(t *testing.T, min time.Duration, max time.Duration) (*Listener, <-chan ListenerEventType) {
datname := os.Getenv("PGDATABASE")
sslmode := os.Getenv("PGSSLMODE")
if datname == "" {
os.Setenv("PGDATABASE", "pqgotest")
}
if sslmode == "" {
os.Setenv("PGSSLMODE", "disable")
}
eventch := make(chan ListenerEventType, 16)
l := NewListener("", min, max, func(t ListenerEventType, err error) { eventch <- t })
err := expectEvent(t, eventch, ListenerEventConnected)
if err != nil {
t.Fatal(err)
}
return l, eventch
}
func newTestListener(t *testing.T) (*Listener, <-chan ListenerEventType) {
return newTestListenerTimeout(t, time.Hour, time.Hour)
}
func TestListenerListen(t *testing.T) {
l, _ := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestListenerUnlisten(t *testing.T) {
l, _ := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = l.Unlisten("notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, l.Notify)
if err != nil {
t.Fatal(err)
}
}
func TestListenerUnlistenAll(t *testing.T) {
l, _ := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = l.UnlistenAll()
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNoNotification(t, l.Notify)
if err != nil {
t.Fatal(err)
}
}
func TestListenerFailedQuery(t *testing.T) {
l, eventch := newTestListener(t)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
// shouldn't cause a disconnect
ok, err := l.cn.ExecSimpleQuery("SELECT error")
if !ok {
t.Fatalf("could not send query to server: %v", err)
}
_, ok = err.(PGError)
if !ok {
t.Fatalf("unexpected error %v", err)
}
err = expectNoEvent(t, eventch)
if err != nil {
t.Fatal(err)
}
// should still work
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestListenerReconnect(t *testing.T) {
l, eventch := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
defer l.Close()
db := openTestConn(t)
defer db.Close()
err := l.Listen("notify_listen_test")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
// kill the connection and make sure it comes back up
ok, err := l.cn.ExecSimpleQuery("SELECT pg_terminate_backend(pg_backend_pid())")
if ok {
t.Fatalf("could not kill the connection: %v", err)
}
if err != io.EOF {
t.Fatalf("unexpected error %v", err)
}
err = expectEvent(t, eventch, ListenerEventDisconnected)
if err != nil {
t.Fatal(err)
}
err = expectEvent(t, eventch, ListenerEventReconnected)
if err != nil {
t.Fatal(err)
}
// should still work
_, err = db.Exec("NOTIFY notify_listen_test")
if err != nil {
t.Fatal(err)
}
// should get nil after Reconnected
err = expectNotification(t, l.Notify, "", "")
if err != errNilNotification {
t.Fatal(err)
}
err = expectNotification(t, l.Notify, "notify_listen_test", "")
if err != nil {
t.Fatal(err)
}
}
func TestListenerClose(t *testing.T) {
l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
defer l.Close()
err := l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != errListenerClosed {
t.Fatalf("expected errListenerClosed; got %v", err)
}
}
func TestListenerPing(t *testing.T) {
l, _ := newTestListenerTimeout(t, 20*time.Millisecond, time.Hour)
defer l.Close()
err := l.Ping()
if err != nil {
t.Fatal(err)
}
err = l.Close()
if err != nil {
t.Fatal(err)
}
err = l.Ping()
if err != errListenerClosed {
t.Fatalf("expected errListenerClosed; got %v", err)
}
}
func TestConnectorWithNotificationHandler_Simple(t *testing.T) {
b, err := NewConnector("")
if err != nil {
t.Fatal(err)
}
var notification *Notification
// Make connector w/ handler to set the local var
c := ConnectorWithNotificationHandler(b, func(n *Notification) { notification = n })
sendNotification(c, t, "Test notification #1")
if notification == nil || notification.Extra != "Test notification #1" {
t.Fatalf("Expected notification w/ message, got %v", notification)
}
// Unset the handler on the same connector
prevC := c
if c = ConnectorWithNotificationHandler(c, nil); c != prevC {
t.Fatalf("Expected to not create new connector but did")
}
sendNotification(c, t, "Test notification #2")
if notification == nil || notification.Extra != "Test notification #1" {
t.Fatalf("Expected notification to not change, got %v", notification)
}
// Set it back on the same connector
if c = ConnectorWithNotificationHandler(c, func(n *Notification) { notification = n }); c != prevC {
t.Fatal("Expected to not create new connector but did")
}
sendNotification(c, t, "Test notification #3")
if notification == nil || notification.Extra != "Test notification #3" {
t.Fatalf("Expected notification w/ message, got %v", notification)
}
}
func sendNotification(c driver.Connector, t *testing.T, escapedNotification string) {
db := sql.OpenDB(c)
defer db.Close()
sql := fmt.Sprintf("LISTEN foo; NOTIFY foo, '%s';", escapedNotification)
if _, err := db.Exec(sql); err != nil {
t.Fatal(err)
}
}
|
[
"\"PGDATABASE\"",
"\"PGSSLMODE\"",
"\"PGDATABASE\"",
"\"PGSSLMODE\""
] |
[] |
[
"PGSSLMODE",
"PGDATABASE"
] |
[]
|
["PGSSLMODE", "PGDATABASE"]
|
go
| 2 | 0 | |
server/pkg/k8s/k8s.go
|
package k8s
import (
"fmt"
"os"
"strings"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
versionedclient "istio.io/client-go/pkg/clientset/versioned"
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
func Setup() (*versionedclient.Clientset, error) {
ic := &versionedclient.Clientset{}
//kubeconfig := os.Getenv("KUBECONFIG")
home, _ := os.UserHomeDir()
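// Expand '~' to the user's home directory to build the default kubeconfig path.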
kubeconfig := strings.Replace("~/.kube/config", "~", home, 1)
if len(kubeconfig) == 0 {
return ic, fmt.Errorf("environment variables kubeconfig need to be set")
}
var restConfig *rest.Config
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
restConfig, err = rest.InClusterConfig()
if err != nil {
return ic, fmt.Errorf("failed to create k8s rest client: %s", err)
}
}
ic, err = versionedclient.NewForConfig(restConfig)
if err != nil {
return ic, fmt.Errorf("failed to create istio client: %s", err)
}
return ic, nil
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
oshi-core/src/main/java/oshi/software/os/linux/LinuxOperatingSystem.java
|
/**
* MIT License
*
* Copyright (c) 2010 - 2020 The OSHI Project Contributors: https://github.com/oshi/oshi/graphs/contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package oshi.software.os.linux;
import static oshi.software.os.OSService.State.RUNNING;
import static oshi.software.os.OSService.State.STOPPED;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sun.jna.Memory; // NOSONAR squid:S1191
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.platform.linux.LibC;
import com.sun.jna.platform.linux.LibC.Sysinfo;
import oshi.jna.platform.linux.LinuxLibc;
import oshi.software.common.AbstractOperatingSystem;
import oshi.software.os.FileSystem;
import oshi.software.os.NetworkParams;
import oshi.software.os.OSProcess;
import oshi.software.os.OSService;
import oshi.software.os.OSUser;
import oshi.util.ExecutingCommand;
import oshi.util.FileUtil;
import oshi.util.ParseUtil;
import oshi.util.platform.linux.ProcUtil;
/**
* <p>
* LinuxOperatingSystem class.
* </p>
*/
public class LinuxOperatingSystem extends AbstractOperatingSystem {
private static final Logger LOG = LoggerFactory.getLogger(LinuxOperatingSystem.class);
private static final long BOOTTIME;
static {
// Boot time given by btime variable in /proc/stat.
List<String> procStat = FileUtil.readFile("/proc/stat");
long tempBT = 0;
for (String stat : procStat) {
if (stat.startsWith("btime")) {
String[] bTime = ParseUtil.whitespaces.split(stat);
tempBT = ParseUtil.parseLongOrDefault(bTime[1], 0L);
break;
}
}
// If above fails, current time minus uptime.
if (tempBT == 0) {
tempBT = System.currentTimeMillis() / 1000L - (long) ProcUtil.getSystemUptimeSeconds();
}
BOOTTIME = tempBT;
}
// Populated with results of reading /etc/os-release or other files
private String versionId;
private String codeName;
// Resident Set Size is given as number of pages the process has in real
// memory.
// To get the actual size in bytes we need to multiply that with page size.
private final int memoryPageSize;
// Order the field is in /proc/pid/stat
enum ProcPidStat {
// The parsing implementation in ParseUtil requires these to be declared
// in increasing order
PPID(4), USER_TIME(14), KERNEL_TIME(15), PRIORITY(18), THREAD_COUNT(20), START_TIME(22), VSZ(23), RSS(24);
private int order;
public int getOrder() {
return this.order;
}
ProcPidStat(int order) {
this.order = order;
}
}
// Get a list of orders to pass to ParseUtil
private static final int[] PROC_PID_STAT_ORDERS = new int[ProcPidStat.values().length];
static {
for (ProcPidStat stat : ProcPidStat.values()) {
// The stat field orders in the ProcPidStat enum are 1-indexed.
// Subtract one to get a zero-based index
PROC_PID_STAT_ORDERS[stat.ordinal()] = stat.getOrder() - 1;
}
}
// 2.6 Kernel has 44 elements, 3.3 has 47, and 3.5 has 52.
// Check /proc/self/stat to find its length
private static final int PROC_PID_STAT_LENGTH;
static {
String stat = FileUtil.getStringFromFile(ProcUtil.getProcPath() + "/self/stat");
if (!stat.isEmpty() && stat.contains(")")) {
// add 3 to account for pid, process name in parentheses, and state
PROC_PID_STAT_LENGTH = ParseUtil.countStringToLongArray(stat, ' ') + 3;
} else {
// Default assuming recent kernel
PROC_PID_STAT_LENGTH = 52;
}
}
// Jiffies per second, used for process time counters.
private static final long USER_HZ = ParseUtil.parseLongOrDefault(ExecutingCommand.getFirstAnswer("getconf CLK_TCK"),
100L);
// Boot time in MS.
private static final long BOOT_TIME;
static {
// Uptime is only in hundredths of seconds but we need thousandths.
// We can grab uptime twice and take average to reduce error, getting
// current time in between
double uptime = ProcUtil.getSystemUptimeSeconds();
long now = System.currentTimeMillis();
uptime += ProcUtil.getSystemUptimeSeconds();
// Uptime is now 2x seconds, so divide by 2, but
// we want milliseconds so multiply by 1000
// Ultimately multiply by 1000/2 = 500
BOOT_TIME = now - (long) (500d * uptime + 0.5);
// Cast/truncation is effectively rounding. Boot time could
// be +/- 5 ms due to System Uptime rounding to nearest 10ms.
}
/**
* <p>
* Constructor for LinuxOperatingSystem.
* </p>
*/
@SuppressWarnings("deprecation")
public LinuxOperatingSystem() {
super.getVersionInfo();
// The above call may also populate versionId and codeName
// to pass to version constructor
this.version = new LinuxOSVersionInfoEx(this.versionId, this.codeName);
this.memoryPageSize = ParseUtil.parseIntOrDefault(ExecutingCommand.getFirstAnswer("getconf PAGESIZE"), 4096);
}
@Override
public String queryManufacturer() {
return "GNU/Linux";
}
@Override
public FamilyVersionInfo queryFamilyVersionInfo() {
String family = queryFamilyFromReleaseFiles();
String buildNumber = null;
List<String> procVersion = FileUtil.readFile("/proc/version");
if (!procVersion.isEmpty()) {
String[] split = ParseUtil.whitespaces.split(procVersion.get(0));
for (String s : split) {
if (!"Linux".equals(s) && !"version".equals(s)) {
buildNumber = s;
break;
}
}
}
OSVersionInfo versionInfo = new OSVersionInfo(this.versionId, this.codeName, buildNumber);
return new FamilyVersionInfo(family, versionInfo);
}
@Override
protected int queryBitness(int jvmBitness) {
if (jvmBitness < 64 && ExecutingCommand.getFirstAnswer("uname -m").indexOf("64") == -1) {
return jvmBitness;
}
return 64;
}
@Override
protected boolean queryElevated() {
return System.getenv("SUDO_COMMAND") != null;
}
@Override
public FileSystem getFileSystem() {
return new LinuxFileSystem();
}
@Override
public OSProcess[] getProcesses(int limit, ProcessSort sort, boolean slowFields) {
List<OSProcess> procs = new ArrayList<>();
File[] pids = ProcUtil.getPidFiles();
LinuxUserGroupInfo userGroupInfo = new LinuxUserGroupInfo();
// now for each file (with digit name) get process info
for (File pidFile : pids) {
int pid = ParseUtil.parseIntOrDefault(pidFile.getName(), 0);
OSProcess proc = getProcess(pid, userGroupInfo, slowFields);
if (proc != null) {
procs.add(proc);
}
}
// Sort
List<OSProcess> sorted = processSort(procs, limit, sort);
return sorted.toArray(new OSProcess[0]);
}
@Override
public OSProcess getProcess(int pid, boolean slowFields) {
return getProcess(pid, new LinuxUserGroupInfo(), slowFields);
}
private OSProcess getProcess(int pid, LinuxUserGroupInfo userGroupInfo, boolean slowFields) {
String path = "";
Pointer buf = new Memory(1024);
int size = LinuxLibc.INSTANCE.readlink(String.format("/proc/%d/exe", pid), buf, 1023);
if (size > 0) {
String tmp = buf.getString(0);
path = tmp.substring(0, tmp.length() < size ? tmp.length() : size);
}
Map<String, String> io = FileUtil.getKeyValueMapFromFile(String.format("/proc/%d/io", pid), ":");
// See man proc for how to parse /proc/[pid]/stat
long now = System.currentTimeMillis();
String stat = FileUtil.getStringFromFile(String.format("/proc/%d/stat", pid));
// A race condition may leave us with an empty string
if (stat.isEmpty()) {
return null;
}
// We can get name and status more easily from /proc/pid/status which we
// call later, so just get the numeric bits here
long[] statArray = ParseUtil.parseStringToLongArray(stat, PROC_PID_STAT_ORDERS, PROC_PID_STAT_LENGTH, ' ');
// Fetch cached process if it exists
OSProcess proc = new OSProcess(this);
proc.setProcessID(pid);
// The /proc/pid/cmdline value is null-delimited
proc.setCommandLine(FileUtil.getStringFromFile(String.format("/proc/%d/cmdline", pid)));
long startTime = BOOT_TIME + statArray[ProcPidStat.START_TIME.ordinal()] * 1000L / USER_HZ;
// BOOT_TIME could be up to 5ms off. In rare cases when a process has
// started within 5ms of boot it is possible to get negative uptime.
if (startTime >= now) {
startTime = now - 1;
}
proc.setStartTime(startTime);
proc.setParentProcessID((int) statArray[ProcPidStat.PPID.ordinal()]);
proc.setThreadCount((int) statArray[ProcPidStat.THREAD_COUNT.ordinal()]);
proc.setPriority((int) statArray[ProcPidStat.PRIORITY.ordinal()]);
proc.setVirtualSize(statArray[ProcPidStat.VSZ.ordinal()]);
proc.setResidentSetSize(statArray[ProcPidStat.RSS.ordinal()] * this.memoryPageSize);
proc.setKernelTime(statArray[ProcPidStat.KERNEL_TIME.ordinal()] * 1000L / USER_HZ);
proc.setUserTime(statArray[ProcPidStat.USER_TIME.ordinal()] * 1000L / USER_HZ);
proc.setUpTime(now - proc.getStartTime());
// See man proc for how to parse /proc/[pid]/io
proc.setBytesRead(ParseUtil.parseLongOrDefault(io.getOrDefault("read_bytes", ""), 0L));
proc.setBytesWritten(ParseUtil.parseLongOrDefault(io.getOrDefault("write_bytes", ""), 0L));
// gets the open files count
if (slowFields) {
List<String> openFilesList = ExecutingCommand.runNative(String.format("ls -f /proc/%d/fd", pid));
proc.setOpenFiles(openFilesList.size() - 1L);
// get 5th byte of file for 64-bit check
// https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
byte[] buffer = new byte[5];
if (!path.isEmpty()) {
try (InputStream is = new FileInputStream(path)) {
if (is.read(buffer) == buffer.length) {
proc.setBitness(buffer[4] == 1 ? 32 : 64);
}
} catch (IOException e) {
LOG.warn("Failed to read process file: {}", path);
}
}
}
Map<String, String> status = FileUtil.getKeyValueMapFromFile(String.format("/proc/%d/status", pid), ":");
proc.setName(status.getOrDefault("Name", ""));
proc.setPath(path);
switch (status.getOrDefault("State", "U").charAt(0)) {
case 'R':
proc.setState(OSProcess.State.RUNNING);
break;
case 'S':
proc.setState(OSProcess.State.SLEEPING);
break;
case 'D':
proc.setState(OSProcess.State.WAITING);
break;
case 'Z':
proc.setState(OSProcess.State.ZOMBIE);
break;
case 'T':
proc.setState(OSProcess.State.STOPPED);
break;
default:
proc.setState(OSProcess.State.OTHER);
break;
}
proc.setUserID(ParseUtil.whitespaces.split(status.getOrDefault("Uid", ""))[0]);
proc.setGroupID(ParseUtil.whitespaces.split(status.getOrDefault("Gid", ""))[0]);
OSUser user = userGroupInfo.getUser(proc.getUserID());
if (user != null) {
proc.setUser(user.getUserName());
}
proc.setGroup(userGroupInfo.getGroupName(proc.getGroupID()));
try {
String cwdLink = String.format("/proc/%d/cwd", pid);
String cwd = new File(cwdLink).getCanonicalPath();
if (!cwd.equals(cwdLink)) {
proc.setCurrentWorkingDirectory(cwd);
}
} catch (IOException e) {
LOG.trace("Couldn't find cwd for pid {}: {}", pid, e);
}
return proc;
}
@Override
public OSProcess[] getChildProcesses(int parentPid, int limit, ProcessSort sort) {
List<OSProcess> procs = new ArrayList<>();
File[] procFiles = ProcUtil.getPidFiles();
LinuxUserGroupInfo userGroupInfo = new LinuxUserGroupInfo();
// now for each file (with digit name) get process info
for (File procFile : procFiles) {
int pid = ParseUtil.parseIntOrDefault(procFile.getName(), 0);
if (parentPid == getParentPidFromProcFile(pid)) {
OSProcess proc = getProcess(pid, userGroupInfo, true);
if (proc != null) {
procs.add(proc);
}
}
}
List<OSProcess> sorted = processSort(procs, limit, sort);
return sorted.toArray(new OSProcess[0]);
}
private static int getParentPidFromProcFile(int pid) {
String stat = FileUtil.getStringFromFile(String.format("/proc/%d/stat", pid));
long[] statArray = ParseUtil.parseStringToLongArray(stat, PROC_PID_STAT_ORDERS, PROC_PID_STAT_LENGTH, ' ');
return (int) statArray[ProcPidStat.PPID.ordinal()];
}
@Override
public long getProcessAffinityMask(int processId) {
// Would prefer to use native sched_getaffinity call but variable sizing is
// kernel-dependent and requires C macros, so we use command line instead.
String mask = ExecutingCommand.getFirstAnswer("taskset -p " + processId);
// Output:
// pid 3283's current affinity mask: 3
// pid 9726's current affinity mask: f
String[] split = ParseUtil.whitespaces.split(mask);
try {
return new BigInteger(split[split.length - 1], 16).longValue();
} catch (NumberFormatException e) {
return 0;
}
}
@Override
public int getProcessId() {
return LinuxLibc.INSTANCE.getpid();
}
@Override
public int getProcessCount() {
return ProcUtil.getPidFiles().length;
}
@Override
public int getThreadCount() {
try {
Sysinfo info = new Sysinfo();
if (0 != LibC.INSTANCE.sysinfo(info)) {
LOG.error("Failed to get process thread count. Error code: {}", Native.getLastError());
return 0;
}
return info.procs;
} catch (UnsatisfiedLinkError | NoClassDefFoundError e) {
LOG.error("Failed to get procs from sysinfo. {}", e);
}
return 0;
}
@Override
public long getSystemUptime() {
return (long) ProcUtil.getSystemUptimeSeconds();
}
@Override
public long getSystemBootTime() {
return BOOTTIME;
}
@Override
public NetworkParams getNetworkParams() {
return new LinuxNetworkParams();
}
private String queryFamilyFromReleaseFiles() {
String family;
// There are two competing options for family/version information.
// Newer systems are adopting a standard /etc/os-release file:
// https://www.freedesktop.org/software/systemd/man/os-release.html
//
// Some systems are still using the lsb standard which parses a
// variety of /etc/*-release files and is most easily accessed via
// the commandline lsb_release -a, see here:
// http://linux.die.net/man/1/lsb_release
// In this case, the /etc/lsb-release file (if it exists) has
// optional overrides to the information in the /etc/distrib-release
// files, which show: "Distributor release x.x (Codename)"
//
// Attempt to read /etc/system-release which has more details than
// os-release (on CentOS and Fedora)
if ((family = readDistribRelease("/etc/system-release")) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return family;
}
// Attempt to read /etc/os-release file.
if ((family = readOsRelease()) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return family;
}
// Attempt to execute the `lsb_release` command
if ((family = execLsbRelease()) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return family;
}
// The above options should hopefully work on most
// distributions. If not, we keep having fun.
// Attempt to read /etc/lsb-release file
if ((family = readLsbRelease()) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return family;
}
// If we're still looking, we search for any /etc/*-release (or
// similar) filename, for which the first line should be of the
// "Distributor release x.x (Codename)" format or possibly a
// "Distributor VERSION x.x (Codename)" format
String etcDistribRelease = getReleaseFilename();
if ((family = readDistribRelease(etcDistribRelease)) != null) {
// If successful, we're done. this.family has been set and
// possibly the versionID and codeName
return family;
}
// If we've gotten this far with no match, use the distrib-release
// filename (defaults will eventually give "Unknown")
return filenameToFamily(etcDistribRelease.replace("/etc/", "").replace("release", "").replace("version", "")
.replace("-", "").replace("_", ""));
}
/**
* Attempts to read /etc/os-release
*
* @return the parsed family if the file was successfully read and NAME= found, null otherwise
*/
private String readOsRelease() {
String family = null;
if (new File("/etc/os-release").exists()) {
List<String> osRelease = FileUtil.readFile("/etc/os-release");
// Search for NAME=
for (String line : osRelease) {
if (line.startsWith("VERSION=")) {
LOG.debug("os-release: {}", line);
// remove beginning and ending '"' characters, etc from
// VERSION="14.04.4 LTS, Trusty Tahr" (Ubuntu style)
// or VERSION="17 (Beefy Miracle)" (os-release doc style)
line = line.replace("VERSION=", "").replaceAll("^\"|\"$", "").trim();
String[] split = line.split("[()]");
if (split.length <= 1) {
// If no parentheses, check for Ubuntu's comma format
split = line.split(", ");
}
if (split.length > 0) {
this.versionId = split[0].trim();
}
if (split.length > 1) {
this.codeName = split[1].trim();
}
} else if (line.startsWith("NAME=") && family == null) {
LOG.debug("os-release: {}", line);
// remove beginning and ending '"' characters, etc from
// NAME="Ubuntu"
family = line.replace("NAME=", "").replaceAll("^\"|\"$", "").trim();
} else if (line.startsWith("VERSION_ID=") && this.versionId == null) {
LOG.debug("os-release: {}", line);
// remove beginning and ending '"' characters, etc from
// VERSION_ID="14.04"
this.versionId = line.replace("VERSION_ID=", "").replaceAll("^\"|\"$", "").trim();
}
}
}
return family;
}
/**
* Attempts to execute `lsb_release -a`
*
* @return the parsed family if the command executed successfully and Distributor ID: or
*         Description: found, null otherwise
*/
private String execLsbRelease() {
String family = null;
// If description is of the format Distrib release x.x (Codename)
// that is primary, otherwise use Distributor ID: which returns the
// distribution concatenated, e.g., RedHat instead of Red Hat
for (String line : ExecutingCommand.runNative("lsb_release -a")) {
if (line.startsWith("Description:")) {
LOG.debug("lsb_release -a: {}", line);
line = line.replace("Description:", "").trim();
if (line.contains(" release ")) {
family = parseRelease(line, " release ");
}
} else if (line.startsWith("Distributor ID:") && family == null) {
LOG.debug("lsb_release -a: {}", line);
family = line.replace("Distributor ID:", "").trim();
} else if (line.startsWith("Release:") && this.versionId == null) {
LOG.debug("lsb_release -a: {}", line);
this.versionId = line.replace("Release:", "").trim();
} else if (line.startsWith("Codename:") && this.codeName == null) {
LOG.debug("lsb_release -a: {}", line);
this.codeName = line.replace("Codename:", "").trim();
}
}
return family;
}
/**
* Attempts to read /etc/lsb-release
*
* @return the parsed family if the file was successfully read and DISTRIB_ID or
*         DISTRIB_DESCRIPTION found, null otherwise
*/
private String readLsbRelease() {
String family = null;
if (new File("/etc/lsb-release").exists()) {
List<String> osRelease = FileUtil.readFile("/etc/lsb-release");
// Search for DISTRIB_DESCRIPTION=, DISTRIB_ID=, etc.
for (String line : osRelease) {
if (line.startsWith("DISTRIB_DESCRIPTION=")) {
LOG.debug("lsb-release: {}", line);
line = line.replace("DISTRIB_DESCRIPTION=", "").replaceAll("^\"|\"$", "").trim();
if (line.contains(" release ")) {
family = parseRelease(line, " release ");
}
} else if (line.startsWith("DISTRIB_ID=") && family == null) {
LOG.debug("lsb-release: {}", line);
family = line.replace("DISTRIB_ID=", "").replaceAll("^\"|\"$", "").trim();
} else if (line.startsWith("DISTRIB_RELEASE=") && this.versionId == null) {
LOG.debug("lsb-release: {}", line);
this.versionId = line.replace("DISTRIB_RELEASE=", "").replaceAll("^\"|\"$", "").trim();
} else if (line.startsWith("DISTRIB_CODENAME=") && this.codeName == null) {
LOG.debug("lsb-release: {}", line);
this.codeName = line.replace("DISTRIB_CODENAME=", "").replaceAll("^\"|\"$", "").trim();
}
}
}
return family;
}
/**
* Attempts to read /etc/distrib-release (for some value of distrib)
*
* @return the parsed family if the file was successfully read and " release " or " VERSION " found, null otherwise
*/
private String readDistribRelease(String filename) {
String family = null;
if (new File(filename).exists()) {
List<String> osRelease = FileUtil.readFile(filename);
// Search for Distrib release x.x (Codename)
for (String line : osRelease) {
LOG.debug("{}: {}", filename, line);
if (line.contains(" release ")) {
family = parseRelease(line, " release ");
// If this parses properly we're done
break;
} else if (line.contains(" VERSION ")) {
family = parseRelease(line, " VERSION ");
// If this parses properly we're done
break;
}
}
}
return family;
}
/**
* Helper method to parse version description line style
*
* @param line
* a String of the form "Distributor release x.x (Codename)"
* @param splitLine
* A regex to split on, e.g. " release "
* @return the parsed family (versionID and codeName may have also been set)
*/
private String parseRelease(String line, String splitLine) {
String[] split = line.split(splitLine);
String family = split[0].trim();
if (split.length > 1) {
split = split[1].split("[()]");
if (split.length > 0) {
this.versionId = split[0].trim();
}
if (split.length > 1) {
this.codeName = split[1].trim();
}
}
return family;
}
/**
* Looks for a collection of possible distrib-release filenames
*
* @return The first valid matching filename
*/
protected static String getReleaseFilename() {
// Look for any /etc/*-release, *-version, and variants
File etc = new File("/etc");
// Find any matching *-release or *-version files in that path
File[] matchingFiles = etc.listFiles(//
f -> (f.getName().endsWith("-release") || //
f.getName().endsWith("-version") || //
f.getName().endsWith("_release") || //
f.getName().endsWith("_version")) //
&& !(f.getName().endsWith("os-release") || //
f.getName().endsWith("lsb-release") || //
f.getName().endsWith("system-release")));
if (matchingFiles != null && matchingFiles.length > 0) {
return matchingFiles[0].getPath();
}
if (new File("/etc/release").exists()) {
return "/etc/release";
}
// If all else fails, try this
return "/etc/issue";
}
/**
* Converts a portion of a filename (e.g. the 'redhat' in /etc/redhat-release)
* to a mixed case string representing the family (e.g., Red Hat)
*
* @param name
* Stripped version of filename after removing /etc and -release
* @return Mixed case family
*/
private static String filenameToFamily(String name) {
switch (name.toLowerCase()) {
// Handle known special cases
case "":
return "Solaris";
case "blackcat":
return "Black Cat";
case "bluewhite64":
return "BlueWhite64";
case "e-smith":
return "SME Server";
case "eos":
return "FreeEOS";
case "hlfs":
return "HLFS";
case "lfs":
return "Linux-From-Scratch";
case "linuxppc":
return "Linux-PPC";
case "meego":
return "MeeGo";
case "mandakelinux":
return "Mandrake";
case "mklinux":
return "MkLinux";
case "nld":
return "Novell Linux Desktop";
case "novell":
case "SuSE":
return "SUSE Linux";
case "pld":
return "PLD";
case "redhat":
return "Red Hat Linux";
case "sles":
return "SUSE Linux ES9";
case "sun":
return "Sun JDS";
case "synoinfo":
return "Synology";
case "tinysofa":
return "Tiny Sofa";
case "turbolinux":
return "TurboLinux";
case "ultrapenguin":
return "UltraPenguin";
case "va":
return "VA-Linux";
case "vmware":
return "VMWareESX";
case "yellowdog":
return "Yellow Dog";
// /etc/issue will end up here:
case "issue":
return "Unknown";
// If not a special case just capitalize first letter
default:
return name.substring(0, 1).toUpperCase() + name.substring(1);
}
}
/**
* Gets Jiffies per second, useful for converting ticks to milliseconds and vice
* versa.
*
* @return Jiffies per second.
*/
public static long getHz() {
return USER_HZ;
}
@Override
public OSService[] getServices() {
// Get running services
List<OSService> services = new ArrayList<>();
Set<String> running = new HashSet<>();
for (OSProcess p : getChildProcesses(1, 0, ProcessSort.PID)) {
OSService s = new OSService(p.getName(), p.getProcessID(), RUNNING);
services.add(s);
running.add(p.getName());
}
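// Add enabled systemd units that are not already running as STOPPED services.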
boolean systemctlFound = false;
List<String> systemctl = ExecutingCommand.runNative("systemctl list-unit-files");
for (String str : systemctl) {
String[] split = ParseUtil.whitespaces.split(str);
if (split.length == 2 && split[0].endsWith(".service") && "enabled".equals(split[1])) {
// remove .service extension
String name = split[0].substring(0, split[0].length() - 8);
int index = name.lastIndexOf('.');
String shortName = (index < 0 || index > name.length() - 2) ? name : name.substring(index + 1);
if (!running.contains(name) && !running.contains(shortName)) {
OSService s = new OSService(name, 0, STOPPED);
services.add(s);
systemctlFound = true;
}
}
}
if (!systemctlFound) {
// Get Directories for stopped services
File dir = new File("/etc/init");
if (dir.exists() && dir.isDirectory()) {
for (File f : dir.listFiles((f, name) -> name.toLowerCase().endsWith(".conf"))) {
// remove .conf extension
String name = f.getName().substring(0, f.getName().length() - 5);
int index = name.lastIndexOf('.');
String shortName = (index < 0 || index > name.length() - 2) ? name : name.substring(index + 1);
if (!running.contains(name) && !running.contains(shortName)) {
OSService s = new OSService(name, 0, STOPPED);
services.add(s);
}
}
} else {
LOG.error("Directory: /etc/init does not exist");
}
}
return services.toArray(new OSService[0]);
}
}
|
[
"\"SUDO_COMMAND\""
] |
[] |
[
"SUDO_COMMAND"
] |
[]
|
["SUDO_COMMAND"]
|
java
| 1 | 0 | |
app/app/settings.py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$i(vzh5+hee&fg4yp@8nglgw%ksh1wz995xa0llelm8syb*9-o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
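# Connection parameters come from the environment (DB_HOST, DB_NAME, DB_USER, DB_PASS).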
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/vol/web/static'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
AUTH_USER_MODEL = 'core.User'
|
[] |
[] |
[
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
singa_auto/utils/service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import signal
import traceback
import logging
from datetime import datetime
from singa_auto.utils.log import configure_logging
logger = logging.getLogger(__name__)
curr_time = datetime.now().strftime("%d-%m-%Y_%I-%M-%S_%p")
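# run_worker reads the service identity from the environment, installs SIGINT/SIGTERM
# handlers, and keeps the service's state in the meta store up to date.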
def run_worker(meta_store, start_worker, stop_worker):
service_id = os.environ['SINGA_AUTO_SERVICE_ID']
service_type = os.environ['SINGA_AUTO_SERVICE_TYPE']
container_id = os.environ.get('HOSTNAME', 'localhost')
configure_logging('{}-ServiceID-{}-ContainerID-{}'.format(
curr_time, service_id, container_id))
def _sigterm_handler(_signo, _stack_frame):
logger.warn("Terminal signal received: %s, %s" % (_signo, _stack_frame))
stop_worker()
exit(0)
signal.signal(signal.SIGINT, _sigterm_handler)
signal.signal(signal.SIGTERM, _sigterm_handler)
# Mark service as running in DB
with meta_store:
service = meta_store.get_service(service_id)
meta_store.mark_service_as_running(service)
try:
logger.info('Starting worker "{}" for service of ID "{}"...'.format(
container_id, service_id))
start_worker(service_id, service_type, container_id)
logger.info('Stopping worker...')
stop_worker()
except Exception as e:
logger.error('Error while running worker:')
logger.error(traceback.format_exc())
# Mark service as errored in DB
with meta_store:
service = meta_store.get_service(service_id)
meta_store.mark_service_as_errored(service)
stop_worker()
raise e
|
[] |
[] |
[
"SINGA_AUTO_SERVICE_TYPE",
"SINGA_AUTO_SERVICE_ID",
"HOSTNAME"
] |
[]
|
["SINGA_AUTO_SERVICE_TYPE", "SINGA_AUTO_SERVICE_ID", "HOSTNAME"]
|
python
| 3 | 0 | |
app/app/settings.py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dl4)ayzbvmxmaok1cf!uu!d=c9zplo#s=3q7+u6vn=6^k$x$sb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
|
[] |
[] |
[
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
test/opa/opa_test.go
|
package opa
import (
"os"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/istio-ecosystem/wasm-extensions/test"
opa "github.com/istio-ecosystem/wasm-extensions/test/opa/server"
"istio.io/proxy/test/envoye2e/driver"
"istio.io/proxy/test/envoye2e/env"
"istio.io/proxy/testdata"
)
func TestOPA(t *testing.T) {
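// Table-driven cases covering allow, deny and cache-expiry behaviour of the OPA wasm filter.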
var tests = []struct {
name string
method string
cacheHit int
cacheMiss int
requestCount int
delay time.Duration
wantRespCode int
}{
{"allow", "GET", 9, 1, 10, 0, 200},
{"deny", "POST", 9, 1, 10, 0, 403},
{"cache_expire", "POST", 2, 2, 4, 4 * time.Second, 403},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
params := driver.NewTestParams(t, map[string]string{
"ClientTLSContext": driver.LoadTestData("test/opa/testdata/transport_socket/client_tls_context.yaml.tmpl"),
"ServerTLSContext": driver.LoadTestData("test/opa/testdata/transport_socket/server_tls_context.yaml.tmpl"),
"ServerStaticCluster": driver.LoadTestData("test/opa/testdata/resource/opa_cluster.yaml.tmpl"),
"ServerMetadata": driver.LoadTestData("test/opa/testdata/resource/server_node_metadata.yaml.tmpl"),
"OpaPluginFilePath": filepath.Join(env.GetBazelBinOrDie(), "extensions/open_policy_agent/open_policy_agent.wasm"),
"CacheHit": strconv.Itoa(tt.cacheHit),
"CacheMiss": strconv.Itoa(tt.cacheMiss),
}, test.ExtensionE2ETests)
params.Vars["ServerHTTPFilters"] = params.LoadTestData("test/opa/testdata/resource/opa_filter.yaml.tmpl")
if err := (&driver.Scenario{
Steps: []driver.Step{
&driver.XDS{},
&driver.Update{
Node: "server", Version: "0", Listeners: []string{string(testdata.MustAsset("listener/server.yaml.tmpl"))},
},
&driver.Update{
Node: "client", Version: "0", Listeners: []string{string(testdata.MustAsset("listener/client.yaml.tmpl"))},
},
&opa.OpaServer{RuleFilePath: driver.TestPath("test/opa/testdata/rule/opa_rule.rego")},
&driver.Envoy{
Bootstrap: params.FillTestData(string(testdata.MustAsset("bootstrap/server.yaml.tmpl"))),
DownloadVersion: os.Getenv("ISTIO_TEST_VERSION"),
},
&driver.Envoy{
Bootstrap: params.FillTestData(string(testdata.MustAsset("bootstrap/client.yaml.tmpl"))),
DownloadVersion: os.Getenv("ISTIO_TEST_VERSION"),
},
&driver.Repeat{
N: tt.requestCount,
Step: &driver.Scenario{
Steps: []driver.Step{
&driver.HTTPCall{
Port: params.Ports.ClientPort,
Method: tt.method,
Path: "/echo",
ResponseCode: tt.wantRespCode,
},
&driver.Sleep{Duration: tt.delay},
},
},
},
&driver.Stats{
AdminPort: params.Ports.ServerAdmin,
Matchers: map[string]driver.StatMatcher{
"envoy_wasm_filter_opa_filter_cache_hit_policy_cache_count": &driver.
ExactStat{Metric: "test/opa/testdata/stats/cache_hit.yaml.tmpl"},
"envoy_wasm_filter_opa_filter_cache_miss_policy_cache_count": &driver.
ExactStat{Metric: "test/opa/testdata/stats/cache_miss.yaml.tmpl"},
},
},
}}).Run(params); err != nil {
t.Fatal(err)
}
})
}
}
|
[
"\"ISTIO_TEST_VERSION\"",
"\"ISTIO_TEST_VERSION\""
] |
[] |
[
"ISTIO_TEST_VERSION"
] |
[]
|
["ISTIO_TEST_VERSION"]
|
go
| 1 | 0 | |
bot/constants.py
|
import os
import pathlib
from typing import NamedTuple
import yaml
ENVIRONMENT = os.getenv("ENVIRONMENT")
if ENVIRONMENT is None:
from dotenv import load_dotenv
load_dotenv(dotenv_path=f"{os.getcwd()}/.env")
# env vars
PREFIX = os.getenv("PREFIX", "!")
TOKEN = os.getenv("TOKEN")
BOT_REPO_URL = "https://github.com/gurkult/gurkbot"
DATABASE_URL = os.getenv("DATABASE_URL")
# paths
EXTENSIONS = pathlib.Path("bot/exts/")
LOG_FILE = pathlib.Path("log/gurkbot.log")
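# TEST_GUILDS, when set, is a comma-separated list of guild IDs.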
if TEST_GUILDS := os.getenv("TEST_GUILDS"):
TEST_GUILDS = [int(x) for x in TEST_GUILDS.split(",")]
class Emojis(NamedTuple):
issue_emoji = "<:IssueOpen:794834041450266624>"
issue_closed_emoji = "<:IssueClosed:794834041240289321>"
pull_request_emoji = "<:PROpen:794834041416187935>"
pull_request_closed_emoji = "<:PRClosed:794834041073172501>"
merge_emoji = "<:PRMerged:794834041173704744>"
cucumber_emoji = "\U0001f952"
invalid_emoji = "\u274c"
trashcan = str(os.getenv("EMOJI_TRASHCAN", "<:trash:798179380626587658>"))
confirmation_emoji = "<:confirmation:824252277262123029>"
warning_emoji = "\u26a0"
CHECK_MARK_EMOJI = "\U00002705"
CROSS_MARK_EMOJI = "\U0000274C"
MAG_RIGHT_EMOJI = "\U0001f50e"
class Colours:
green = 0x1F8B4C
yellow = 0xF1C502
soft_red = 0xCD6D6D
class GurkanNameEndings:
name_endings = ["gurk", "gurkan", "urkan"]
class Channels(NamedTuple):
off_topic = int(os.getenv("CHANNEL_OFF_TOPIC", 789198156218892358))
gurkcraft = int(os.getenv("CHANNEL_GURKCRAFT", 878159594189381662))
gurkcraft_relay = int(os.getenv("CHANNEL_GURKCRAFT_RELAY", 932334985053102101))
devalerts = int(os.getenv("CHANNEL_DEVALERTS", 796695123177766982))
devlog = int(os.getenv("CHANNEL_DEVLOG", 789431367167377448))
dev_gurkbot = int(os.getenv("CHANNEL_DEV_GURKBOT", 789295038315495455))
dev_reagurk = int(os.getenv("CHANNEL_DEV_REAGURK", 789241204696416287))
dev_gurklang = int(os.getenv("CHANNEL_DEV_GURKLANG", 789249499800535071))
dev_branding = int(os.getenv("CHANNEL_DEV_BRANDING", 789193817051234306))
log = int(os.getenv("CHANNEL_LOGS", 831432092226158652))
dm_log = int(os.getenv("CHANNEL_LOGS", 833345326675918900))
class Roles(NamedTuple):
gurkans = int(os.getenv("ROLE_GURKANS", 789195552121290823))
steering_council = int(os.getenv("ROLE_STEERING_COUNCIL", 789213682332598302))
moderators = int(os.getenv("ROLE_MODERATORS", 818107766585163808))
gurkult_lords = int(os.getenv("ROLE_GURKULT_LORDS", 789197216869777440))
announcements = int(os.getenv("ANNOUNCEMENTS_ID", 789978290844598272))
polls = int(os.getenv("POLLS_ID", 790043110360350740))
events = int(os.getenv("EVENTS_ID", 890656665328820224))
# Bot replies
with pathlib.Path("bot/resources/bot_replies.yml").open(encoding="utf8") as file:
bot_replies = yaml.safe_load(file)
ERROR_REPLIES = bot_replies["ERROR_REPLIES"]
POSITIVE_REPLIES = bot_replies["POSITIVE_REPLIES"]
NEGATIVE_REPLIES = bot_replies["NEGATIVE_REPLIES"]
# Minecraft Server
class Minecraft(NamedTuple):
server_address = "mc.gurkult.com"
|
[] |
[] |
[
"CHANNEL_DEV_GURKLANG",
"CHANNEL_GURKCRAFT_RELAY",
"ANNOUNCEMENTS_ID",
"CHANNEL_DEV_GURKBOT",
"CHANNEL_DEVALERTS",
"DATABASE_URL",
"TOKEN",
"EVENTS_ID",
"EMOJI_TRASHCAN",
"POLLS_ID",
"CHANNEL_DEVLOG",
"ENVIRONMENT",
"CHANNEL_DEV_BRANDING",
"CHANNEL_OFF_TOPIC",
"PREFIX",
"CHANNEL_DEV_REAGURK",
"ROLE_STEERING_COUNCIL",
"TEST_GUILDS",
"ROLE_MODERATORS",
"ROLE_GURKANS",
"ROLE_GURKULT_LORDS",
"CHANNEL_GURKCRAFT",
"CHANNEL_LOGS"
] |
[]
|
["CHANNEL_DEV_GURKLANG", "CHANNEL_GURKCRAFT_RELAY", "ANNOUNCEMENTS_ID", "CHANNEL_DEV_GURKBOT", "CHANNEL_DEVALERTS", "DATABASE_URL", "TOKEN", "EVENTS_ID", "EMOJI_TRASHCAN", "POLLS_ID", "CHANNEL_DEVLOG", "ENVIRONMENT", "CHANNEL_DEV_BRANDING", "CHANNEL_OFF_TOPIC", "PREFIX", "CHANNEL_DEV_REAGURK", "ROLE_STEERING_COUNCIL", "TEST_GUILDS", "ROLE_MODERATORS", "ROLE_GURKANS", "ROLE_GURKULT_LORDS", "CHANNEL_GURKCRAFT", "CHANNEL_LOGS"]
|
python
| 23 | 0 | |
integrationtest/vm/virtualrouter/ps/test_disable_ps_expunge_vm.py
|
'''
New Integration Test for delete vm under PS disable mode.
@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.header.vm as vm_header
import os
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
ps_uuid = None
host_uuid = None
vr_uuid = None
def test():
global test_obj_dict
global ps_uuid
global host_uuid
global vr_uuid
test_util.test_dsc('Create test vm and check')
test_lib.lib_set_delete_policy('vm', 'Delay')
test_lib.lib_set_delete_policy('volume', 'Delay')
l3_1_name = os.environ.get('l3VlanNetworkName1')
vm = test_stub.create_vlan_vm(l3_name=l3_1_name)
l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
vr = test_lib.lib_find_vr_by_l3_uuid(l3_1.uuid)[0]
vr_uuid = vr.uuid
l3_1 = test_lib.lib_get_l3_by_name(l3_1_name)
host = test_lib.lib_get_vm_host(vm.get_vm())
host_uuid = host.uuid
test_obj_dict.add_vm(vm)
vm.check()
ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
ps_uuid = ps.uuid
ps_ops.change_primary_storage_state(ps_uuid, 'disable')
if not test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, '22', 90):
test_util.test_fail('VM is expected to running when PS change to disable state')
vm.set_state(vm_header.RUNNING)
vm.check()
vm.destroy()
vm.check()
vm.expunge()
vm.check()
ps_ops.change_primary_storage_state(ps_uuid, 'enable')
host_ops.reconnect_host(host_uuid)
vm_ops.reconnect_vr(vr_uuid)
test_lib.lib_set_delete_policy('vm', 'Direct')
test_lib.lib_set_delete_policy('volume', 'Direct')
test_util.test_pass('PS disable mode Test Success')
# Will be called only if an exception happens in test().
def error_cleanup():
global ps_uuid
test_lib.lib_set_delete_policy('vm', 'Direct')
test_lib.lib_set_delete_policy('volume', 'Direct')
if ps_uuid != None:
ps_ops.change_primary_storage_state(ps_uuid, 'enable')
global host_uuid
if host_uuid != None:
host_ops.reconnect_host(host_uuid)
global vr_uuid
if vr_uuid != None:
vm_ops.reconnect_vr(vr_uuid)
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
|
[] |
[] |
[
"l3VlanNetworkName1"
] |
[]
|
["l3VlanNetworkName1"]
|
python
| 1 | 0 | |
cmd/registry-proxy-cache/main.go
|
package main
import (
"fmt"
"os"
dcontext "github.com/docker/distribution/context"
"github.com/docker/distribution/version"
"github.com/octohelm/registry-proxy-cache/pkg/configuration"
"github.com/octohelm/registry-proxy-cache/pkg/registryproxycache"
)
func main() {
// setup context
ctx := dcontext.WithVersion(dcontext.Background(), version.Version)
log := dcontext.GetLogger(ctx)
config, err := resolveConfiguration()
if err != nil {
log.Fatalf("configuration error: %v", err)
os.Exit(1)
}
r, err := registryproxycache.NewRegistryProxyCache(ctx, config)
if err != nil {
log.Fatalln(err)
return
}
if err = r.ListenAndServe(); err != nil {
log.Fatalln(err)
}
}
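// resolveConfiguration loads the registry configuration from REGISTRY_CONFIGURATION_PATH
// and applies any REGISTRY_PROXIES override.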
func resolveConfiguration() (*configuration.Configuration, error) {
var configurationPath string
if p := os.Getenv("REGISTRY_CONFIGURATION_PATH"); p != "" {
configurationPath = p
}
if configurationPath == "" {
return nil, fmt.Errorf("configuration path unspecified")
}
fp, err := os.Open(configurationPath)
if err != nil {
return nil, err
}
defer fp.Close()
config, err := configuration.Parse(fp)
if err != nil {
return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
}
if v := os.Getenv("REGISTRY_PROXIES"); v != "" {
registryProxies, err := configuration.ParseProxies(v)
if err != nil {
return nil, err
}
config.Proxies = registryProxies
}
return config, nil
}
| ["\"REGISTRY_CONFIGURATION_PATH\"", "\"REGISTRY_PROXIES\""] | [] | ["REGISTRY_PROXIES", "REGISTRY_CONFIGURATION_PATH"] | [] | ["REGISTRY_PROXIES", "REGISTRY_CONFIGURATION_PATH"] | go | 2 | 0 | |
server.go
|
package main
import (
"fmt"
"net/http"
"os"
"strings"
log "github.com/sirupsen/logrus"
"github.com/hand-writing-authentication-team/HAPI/controllers"
"github.com/hand-writing-authentication-team/HAPI/queue"
)
type HAPIServerConfig struct {
addr string
server *controllers.ControllerConf
QC *queue.Queue
RQ *queue.ResultQueue
}
func config() HAPIServerConfig {
var conf HAPIServerConfig
port := strings.TrimSpace(os.Getenv("SERVER_PORT"))
if port == "" {
port = "9099"
}
conf.addr = fmt.Sprintf("0.0.0.0:%s", port)
mqHost := strings.TrimSpace(os.Getenv("MQ_HOST"))
mqPort := strings.TrimSpace(os.Getenv("MQ_PORT"))
mqUsername := strings.TrimSpace(os.Getenv("MQ_USER"))
mqPassword := strings.TrimSpace(os.Getenv("MQ_PASSWORD"))
mqQueue := strings.TrimSpace(os.Getenv("QUEUE"))
redisAddr := strings.TrimSpace(os.Getenv("REDIS_ADDR"))
if mqHost == "" || mqPassword == "" || mqPort == "" || mqUsername == "" || mqQueue == "" {
log.Fatal("one of the mq config env is not set!")
os.Exit(1)
}
if redisAddr == "" {
log.Fatal("one of the redis configuration is not set")
os.Exit(1)
}
queueClient, err := queue.NewQueueInstance(mqHost, mqPort, mqUsername, mqPassword, mqQueue)
if err != nil {
os.Exit(1)
}
conf.QC = queueClient
conf.RQ, err = queue.NewRedisClient(redisAddr)
if err != nil {
os.Exit(1)
}
conf.server = controllers.NewServerControllerSet()
conf.server.RQ = conf.RQ
conf.server.QC = conf.QC
return conf
}
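// Illustrative sketch (not part of the original file): a local run might export, e.g.
//
//   SERVER_PORT=9099 MQ_HOST=localhost MQ_PORT=5672 MQ_USER=guest MQ_PASSWORD=guest \
//   QUEUE=hapi REDIS_ADDR=localhost:6379 ./server
//
// The concrete values and binary name are hypothetical; config() only requires that the
// MQ and Redis variables are non-empty and falls back to port 9099 when SERVER_PORT is unset.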
func main() {
serverConf := config()
log.Info("start to start the server")
log.Fatal(http.ListenAndServe(serverConf.addr, serverConf.server.Server))
}
| ["\"SERVER_PORT\"", "\"MQ_HOST\"", "\"MQ_PORT\"", "\"MQ_USER\"", "\"MQ_PASSWORD\"", "\"QUEUE\"", "\"REDIS_ADDR\""] | [] | ["SERVER_PORT", "QUEUE", "MQ_USER", "MQ_HOST", "MQ_PASSWORD", "REDIS_ADDR", "MQ_PORT"] | [] | ["SERVER_PORT", "QUEUE", "MQ_USER", "MQ_HOST", "MQ_PASSWORD", "REDIS_ADDR", "MQ_PORT"] | go | 7 | 0 | |
clients/oauth/client.go
|
package oauth
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"os"
"github.com/electivetechnology/utility-library-go/logger"
)
const AUTH_TOKEN_URL = "/v1/oauth2/authorizations/:state/token"
var log logger.Logging
func init() {
// Add generic logger
log = logger.NewLogger("clients/oauth")
}
type OAuthClient interface {
GetToken(auth Authorization) (Token, error)
RefreshToken(Token Token, clientId string, clientSecret string) (Token, error)
Refresh(refreshToken string) (Token, error)
}
type Client struct {
BaseUrl string
Jwt string
}
func NewClient(jwt string) *Client {
// Get Base URL
url := os.Getenv("OAUTH_HOST")
if url == "" {
url = "http://oauth"
}
return &Client{BaseUrl: url, Jwt: jwt}
}
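// Illustrative sketch (not part of the original file): NewClient falls back to
// "http://oauth" when OAUTH_HOST is unset, so a caller might do
//
//   os.Setenv("OAUTH_HOST", "https://oauth.example.com") // hypothetical host
//   client := NewClient(jwt)
//   token, err := client.GetAccessToken("some-authorization-id") // hypothetical id
//
// to exchange an authorization for an access token.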
func (client *Client) GetAccessToken(auth string) (*AccessToken, error) {
log.Printf("Sending request to Oauth to get new Token for Auth %s", auth)
// Prepare Request
c := &http.Client{}
r, _ := http.NewRequest(http.MethodGet, client.BaseUrl+"/v1/oauth2/authorizations/"+auth+"/token", nil)
r.Header.Add("Authorization", "Bearer "+client.Jwt)
// Send Request
res, err := c.Do(r)
// Check for transport errors from the request
if err != nil {
log.Printf("Error getting Access token: %v", err)
return &AccessToken{}, errors.New("error getting Access token")
}
// read all response body
data, _ := ioutil.ReadAll(res.Body)
// defer closing response body
defer res.Body.Close()
// print `data` as a string
log.Printf("%s", data)
// Success, populate token
if res.StatusCode == http.StatusOK {
token := &AccessToken{}
json.Unmarshal(data, &token)
// Return token
return token, nil
}
// If we got here the exchange returned a non-200 status; return an empty token (no error is propagated)
return &AccessToken{}, nil
}
| ["\"OAUTH_HOST\""] | [] | ["OAUTH_HOST"] | [] | ["OAUTH_HOST"] | go | 1 | 0 | |
main.py
|
import torch
from utils import get_config, get_log_dir, get_cuda
from data_loader import get_loader
from trainer import Trainer
import warnings
warnings.filterwarnings('ignore')
resume = ''
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
# Model hyper-parameters
parser.add_argument('--mode',
type=str,
default='train',
choices=['train', 'val', 'trainval', 'demo'])
parser.add_argument("--gpu_id", type=int, default=-1)
parser.add_argument("--backbone", type=str, default='vgg')
parser.add_argument("--root_dataset",
type=str,
default='./data/Pascal_VOC')
parser.add_argument("--resume", type=str, default='')
parser.add_argument("--fcn",
type=str,
default='32s',
choices=['32s', '16s', '8s', '50', '101'])
opts = parser.parse_args()
# os.environ['CUDA_VISIBLE_DEVICES'] = str(opts.gpu_id)
opts.cuda = get_cuda(torch.cuda.is_available() and opts.gpu_id != -1,
opts.gpu_id)
print('Cuda', opts.cuda)
cfg = get_config()[1]
opts.cfg = cfg
if opts.mode in ['train', 'trainval']:
opts.out = get_log_dir('fcn' + opts.fcn, 1, cfg)
print('Output logs: ', opts.out)
data = get_loader(opts)
trainer = Trainer(data, opts)
if opts.mode == 'val':
trainer.Test()
elif opts.mode == 'demo':
trainer.Demo()
else:
trainer.Train()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
autofit/mapper/link.py
|
import hashlib
import logging
import os
import shutil
from os.path import expanduser
SUB_PATH_LENGTH = 10
AUTOFIT_FOLDER = ".autofit"
logger = logging.getLogger(__file__)
try:
autolens_dir = expanduser(os.environ["SYMDIR"])
except KeyError:
autolens_dir = os.path.join("{}".format(expanduser('~')), "{}".format('.autofit'))
try:
os.mkdir(autolens_dir)
except FileExistsError as ex:
logger.debug(ex)
def path_for(path):
"""
Generate a path in the ~/.autofit (or $SYMDIR) directory by hashing the provided path with SHA-224 and keeping
the first and last five characters of the hex digest.
Parameters
----------
path: str
The path where multinest output is apparently saved
Returns
-------
actual_path: str
The path where multinest output is actually saved
"""
start = int(SUB_PATH_LENGTH / 2)
end = SUB_PATH_LENGTH - start
encoded_string = str(hashlib.sha224(path.encode("utf-8")).hexdigest())
return os.path.join(
"{}".format(autolens_dir),
"al_{}".format(encoded_string[:start] + encoded_string[-end:]).replace('-', '')
)
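# Illustrative sketch (not part of the original module): with SYMDIR unset, a call such as
#   path_for("/tmp/multinest_output")   # hypothetical path
# joins the default ~/.autofit directory with "al_" plus the first and last five characters
# of the SHA-224 hex digest of the path.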
def make_linked_folder(sym_path):
"""
Create a folder in the ~/.autofit (or $SYMDIR) directory and create a sym link to it at the provided path.
If both folders already exist then nothing is changed. If the source folder exists but the destination folder does
not then the source folder is removed and replaced so as to conform to the behaviour that the user would expect
should they delete the sym linked folder.
Parameters
----------
sym_path: str
The path where multinest output is apparently saved
Returns
-------
actual_path: str
The path where multinest output is actually saved
"""
source_path = path_for(sym_path)
if os.path.exists(source_path) and not os.path.exists(sym_path):
logger.debug(
"Source {} exists but target {} does not. Removing source.".format(
source_path, sym_path
)
)
shutil.rmtree(source_path)
try:
logger.debug("Making source {}".format(source_path))
os.mkdir(source_path)
logger.debug("Success")
except FileExistsError as e:
logger.debug(e)
try:
logger.debug(
"Making linking from source {} to sym {}".format(source_path, sym_path)
)
os.symlink(source_path, sym_path)
logger.debug("Success")
except (FileExistsError, IsADirectoryError) as e:
logger.debug("Sym already existed")
logger.debug(e)
return source_path
| [] | [] | ["SYMDIR"] | [] | ["SYMDIR"] | python | 1 | 0 | |
pyvcloud/system_test_framework/base_test.py
|
# VMware vCloud Director Python SDK
# Copyright (c) 2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from pyvcloud.system_test_framework.environment import Environment
import yaml
class BaseTestCase(unittest.TestCase):
_config_file = 'base_config.yaml'
_config_yaml = None
@classmethod
def setUpClass(cls):
if 'VCD_TEST_BASE_CONFIG_FILE' in os.environ:
cls._config_file = os.environ['VCD_TEST_BASE_CONFIG_FILE']
with open(cls._config_file, 'r') as f:
cls._config_yaml = yaml.safe_load(f)
Environment.init(cls._config_yaml)
Environment.attach_vc()
Environment.create_pvdc()
Environment.create_external_network()
Environment.create_org()
Environment.create_users()
Environment.create_ovdc()
Environment.create_direct_ovdc_network()
Environment.create_advanced_gateway()
Environment.create_ovdc_network()
Environment.create_routed_ovdc_network()
Environment.create_catalog()
Environment.share_catalog()
Environment.upload_template()
Environment.upload_media()
@classmethod
def tearDownClass(cls):
Environment.cleanup()
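# Illustrative usage (not part of the original class): the config file can be overridden per
# run, e.g.
#   VCD_TEST_BASE_CONFIG_FILE=/path/to/custom_config.yaml python -m unittest discover
# otherwise setUpClass falls back to 'base_config.yaml'. The path shown is hypothetical.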
| [] | [] | ["VCD_TEST_BASE_CONFIG_FILE"] | [] | ["VCD_TEST_BASE_CONFIG_FILE"] | python | 1 | 0 | |
backend/src/routes.py
|
import os
import datetime
import copy
import flask
import pymongo
from flask_cors import CORS
from jsonschema import validate
from bson.objectid import ObjectId
from src import exceptions
from src.json_response import jsonify
from src.permission import require_permission, get_sub
from src.util import find_data_links
def time_now():
return datetime.datetime.utcnow().replace(microsecond=0).isoformat()+'Z'
def get_routes(db, schemas):
api = flask.Blueprint("api", __name__, url_prefix=os.environ.get('BACKEND_ROOT', '/'))
cors = CORS(api, resources={r"*": {"origins": "*"}})
@api.route('/<resource_type>/<id>', methods=["GET"])
@jsonify
def _get_resource(resource_type, id):
splitted = resource_type.split('_')
if len(splitted) > 1 and splitted[1] == 'cache':
raw_type = splitted[0]
else:
raw_type = resource_type
if raw_type not in schemas.keys():
raise exceptions.NotFoundException('Resource of type \'%s\' not found' % raw_type)
resource = None
if 'slug' in schemas[raw_type]['properties'].keys():
resource = db[resource_type].find_one({'slug': id})
if not resource:
resource = db[resource_type].find_one({'primaryKey.id': id})
if not resource:
raise exceptions.NotFoundException('Resource not found')
return resource, 200
@api.route('/<resource_type>', methods=["GET"])
@jsonify
def _get_resources(resource_type):
"""
Get a list of resources of type `resource_type`; forwards the GET parameters sort, direction, skip and limit to the Mongo query
:param resource_type: The resource type
:return: list of resources
"""
splitted = resource_type.split('_')
if len(splitted) > 1 and splitted[1] == 'cache':
raw_type = splitted[0]
else:
raw_type = resource_type
if raw_type not in schemas.keys():
raise exceptions.NotFoundException('No such resource type exists: \'%s\'' % resource_type)
keyword_arg_keys = ['sort', 'skip', 'limit', 'direction', 'count']
searches = {}
for key in [key for key in flask.request.args.keys() if key not in keyword_arg_keys]:
arg_value = flask.request.args[key]
if arg_value in ['true', 'True']:
arg_value = True
if arg_value in ['false', 'False']:
arg_value = False
searches[key] = arg_value
query = db[resource_type].find(searches)
if 'sort' in flask.request.args.keys():
direction = pymongo.DESCENDING if flask.request.args.get('direction') == 'desc' else pymongo.ASCENDING
query.sort(flask.request.args.get('sort'), direction)
query.skip(int(flask.request.args.get('skip', 0)))
query.limit(int(flask.request.args.get('limit', 0)))
if 'count' in flask.request.args:
return {'count': query.count()}, 200
return list(query), 200
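# Illustrative example (not part of the original routes): a request such as
#   GET /dataset?sort=createdAt&direction=desc&skip=10&limit=5
# (the resource type name is hypothetical) becomes find({}) plus sort('createdAt',
# DESCENDING), skip(10) and limit(5); adding count=1 returns only {'count': <n>}.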
@api.route('/<resource_type>', methods=["POST"])
@require_permission(['write'])
@jsonify
def _create_resources(resource_type):
"""
Save one or more new resources
:param resource_type: The resource type
:return: the saved resource, or {"ok": True} when a list of resources was posted
"""
def add_fields_and_validate(res):
res['createdAt'] = time_now()
res['updatedAt'] = time_now()
res['createdBy'] = get_sub()
res['updatedBy'] = get_sub()
if 'primaryKey' not in res:
res['primaryKey'] = {
'collection': resource_type,
'id': str(ObjectId())
}
validate(res, schemas.get(resource_type))
if db[resource_type].find_one({'primaryKey.id': res['primaryKey']['id']}):
raise exceptions.DuplicatePrimaryKeyException(res['primaryKey']['id'])
if resource_type not in schemas.keys():
raise exceptions.NotFoundException('No such resource type exists: \'%s\'' % resource_type)
data = flask.request.get_json(force=True, silent=True)
if not data or not (isinstance(data, dict) or isinstance(data, list)):
raise exceptions.BadRequestException('Malformed JSON in POST data')
if isinstance(data, dict):
add_fields_and_validate(data)
if 'test' not in flask.request.args:
db[resource_type].insert(data)
return data, 200
else: # list
for resource in data:
add_fields_and_validate(resource)
if 'test' not in flask.request.args:
db[resource_type].insert(resource)
return {"ok": True}, 200
@api.route('/<resource_type>', methods=["PUT"])
@require_permission(['write'])
@jsonify
def _update_or_insert(resource_type):
"""
Update or insert a list of resources
:param resource_type: The resource type
:return: {"ok": True} on success.
"""
if resource_type not in schemas.keys():
raise exceptions.NotFoundException('Resource of type \'%s\' not found' % resource_type)
data = flask.request.get_json(force=True, silent=True)
if not data or not isinstance(data, list):
raise exceptions.BadRequestException('Malformed JSON in PATCH data (expected JSON list)')
resources_to_update = []
resources_to_create = []
for resource in data:
if 'primaryKey' not in resource:
resource['primaryKey'] = {
'collection': resource_type,
'id': str(ObjectId())
}
old_resource = db[resource_type].find_one({'primaryKey.id': resource['primaryKey']['id']})
if not old_resource:
resource['createdAt'] = time_now()
resource['updatedAt'] = time_now()
resource['createdBy'] = get_sub()
resource['updatedBy'] = get_sub()
validate(resource, schemas.get(resource_type))
resources_to_create.append(resource)
else:
updated_resource = {}
updated_resource['createdAt'] = old_resource['createdAt']
updated_resource['createdBy'] = old_resource['createdBy']
updated_resource['primaryKey'] = old_resource['primaryKey']
updated_resource['updatedAt'] = time_now()
updated_resource['updatedBy'] = get_sub()
resource.pop('_id', None)
resource.pop('createdAt', None)
resource.pop('createdBy', None)
resource.pop('updatedAt', None)
resource.pop('updatedBy', None)
resource.pop('primaryKey', None)
# update resource dict with POSTed data
updated_resource.update(resource)
validate(updated_resource, schemas.get(resource_type))
updated_resource['_id'] = old_resource['_id']
resources_to_update.append(updated_resource)
if 'test' not in flask.request.args:
for resource in resources_to_create:
db[resource_type].insert(resource)
for resource in resources_to_update:
db[resource_type].update_one({'_id': resource['_id']}, {'$set': resource})
return {"ok": True}, 200
@api.route('/<resource_type>/<id>', methods=["PUT"])
@require_permission(['write'])
@jsonify
def _update_resource(resource_type, id):
"""
Update a resource, can update whole resource or a subset of fields
:param resource_type: The resource type
:return: the updated resource
"""
if resource_type not in schemas.keys():
raise exceptions.NotFoundException('Resource of type \'%s\' not found' % resource_type)
resource = None
if 'slug' in schemas[resource_type]['properties'].keys():
resource = db[resource_type].find_one({'slug': id})
if not resource:
resource = db[resource_type].find_one({'primaryKey.id': id})
if not resource:
raise exceptions.NotFoundException('Resource not found')
if 'save_history' in flask.request.args:
old_data = copy.deepcopy(resource)
data = flask.request.get_json(force=True, silent=True)
if not data or not isinstance(data, dict):
raise exceptions.BadRequestException('Malformed JSON in PATCH data')
# save keys that cannot be changed on update
resource_id = resource.pop('_id')
# restore keys that cannot be changed on update
data['updatedAt'] = time_now()
data['updatedBy'] = get_sub()
data['createdAt'] = resource.pop('createdAt')
data['createdBy'] = resource.pop('createdBy', 'Unknown')
data['primaryKey'] = resource.pop('primaryKey')
validate(data, schemas.get(resource_type))
# _id is not part of the schema, so restore this after validation
data['_id'] = resource_id
if 'test' not in flask.request.args:
db[resource_type].update_one({'_id': resource_id}, {'$set': data})
if 'save_history' in flask.request.args:
old_data.pop('_id')
db[resource_type+'_history'].insert(old_data)
return data, 200
@api.route('/')
@require_permission(['read'])
@jsonify
def _root():
"""
Get all resources as a dict, excluding commits and software cache
:return: All resources
"""
results = {}
BLACKLIST = {'commit', 'software_cache'}
for resource_type in schemas.keys():
if resource_type not in BLACKLIST:
resource_cursor = db[resource_type].find()
resources = list(resource_cursor)
for resource in resources:
if '_id' in resource:
del resource['_id']
results[resource_type] = resources
return results, 200
@api.route('/schema')
@jsonify
def _schema():
return schemas, 200
@api.route('/<resource_type>/<id>/links')
@jsonify
def _links(resource_type, id):
if resource_type not in schemas.keys():
raise exceptions.NotFoundException('Resource of type \'%s\' not found' % resource_type)
resource = None
if 'slug' in schemas[resource_type]['properties'].keys():
resource = db[resource_type].find_one({'slug': id})
if not resource:
resource = db[resource_type].find_one({'primaryKey.id': id})
if not resource:
raise exceptions.NotFoundException('Resource not found')
return find_data_links(db, schemas, resource_type, id), 200
@api.route('/<resource_type>/<id>', methods=["DELETE"])
@require_permission(['write'])
@jsonify
def _delete(resource_type, id):
if resource_type not in schemas.keys():
raise exceptions.NotFoundException('Resource of type \'%s\' not found' % resource_type)
resource = None
if 'slug' in schemas[resource_type]['properties'].keys():
resource = db[resource_type].find_one({'slug': id})
if not resource:
resource = db[resource_type].find_one({'primaryKey.id': id})
if not resource:
raise exceptions.NotFoundException('Resource not found')
links = find_data_links(db, schemas, resource_type, id)
if links:
raise exceptions.HasLinksException('Cannot delete resource (links exist)', data=links)
db[resource_type].remove({"_id": resource['_id']})
return {"ok": True}, 200
return api
| [] | [] | ["BACKEND_ROOT"] | [] | ["BACKEND_ROOT"] | python | 1 | 0 | |
src/history_test.go
|
package fzf
import (
"io/ioutil"
"os"
"runtime"
"testing"
)
func TestHistory(t *testing.T) {
maxHistory := 50
// Invalid arguments
var paths []string
if runtime.GOOS == "windows" {
// GOPATH should exist, so we shouldn't be able to override it
paths = []string{os.Getenv("GOPATH")}
} else {
paths = []string{"/etc", "/proc"}
}
for _, path := range paths {
if _, e := NewHistory(path, maxHistory); e == nil {
t.Error("Error expected for: " + path)
}
}
f, _ := ioutil.TempFile("", "fzf-history")
f.Close()
{ // Append lines
h, _ := NewHistory(f.Name(), maxHistory)
for i := 0; i < maxHistory+10; i++ {
h.append("foobar")
}
}
{ // Read lines
h, _ := NewHistory(f.Name(), maxHistory)
if len(h.lines) != maxHistory+1 {
t.Errorf("Expected: %d, actual: %d\n", maxHistory+1, len(h.lines))
}
for i := 0; i < maxHistory; i++ {
if h.lines[i] != "foobar" {
t.Error("Expected: foobar, actual: " + h.lines[i])
}
}
}
{ // Append lines
h, _ := NewHistory(f.Name(), maxHistory)
h.append("barfoo")
h.append("")
h.append("foobarbaz")
}
{ // Read lines again
h, _ := NewHistory(f.Name(), maxHistory)
if len(h.lines) != maxHistory+1 {
t.Errorf("Expected: %d, actual: %d\n", maxHistory+1, len(h.lines))
}
compare := func(idx int, exp string) {
if h.lines[idx] != exp {
t.Errorf("Expected: %s, actual: %s\n", exp, h.lines[idx])
}
}
compare(maxHistory-3, "foobar")
compare(maxHistory-2, "barfoo")
compare(maxHistory-1, "foobarbaz")
}
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
examples/NewtonSketch/tune_gaussian.py
|
#! /usr/bin/env python
# GPTune Copyright (c) 2019, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S.Dept. of Energy) and the University of
# California, Berkeley. All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at [email protected].
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights.
# As such, the U.S. Government has been granted for itself and others acting
# on its behalf a paid-up, nonexclusive, irrevocable, worldwide license in
# the Software to reproduce, distribute copies to the public, prepare
# derivative works, and perform publicly and display publicly, and to permit
# other to do so.
#
import sys
import os
import logging
sys.path.insert(0, os.path.abspath(__file__ + "/../../../GPTune/"))
sys.path.insert(0, os.path.abspath(__file__ + "/newtonsketch/"))
from autotune.search import *
from autotune.space import *
from autotune.problem import *
from gptune import *
import argparse
import numpy as np
import time
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from sketches import gaussian, less, sparse_rademacher, srht, rrs, rrs_lev_scores
from sklearn.kernel_approximation import RBFSampler
from solvers_lr import LogisticRegression
import generate_dataset
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-optimization', type=str,default='GPTune', help='Optimization algorithm (opentuner, hpbandster, GPTune)')
parser.add_argument('-dataset', type=str, default='cifar-10', help='Dataset')
parser.add_argument('-nrun', type=int, default=20, help='Number of runs per task')
parser.add_argument('-npilot', type=int, default=10, help='Number of initial runs per task')
args = parser.parse_args()
return args
def objectives(point):
dataset = point['dataset']
sketch = point['sketch']
n = point["n"]
d = point["d"]
m = int(d*point['sketch_size'])
#nnz = point['sparsity_parameter']*point["d"]/point["n"]
lambd = point['lambd']
error_threshold = point['error_threshold']
niter = point['niter']
print ("Dataset: ", dataset, "n: ", n, "d: ", d, "sketch: ", sketch, "lambda: ", lambd, "m: ", m, "error_threshold: ", error_threshold, "niter: ", niter)
times_spent = []
for i in range(niter):
_, losses_, times_ = lreg.ihs_tuning(sketch_size=m, sketch=sketch, nnz=None, error_threshold=error_threshold)
print (losses_)
print (times_)
time_spent = times_[-1]
times_spent.append(time_spent)
loss_final = losses_[-1]
return [times_spent]
def cst1(sketch_size, n, d):
num_sketch_rows = int(d*sketch_size)
return num_sketch_rows >= 1 and num_sketch_rows <= n
def cst2(sparsity_parameter, n, d):
nnzs_per_row = int(sparsity_parameter*d/n*n)
return nnzs_per_row >= 1 and nnzs_per_row <= n
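# Illustrative note (not part of the original script): cst1 bounds the sketch size
# m = int(d * sketch_size) to [1, n]; for example, with n = 10000, d = 2000 and
# sketch_size = 0.05, m = 100, which satisfies the constraint.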
def main():
global nodes
global cores
# Parse command line arguments
args = parse_args()
dataset = args.dataset
nrun = args.nrun
npilot = args.npilot
TUNER_NAME = args.optimization
tuning_metadata = {
"tuning_problem_name": "gaussian-"+dataset,
"machine_configuration": {
"machine_name": "Cori",
"haswell": { "nodes": 1, "cores": 32 }
}
}
(machine, processor, nodes, cores) = GetMachineConfiguration(meta_dict = tuning_metadata)
print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))
os.environ['MACHINE_NAME'] = machine
os.environ['TUNER_NAME'] = TUNER_NAME
global A, b, lreg
if dataset == 'cifar-10':
A, b = generate_dataset.load_data('cifar-10', n=2**14, d=1000)
lambd = 1e-4
error_threshold = 1e-6
elif dataset == 'synthetic_high_coherence_10000_2000':
A, b = generate_dataset.load_data('synthetic_high_coherence', n=10000, d=2000, df=1)
lambd = 1e-4
error_threshold = 1e-6
elif dataset == 'synthetic_high_coherence_20000_2000':
A, b = generate_dataset.load_data('synthetic_high_coherence', n=20000, d=2000, df=1)
lambd = 1e-4
error_threshold = 1e-6
elif dataset == 'synthetic_high_coherence_100000_2000':
A, b = generate_dataset.load_data('synthetic_high_coherence', n=100000, d=2000, df=1)
lambd = 1e-4
error_threshold = 1e-6
elif dataset == "epsilon_normalized_20Kn_spread":
A, b = generate_dataset.load_data('epsilon_normalized_20Kn', option="spread")
lambd = 1e-4
error_threshold = 1e-6
elif dataset == "epsilon_normalized_100Kn_spread":
A, b = generate_dataset.load_data('epsilon_normalized_100Kn', option="spread")
lambd = 1e-4
error_threshold = 1e-6
else:
A, b = generate_dataset.load_data('synthetic_orthogonal')
lambd = 1e-4
error_threshold = 1e-6
n, d = A.shape
niter = 5
lreg = LogisticRegression(A, b, lambd)
x, losses = lreg.solve_exactly(n_iter=20, eps=1e-15)
datasets = Categoricalnorm([dataset], transform="onehot", name="dataset")
sketch = Categoricalnorm(["gaussian"], transform="onehot", name="sketch")
if "susy" in dataset:
sketch_size = Real(1./d, 1000, transform="normalize", name="sketch_size")
elif "synthetic_high_coherence" in dataset:
sketch_size = Real(1./d, 0.1, transform="normalize", name="sketch_size")
elif "epsilon" in dataset:
sketch_size = Real(1./d, 2.0, transform="normalize", name="sketch_size")
else:
sketch_size = Real(1./d, n/d, transform="normalize", name="sketch_size")
wall_clock_time = Real(float("-Inf"), float("Inf"), name="wall_clock_time")
input_space = Space([datasets])
parameter_space = Space([sketch, sketch_size])
output_space = Space([wall_clock_time])
constraints = {"cst1": cst1, "cst2": cst2}
constants={"n":n, "d":d, "lambd":lambd, "error_threshold":error_threshold, "niter":niter, "sparsity_parameter":n/d}
problem = TuningProblem(input_space, parameter_space, output_space, objectives, constraints, None, constants=constants)
historydb = HistoryDB(meta_dict=tuning_metadata)
computer = Computer(nodes=nodes, cores=cores, hosts=None)
options = Options()
options['model_restarts'] = 1
options['distributed_memory_parallelism'] = False
options['shared_memory_parallelism'] = False
options['objective_evaluation_parallelism'] = False
options['objective_multisample_threads'] = 1
options['objective_multisample_processes'] = 1
options['objective_nprocmax'] = 1
options['model_processes'] = 1
options['model_class'] = 'Model_GPy_LCM'
options['verbose'] = False
options['sample_class'] = 'SampleOpenTURNS'
options.validate(computer=computer)
TUNER_NAME = os.environ['TUNER_NAME']
giventask = [[dataset]]
NI=len(giventask)
NS=nrun
if(TUNER_NAME=='GPTune'):
data = Data(problem)
gt = GPTune(problem, computer=computer, data=data, options=options, historydb=historydb, driverabspath=os.path.abspath(__file__))
(data, modeler, stats) = gt.MLA(NS=NS, Igiven=giventask, NI=NI, NS1=npilot)
print("stats: ", stats)
""" Print all input and parameter samples """
for tid in range(NI):
print("tid: %d" % (tid))
print(" t:%s " % (data.I[tid][0]))
print(" Ps ", data.P[tid])
print(" Os ", data.O[tid].tolist())
print(' Popt ', data.P[tid][np.argmin(data.O[tid])], 'Oopt ', min(data.O[tid])[0], 'nth ', np.argmin(data.O[tid]))
if __name__ == "__main__":
main()
| [] | [] | ["TUNER_NAME", "MACHINE_NAME"] | [] | ["TUNER_NAME", "MACHINE_NAME"] | python | 2 | 0 | |
src/syscall/exec_linux_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package syscall_test
import (
"flag"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"unsafe"
)
func isDocker() bool {
_, err := os.Stat("/.dockerenv")
return err == nil
}
func isLXC() bool {
return os.Getenv("container") == "lxc"
}
func skipInContainer(t *testing.T) {
if isDocker() {
t.Skip("skip this test in Docker container")
}
if isLXC() {
t.Skip("skip this test in LXC container")
}
}
func skipNoUserNamespaces(t *testing.T) {
if _, err := os.Stat("/proc/self/ns/user"); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support user namespaces")
}
if os.IsPermission(err) {
t.Skip("unable to test user namespaces due to permissions")
}
t.Fatalf("Failed to stat /proc/self/ns/user: %v", err)
}
}
func skipUnprivilegedUserClone(t *testing.T) {
// Skip the test if the sysctl that prevents unprivileged user
// from creating user namespaces is enabled.
data, errRead := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone")
if errRead != nil || len(data) < 1 || data[0] == '0' {
t.Skip("kernel prohibits user namespace in unprivileged process")
}
}
// Check if we are in a chroot by checking if the inode of / is
// different from 2 (there is no better test available to non-root on
// linux).
func isChrooted(t *testing.T) bool {
root, err := os.Stat("/")
if err != nil {
t.Fatalf("cannot stat /: %v", err)
}
return root.Sys().(*syscall.Stat_t).Ino != 2
}
func checkUserNS(t *testing.T) {
skipInContainer(t)
skipNoUserNamespaces(t)
if isChrooted(t) {
// create_user_ns in the kernel (see
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c)
// forbids the creation of user namespaces when chrooted.
t.Skip("cannot create user namespaces when chrooted")
}
// On some systems, there is a sysctl setting.
if os.Getuid() != 0 {
skipUnprivilegedUserClone(t)
}
// On Centos 7 make sure they set the kernel parameter user_namespace=1
// See issue 16283 and 20796.
if _, err := os.Stat("/sys/module/user_namespace/parameters/enable"); err == nil {
buf, _ := ioutil.ReadFile("/sys/module/user_namespace/parameters/enabled")
if !strings.HasPrefix(string(buf), "Y") {
t.Skip("kernel doesn't support user namespaces")
}
}
// On Centos 7.5+, user namespaces are disabled if user.max_user_namespaces = 0
if _, err := os.Stat("/proc/sys/user/max_user_namespaces"); err == nil {
buf, errRead := ioutil.ReadFile("/proc/sys/user/max_user_namespaces")
if errRead == nil && buf[0] == '0' {
t.Skip("kernel doesn't support user namespaces")
}
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
}
func whoamiCmd(t *testing.T, uid, gid int, setgroups bool) *exec.Cmd {
checkUserNS(t)
cmd := exec.Command("whoami")
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
GidMappingsEnableSetgroups: setgroups,
}
return cmd
}
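// Illustrative note (not part of the original test file): with the mappings above, the
// caller's uid/gid is mapped to container ID 0 inside a new user namespace, so "whoami"
// in the child reports "root" even when the parent is unprivileged; testNEWUSERRemap
// below asserts exactly that.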
func testNEWUSERRemap(t *testing.T, uid, gid int, setgroups bool) {
cmd := whoamiCmd(t, uid, gid, setgroups)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
sout := strings.TrimSpace(string(out))
want := "root"
if sout != want {
t.Fatalf("whoami = %q; want %q", out, want)
}
}
func TestCloneNEWUSERAndRemapRootDisableSetgroups(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("skipping root only test")
}
testNEWUSERRemap(t, 0, 0, false)
}
func TestCloneNEWUSERAndRemapRootEnableSetgroups(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("skipping root only test")
}
testNEWUSERRemap(t, 0, 0, true)
}
func TestCloneNEWUSERAndRemapNoRootDisableSetgroups(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("skipping unprivileged user only test")
}
testNEWUSERRemap(t, os.Getuid(), os.Getgid(), false)
}
func TestCloneNEWUSERAndRemapNoRootSetgroupsEnableSetgroups(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("skipping unprivileged user only test")
}
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), true)
err := cmd.Run()
if err == nil {
t.Skip("probably old kernel without security fix")
}
if !os.IsPermission(err) {
t.Fatalf("Unprivileged gid_map rewriting with GidMappingsEnableSetgroups must fail")
}
}
func TestEmptyCredGroupsDisableSetgroups(t *testing.T) {
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), false)
cmd.SysProcAttr.Credential = &syscall.Credential{}
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
}
func TestUnshare(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
path := "/proc/net/dev"
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support proc filesystem")
}
if os.IsPermission(err) {
t.Skip("unable to test proc filesystem due to permissions")
}
t.Fatal(err)
}
if _, err := os.Stat("/proc/self/ns/net"); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support net namespace")
}
t.Fatal(err)
}
orig, err := ioutil.ReadFile(path)
if err != nil {
t.Fatal(err)
}
origLines := strings.Split(strings.TrimSpace(string(orig)), "\n")
cmd := exec.Command("cat", path)
cmd.SysProcAttr = &syscall.SysProcAttr{
Unshareflags: syscall.CLONE_NEWNET,
}
out, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), "operation not permitted") {
// Issue 17206: despite all the checks above,
// this still reportedly fails for some users.
// (older kernels?). Just skip.
t.Skip("skipping due to permission error")
}
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
// Check there is only the local network interface
sout := strings.TrimSpace(string(out))
if !strings.Contains(sout, "lo:") {
t.Fatalf("Expected lo network interface to exist, got %s", sout)
}
lines := strings.Split(sout, "\n")
if len(lines) >= len(origLines) {
t.Fatalf("Got %d lines of output, want <%d", len(lines), len(origLines))
}
}
func TestGroupCleanup(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
cmd := exec.Command("id")
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: 0,
Gid: 0,
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
expected := "uid=0(root) gid=0(root)"
// Just check prefix because some distros reportedly output a
// context parameter; see https://golang.org/issue/16224.
// Alpine does not output groups; see https://golang.org/issue/19938.
if !strings.HasPrefix(strOut, expected) {
t.Errorf("id command output: %q, expected prefix: %q", strOut, expected)
}
}
func TestGroupCleanupUserNamespace(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
checkUserNS(t)
cmd := exec.Command("id")
uid, gid := os.Getuid(), os.Getgid()
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
// Strings we've seen in the wild.
expected := []string{
"uid=0(root) gid=0(root) groups=0(root)",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody)",
"uid=0(root) gid=0(root) groups=0(root),65534(nogroup)",
"uid=0(root) gid=0(root) groups=0(root),65534",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody)", // Alpine; see https://golang.org/issue/19938
"uid=0(root) gid=0(root) groups=0(root) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023", // CentOS with SELinux context, see https://golang.org/issue/34547
}
for _, e := range expected {
if strOut == e {
return
}
}
t.Errorf("id command output: %q, expected one of %q", strOut, expected)
}
// TestUnshareHelperProcess isn't a real test. It's used as a helper process
// for TestUnshareMountNameSpace.
func TestUnshareMountNameSpaceHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
if err := syscall.Mount("none", flag.Args()[0], "proc", 0, ""); err != nil {
fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %v", os.Args, err)
os.Exit(2)
}
}
// Test for Issue 38471: unshare fails because systemd has forced / to be shared
func TestUnshareMountNameSpace(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
d, err := ioutil.TempDir("", "unshare")
if err != nil {
t.Fatalf("tempdir: %v", err)
}
cmd := exec.Command(os.Args[0], "-test.run=TestUnshareMountNameSpaceHelper", d)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS}
o, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), ": permission denied") {
t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
}
t.Fatalf("unshare failed: %s, %v", o, err)
}
// How do we tell if the namespace was really unshared? It turns out
// to be simple: just try to remove the directory. If it's still mounted
// on, the rm will fail with EBUSY. Then we have some cleanup to do:
// we must unmount it, then try to remove it again.
if err := os.Remove(d); err != nil {
t.Errorf("rmdir failed on %v: %v", d, err)
if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
t.Errorf("Can't unmount %v: %v", d, err)
}
if err := os.Remove(d); err != nil {
t.Errorf("rmdir after unmount failed on %v: %v", d, err)
}
}
}
// Test for Issue 20103: unshare fails when chroot is used
func TestUnshareMountNameSpaceChroot(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
d, err := ioutil.TempDir("", "unshare")
if err != nil {
t.Fatalf("tempdir: %v", err)
}
// Since we are doing a chroot, we need the binary there,
// and it must be statically linked.
x := filepath.Join(d, "syscall.test")
cmd := exec.Command(testenv.GoToolPath(t), "test", "-c", "-o", x, "syscall")
cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
if o, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("Build of syscall in chroot failed, output %v, err %v", o, err)
}
cmd = exec.Command("/syscall.test", "-test.run=TestUnshareMountNameSpaceHelper", "/")
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: d, Unshareflags: syscall.CLONE_NEWNS}
o, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), ": permission denied") {
t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
}
t.Fatalf("unshare failed: %s, %v", o, err)
}
// How do we tell if the namespace was really unshared? It turns out
// to be simple: just try to remove the executable. If it's still mounted
// on, the rm will fail. Then we have some cleanup to do:
// we must force unmount it, then try to remove it again.
if err := os.Remove(x); err != nil {
t.Errorf("rm failed on %v: %v", x, err)
if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
t.Fatalf("Can't unmount %v: %v", d, err)
}
if err := os.Remove(x); err != nil {
t.Fatalf("rm failed on %v: %v", x, err)
}
}
if err := os.Remove(d); err != nil {
t.Errorf("rmdir failed on %v: %v", d, err)
}
}
func TestUnshareUidGidMappingHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
if err := syscall.Chroot(os.TempDir()); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
}
// Test for Issue 29789: unshare fails when uid/gid mapping is specified
func TestUnshareUidGidMapping(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("test exercises unprivileged user namespace, fails with privileges")
}
checkUserNS(t)
cmd := exec.Command(os.Args[0], "-test.run=TestUnshareUidGidMappingHelper")
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.SysProcAttr = &syscall.SysProcAttr{
Unshareflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER,
GidMappingsEnableSetgroups: false,
UidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: syscall.Getuid(),
Size: 1,
},
},
GidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: syscall.Getgid(),
Size: 1,
},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
}
type capHeader struct {
version uint32
pid int32
}
type capData struct {
effective uint32
permitted uint32
inheritable uint32
}
const CAP_SYS_TIME = 25
const CAP_SYSLOG = 34
type caps struct {
hdr capHeader
data [2]capData
}
func getCaps() (caps, error) {
var c caps
// Get capability version
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(nil)), 0); errno != 0 {
return c, fmt.Errorf("SYS_CAPGET: %v", errno)
}
// Get current capabilities
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(&c.data[0])), 0); errno != 0 {
return c, fmt.Errorf("SYS_CAPGET: %v", errno)
}
return c, nil
}
func mustSupportAmbientCaps(t *testing.T) {
var uname syscall.Utsname
if err := syscall.Uname(&uname); err != nil {
t.Fatalf("Uname: %v", err)
}
var buf [65]byte
for i, b := range uname.Release {
buf[i] = byte(b)
}
ver := string(buf[:])
if i := strings.Index(ver, "\x00"); i != -1 {
ver = ver[:i]
}
if strings.HasPrefix(ver, "2.") ||
strings.HasPrefix(ver, "3.") ||
strings.HasPrefix(ver, "4.1.") ||
strings.HasPrefix(ver, "4.2.") {
t.Skipf("kernel version %q predates required 4.3; skipping test", ver)
}
}
// TestAmbientCapsHelper isn't a real test. It's used as a helper process for
// TestAmbientCaps.
func TestAmbientCapsHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
caps, err := getCaps()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if caps.data[0].effective&(1<<uint(CAP_SYS_TIME)) == 0 {
fmt.Fprintln(os.Stderr, "CAP_SYS_TIME unexpectedly not in the effective capability mask")
os.Exit(2)
}
if caps.data[1].effective&(1<<uint(CAP_SYSLOG&31)) == 0 {
fmt.Fprintln(os.Stderr, "CAP_SYSLOG unexpectedly not in the effective capability mask")
os.Exit(2)
}
}
func TestAmbientCaps(t *testing.T) {
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
testAmbientCaps(t, false)
}
func TestAmbientCapsUserns(t *testing.T) {
checkUserNS(t)
testAmbientCaps(t, true)
}
func testAmbientCaps(t *testing.T, userns bool) {
skipInContainer(t)
mustSupportAmbientCaps(t)
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
skipUnprivilegedUserClone(t)
// skip on android, due to lack of lookup support
if runtime.GOOS == "android" {
t.Skip("skipping test on android; see Issue 27327")
}
u, err := user.Lookup("nobody")
if err != nil {
t.Fatal(err)
}
uid, err := strconv.ParseInt(u.Uid, 0, 32)
if err != nil {
t.Fatal(err)
}
gid, err := strconv.ParseInt(u.Gid, 0, 32)
if err != nil {
t.Fatal(err)
}
// Copy the test binary to a temporary location which is readable by nobody.
f, err := ioutil.TempFile("", "gotest")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
defer f.Close()
e, err := os.Open(os.Args[0])
if err != nil {
t.Fatal(err)
}
defer e.Close()
if _, err := io.Copy(f, e); err != nil {
t.Fatal(err)
}
if err := f.Chmod(0755); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
cmd := exec.Command(f.Name(), "-test.run=TestAmbientCapsHelper")
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
AmbientCaps: []uintptr{CAP_SYS_TIME, CAP_SYSLOG},
}
if userns {
cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER
const nobody = 65534
uid := os.Getuid()
gid := os.Getgid()
cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{
ContainerID: int(nobody),
HostID: int(uid),
Size: int(1),
}}
cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{{
ContainerID: int(nobody),
HostID: int(gid),
Size: int(1),
}}
// Set credentials to run as user and group nobody.
cmd.SysProcAttr.Credential = &syscall.Credential{
Uid: nobody,
Gid: nobody,
}
}
if err := cmd.Run(); err != nil {
t.Fatal(err.Error())
}
}
| ["\"container\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\"", "\"GO_WANT_HELPER_PROCESS\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\"", "\"GO_WANT_HELPER_PROCESS\"", "\"GO_WANT_HELPER_PROCESS\"", "\"GO_BUILDER_NAME\"", "\"IN_KUBERNETES\""] | [] | ["GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "IN_KUBERNETES", "container"] | [] | ["GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "IN_KUBERNETES", "container"] | go | 4 | 0 | |
figtree.go
|
package figtree
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"unicode"
"github.com/fatih/camelcase"
"github.com/pkg/errors"
"gopkg.in/yaml.v3"
)
type Logger interface {
Debugf(format string, args ...interface{})
}
type nullLogger struct{}
func (*nullLogger) Debugf(string, ...interface{}) {}
var Log Logger = &nullLogger{}
func defaultApplyChangeSet(changeSet map[string]*string) error {
for k, v := range changeSet {
if v != nil {
os.Setenv(k, *v)
} else {
os.Unsetenv(k)
}
}
return nil
}
type Option func(*FigTree)
func WithHome(home string) Option {
return func(f *FigTree) {
f.home = home
}
}
func WithCwd(cwd string) Option {
return func(f *FigTree) {
f.workDir = cwd
}
}
func WithEnvPrefix(env string) Option {
return func(f *FigTree) {
f.envPrefix = env
}
}
func WithConfigDir(dir string) Option {
return func(f *FigTree) {
f.configDir = dir
}
}
type ChangeSetFunc func(map[string]*string) error
func WithApplyChangeSet(apply ChangeSetFunc) Option {
return func(f *FigTree) {
f.applyChangeSet = apply
}
}
type PreProcessor func([]byte) ([]byte, error)
func WithPreProcessor(pp PreProcessor) Option {
return func(f *FigTree) {
f.preProcessor = pp
}
}
type FilterOut func([]byte) bool
func WithFilterOut(filt FilterOut) Option {
return func(f *FigTree) {
f.filterOut = filt
}
}
func defaultFilterOut(f *FigTree) FilterOut {
// looking for:
// ```
// config:
// stop: true|false
// ```
configStop := struct {
Config struct {
Stop bool `json:"stop" yaml:"stop"`
} `json:"config" yaml:"config"`
}{}
return func(config []byte) bool {
// if previous parse found a stop we should abort here
if configStop.Config.Stop {
return true
}
// now check if current doc has a stop
f.unmarshal(config, &configStop)
// even if current doc has a stop, we should continue to
// process it, we don't want to process the "next" document
return false
}
}
func WithUnmarshaller(unmarshaller func(in []byte, out interface{}) error) Option {
return func(f *FigTree) {
f.unmarshal = unmarshaller
}
}
func WithoutExec() Option {
return func(f *FigTree) {
f.exec = false
}
}
type FigTree struct {
home string
workDir string
configDir string
envPrefix string
preProcessor PreProcessor
applyChangeSet ChangeSetFunc
exec bool
filterOut FilterOut
unmarshal func(in []byte, out interface{}) error
}
func NewFigTree(opts ...Option) *FigTree {
wd, _ := os.Getwd()
fig := &FigTree{
home: os.Getenv("HOME"),
workDir: wd,
envPrefix: "FIGTREE",
applyChangeSet: defaultApplyChangeSet,
exec: true,
unmarshal: yaml.Unmarshal,
}
for _, opt := range opts {
opt(fig)
}
return fig
}
func (f *FigTree) WithHome(home string) {
WithHome(home)(f)
}
func (f *FigTree) WithCwd(cwd string) {
WithCwd(cwd)(f)
}
func (f *FigTree) WithEnvPrefix(env string) {
WithEnvPrefix(env)(f)
}
func (f *FigTree) WithConfigDir(dir string) {
WithConfigDir(dir)(f)
}
func (f *FigTree) WithPreProcessor(pp PreProcessor) {
WithPreProcessor(pp)(f)
}
func (f *FigTree) WithFilterOut(filt FilterOut) {
WithFilterOut(filt)(f)
}
func (f *FigTree) WithUnmarshaller(unmarshaller func(in []byte, out interface{}) error) {
WithUnmarshaller(unmarshaller)(f)
}
func (f *FigTree) WithApplyChangeSet(apply ChangeSetFunc) {
WithApplyChangeSet(apply)(f)
}
func (f *FigTree) WithIgnoreChangeSet() {
WithApplyChangeSet(func(_ map[string]*string) error {
return nil
})(f)
}
func (f *FigTree) WithoutExec() {
WithoutExec()(f)
}
func (f *FigTree) Copy() *FigTree {
cp := *f
return &cp
}
func (f *FigTree) LoadAllConfigs(configFile string, options interface{}) error {
if f.configDir != "" {
configFile = path.Join(f.configDir, configFile)
}
paths := FindParentPaths(f.home, f.workDir, configFile)
paths = append([]string{fmt.Sprintf("/etc/%s", configFile)}, paths...)
configSources := []ConfigSource{}
// iterate paths in reverse
for i := len(paths) - 1; i >= 0; i-- {
file := paths[i]
cs, err := f.ReadFile(file)
if err != nil {
return err
}
if cs == nil {
// no file contents to parse, file likely does not exist
continue
}
configSources = append(configSources, *cs)
}
return f.LoadAllConfigSources(configSources, options)
}
type ConfigSource struct {
Config []byte
Filename string
}
func (f *FigTree) LoadAllConfigSources(sources []ConfigSource, options interface{}) error {
m := NewMerger()
filterOut := f.filterOut
if filterOut == nil {
filterOut = defaultFilterOut(f)
}
for _, source := range sources {
// automatically skip empty configs
if len(source.Config) == 0 {
continue
}
skip := filterOut(source.Config)
if skip {
continue
}
m.sourceFile = source.Filename
err := f.loadConfigBytes(m, source.Config, options)
if err != nil {
return err
}
m.advance()
}
return nil
}
func (f *FigTree) LoadConfigBytes(config []byte, source string, options interface{}) error {
m := NewMerger(WithSourceFile(source))
return f.loadConfigBytes(m, config, options)
}
func (f *FigTree) loadConfigBytes(m *Merger, config []byte, options interface{}) error {
if !reflect.ValueOf(options).IsValid() {
return fmt.Errorf("options argument [%#v] is not valid", options)
}
var err error
if f.preProcessor != nil {
config, err = f.preProcessor(config)
if err != nil {
return errors.Wrapf(err, "Failed to process config file: %s", m.sourceFile)
}
}
tmp := reflect.New(reflect.ValueOf(options).Elem().Type()).Interface()
// look for config settings first
err = f.unmarshal(config, m)
if err != nil {
return errors.Wrapf(err, "Unable to parse %s", m.sourceFile)
}
// then parse document into requested struct
err = f.unmarshal(config, tmp)
if err != nil {
return errors.Wrapf(err, "Unable to parse %s", m.sourceFile)
}
m.setSource(reflect.ValueOf(tmp))
m.mergeStructs(
reflect.ValueOf(options),
reflect.ValueOf(tmp),
)
changeSet := f.PopulateEnv(options)
return f.applyChangeSet(changeSet)
}
func (f *FigTree) LoadConfig(file string, options interface{}) error {
cs, err := f.ReadFile(file)
if err != nil {
return err
}
if cs == nil {
// no file contents to parse, file likely does not exist
return nil
}
return f.LoadConfigBytes(cs.Config, cs.Filename, options)
}
// ReadFile will return a ConfigSource for the given file path. If the
// file is executable (and WithoutExec was not used), it will execute
// the file and return the stdout; otherwise it will return the file
// contents directly.
func (f *FigTree) ReadFile(file string) (*ConfigSource, error) {
rel, err := filepath.Rel(f.workDir, file)
if err != nil {
rel = file
}
if stat, err := os.Stat(file); err == nil {
if stat.Mode()&0111 == 0 || !f.exec {
Log.Debugf("Reading config %s", file)
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, errors.Wrapf(err, "Failed to read %s", rel)
}
return &ConfigSource{
Config: data,
Filename: rel,
}, nil
} else {
Log.Debugf("Found Executable Config file: %s", file)
// it is executable, so run it and try to parse the output
cmd := exec.Command(file)
stdout := bytes.NewBufferString("")
cmd.Stdout = stdout
cmd.Stderr = bytes.NewBufferString("")
if err := cmd.Run(); err != nil {
return nil, errors.Wrapf(err, "%s is exectuable, but it failed to execute:\n%s", file, cmd.Stderr)
}
return &ConfigSource{
Config: stdout.Bytes(),
Filename: rel,
}, nil
}
}
return nil, nil
}
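// Illustrative sketch (not part of the original file): given an executable config such as
//
//   #!/bin/sh
//   echo "config: {}"
//
// ReadFile runs it and parses its stdout as the configuration, while a non-executable
// file (or a FigTree built WithoutExec) is read verbatim. The script contents are hypothetical.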
func FindParentPaths(homedir, cwd, fileName string) []string {
paths := make([]string, 0)
if filepath.IsAbs(fileName) {
// don't recursively look for files when fileName is an abspath
_, err := os.Stat(fileName)
if err == nil {
paths = append(paths, fileName)
}
return paths
}
// special case: if homedir is not in the current path, check there anyway
if !strings.HasPrefix(cwd, homedir) {
file := path.Join(homedir, fileName)
if _, err := os.Stat(file); err == nil {
paths = append(paths, filepath.FromSlash(file))
}
}
var dir string
for _, part := range strings.Split(cwd, string(os.PathSeparator)) {
if part == "" && dir == "" {
dir = "/"
} else {
dir = path.Join(dir, part)
}
file := path.Join(dir, fileName)
if _, err := os.Stat(file); err == nil {
paths = append(paths, filepath.FromSlash(file))
}
}
return paths
}
func (f *FigTree) FindParentPaths(fileName string) []string {
return FindParentPaths(f.home, f.workDir, fileName)
}
var camelCaseWords = regexp.MustCompile("[0-9A-Za-z]+")
func camelCase(name string) string {
words := camelCaseWords.FindAllString(name, -1)
for i, word := range words {
words[i] = strings.Title(word)
}
return strings.Join(words, "")
}
type Merger struct {
sourceFile string
preserveMap map[string]struct{}
Config ConfigOptions `json:"config,omitempty" yaml:"config,omitempty"`
ignore []string
}
type MergeOption func(*Merger)
func WithSourceFile(source string) MergeOption {
return func(m *Merger) {
m.sourceFile = source
}
}
func PreserveMap(keys ...string) MergeOption {
return func(m *Merger) {
for _, key := range keys {
m.preserveMap[key] = struct{}{}
}
}
}
func NewMerger(options ...MergeOption) *Merger {
m := &Merger{
sourceFile: "merge",
preserveMap: make(map[string]struct{}),
}
for _, opt := range options {
opt(m)
}
return m
}
// advance will move all the current overwrite properties to
// the ignore properties, then reset the overwrite properties.
// This is used after a document has been processed so the next
// document does not modify overwritten fields.
func (m *Merger) advance() {
for _, overwrite := range m.Config.Overwrite {
found := false
for _, ignore := range m.ignore {
if ignore == overwrite {
found = true
break
}
}
if !found {
m.ignore = append(m.ignore, overwrite)
}
}
m.Config.Overwrite = nil
}
// Merge will attempt to merge the data from src into dst. They should be either both maps or both structs.
// The structs do not need to have the same structure, but any field name that exists in both
// structs must be of the same type.
func Merge(dst, src interface{}) {
m := NewMerger()
m.mergeStructs(reflect.ValueOf(dst), reflect.ValueOf(src))
}
// MakeMergeStruct will take multiple structs and return a pointer to a zero value for the
// anonymous struct that has all the public fields from all the structs merged into one struct.
// If there are multiple structs with the same field names, the first appearance of that name
// will be used.
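// Illustrative sketch (the A and B types are assumptions):
//
//	type A struct{ Name string }
//	type B struct{ Count int }
//	merged := MakeMergeStruct(&A{}, &B{})
//	// merged points to a zero value of an anonymous struct that has
//	// both the Name and Count fields.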
func MakeMergeStruct(structs ...interface{}) interface{} {
m := NewMerger()
return m.MakeMergeStruct(structs...)
}
func (m *Merger) MakeMergeStruct(structs ...interface{}) interface{} {
values := []reflect.Value{}
for _, data := range structs {
values = append(values, reflect.ValueOf(data))
}
return m.makeMergeStruct(values...).Interface()
}
func inlineField(field reflect.StructField) bool {
if tag := field.Tag.Get("figtree"); tag != "" {
return strings.HasSuffix(tag, ",inline")
}
if tag := field.Tag.Get("yaml"); tag != "" {
return strings.HasSuffix(tag, ",inline")
}
return false
}
func (m *Merger) makeMergeStruct(values ...reflect.Value) reflect.Value {
foundFields := map[string]reflect.StructField{}
for i := 0; i < len(values); i++ {
v := values[i]
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
typ := v.Type()
var field reflect.StructField
if typ.Kind() == reflect.Struct {
for j := 0; j < typ.NumField(); j++ {
field = typ.Field(j)
if field.PkgPath != "" {
// unexported field, skip
continue
}
field.Name = canonicalFieldName(field)
if f, ok := foundFields[field.Name]; ok {
if f.Type.Kind() == reflect.Struct && field.Type.Kind() == reflect.Struct {
if fName, fieldName := f.Type.Name(), field.Type.Name(); fName == "" || fieldName == "" || fName != fieldName {
// we have 2 fields with the same name and they are both structs, so we need
// to merge the existing struct with the new one in case they are different
newval := m.makeMergeStruct(reflect.New(f.Type).Elem(), reflect.New(field.Type).Elem()).Elem()
f.Type = newval.Type()
foundFields[field.Name] = f
}
}
// field already found, skip
continue
}
if inlineField(field) {
// insert inline after this value, it will have a higher
// "type" priority than later values
values = append(values[:i+1], append([]reflect.Value{v.Field(j)}, values[i+1:]...)...)
continue
}
foundFields[field.Name] = field
}
} else if typ.Kind() == reflect.Map {
for _, key := range v.MapKeys() {
keyval := reflect.ValueOf(v.MapIndex(key).Interface())
if _, ok := m.preserveMap[key.String()]; !ok {
if keyval.Kind() == reflect.Ptr && keyval.Elem().Kind() == reflect.Map {
keyval = m.makeMergeStruct(keyval.Elem())
} else if keyval.Kind() == reflect.Map {
keyval = m.makeMergeStruct(keyval).Elem()
}
}
var t reflect.Type
if !keyval.IsValid() {
// this nonsense is to create a generic `interface{}` type. There is
// probably an easier way to do this, but it eludes me at the moment.
var dummy interface{}
t = reflect.ValueOf(&dummy).Elem().Type()
} else {
t = reflect.ValueOf(keyval.Interface()).Type()
}
field = reflect.StructField{
Name: camelCase(key.String()),
Type: t,
Tag: reflect.StructTag(fmt.Sprintf(`json:"%s" yaml:"%s"`, key.String(), key.String())),
}
if f, ok := foundFields[field.Name]; ok {
if f.Type.Kind() == reflect.Struct && t.Kind() == reflect.Struct {
if fName, tName := f.Type.Name(), t.Name(); fName == "" || tName == "" || fName != tName {
// we have 2 fields with the same name and they are both structs, so we need
// to merge the existing struct with the new one in case they are different
newval := m.makeMergeStruct(reflect.New(f.Type).Elem(), reflect.New(t).Elem()).Elem()
f.Type = newval.Type()
foundFields[field.Name] = f
}
}
// field already found, skip
continue
}
foundFields[field.Name] = field
}
}
}
fields := []reflect.StructField{}
for _, value := range foundFields {
fields = append(fields, value)
}
sort.Slice(fields, func(i, j int) bool {
return fields[i].Name < fields[j].Name
})
newType := reflect.StructOf(fields)
return reflect.New(newType)
}
func (m *Merger) mapToStruct(src reflect.Value) reflect.Value {
if src.Kind() != reflect.Map {
return reflect.Value{}
}
dest := m.makeMergeStruct(src)
if dest.Kind() == reflect.Ptr {
dest = dest.Elem()
}
for _, key := range src.MapKeys() {
structFieldName := camelCase(key.String())
keyval := reflect.ValueOf(src.MapIndex(key).Interface())
// skip invalid (ie nil) key values
if !keyval.IsValid() {
continue
}
if keyval.Kind() == reflect.Ptr && keyval.Elem().Kind() == reflect.Map {
keyval = m.mapToStruct(keyval.Elem()).Addr()
m.mergeStructs(dest.FieldByName(structFieldName), reflect.ValueOf(keyval.Interface()))
} else if keyval.Kind() == reflect.Map {
keyval = m.mapToStruct(keyval)
m.mergeStructs(dest.FieldByName(structFieldName), reflect.ValueOf(keyval.Interface()))
} else {
dest.FieldByName(structFieldName).Set(reflect.ValueOf(keyval.Interface()))
}
}
return dest
}
func structToMap(src reflect.Value) reflect.Value {
if src.Kind() != reflect.Struct {
return reflect.Value{}
}
dest := reflect.ValueOf(map[string]interface{}{})
typ := src.Type()
for i := 0; i < typ.NumField(); i++ {
structField := typ.Field(i)
if structField.PkgPath != "" {
// skip private fields
continue
}
name := yamlFieldName(structField)
dest.SetMapIndex(reflect.ValueOf(name), src.Field(i))
}
return dest
}
type ConfigOptions struct {
Overwrite []string `json:"overwrite,omitempty" yaml:"overwrite,omitempty"`
}
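// yamlFieldName returns the YAML key for a struct field: the first element of
// the `yaml` tag when present, otherwise the field name split on camel case
// and lower-cased, so "FooBar" becomes "foo-bar".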
func yamlFieldName(sf reflect.StructField) string {
if tag, ok := sf.Tag.Lookup("yaml"); ok {
// with yaml:"foobar,omitempty"
// we just want the "foobar" part
parts := strings.Split(tag, ",")
if parts[0] != "" && parts[0] != "-" {
return parts[0]
}
}
// guess the field name from reversing camel case
// so "FooBar" becomes "foo-bar"
parts := camelcase.Split(sf.Name)
for i := range parts {
parts[i] = strings.ToLower(parts[i])
}
return strings.Join(parts, "-")
}
func canonicalFieldName(sf reflect.StructField) string {
if tag, ok := sf.Tag.Lookup("figtree"); ok {
for _, part := range strings.Split(tag, ",") {
if strings.HasPrefix(part, "name=") {
return strings.TrimPrefix(part, "name=")
}
}
}
// For consistency with YAML data, determine a canonical field name
// based on the YAML tag. Do not rely on the Go struct field name unless
// there is no YAML tag.
return camelCase(yamlFieldName(sf))
}
func (m *Merger) mustOverwrite(name string) bool {
for _, prop := range m.Config.Overwrite {
if name == prop {
return true
}
}
return false
}
func (m *Merger) mustIgnore(name string) bool {
for _, prop := range m.ignore {
if name == prop {
return true
}
}
return false
}
func isDefault(v reflect.Value) bool {
if v.CanAddr() {
if option, ok := v.Addr().Interface().(option); ok {
if option.GetSource() == "default" {
return true
}
}
}
return false
}
func isZero(v reflect.Value) bool {
if !v.IsValid() {
return true
}
return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
}
func isSame(v1, v2 reflect.Value) bool {
return reflect.DeepEqual(v1.Interface(), v2.Interface())
}
// recursively set the Source attribute of the Options
func (m *Merger) setSource(v reflect.Value) {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
switch v.Kind() {
case reflect.Map:
for _, key := range v.MapKeys() {
keyval := v.MapIndex(key)
if keyval.Kind() == reflect.Struct && keyval.FieldByName("Source").IsValid() {
// map values are immutable, so we need to copy the value,
// update it, then re-insert it into the map
newval := reflect.New(keyval.Type())
newval.Elem().Set(keyval)
m.setSource(newval)
v.SetMapIndex(key, newval.Elem())
}
}
case reflect.Struct:
if v.CanAddr() {
if option, ok := v.Addr().Interface().(option); ok {
if option.IsDefined() {
option.SetSource(m.sourceFile)
}
return
}
}
for i := 0; i < v.NumField(); i++ {
structField := v.Type().Field(i)
// PkgPath is empty for upper case (exported) field names.
if structField.PkgPath != "" {
// unexported field, skipping
continue
}
m.setSource(v.Field(i))
}
case reflect.Array:
fallthrough
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
m.setSource(v.Index(i))
}
}
}
func (m *Merger) assignValue(dest, src reflect.Value, overwrite bool) {
if src.Type().AssignableTo(dest.Type()) {
shouldAssignDest := overwrite || isZero(dest) || (isDefault(dest) && !isDefault(src))
isValidSrc := !isZero(src)
if shouldAssignDest && isValidSrc {
if src.Kind() == reflect.Map {
// maps are mutable, so create a brand new shiny one
dup := reflect.New(src.Type()).Elem()
m.mergeMaps(dup, src)
dest.Set(dup)
} else {
dest.Set(src)
}
return
}
return
}
if dest.CanAddr() {
if option, ok := dest.Addr().Interface().(option); ok {
destOptionValue := reflect.ValueOf(option.GetValue())
// map interface type to real-ish type:
src = reflect.ValueOf(src.Interface())
if !src.IsValid() {
Log.Debugf("assignValue: src isValid: %t", src.IsValid())
return
}
// if destOptionValue is a Zero value then the Type call will panic.
// If dest is zero, we should just overwrite it with src anyway.
if isZero(destOptionValue) || src.Type().AssignableTo(destOptionValue.Type()) {
option.SetValue(src.Interface())
option.SetSource(m.sourceFile)
Log.Debugf("assignValue: assigned %#v to %#v", destOptionValue, src)
return
}
if destOptionValue.Kind() == reflect.Bool && src.Kind() == reflect.String {
b, err := strconv.ParseBool(src.Interface().(string))
if err != nil {
panic(fmt.Errorf("%s is not assignable to %s, invalid bool value: %s", src.Type(), destOptionValue.Type(), err))
}
option.SetValue(b)
option.SetSource(m.sourceFile)
Log.Debugf("assignValue: assigned %#v to %#v", destOptionValue, b)
return
}
if destOptionValue.Kind() == reflect.String && src.Kind() != reflect.String {
option.SetValue(fmt.Sprintf("%v", src.Interface()))
option.SetSource(m.sourceFile)
Log.Debugf("assignValue: assigned %#v to %#v", destOptionValue, src)
return
}
panic(fmt.Errorf("%s is not assignable to %s", src.Type(), destOptionValue.Type()))
}
}
// make copy so we can reliably Addr it to see if it fits the
// Option interface.
srcCopy := reflect.New(src.Type()).Elem()
srcCopy.Set(src)
if option, ok := srcCopy.Addr().Interface().(option); ok {
srcOptionValue := reflect.ValueOf(option.GetValue())
if srcOptionValue.Type().AssignableTo(dest.Type()) {
m.assignValue(dest, srcOptionValue, overwrite)
return
} else {
panic(fmt.Errorf("%s is not assignable to %s", srcOptionValue.Type(), dest.Type()))
}
}
}
func fromInterface(v reflect.Value) (reflect.Value, func()) {
if v.Kind() == reflect.Interface {
realV := reflect.ValueOf(v.Interface())
if !realV.IsValid() {
realV = reflect.New(v.Type()).Elem()
v.Set(realV)
return v, func() {}
}
tmp := reflect.New(realV.Type()).Elem()
tmp.Set(realV)
return tmp, func() {
v.Set(tmp)
}
}
return v, func() {}
}
func (m *Merger) mergeStructs(ov, nv reflect.Value) {
ov = reflect.Indirect(ov)
nv = reflect.Indirect(nv)
ov, restore := fromInterface(ov)
defer restore()
if nv.Kind() == reflect.Interface {
nv = reflect.ValueOf(nv.Interface())
}
if ov.Kind() == reflect.Map {
if nv.Kind() == reflect.Struct {
nv = structToMap(nv)
}
m.mergeMaps(ov, nv)
return
}
if ov.Kind() == reflect.Struct && nv.Kind() == reflect.Map {
nv = m.mapToStruct(nv)
}
if !ov.IsValid() || !nv.IsValid() {
Log.Debugf("Valid: ov:%v nv:%t", ov.IsValid(), nv.IsValid())
return
}
ovFieldTypesByYAML := make(map[string]reflect.StructField)
ovFieldValuesByYAML := make(map[string]reflect.Value)
var populateYAMLMaps func(reflect.Value)
populateYAMLMaps = func(ov reflect.Value) {
for i := 0; i < ov.NumField(); i++ {
fieldType := ov.Type().Field(i)
yamlName := yamlFieldName(fieldType)
if _, ok := ovFieldTypesByYAML[yamlName]; !ok {
ovFieldTypesByYAML[yamlName] = fieldType
ovFieldValuesByYAML[yamlName] = ov.Field(i)
}
}
for i := 0; i < ov.NumField(); i++ {
fieldType := ov.Type().Field(i)
if fieldType.Anonymous && reflect.Indirect(ov.Field(i)).Type().Kind() == reflect.Struct {
populateYAMLMaps(reflect.Indirect(ov.Field(i)))
}
}
}
populateYAMLMaps(ov)
for i := 0; i < nv.NumField(); i++ {
nvField := nv.Field(i)
if nvField.Kind() == reflect.Interface {
nvField = reflect.ValueOf(nvField.Interface())
}
if !nvField.IsValid() {
continue
}
nvStructField := nv.Type().Field(i)
fieldName := yamlFieldName(nvStructField)
ovStructField, ok := ovFieldTypesByYAML[fieldName]
if !ok {
if nvStructField.Anonymous {
// this is an embedded struct, and the destination does not contain
// the same embedded struct, so try to merge the embedded struct
// directly with the destination
m.mergeStructs(ov, nvField)
continue
}
// if original value does not have the same struct field
// then just skip this field.
continue
}
// PkgPath is empty for upper case (exported) field names.
if ovStructField.PkgPath != "" || nvStructField.PkgPath != "" {
// unexported field, skipping
continue
}
ovField := ovFieldValuesByYAML[fieldName]
ovField, restore := fromInterface(ovField)
defer restore()
if m.mustIgnore(fieldName) {
continue
}
if (isZero(ovField) || isDefault(ovField) || m.mustOverwrite(fieldName)) && !isSame(ovField, nvField) {
Log.Debugf("Setting %s to %#v", nv.Type().Field(i).Name, nvField.Interface())
m.assignValue(ovField, nvField, m.mustOverwrite(fieldName))
}
switch ovField.Kind() {
case reflect.Map:
Log.Debugf("Merging Map: %#v with %#v", ovField, nvField)
m.mergeStructs(ovField, nvField)
case reflect.Slice:
if nvField.Len() > 0 {
Log.Debugf("Merging Slice: %#v with %#v", ovField, nvField)
ovField.Set(m.mergeArrays(ovField, nvField))
}
case reflect.Array:
if nvField.Len() > 0 {
Log.Debugf("Merging Array: %v with %v", ovField, nvField)
ovField.Set(m.mergeArrays(ovField, nvField))
}
case reflect.Struct:
// only merge structs if they are not an Option type:
if _, ok := ovField.Addr().Interface().(option); !ok {
Log.Debugf("Merging Struct: %v with %v", ovField, nvField)
m.mergeStructs(ovField, nvField)
}
}
}
}
func (m *Merger) mergeMaps(ov, nv reflect.Value) {
for _, key := range nv.MapKeys() {
if !ov.MapIndex(key).IsValid() {
ovElem := reflect.New(ov.Type().Elem()).Elem()
m.assignValue(ovElem, nv.MapIndex(key), false)
if ov.IsNil() {
if !ov.CanSet() {
continue
}
ov.Set(reflect.MakeMap(ov.Type()))
}
Log.Debugf("Setting %v to %#v", key.Interface(), ovElem.Interface())
ov.SetMapIndex(key, ovElem)
} else {
ovi := reflect.ValueOf(ov.MapIndex(key).Interface())
nvi := reflect.ValueOf(nv.MapIndex(key).Interface())
if !nvi.IsValid() {
continue
}
switch ovi.Kind() {
case reflect.Map:
Log.Debugf("Merging: %v with %v", ovi.Interface(), nvi.Interface())
m.mergeStructs(ovi, nvi)
case reflect.Slice:
Log.Debugf("Merging: %v with %v", ovi.Interface(), nvi.Interface())
ov.SetMapIndex(key, m.mergeArrays(ovi, nvi))
case reflect.Array:
Log.Debugf("Merging: %v with %v", ovi.Interface(), nvi.Interface())
ov.SetMapIndex(key, m.mergeArrays(ovi, nvi))
default:
if isZero(ovi) {
if !ovi.IsValid() || nvi.Type().AssignableTo(ovi.Type()) {
ov.SetMapIndex(key, nvi)
} else {
// to check for the Option interface we need the Addr of the value, but
// we cannot take the Addr of a map value, so we have to first copy
// it, meh not optimal
newVal := reflect.New(nvi.Type())
newVal.Elem().Set(nvi)
if nOption, ok := newVal.Interface().(option); ok {
ov.SetMapIndex(key, reflect.ValueOf(nOption.GetValue()))
continue
}
panic(fmt.Errorf("map value %T is not assignable to %T", nvi.Interface(), ovi.Interface()))
}
}
}
}
}
}
func (m *Merger) mergeArrays(ov, nv reflect.Value) reflect.Value {
var zero interface{}
Outer:
for ni := 0; ni < nv.Len(); ni++ {
niv := nv.Index(ni)
n := niv
if n.CanAddr() {
if nOption, ok := n.Addr().Interface().(option); ok {
if !nOption.IsDefined() {
continue
}
n = reflect.ValueOf(nOption.GetValue())
}
}
if reflect.DeepEqual(n.Interface(), zero) {
continue
}
for oi := 0; oi < ov.Len(); oi++ {
o := ov.Index(oi)
if o.CanAddr() {
if oOption, ok := o.Addr().Interface().(option); ok {
o = reflect.ValueOf(oOption.GetValue())
}
}
if reflect.DeepEqual(n.Interface(), o.Interface()) {
continue Outer
}
}
nvElem := reflect.New(ov.Type().Elem()).Elem()
m.assignValue(nvElem, niv, false)
Log.Debugf("Appending %v to %v", nvElem.Interface(), ov)
ov = reflect.Append(ov, nvElem)
}
return ov
}
func (f *FigTree) formatEnvName(name string) string {
name = fmt.Sprintf("%s_%s", f.envPrefix, strings.ToUpper(name))
return strings.Map(func(r rune) rune {
if unicode.IsDigit(r) || unicode.IsLetter(r) {
return r
}
return '_'
}, name)
}
func (f *FigTree) formatEnvValue(value reflect.Value) (string, bool) {
switch t := value.Interface().(type) {
case string:
return t, true
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool:
return fmt.Sprintf("%v", t), true
default:
switch value.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if value.IsNil() {
return "", false
}
}
if t == nil {
return "", false
}
type definable interface {
IsDefined() bool
}
if def, ok := t.(definable); ok {
// skip fields that are not defined
if !def.IsDefined() {
return "", false
}
}
type gettable interface {
GetValue() interface{}
}
if get, ok := t.(gettable); ok {
return fmt.Sprintf("%v", get.GetValue()), true
} else {
if b, err := json.Marshal(t); err == nil {
val := strings.TrimSpace(string(b))
if val == "null" {
return "", true
}
return val, true
}
}
}
return "", false
}
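// PopulateEnv walks a map or struct and returns the environment variables it
// implies, keyed by formatted name (envPrefix plus the upper-cased, split
// field name); a nil value means the variable should be unset. For example
// (illustrative), a struct field named User with an envPrefix of "FIG" yields
// an entry for "FIG_USER".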
func (f *FigTree) PopulateEnv(data interface{}) (changeSet map[string]*string) {
changeSet = make(map[string]*string)
options := reflect.ValueOf(data)
if options.Kind() == reflect.Ptr {
options = reflect.ValueOf(options.Elem().Interface())
}
if options.Kind() == reflect.Map {
for _, key := range options.MapKeys() {
if strKey, ok := key.Interface().(string); ok {
// first chunk up the string so that `foo-bar` becomes ["foo", "bar"]
parts := strings.FieldsFunc(strKey, func(r rune) bool {
return !unicode.IsLetter(r) && !unicode.IsNumber(r)
})
// now for each chunk split again on camelcase so ["fooBar", "baz"]
// becomes ["foo", "Bar", "baz"]
allParts := []string{}
for _, part := range parts {
allParts = append(allParts, camelcase.Split(part)...)
}
name := strings.Join(allParts, "_")
envName := f.formatEnvName(name)
val, ok := f.formatEnvValue(options.MapIndex(key))
if ok {
changeSet[envName] = &val
} else {
changeSet[envName] = nil
}
}
}
} else if options.Kind() == reflect.Struct {
for i := 0; i < options.NumField(); i++ {
structField := options.Type().Field(i)
// PkgPath is empty for upper case (exported) field names.
if structField.PkgPath != "" {
// unexported field, skipping
continue
}
envNames := []string{strings.Join(camelcase.Split(structField.Name), "_")}
formatName := true
if tag := structField.Tag.Get("figtree"); tag != "" {
if strings.Contains(tag, ",inline") {
// if we have a tag like: `figtree:",inline"` then we
// want to treat the field as a top-level member and not serialize
// the raw struct to json, so just recurse here
nestedEnvSet := f.PopulateEnv(options.Field(i).Interface())
for k, v := range nestedEnvSet {
changeSet[k] = v
}
continue
}
if strings.Contains(tag, ",raw") {
formatName = false
}
// next look for `figtree:"env,..."` to set the env name to that
parts := strings.Split(tag, ",")
if len(parts) > 0 {
// if the env name is "-" then we should not populate this data into the env
if parts[0] == "-" {
continue
}
for _, part := range parts {
if strings.HasPrefix(part, "name=") {
continue
}
envNames = strings.Split(part, ";")
break
}
}
}
for _, name := range envNames {
envName := name
if formatName {
envName = f.formatEnvName(name)
}
val, ok := f.formatEnvValue(options.Field(i))
if ok {
changeSet[envName] = &val
} else {
changeSet[envName] = nil
}
}
}
}
return changeSet
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
cairis/test/test_DependencyAPI.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import jsonpickle
from cairis.core.Dependency import Dependency
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.tools.ModelDefinitions import DependencyModel
import os
from cairis.mio.ModelImport import importModelFile
__author__ = 'Robin Quetin, Shamal Faily'
class DependencyAPITests(CairisDaemonTestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
self.working_name_1 = ('Stroke', 'all', 'all', 'all')
self.working_name_2 = ('Stroke', 'Data%20Consumer', 'Certificate%20Authority', 'Personal%20certificate')
self.existing_environment_1 = 'Stroke'
self.existing_environment_2 = 'Psychosis'
self.existing_role_1 = 'Data Consumer'
self.existing_role_2 = 'Certificate Authority'
self.existing_type = 'goal'
self.existing_dependency = 'Upload authorisation'
def test_all_get(self):
method = 'test_all_get'
url = '/api/dependencies?session_id=test'
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'The response is not a valid JSON dictionary')
self.assertGreater(len(json_dict), 0, 'No dependencies found')
assert isinstance(json_dict, dict)
item = json_dict.items()[0]
self.logger.info('[%s] First dependency: %s [%d]\n', method, item[0], item[1]['theId'])
def test_dependencies_name_get(self):
method = 'test_dependencies_name_get'
url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % self.working_name_1
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, list, 'The response is not a valid JSON list')
self.assertGreater(len(json_dict), 0, 'No dependencies found')
assert isinstance(json_dict, list)
ids = []
for dep in json_dict:
ids.append(str(dep['theId']))
self.logger.info('[%s] Dependency IDs: %s\n', method, ', '.join(ids))
def test_dependency_name_get(self):
method = 'test_dependency_name_get'
url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % self.working_name_2
rv = self.app.get(url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, list, 'The response is not a valid JSON list')
self.assertEqual(len(json_dict), 1, 'Result is not unique')
assert isinstance(json_dict, list)
item = json_dict[0]
has_keys = all (k in item for k in DependencyModel.required)
self.assertTrue(has_keys, 'Result is not a dependency')
dep_name = '/'.join([item['theEnvironmentName'], item['theDepender'], item['theDependee'], item['theDependency']])
self.logger.info('[%s] Dependency: %s [%d]\n', method, dep_name, item['theId'])
def test_dependency_post(self):
method = 'test_dependency_post'
url = '/api/dependencies'
new_dep = self.prepare_new_dependency()
json_dict = {
'session_id': 'test',
'object': new_dep
}
json_body = jsonpickle.encode(json_dict)
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
delete_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
self.app.delete(delete_url)
rv = self.app.post(url, data=json_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'Response is not a valid JSON dictionary')
message = json_dict.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertNotIsInstance(message, dict, 'Message is an object')
self.logger.info('[%s] Message: %s', method, message)
dep_id = json_dict.get('dependency_id', None)
self.assertIsNotNone(dep_id, 'No dependency ID returned')
self.logger.info('[%s] New dependency ID: %d\n', method, dep_id)
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
delete_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
self.app.delete(delete_url)
def test_dependency_name_delete(self):
method = 'test_dependency_name_delete'
url = '/api/dependencies'
new_dep = self.prepare_new_dependency()
json_dict = {
'session_id': 'test',
'object': new_dep
}
json_body = jsonpickle.encode(json_dict)
self.app.post(url, data=json_body, content_type='application/json')
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
delete_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
rv = self.app.delete(delete_url)
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'Response is not a valid JSON dictionary')
message = json_dict.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertNotIsInstance(message, dict, 'Message is an object')
self.logger.info('[%s] Message: %s\n', method, message)
def test_dependency_name_put(self):
method = 'test_dependency_name_put'
url = '/api/dependencies'
new_dep = self.prepare_new_dependency()
json_dict = {
'session_id': 'test',
'object': new_dep
}
json_body = jsonpickle.encode(json_dict)
self.app.post(url, data=json_body, content_type='application/json')
new_name = (new_dep.theEnvironmentName, new_dep.theDepender, new_dep.theDependee, new_dep.theDependency)
upd_dep = new_dep
upd_dep.theEnvironmentName = self.existing_environment_2
json_dict = {
'session_id': 'test',
'object': upd_dep
}
json_body = jsonpickle.encode(json_dict)
upd_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % new_name
rv = self.app.put(upd_url, data=json_body, content_type='application/json')
self.assertIsNotNone(rv.data, 'No response')
json_dict = jsonpickle.decode(rv.data)
self.assertIsInstance(json_dict, dict, 'Response is not a valid JSON dictionary')
message = json_dict.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertNotIsInstance(message, dict, 'Message is an object')
self.logger.info('[%s] Message: %s\n', method, message)
delete_name = (upd_dep.theEnvironmentName, upd_dep.theDepender, upd_dep.theDependee, upd_dep.theDependency)
del_get_url = '/api/dependencies/environment/%s/depender/%s/dependee/%s/dependency/%s?session_id=test' % delete_name
rv = self.app.get(del_get_url)
self.logger.debug('[%s] Updated dependency:\n%s\n', method, rv.data)
self.app.delete(del_get_url)
def prepare_new_dependency(self):
d = Dependency(
-1,
self.existing_environment_1,
self.existing_role_1,
self.existing_role_2,
self.existing_type,
self.existing_dependency,
'This is a test dependency'
)
return d
|
[] |
[] |
[
"CAIRIS_SRC"
] |
[]
|
["CAIRIS_SRC"]
|
python
| 1 | 0 | |
src/app/backend/client/verber.go
|
// Copyright 2017 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"fmt"
"github.com/kubernetes/dashboard/src/app/backend/api"
clientapi "github.com/kubernetes/dashboard/src/app/backend/client/api"
"github.com/kubernetes/dashboard/src/app/backend/errors"
"github.com/kubernetes/dashboard/src/app/backend/resource/customresourcedefinition"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
restclient "k8s.io/client-go/rest"
)
// resourceVerber is a struct responsible for doing common verb operations on resources, like
// DELETE, PUT, UPDATE.
type resourceVerber struct {
client RESTClient
extensionsClient RESTClient
appsClient RESTClient
batchClient RESTClient
betaBatchClient RESTClient
autoscalingClient RESTClient
storageClient RESTClient
rbacClient RESTClient
apiExtensionsClient RESTClient
pluginsClient RESTClient
config *restclient.Config
}
func (verber *resourceVerber) getRESTClientByType(clientType api.ClientType) RESTClient {
switch clientType {
case api.ClientTypeExtensionClient:
return verber.extensionsClient
case api.ClientTypeAppsClient:
return verber.appsClient
case api.ClientTypeBatchClient:
return verber.batchClient
case api.ClientTypeBetaBatchClient:
return verber.betaBatchClient
case api.ClientTypeAutoscalingClient:
return verber.autoscalingClient
case api.ClientTypeStorageClient:
return verber.storageClient
case api.ClientTypeRbacClient:
return verber.rbacClient
case api.ClientTypeAPIExtensionsClient:
return verber.apiExtensionsClient
case api.ClientTypePluginsClient:
return verber.pluginsClient
default:
return verber.client
}
}
func (verber *resourceVerber) getResourceSpecFromKind(kind string, namespaceSet bool) (client RESTClient, resourceSpec api.APIMapping, err error) {
resourceSpec, ok := api.KindToAPIMapping[kind]
if !ok {
// check if kind is CRD
var crd apiextensions.CustomResourceDefinition
err = verber.apiExtensionsClient.Get().Resource("customresourcedefinitions").Name(kind).Do().Into(&crd)
if err != nil {
if errors.IsNotFoundError(err) {
err = errors.NewInvalid(fmt.Sprintf("Unknown resource kind: %s", kind))
}
return
}
client, err = customresourcedefinition.NewRESTClient(verber.config, &crd)
if err != nil {
return
}
resourceSpec = api.APIMapping{
Resource: crd.Status.AcceptedNames.Plural,
Namespaced: crd.Spec.Scope == apiextensions.NamespaceScoped,
}
}
if namespaceSet != resourceSpec.Namespaced {
if namespaceSet {
err = errors.NewInvalid(fmt.Sprintf("Set namespace for not-namespaced resource kind: %s", kind))
return
} else {
err = errors.NewInvalid(fmt.Sprintf("Set no namespace for namespaced resource kind: %s", kind))
return
}
}
if client == nil {
client = verber.getRESTClientByType(resourceSpec.ClientType)
}
return
}
// RESTClient is an interface for REST operations used in this file.
type RESTClient interface {
Delete() *restclient.Request
Put() *restclient.Request
Get() *restclient.Request
}
// NewResourceVerber creates a new resource verber that uses the given client for performing operations.
func NewResourceVerber(client, extensionsClient, appsClient, batchClient, betaBatchClient, autoscalingClient, storageClient, rbacClient, apiExtensionsClient, pluginsClient RESTClient, config *restclient.Config) clientapi.ResourceVerber {
return &resourceVerber{client, extensionsClient, appsClient,
batchClient, betaBatchClient, autoscalingClient, storageClient, rbacClient, apiExtensionsClient, pluginsClient, config}
}
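// Illustrative usage sketch (the kind, namespace and name below are
// assumptions; construction of the individual REST clients is elided):
//
//	verber := NewResourceVerber(client, extensionsClient, appsClient, batchClient,
//		betaBatchClient, autoscalingClient, storageClient, rbacClient,
//		apiExtensionsClient, pluginsClient, config)
//	err := verber.Delete("deployment", true, "default", "my-app")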
// Delete deletes the resource of the given kind in the given namespace with the given name.
func (verber *resourceVerber) Delete(kind string, namespaceSet bool, namespace string, name string) error {
client, resourceSpec, err := verber.getResourceSpecFromKind(kind, namespaceSet)
if err != nil {
return err
}
// Do cascade delete by default, as this is what users typically expect.
defaultPropagationPolicy := v1.DeletePropagationForeground
defaultDeleteOptions := &v1.DeleteOptions{
PropagationPolicy: &defaultPropagationPolicy,
}
req := client.Delete().Resource(resourceSpec.Resource).Name(name).Body(defaultDeleteOptions)
if resourceSpec.Namespaced {
req.Namespace(namespace)
}
return req.Do().Error()
}
// Put puts new resource version of the given kind in the given namespace with the given name.
func (verber *resourceVerber) Put(kind string, namespaceSet bool, namespace string, name string,
object *runtime.Unknown) error {
client, resourceSpec, err := verber.getResourceSpecFromKind(kind, namespaceSet)
if err != nil {
return err
}
req := client.Put().
Resource(resourceSpec.Resource).
Name(name).
SetHeader("Content-Type", "application/json").
Body([]byte(object.Raw))
if resourceSpec.Namespaced {
req.Namespace(namespace)
}
return req.Do().Error()
}
// Get gets the resource of the given kind in the given namespace with the given name.
func (verber *resourceVerber) Get(kind string, namespaceSet bool, namespace string, name string) (runtime.Object, error) {
client, resourceSpec, err := verber.getResourceSpecFromKind(kind, namespaceSet)
if err != nil {
return nil, err
}
result := &runtime.Unknown{}
req := client.Get().Resource(resourceSpec.Resource).Name(name).SetHeader("Accept", "application/json")
if resourceSpec.Namespaced {
req.Namespace(namespace)
}
err = req.Do().Into(result)
return result, err
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
gdb/.gdbinit-gef.py
|
# -*- coding: utf-8 -*-
#
#
#######################################################################################
# GEF - Multi-Architecture GDB Enhanced Features for Exploiters & Reverse-Engineers
#
# by @_hugsy_
#######################################################################################
#
# GEF is a kick-ass set of commands for X86, ARM, MIPS, PowerPC and SPARC to
# make GDB cool again for exploit dev. It is aimed mostly at exploit
# devs and reversers, and provides additional features to GDB using the Python
# API to assist during the process of dynamic analysis.
#
# GEF fully relies on GDB API and other Linux-specific sources of information
# (such as /proc/<pid>). As a consequence, some of the features might not work
# on custom or hardened systems such as GrSec.
#
# It has full support for both Python2 and Python3 and works on
# * x86-32 & x86-64
# * arm v5,v6,v7
# * aarch64 (armv8)
# * mips & mips64
# * powerpc & powerpc64
# * sparc & sparc64(v9)
#
# Requires GDB 7.x compiled with Python (2.x, or 3.x)
#
# To start: in gdb, type `source /path/to/gef.py`
#
#######################################################################################
#
# gef is distributed under the MIT License (MIT)
# Copyright (c) 2013-2018 crazy rabbidz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
from __future__ import print_function, division, absolute_import
import abc
import binascii
import codecs
import collections
import ctypes
import fcntl
import functools
import getopt
import hashlib
import imp
import inspect
import itertools
import os
import platform
import re
import shutil
import site
import socket
import string
import struct
import subprocess
import sys
import tempfile
import termios
import time
import traceback
PYTHON_MAJOR = sys.version_info[0]
if PYTHON_MAJOR == 2:
from HTMLParser import HTMLParser #pylint: disable=import-error
from cStringIO import StringIO #pylint: disable=import-error
from urllib import urlopen #pylint: disable=no-name-in-module
import ConfigParser as configparser #pylint: disable=import-error
import xmlrpclib #pylint: disable=import-error
# Compat Py2/3 hacks
def range(*args):
"""Replace range() builtin with an iterator version."""
if len(args) < 1:
raise TypeError()
start, end, step = 0, args[0], 1
if len(args) == 2: start, end = args
if len(args) == 3: start, end, step = args
for n in itertools.count(start=start, step=step):
if (step>0 and n >= end) or (step<0 and n<=end): break
yield n
FileNotFoundError = IOError #pylint: disable=redefined-builtin
ConnectionRefusedError = socket.error #pylint: disable=redefined-builtin
LEFT_ARROW = "<-"
RIGHT_ARROW = "->"
DOWN_ARROW = "\\->"
HORIZONTAL_LINE = "-"
VERTICAL_LINE = "|"
CROSS = "x"
TICK = "v"
GEF_PROMPT = "gef> "
GEF_PROMPT_ON = "\001\033[1;32m\002{0:s}\001\033[0m\002".format(GEF_PROMPT)
GEF_PROMPT_OFF = "\001\033[1;31m\002{0:s}\001\033[0m\002".format(GEF_PROMPT)
elif PYTHON_MAJOR == 3:
from html.parser import HTMLParser #pylint: disable=import-error
from io import StringIO
from urllib.request import urlopen #pylint: disable=import-error,no-name-in-module
import configparser
import xmlrpc.client as xmlrpclib #pylint: disable=import-error
# Compat Py2/3 hack
long = int
unicode = str
LEFT_ARROW = " \u2190 "
RIGHT_ARROW = " \u2192 "
DOWN_ARROW = "\u21b3"
HORIZONTAL_LINE = "\u2500"
VERTICAL_LINE = "\u2502"
CROSS = "\u2718 "
TICK = "\u2713 "
GEF_PROMPT = "gef\u27a4 "
GEF_PROMPT_ON = "\001\033[1;32m\002{0:s}\001\033[0m\002".format(GEF_PROMPT)
GEF_PROMPT_OFF = "\001\033[1;31m\002{0:s}\001\033[0m\002".format(GEF_PROMPT)
else:
raise Exception("WTF is this Python version??")
def http_get(url):
"""Basic HTTP wrapper for GET request. Return the body of the page if HTTP code is OK,
otherwise return None."""
try:
http = urlopen(url)
if http.getcode() != 200:
return None
return http.read()
except Exception:
return None
def update_gef(argv):
"""Try to update `gef` to the latest version pushed on GitHub. Return 0 on success,
1 on failure. """
gef_local = os.path.realpath(argv[0])
hash_gef_local = hashlib.sha512(open(gef_local, "rb").read()).digest()
gef_remote = "https://raw.githubusercontent.com/hugsy/gef/master/gef.py"
gef_remote_data = http_get(gef_remote)
if gef_remote_data is None:
print("[-] Failed to get remote gef")
return 1
hash_gef_remote = hashlib.sha512(gef_remote_data).digest()
if hash_gef_local == hash_gef_remote:
print("[-] No update")
else:
with open(gef_local, "wb") as f:
f.write(gef_remote_data)
print("[+] Updated")
return 0
try:
import gdb
except ImportError:
# if out of gdb, the only action allowed is to update gef.py
if len(sys.argv)==2 and sys.argv[1]=="--update":
sys.exit(update_gef(sys.argv))
print("[-] gef cannot run as standalone")
sys.exit(0)
__gef__ = None
__commands__ = []
__functions__ = []
__aliases__ = []
__config__ = {}
__watches__ = {}
__infos_files__ = []
__gef_convenience_vars_index__ = 0
__context_messages__ = []
__heap_allocated_list__ = []
__heap_freed_list__ = []
__heap_uaf_watchpoints__ = []
__pie_breakpoints__ = {}
__pie_counter__ = 1
__gef_remote__ = None
__gef_qemu_mode__ = False
__gef_default_main_arena__ = "main_arena"
__gef_int_stream_buffer__ = None
DEFAULT_PAGE_ALIGN_SHIFT = 12
DEFAULT_PAGE_SIZE = 1 << DEFAULT_PAGE_ALIGN_SHIFT
GEF_RC = os.path.join(os.getenv("HOME"), ".gef.rc")
GEF_TEMP_DIR = os.path.join(tempfile.gettempdir(), "gef")
GEF_MAX_STRING_LENGTH = 50
GDB_MIN_VERSION = (7, 7)
GDB_VERSION_MAJOR, GDB_VERSION_MINOR = [int(_) for _ in re.search(r"(\d+)[^\d]+(\d+)", gdb.VERSION).groups()]
GDB_VERSION = (GDB_VERSION_MAJOR, GDB_VERSION_MINOR)
current_elf = None
current_arch = None
highlight_table = {}
ANSI_SPLIT_RE = "(\033\[[\d;]*m)"
if PYTHON_MAJOR==3:
lru_cache = functools.lru_cache #pylint: disable=no-member
else:
def lru_cache(maxsize = 128):
"""Port of the Python3 LRU cache mechanism provided by functools."""
class GefLruCache(object):
"""Local LRU cache for Python2."""
def __init__(self, input_func, max_size):
self._input_func = input_func
self._max_size = max_size
self._caches_dict = {}
self._caches_info = {}
return
def cache_info(self, caller=None):
"""Return a string with statistics of cache usage."""
if caller not in self._caches_dict:
return ""
hits = self._caches_info[caller]["hits"]
missed = self._caches_info[caller]["missed"]
cursz = len(self._caches_dict[caller])
return "CacheInfo(hits={}, misses={}, maxsize={}, currsize={})".format(hits, missed, self._max_size, cursz)
def cache_clear(self, caller=None):
"""Clear a cache."""
if caller in self._caches_dict:
self._caches_dict[caller] = collections.OrderedDict()
return
def __get__(self, obj, objtype):
"""Cache getter."""
return_func = functools.partial(self._cache_wrapper, obj)
return_func.cache_clear = functools.partial(self.cache_clear, obj)
return functools.wraps(self._input_func)(return_func)
def __call__(self, *args, **kwargs):
"""Invoke the wrapped function, attempting to get its value from the cache if it exists."""
return self._cache_wrapper(None, *args, **kwargs)
__call__.cache_clear = cache_clear
__call__.cache_info = cache_info
def _cache_wrapper(self, caller, *args, **kwargs):
"""Defines the caching mechanism."""
kwargs_key = "".join(map(lambda x : str(x) + str(type(kwargs[x])) + str(kwargs[x]), sorted(kwargs)))
key = "".join(map(lambda x : str(type(x)) + str(x) , args)) + kwargs_key
if caller not in self._caches_dict:
self._caches_dict[caller] = collections.OrderedDict()
self._caches_info[caller] = {"hits":0, "missed":0}
cur_caller_cache_dict = self._caches_dict[caller]
if key in cur_caller_cache_dict:
self._caches_info[caller]["hits"] += 1
return cur_caller_cache_dict[key]
self._caches_info[caller]["missed"] += 1
if self._max_size is not None:
if len(cur_caller_cache_dict) >= self._max_size:
cur_caller_cache_dict.popitem(False)
cur_caller_cache_dict[key] = self._input_func(caller, *args, **kwargs) if caller != None else self._input_func(*args, **kwargs)
return cur_caller_cache_dict[key]
return lambda input_func: functools.wraps(input_func)(GefLruCache(input_func, maxsize))
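# Note (illustrative, not part of the original file): on Python 2 the shim above
# is used exactly like functools.lru_cache, e.g.
#
#   @lru_cache(maxsize=32)
#   def lookup(address):
#       ...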
def reset_all_caches():
"""Free all caches. If an object is cached, it will have a callable attribute `cache_clear`
which will be invoked to purge the function cache."""
for mod in dir(sys.modules["__main__"]):
obj = getattr(sys.modules["__main__"], mod)
if hasattr(obj, "cache_clear"):
obj.cache_clear()
return
def highlight_text(text):
"""
Highlight text using highlight_table { match -> color } settings.
If RegEx is enabled it will create a match group around all items in the
highlight_table and wrap the specified color in the highlight_table
around those matches.
If RegEx is disabled, split by ANSI codes and 'colorify' each match found
within the specified string.
"""
if not highlight_table:
return text
if get_gef_setting("highlight.regex"):
for match, color in highlight_table.items():
text = re.sub("(" + match + ")", Color.colorify("\\1", color), text)
return text
ansiSplit = re.split(ANSI_SPLIT_RE, text)
for match, color in highlight_table.items():
for index, val in enumerate(ansiSplit):
found = val.find(match)
if found > -1:
ansiSplit[index] = val.replace(match, Color.colorify(match, color))
break
text = "".join(ansiSplit)
ansiSplit = re.split(ANSI_SPLIT_RE, text)
return "".join(ansiSplit)
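# Example (illustrative): with highlight_table = {"0xdeadbeef": "red"} and the
# "highlight.regex" setting disabled, highlight_text("ptr=0xdeadbeef") returns
# the string with "0xdeadbeef" wrapped in red ANSI codes.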
def gef_print(x="", *args, **kwargs):
"""Wrapper around print(), using string buffering feature."""
x = highlight_text(x)
if __gef_int_stream_buffer__ and not is_debug():
return __gef_int_stream_buffer__.write(x + kwargs.get("end", "\n"))
return print(x, *args, **kwargs)
def bufferize(f):
"""Store the content to be printed for a function in memory, and flush it on function exit."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
global __gef_int_stream_buffer__
if __gef_int_stream_buffer__:
return f(*args, **kwargs)
__gef_int_stream_buffer__ = StringIO()
try:
rv = f(*args, **kwargs)
finally:
sys.stdout.write(__gef_int_stream_buffer__.getvalue())
sys.stdout.flush()
__gef_int_stream_buffer__ = None
return rv
return wrapper
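# Usage sketch (illustrative): decorating a command handler with @bufferize
# collects its gef_print() output in memory and flushes it to stdout in a
# single write when the function returns.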
class Color:
"""Used to colorify terminal output."""
colors = {
"normal" : "\033[0m",
"gray" : "\033[1;38;5;240m",
"red" : "\033[31m",
"green" : "\033[32m",
"yellow" : "\033[33m",
"blue" : "\033[34m",
"pink" : "\033[35m",
"cyan" : "\033[36m",
"bold" : "\033[1m",
"underline" : "\033[4m",
"underline_off" : "\033[24m",
"highlight" : "\033[3m",
"highlight_off" : "\033[23m",
"blink" : "\033[5m",
"blink_off" : "\033[25m",
}
@staticmethod
def redify(msg): return Color.colorify(msg, "red")
@staticmethod
def greenify(msg): return Color.colorify(msg, "green")
@staticmethod
def blueify(msg): return Color.colorify(msg, "blue")
@staticmethod
def yellowify(msg): return Color.colorify(msg, "yellow")
@staticmethod
def grayify(msg): return Color.colorify(msg, "gray")
@staticmethod
def pinkify(msg): return Color.colorify(msg, "pink")
@staticmethod
def cyanify(msg): return Color.colorify(msg, "cyan")
@staticmethod
def boldify(msg): return Color.colorify(msg, "bold")
@staticmethod
def underlinify(msg): return Color.colorify(msg, "underline")
@staticmethod
def highlightify(msg): return Color.colorify(msg, "highlight")
@staticmethod
def blinkify(msg): return Color.colorify(msg, "blink")
@staticmethod
def colorify(text, attrs):
"""Color text according to the given attributes."""
if get_gef_setting("gef.disable_color") is True: return text
colors = Color.colors
msg = [colors[attr] for attr in attrs.split() if attr in colors]
msg.append(str(text))
if colors["highlight"] in msg : msg.append(colors["highlight_off"])
if colors["underline"] in msg : msg.append(colors["underline_off"])
if colors["blink"] in msg : msg.append(colors["blink_off"])
msg.append(colors["normal"])
return "".join(msg)
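# Example (illustrative): Color.colorify("heap", "red bold") returns "heap"
# wrapped in the red and bold ANSI escape codes, followed by the reset code.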
class Address:
"""GEF representation of memory addresses."""
def __init__(self, *args, **kwargs):
self.value = kwargs.get("value", 0)
self.section = kwargs.get("section", None)
self.info = kwargs.get("info", None)
self.valid = kwargs.get("valid", True)
return
def __str__(self):
value = format_address(self.value)
code_color = get_gef_setting("theme.address_code")
stack_color = get_gef_setting("theme.address_stack")
heap_color = get_gef_setting("theme.address_heap")
if self.is_in_text_segment():
return Color.colorify(value, code_color)
if self.is_in_heap_segment():
return Color.colorify(value, heap_color)
if self.is_in_stack_segment():
return Color.colorify(value, stack_color)
return value
def is_in_text_segment(self):
return (hasattr(self.info, "name") and ".text" in self.info.name) or \
(hasattr(self.section, "path") and get_filepath() == self.section.path and self.section.is_executable())
def is_in_stack_segment(self):
return hasattr(self.section, "path") and "[stack]" == self.section.path
def is_in_heap_segment(self):
return hasattr(self.section, "path") and "[heap]" == self.section.path
def dereference(self):
addr = align_address(long(self.value))
derefed = dereference(addr)
return None if derefed is None else long(derefed)
class Permission:
"""GEF representation of Linux permission."""
NONE = 0
READ = 1
WRITE = 2
EXECUTE = 4
ALL = READ | WRITE | EXECUTE
def __init__(self, **kwargs):
self.value = kwargs.get("value", 0)
return
def __or__(self, value):
return self.value | value
def __and__(self, value):
return self.value & value
def __xor__(self, value):
return self.value ^ value
def __eq__(self, value):
return self.value == value
def __ne__(self, value):
return self.value != value
def __str__(self):
perm_str = ""
perm_str += "r" if self & Permission.READ else "-"
perm_str += "w" if self & Permission.WRITE else "-"
perm_str += "x" if self & Permission.EXECUTE else "-"
return perm_str
@staticmethod
def from_info_sections(*args):
perm = Permission()
for arg in args:
if "READONLY" in arg:
perm.value += Permission.READ
if "DATA" in arg:
perm.value += Permission.WRITE
if "CODE" in arg:
perm.value += Permission.EXECUTE
return perm
@staticmethod
def from_process_maps(perm_str):
perm = Permission()
if perm_str[0] == "r":
perm.value += Permission.READ
if perm_str[1] == "w":
perm.value += Permission.WRITE
if perm_str[2] == "x":
perm.value += Permission.EXECUTE
return perm
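# Example (illustrative): str(Permission.from_process_maps("r-xp")) == "r-x".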
class Section:
"""GEF representation of process memory sections."""
def __init__(self, *args, **kwargs):
self.page_start = kwargs.get("page_start")
self.page_end = kwargs.get("page_end")
self.offset = kwargs.get("offset")
self.permission = kwargs.get("permission")
self.inode = kwargs.get("inode")
self.path = kwargs.get("path")
return
def is_readable(self):
return self.permission.value and self.permission.value&Permission.READ
def is_writable(self):
return self.permission.value and self.permission.value&Permission.WRITE
def is_executable(self):
return self.permission.value and self.permission.value&Permission.EXECUTE
@property
def size(self):
if self.page_end is None or self.page_start is None:
return -1
return self.page_end - self.page_start
@property
def realpath(self):
# when in a `gef-remote` session, realpath returns the path to the binary on the local disk, not remote
return self.path if __gef_remote__ is None else "/tmp/gef/{:d}/{:s}".format(__gef_remote__, self.path)
Zone = collections.namedtuple("Zone", ["name", "zone_start", "zone_end", "filename"])
class Elf:
"""Basic ELF parsing.
Ref:
- http://www.skyfree.org/linux/references/ELF_Format.pdf
- http://refspecs.freestandards.org/elf/elfspec_ppc.pdf
- http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html
"""
LITTLE_ENDIAN = 1
BIG_ENDIAN = 2
ELF_32_BITS = 0x01
ELF_64_BITS = 0x02
X86_64 = 0x3e
X86_32 = 0x03
ARM = 0x28
MIPS = 0x08
POWERPC = 0x14
POWERPC64 = 0x15
SPARC = 0x02
SPARC64 = 0x2b
AARCH64 = 0xb7
RISCV = 0xf3
ET_EXEC = 2
ET_DYN = 3
ET_CORE = 4
e_magic = b"\x7fELF"
e_class = ELF_32_BITS
e_endianness = LITTLE_ENDIAN
e_eiversion = None
e_osabi = None
e_abiversion = None
e_pad = None
e_type = ET_EXEC
e_machine = X86_32
e_version = None
e_entry = 0x00
e_phoff = None
e_shoff = None
e_flags = None
e_ehsize = None
e_phentsize = None
e_phnum = None
e_shentsize = None
e_shnum = None
e_shstrndx = None
def __init__(self, elf="", minimalist=False):
"""
Instantiate an ELF object. The default behavior is to create the object by parsing the ELF file.
But in some cases (QEMU-stub), we may just want a simple minimal object with default values."""
if minimalist:
return
if not os.access(elf, os.R_OK):
err("'{0}' not found/readable".format(elf))
err("Failed to get file debug information, most of gef features will not work")
return
with open(elf, "rb") as fd:
# off 0x0
self.e_magic, self.e_class, self.e_endianness, self.e_eiversion = struct.unpack(">IBBB", fd.read(7))
# adjust endianness in bin reading
endian = "<" if self.e_endianness == Elf.LITTLE_ENDIAN else ">"
# off 0x7
self.e_osabi, self.e_abiversion = struct.unpack("{}BB".format(endian), fd.read(2))
# off 0x9
self.e_pad = fd.read(7)
# off 0x10
self.e_type, self.e_machine, self.e_version = struct.unpack("{}HHI".format(endian), fd.read(8))
# off 0x18
if self.e_class == Elf.ELF_64_BITS:
# if arch 64bits
self.e_entry, self.e_phoff, self.e_shoff = struct.unpack("{}QQQ".format(endian), fd.read(24))
else:
# else arch 32bits
self.e_entry, self.e_phoff, self.e_shoff = struct.unpack("{}III".format(endian), fd.read(12))
self.e_flags, self.e_ehsize, self.e_phentsize, self.e_phnum = struct.unpack("{}HHHH".format(endian), fd.read(8))
self.e_shentsize, self.e_shnum, self.e_shstrndx = struct.unpack("{}HHH".format(endian), fd.read(6))
return
class Instruction:
"""GEF representation of a CPU instruction."""
def __init__(self, address, location, mnemo, operands):
self.address, self.location, self.mnemonic, self.operands = address, location, mnemo, operands
return
def __str__(self):
return "{:#10x} {:16} {:6} {:s}".format(self.address,
self.location,
self.mnemonic,
", ".join(self.operands))
def is_valid(self):
return "(bad)" not in self.mnemonic
@lru_cache()
def search_for_main_arena():
global __gef_default_main_arena__
malloc_hook_addr = to_unsigned_long(gdb.parse_and_eval("(void *)&__malloc_hook"))
if is_x86():
addr = align_address_to_size(malloc_hook_addr + current_arch.ptrsize, 0x20)
elif is_arch(Elf.AARCH64) or is_arch(Elf.ARM):
addr = malloc_hook_addr - current_arch.ptrsize*2 - MallocStateStruct("*0").struct_size
else:
raise OSError("Cannot find main_arena for {}".format(current_arch.arch))
__gef_default_main_arena__ = "*0x{:x}".format(addr)
return addr
class MallocStateStruct(object):
"""GEF representation of malloc_state from https://github.com/bminor/glibc/blob/glibc-2.28/malloc/malloc.c#L1658"""
def __init__(self, addr):
try:
self.__addr = to_unsigned_long(gdb.parse_and_eval("&{}".format(addr)))
except gdb.error:
self.__addr = search_for_main_arena()
self.num_fastbins = 10
self.num_bins = 254
self.int_size = cached_lookup_type("int").sizeof
self.size_t = cached_lookup_type("size_t")
if not self.size_t:
ptr_type = "unsigned long" if current_arch.ptrsize == 8 else "unsigned int"
self.size_t = cached_lookup_type(ptr_type)
if get_libc_version() >= (2, 26):
self.fastbin_offset = align_address_to_size(self.int_size*3, 8)
else:
self.fastbin_offset = self.int_size*2
return
# struct offsets
@property
def addr(self):
return self.__addr
@property
def fastbins_addr(self):
return self.__addr + self.fastbin_offset
@property
def top_addr(self):
return self.fastbins_addr + self.num_fastbins*current_arch.ptrsize
@property
def last_remainder_addr(self):
return self.top_addr + current_arch.ptrsize
@property
def bins_addr(self):
return self.last_remainder_addr + current_arch.ptrsize
@property
def next_addr(self):
return self.bins_addr + self.num_bins*current_arch.ptrsize + self.int_size*4
@property
def next_free_addr(self):
return self.next_addr + current_arch.ptrsize
@property
def system_mem_addr(self):
return self.next_free_addr + current_arch.ptrsize*2
@property
def struct_size(self):
return self.system_mem_addr + current_arch.ptrsize*2 - self.__addr
# struct members
@property
def fastbinsY(self):
return self.get_size_t_array(self.fastbins_addr, self.num_fastbins)
@property
def top(self):
return self.get_size_t_pointer(self.top_addr)
@property
def last_remainder(self):
return self.get_size_t_pointer(self.last_remainder_addr)
@property
def bins(self):
return self.get_size_t_array(self.bins_addr, self.num_bins)
@property
def next(self):
return self.get_size_t_pointer(self.next_addr)
@property
def next_free(self):
return self.get_size_t_pointer(self.next_free_addr)
@property
def system_mem(self):
return self.get_size_t(self.system_mem_addr)
# helper methods
def get_size_t(self, addr):
return dereference(addr).cast(self.size_t)
def get_size_t_pointer(self, addr):
size_t_pointer = self.size_t.pointer()
return dereference(addr).cast(size_t_pointer)
def get_size_t_array(self, addr, length):
size_t_array = self.size_t.array(length)
return dereference(addr).cast(size_t_array)
def __getitem__(self, item):
return getattr(self, item)
class GlibcArena:
"""Glibc arena class
Ref: https://github.com/sploitfun/lsploits/blob/master/glibc/malloc/malloc.c#L1671 """
TCACHE_MAX_BINS = 0x40
def __init__(self, addr, name=None):
self.__name = name or __gef_default_main_arena__
try:
arena = gdb.parse_and_eval(addr)
malloc_state_t = cached_lookup_type("struct malloc_state")
self.__arena = arena.cast(malloc_state_t)
self.__addr = long(arena.address)
except:
self.__arena = MallocStateStruct(addr)
self.__addr = self.__arena.addr
return
def __getitem__(self, item):
return self.__arena[item]
def __getattr__(self, item):
return self.__arena[item]
def __int__(self):
return self.__addr
def tcachebin(self, i):
"""Return head chunk in tcache[i]."""
heap_base = HeapBaseFunction.heap_base()
addr = dereference(heap_base + 2*current_arch.ptrsize + self.TCACHE_MAX_BINS + i*current_arch.ptrsize)
if not addr:
return None
return GlibcChunk(long(addr))
def fastbin(self, i):
"""Return head chunk in fastbinsY[i]."""
addr = dereference_as_long(self.fastbinsY[i])
if addr == 0:
return None
return GlibcChunk(addr + 2 * current_arch.ptrsize)
def bin(self, i):
idx = i * 2
fd = dereference_as_long(self.bins[idx])
bw = dereference_as_long(self.bins[idx + 1])
return fd, bw
def get_next(self):
addr_next = dereference_as_long(self.next)
arena_main = GlibcArena(self.__name)
if addr_next == arena_main.__addr:
return None
return GlibcArena("*{:#x} ".format(addr_next))
def __str__(self):
top = dereference_as_long(self.top)
last_remainder = dereference_as_long(self.last_remainder)
n = dereference_as_long(self.next)
nfree = dereference_as_long(self.next_free)
sysmem = long(self.system_mem)
fmt = "Arena (base={:#x}, top={:#x}, last_remainder={:#x}, next={:#x}, next_free={:#x}, system_mem={:#x})"
return fmt.format(self.__addr, top, last_remainder, n, nfree, sysmem)
class GlibcChunk:
"""Glibc chunk class.
Ref: https://sploitfun.wordpress.com/2015/02/10/understanding-glibc-malloc/."""
def __init__(self, addr, from_base=False):
self.ptrsize = current_arch.ptrsize
if from_base:
self.chunk_base_address = addr
self.address = addr + 2 * self.ptrsize
else:
self.chunk_base_address = int(addr - 2 * self.ptrsize)
self.address = addr
self.size_addr = int(self.address - self.ptrsize)
self.prev_size_addr = self.chunk_base_address
return
def get_chunk_size(self):
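        # strip the 3 low-order flag bits (see has_p_bit/has_m_bit/has_n_bit below) to get the real chunk size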
return read_int_from_memory(self.size_addr) & (~0x07)
@property
def size(self):
return self.get_chunk_size()
def get_usable_size(self):
# https://github.com/sploitfun/lsploits/blob/master/glibc/malloc/malloc.c#L4537
cursz = self.get_chunk_size()
if cursz == 0: return cursz
if self.has_m_bit(): return cursz - 2 * self.ptrsize
return cursz - self.ptrsize
@property
def usable_size(self):
return self.get_usable_size()
def get_prev_chunk_size(self):
return read_int_from_memory(self.prev_size_addr)
def get_next_chunk(self):
addr = self.address + self.get_chunk_size()
return GlibcChunk(addr)
# if free-ed functions
def get_fwd_ptr(self):
return read_int_from_memory(self.address)
@property
def fwd(self):
return self.get_fwd_ptr()
fd = fwd # for compat
def get_bkw_ptr(self):
return read_int_from_memory(self.address + self.ptrsize)
@property
def bck(self):
return self.get_bkw_ptr()
bk = bck # for compat
# endif free-ed functions
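    # the three low-order bits of the size field encode PREV_INUSE (0x1), IS_MMAPPED (0x2) and NON_MAIN_ARENA (0x4)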
def has_p_bit(self):
return read_int_from_memory(self.size_addr) & 0x01
def has_m_bit(self):
return read_int_from_memory(self.size_addr) & 0x02
def has_n_bit(self):
return read_int_from_memory(self.size_addr) & 0x04
def is_used(self):
"""Check if the current block is used by:
- checking the M bit is true
- or checking that next chunk PREV_INUSE flag is true """
if self.has_m_bit():
return True
next_chunk = self.get_next_chunk()
        return bool(next_chunk.has_p_bit())
def str_chunk_size_flag(self):
msg = []
msg.append("PREV_INUSE flag: {}".format(Color.greenify("On") if self.has_p_bit() else Color.redify("Off")))
msg.append("IS_MMAPPED flag: {}".format(Color.greenify("On") if self.has_m_bit() else Color.redify("Off")))
msg.append("NON_MAIN_ARENA flag: {}".format(Color.greenify("On") if self.has_n_bit() else Color.redify("Off")))
return "\n".join(msg)
def _str_sizes(self):
msg = []
        size_read = False
        try:
            msg.append("Chunk size: {0:d} ({0:#x})".format(self.get_chunk_size()))
            msg.append("Usable size: {0:d} ({0:#x})".format(self.get_usable_size()))
            size_read = True
        except gdb.MemoryError:
            msg.append("Chunk size: Cannot read at {:#x} (corrupted?)".format(self.size_addr))
        try:
            msg.append("Previous chunk size: {0:d} ({0:#x})".format(self.get_prev_chunk_size()))
            size_read = True
        except gdb.MemoryError:
            msg.append("Previous chunk size: Cannot read at {:#x} (corrupted?)".format(self.chunk_base_address))
        if size_read:
msg.append(self.str_chunk_size_flag())
return "\n".join(msg)
def _str_pointers(self):
fwd = self.address
bkw = self.address + self.ptrsize
msg = []
try:
msg.append("Forward pointer: {0:#x}".format(self.get_fwd_ptr()))
except gdb.MemoryError:
msg.append("Forward pointer: {0:#x} (corrupted?)".format(fwd))
try:
msg.append("Backward pointer: {0:#x}".format(self.get_bkw_ptr()))
except gdb.MemoryError:
msg.append("Backward pointer: {0:#x} (corrupted?)".format(bkw))
return "\n".join(msg)
def str_as_alloced(self):
return self._str_sizes()
def str_as_freed(self):
return "{}\n\n{}".format(self._str_sizes(), self._str_pointers())
def flags_as_string(self):
flags = []
if self.has_p_bit():
flags.append(Color.colorify("PREV_INUSE", "red bold"))
if self.has_m_bit():
flags.append(Color.colorify("IS_MMAPPED", "red bold"))
if self.has_n_bit():
flags.append(Color.colorify("NON_MAIN_ARENA", "red bold"))
return "|".join(flags)
def __str__(self):
msg = "{:s}(addr={:#x}, size={:#x}, flags={:s})".format(Color.colorify("Chunk", "yellow bold underline"),
long(self.address),
self.get_chunk_size(),
self.flags_as_string())
return msg
def psprint(self):
msg = []
msg.append(str(self))
if self.is_used():
msg.append(self.str_as_alloced())
else:
msg.append(self.str_as_freed())
return "\n".join(msg) + "\n"
@lru_cache()
def get_libc_version():
sections = get_process_maps()
try:
for section in sections:
if "libc-" in section.path:
libc_version = tuple(int(_) for _ in
re.search(r"libc-(\d+)\.(\d+)\.so", section.path).groups())
break
else:
libc_version = 0, 0
except AttributeError:
libc_version = 0, 0
return libc_version
@lru_cache()
def get_main_arena():
try:
return GlibcArena(__gef_default_main_arena__)
except Exception as e:
err("Failed to get the main arena, heap commands may not work properly: {}".format(e))
return None
def titlify(text, color=None, msg_color=None):
"""Print a centered title."""
cols = get_terminal_size()[1]
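    # number of horizontal line characters to draw on each side of the title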
nb = (cols - len(text) - 2)//2
if color is None:
color = __config__.get("theme.default_title_line")[0]
if msg_color is None:
msg_color = __config__.get("theme.default_title_message")[0]
msg = []
msg.append(Color.colorify("{} ".format(HORIZONTAL_LINE * nb), color))
msg.append(Color.colorify(text, msg_color))
msg.append(Color.colorify(" {}".format(HORIZONTAL_LINE * nb), color))
return "".join(msg)
def err(msg): return gef_print("{} {}".format(Color.colorify("[!]", "bold red"), msg))
def warn(msg): return gef_print("{} {}".format(Color.colorify("[*]", "bold yellow"), msg))
def ok(msg): return gef_print("{} {}".format(Color.colorify("[+]", "bold green"), msg))
def info(msg): return gef_print("{} {}".format(Color.colorify("[+]", "bold blue"), msg))
def push_context_message(level, message):
"""Push the message to be displayed the next time the context is invoked."""
global __context_messages__
if level not in ("error", "warn", "ok", "info"):
err("Invalid level '{}', discarding message".format(level))
return
__context_messages__.append((level, message))
return
def show_last_exception():
"""Display the last Python exception."""
def _show_code_line(fname, idx):
fname = os.path.expanduser(os.path.expandvars(fname))
__data = open(fname, "r").read().splitlines()
        return __data[idx-1] if 0 < idx <= len(__data) else ""
gef_print("")
exc_type, exc_value, exc_traceback = sys.exc_info()
gef_print(" Exception raised ".center(80, HORIZONTAL_LINE))
gef_print("{}: {}".format(Color.colorify(exc_type.__name__, "bold underline red"), exc_value))
gef_print(" Detailed stacktrace ".center(80, HORIZONTAL_LINE))
for fs in traceback.extract_tb(exc_traceback)[::-1]:
filename, lineno, method, code = fs
if not code or not code.strip():
code = _show_code_line(filename, lineno)
gef_print("""{} File "{}", line {:d}, in {}()""".format(DOWN_ARROW, Color.yellowify(filename),
lineno, Color.greenify(method)))
gef_print(" {} {}".format(RIGHT_ARROW, code))
gef_print(" Last 10 GDB commands ".center(80, HORIZONTAL_LINE))
gdb.execute("show commands")
gef_print(" Runtime environment ".center(80, HORIZONTAL_LINE))
gef_print("* GDB: {}".format(gdb.VERSION))
gef_print("* Python: {:d}.{:d}.{:d} - {:s}".format(sys.version_info.major, sys.version_info.minor,
sys.version_info.micro, sys.version_info.releaselevel))
gef_print("* OS: {:s} - {:s} ({:s}) on {:s}".format(platform.system(), platform.release(),
platform.architecture()[0],
" ".join(platform.dist()))) #pylint: disable=deprecated-method
gef_print(HORIZONTAL_LINE*80)
gef_print("")
return
def gef_pystring(x):
"""Python 2 & 3 compatibility function for strings handling."""
res = str(x, encoding="utf-8") if PYTHON_MAJOR == 3 else x
substs = [("\n","\\n"), ("\r","\\r"), ("\t","\\t"), ("\v","\\v"), ("\b","\\b"), ]
for x,y in substs: res = res.replace(x,y)
return res
def gef_pybytes(x):
"""Python 2 & 3 compatibility function for bytes handling."""
return bytes(str(x), encoding="utf-8") if PYTHON_MAJOR == 3 else x
@lru_cache()
def which(program):
"""Locate a command on the filesystem."""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath = os.path.split(program)[0]
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
raise FileNotFoundError("Missing file `{:s}`".format(program))
def style_byte(b, color=True):
style = {
"nonprintable": "yellow",
"printable": "white",
"00": "gray",
"0a": "blue",
"ff": "green",
}
sbyte = "{:02x}".format(b)
if not color or get_gef_setting("highlight.regex"):
return sbyte
if sbyte in style:
st = style[sbyte]
elif chr(b) in (string.ascii_letters + string.digits + string.punctuation + " "):
st = style.get("printable")
else:
st = style.get("nonprintable")
if st:
sbyte = Color.colorify(sbyte, st)
return sbyte
def hexdump(source, length=0x10, separator=".", show_raw=False, base=0x00):
"""Return the hexdump of `src` argument.
@param source *MUST* be of type bytes or bytearray
@param length is the length of items per line
@param separator is the default character to use if one byte is not printable
@param show_raw if True, do not add the line nor the text translation
@param base is the start address of the block being hexdump
@return a string with the hexdump"""
result = []
align = get_memory_alignment()*2+2 if is_alive() else 18
for i in range(0, len(source), length):
chunk = bytearray(source[i:i + length])
hexa = " ".join([style_byte(b, color=not show_raw) for b in chunk])
if show_raw:
result.append(hexa)
continue
text = "".join([chr(b) if 0x20 <= b < 0x7F else separator for b in chunk])
sym = gdb_get_location_from_symbol(base+i)
sym = "<{:s}+{:04x}>".format(*sym) if sym else ""
result.append("{addr:#0{aw}x} {sym} {data:<{dw}} {text}".format(aw=align,
addr=base+i,
sym=sym,
dw=3*length,
data=hexa,
text=text))
return "\n".join(result)
def is_debug():
"""Check if debug mode is enabled."""
return get_gef_setting("gef.debug") is True
context_hidden = False
def hide_context():
global context_hidden
context_hidden = True
def unhide_context():
global context_hidden
context_hidden = False
def enable_redirect_output(to_file="/dev/null"):
"""Redirect all GDB output to `to_file` parameter. By default, `to_file` redirects to `/dev/null`."""
gdb.execute("set logging overwrite")
gdb.execute("set logging file {:s}".format(to_file))
gdb.execute("set logging redirect on")
gdb.execute("set logging on")
return
def disable_redirect_output():
"""Disable the output redirection, if any."""
gdb.execute("set logging off")
gdb.execute("set logging redirect off")
return
@lru_cache()
def get_gef_setting(name):
"""Read global gef settings.
Return None if not found. A valid config setting can never return None,
but False, 0 or ""."""
global __config__
setting = __config__.get(name, None)
if not setting:
return None
return setting[0]
def set_gef_setting(name, value, _type=None, _desc=None):
"""Set global gef settings.
Raise ValueError if `name` doesn't exist and `type` and `desc`
are not provided."""
global __config__
if name not in __config__:
# create new setting
if _type is None or _desc is None:
raise ValueError("Setting '{}' is undefined, need to provide type and description".format(name))
__config__[name] = [_type(value), _type, _desc]
return
# set existing setting
func = __config__[name][1]
__config__[name][0] = func(value)
reset_all_caches()
return
def gef_makedirs(path, mode=0o755):
"""Recursive mkdir() creation. If successful, return the absolute path of the directory created."""
abspath = os.path.realpath(path)
if os.path.isdir(abspath):
return abspath
if PYTHON_MAJOR == 3:
os.makedirs(path, mode=mode, exist_ok=True) #pylint: disable=unexpected-keyword-arg
else:
try:
os.makedirs(path, mode=mode)
except os.error:
pass
return abspath
@lru_cache()
def gdb_lookup_symbol(sym):
"""Fetch the proper symbol or None if not defined."""
try:
return gdb.decode_line(sym)[1]
except gdb.error:
return None
@lru_cache(maxsize=512)
def gdb_get_location_from_symbol(address):
"""Retrieve the location of the `address` argument from the symbol table.
Return a tuple with the name and offset if found, None otherwise."""
# this is horrible, ugly hack and shitty perf...
# find a *clean* way to get gdb.Location from an address
name = None
sym = gdb.execute("info symbol {:#x}".format(address), to_string=True)
if sym.startswith("No symbol matches"):
return None
i = sym.find(" in section ")
sym = sym[:i].split()
name, offset = sym[0], 0
if len(sym) == 3 and sym[2].isdigit():
offset = int(sym[2])
return name, offset
def gdb_disassemble(start_pc, **kwargs):
"""Disassemble instructions from `start_pc` (Integer). Accepts the following named parameters:
    - `end_pc` (Integer) only instructions whose start address falls in the interval from start_pc to end_pc are returned.
- `count` (Integer) list at most this many disassembled instructions
If `end_pc` and `count` are not provided, the function will behave as if `count=1`.
Return an iterator of Instruction objects
"""
frame = gdb.selected_frame()
arch = frame.architecture()
for insn in arch.disassemble(start_pc, **kwargs):
address = insn["addr"]
asm = insn["asm"].rstrip().split(None, 1)
if len(asm) > 1:
mnemo, operands = asm
operands = operands.split(",")
else:
mnemo, operands = asm[0], []
loc = gdb_get_location_from_symbol(address)
location = "<{}+{}>".format(*loc) if loc else ""
yield Instruction(address, location, mnemo, operands)
def gdb_get_nth_previous_instruction_address(addr, n):
"""Return the address (Integer) of the `n`-th instruction before `addr`."""
# fixed-length ABI
if current_arch.instruction_length:
return addr - n*current_arch.instruction_length
# variable-length ABI
cur_insn_addr = gef_current_instruction(addr).address
# we try to find a good set of previous instructions by "guessing" disassembling backwards
# the 15 comes from the longest instruction valid size
for i in range(15*n, 0, -1):
try:
insns = list(gdb_disassemble(addr-i, end_pc=cur_insn_addr, count=n+1))
except gdb.MemoryError:
# this is because we can hit an unmapped page trying to read backward
break
# 1. check that the disassembled instructions list size is correct
if len(insns)!=n+1: # we expect the current instruction plus the n before it
continue
        # 2. check all instructions are valid
        if not all(insn.is_valid() for insn in insns):
            continue
# 3. if cur_insn is at the end of the set
if insns[-1].address==cur_insn_addr:
return insns[0].address
return None
def gdb_get_nth_next_instruction_address(addr, n):
"""Return the address (Integer) of the `n`-th instruction after `addr`."""
# fixed-length ABI
if current_arch.instruction_length:
return addr + n*current_arch.instruction_length
# variable-length ABI
insn = list(gdb_disassemble(addr, count=n))[-1]
return insn.address
def gef_instruction_n(addr, n):
"""Return the `n`-th instruction after `addr` as an Instruction object."""
return list(gdb_disassemble(addr, count=n+1))[n]
def gef_get_instruction_at(addr):
"""Return the full Instruction found at the specified address."""
insn = next(gef_disassemble(addr, 1))
return insn
def gef_current_instruction(addr):
"""Return the current instruction as an Instruction object."""
return gef_instruction_n(addr, 0)
def gef_next_instruction(addr):
"""Return the next instruction as an Instruction object."""
return gef_instruction_n(addr, 1)
def gef_disassemble(addr, nb_insn, nb_prev=0):
"""Disassemble `nb_insn` instructions after `addr` and `nb_prev` before `addr`.
Return an iterator of Instruction objects."""
count = nb_insn + 1 if nb_insn & 1 else nb_insn
if nb_prev:
start_addr = gdb_get_nth_previous_instruction_address(addr, nb_prev)
if start_addr:
for insn in gdb_disassemble(start_addr, count=nb_prev):
if insn.address == addr: break
yield insn
for insn in gdb_disassemble(addr, count=count):
yield insn
def capstone_disassemble(location, nb_insn, **kwargs):
"""Disassemble `nb_insn` instructions after `addr` and `nb_prev` before
`addr` using the Capstone-Engine disassembler, if available.
Return an iterator of Instruction objects."""
def cs_insn_to_gef_insn(cs_insn):
sym_info = gdb_get_location_from_symbol(cs_insn.address)
loc = "<{}+{}>".format(*sym_info) if sym_info else ""
ops = [] + cs_insn.op_str.split(", ")
return Instruction(cs_insn.address, loc, cs_insn.mnemonic, ops)
capstone = sys.modules["capstone"]
arch, mode = get_capstone_arch(arch=kwargs.get("arch", None), mode=kwargs.get("mode", None), endian=kwargs.get("endian", None))
cs = capstone.Cs(arch, mode)
cs.detail = True
page_start = align_address_to_page(location)
offset = location - page_start
pc = current_arch.pc
skip = int(kwargs.get("skip", 0))
nb_prev = int(kwargs.get("nb_prev", 0))
if nb_prev > 0:
location = gdb_get_nth_previous_instruction_address(pc, nb_prev)
nb_insn += nb_prev
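    # unless a `code` buffer was supplied, read from `location` up to (roughly) the end of its page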
code = kwargs.get("code", read_memory(location, gef_getpagesize() - offset - 1))
code = bytes(code)
for insn in cs.disasm(code, location):
if skip:
skip -= 1
continue
nb_insn -= 1
yield cs_insn_to_gef_insn(insn)
if nb_insn==0:
break
return
def gef_execute_external(command, as_list=False, *args, **kwargs):
"""Execute an external command and return the result."""
res = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=kwargs.get("shell", False))
return [gef_pystring(_) for _ in res.splitlines()] if as_list else gef_pystring(res)
def gef_execute_gdb_script(commands):
"""Execute the parameter `source` as GDB command. This is done by writing `commands` to
a temporary file, which is then executed via GDB `source` command. The tempfile is then deleted."""
fd, fname = tempfile.mkstemp(suffix=".gdb", prefix="gef_")
with os.fdopen(fd, "w") as f:
f.write(commands)
f.flush()
if os.access(fname, os.R_OK):
gdb.execute("source {:s}".format(fname))
os.unlink(fname)
return
@lru_cache(32)
def checksec(filename):
"""Check the security property of the ELF binary. The following properties are:
- Canary
- NX
- PIE
- Fortify
- Partial/Full RelRO.
Return a dict() with the different keys mentioned above, and the boolean
associated whether the protection was found."""
try:
readelf = which("readelf")
except IOError:
err("Missing `readelf`")
return
def __check_security_property(opt, filename, pattern):
cmd = [readelf,]
cmd += opt.split()
cmd += [filename,]
lines = gef_execute_external(cmd, as_list=True)
for line in lines:
if re.search(pattern, line):
return True
return False
results = collections.OrderedDict()
results["Canary"] = __check_security_property("-s", filename, r"__stack_chk_fail") is True
has_gnu_stack = __check_security_property("-W -l", filename, r"GNU_STACK") is True
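    # NX is reported as enabled only when a GNU_STACK segment exists and is not marked RWE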
if has_gnu_stack:
results["NX"] = __check_security_property("-W -l", filename, r"GNU_STACK.*RWE") is False
else:
results["NX"] = False
results["PIE"] = __check_security_property("-h", filename, r":.*EXEC") is False
results["Fortify"] = __check_security_property("-s", filename, r"_chk@GLIBC") is True
results["Partial RelRO"] = __check_security_property("-l", filename, r"GNU_RELRO") is True
results["Full RelRO"] = __check_security_property("-d", filename, r"BIND_NOW") is True
return results
@lru_cache()
def get_arch():
"""Return the binary's architecture."""
if is_alive():
arch = gdb.selected_frame().architecture()
return arch.name()
arch_str = gdb.execute("show architecture", to_string=True).strip()
if "The target architecture is set automatically (currently " in arch_str:
# architecture can be auto detected
arch_str = arch_str.split("(currently ", 1)[1]
arch_str = arch_str.split(")", 1)[0]
elif "The target architecture is assumed to be " in arch_str:
# architecture can be assumed
arch_str = arch_str.replace("The target architecture is assumed to be ", "")
else:
# unknown, we throw an exception to be safe
raise RuntimeError("Unknown architecture: {}".format(arch_str))
return arch_str
@lru_cache()
def get_endian():
"""Return the binary endianness."""
if is_alive():
return get_elf_headers().e_endianness
if gdb.execute("show endian", to_string=True).strip().split()[7] == "little" :
return Elf.LITTLE_ENDIAN
raise EnvironmentError("Invalid endianess")
def is_big_endian(): return get_endian() == Elf.BIG_ENDIAN
def is_little_endian(): return not is_big_endian()
def flags_to_human(reg_value, value_table):
"""Return a human readable string showing the flag states."""
flags = []
for i in value_table:
flag_str = Color.boldify(value_table[i].upper()) if reg_value & (1<<i) else value_table[i].lower()
flags.append(flag_str)
return "[{}]".format(" ".join(flags))
class Architecture(object):
"""Generic metaclass for the architecture supported by GEF."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def all_registers(self): pass
@abc.abstractproperty
def instruction_length(self): pass
@abc.abstractproperty
def nop_insn(self): pass
@abc.abstractproperty
def return_register(self): pass
@abc.abstractproperty
def flag_register(self): pass
@abc.abstractproperty
def flags_table(self): pass
@abc.abstractproperty
def function_parameters(self): pass
@abc.abstractmethod
def flag_register_to_human(self, val=None): pass
@abc.abstractmethod
def is_call(self, insn): pass
@abc.abstractmethod
def is_ret(self, insn): pass
@abc.abstractmethod
def is_conditional_branch(self, insn): pass
@abc.abstractmethod
def is_branch_taken(self, insn): pass
@abc.abstractmethod
def get_ra(self, insn, frame): pass
special_registers = []
@property
def pc(self):
return get_register("$pc")
@property
def sp(self):
return get_register("$sp")
@property
def fp(self):
return get_register("$fp")
@property
def ptrsize(self):
return get_memory_alignment()
def get_ith_parameter(self, i):
"""Retrieves the correct parameter used for the current function call."""
reg = self.function_parameters[i]
val = get_register(reg)
key = reg
return key, val
class RISCV(Architecture):
arch = "RISCV"
mode = "RISCV"
all_registers = ["$zero", "$ra", "$sp", "$gp", "$x4", "$t0", "$t1",
"$t2", "$fp", "$s1", "$a1", "$a2", "$a3", "$a4",
"$a5", "$a6", "$a7", "$s2", "$s3", "$s4", "$s5",
"$s6", "$s7", "$s8", "$s9", "$s10", "$s11", "$t3",
"$t4", "$t5", "$t6",]
return_register = "$a0"
function_parameters = ["$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7"]
syscall_register = "$a7"
syscall_register = "ecall"
nop_insn = b"\x00\x00\x00\x13"
# RISC-V has no flags registers
flag_register = None
flag_register_to_human = None
flags_table = None
@property
def instruction_length(self):
return 4
def is_call(self, insn):
return insn.mnemonic == "call"
def is_ret(self, insn):
mnemo = insn.mnemonic
if mnemo == "ret":
return True
elif (mnemo == "jalr" and insn.operands[0] == "zero" and
insn.operands[1] == "ra" and insn.operands[2] == 0):
return True
elif (mnemo == "c.jalr" and insn.operands[0] == "ra"):
return True
return False
@classmethod
def mprotect_asm(cls, addr, size, perm):
raise OSError("Architecture {:s} not supported yet".format(cls.arch))
def is_conditional_branch(self, insn):
return insn.mnemonic.startswith("b")
def is_branch_taken(self, insn):
def long_to_twos_complement(v):
"""Convert a python long value to its two's complement."""
if is_elf32():
if v & 0x80000000:
return v - 0x100000000
elif is_elf64():
if v & 0x8000000000000000:
return v - 0x10000000000000000
else:
raise OSError("RISC-V: ELF file is not ELF32 or ELF64. This is not currently supported")
return v
mnemo = insn.mnemonic
condition = mnemo[1:]
if condition.endswith("z"):
# r2 is the zero register if we are comparing to 0
rs1 = get_register(insn.operands[0])
rs2 = get_register("$zero")
condition = condition[:-1]
elif len(insn.operands) > 2:
# r2 is populated with the second operand
rs1 = get_register(insn.operands[0])
rs2 = get_register(insn.operands[1])
else:
raise OSError("RISC-V: Failed to get rs1 and rs2 for instruction: `{}`".format(insn))
# If the conditional operation is not unsigned, convert the python long into
# its two's complement
if not condition.endswith("u"):
rs2 = long_to_twos_complement(rs2)
rs1 = long_to_twos_complement(rs1)
else:
condition = condition[:-1]
if condition == "eq":
if rs1 == rs2: taken, reason = True, "{}={}".format(rs1, rs2)
else: taken, reason = False, "{}!={}".format(rs1, rs2)
elif condition == "ne":
if rs1 != rs2: taken, reason = True, "{}!={}".format(rs1, rs2)
else: taken, reason = False, "{}={}".format(rs1, rs2)
elif condition == "lt":
if rs1 < rs2: taken, reason = True, "{}<{}".format(rs1, rs2)
else: taken, reason = False, "{}>={}".format(rs1, rs2)
elif condition == "ge":
if rs1 < rs2: taken, reason = True, "{}>={}".format(rs1, rs2)
else: taken, reason = False, "{}<{}".format(rs1, rs2)
else:
raise OSError("RISC-V: Conditional instruction `{:s}` not supported yet".format(insn))
return taken, reason
def get_ra(self, insn, frame):
ra = None
if self.is_ret(insn):
ra = get_register("$ra")
elif frame.older():
ra = frame.older().pc()
return ra
class ARM(Architecture):
arch = "ARM"
all_registers = ["$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6",
"$r7", "$r8", "$r9", "$r10", "$r11", "$r12", "$sp",
"$lr", "$pc", "$cpsr",]
# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0041c/Caccegih.html
# return b"\x00\x00\xa0\xe1" # mov r0,r0
nop_insn = b"\x01\x10\xa0\xe1" # mov r1,r1
return_register = "$r0"
flag_register = "$cpsr"
flags_table = {
31: "negative",
30: "zero",
29: "carry",
28: "overflow",
7: "interrupt",
6: "fast",
5: "thumb"
}
function_parameters = ["$r0", "$r1", "$r2", "$r3"]
syscall_register = "$r7"
syscall_instructions = ["swi 0x0", "swi NR"]
@lru_cache()
def is_thumb(self):
"""Determine if the machine is currently in THUMB mode."""
return is_alive() and get_register("$cpsr") & (1<<5)
@property
def pc(self):
pc = get_register("$pc")
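        # report an odd $pc in Thumb state (ARM interworking convention) so downstream consumers pick the right mode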
if self.is_thumb():
pc += 1
return pc
@property
def mode(self):
return "THUMB" if self.is_thumb() else "ARM"
@property
def instruction_length(self):
# Thumb instructions have variable-length (2 or 4-byte)
return None if self.is_thumb() else 4
def is_call(self, insn):
mnemo = insn.mnemonic
call_mnemos = {"bl", "blx"}
return mnemo in call_mnemos
def is_ret(self, insn):
pop_mnemos = {"pop"}
branch_mnemos = {"bl", "bx"}
write_mnemos = {"ldr", "add"}
if insn.mnemonic in pop_mnemos:
return insn.operands[-1] == " pc}"
if insn.mnemonic in branch_mnemos:
return insn.operands[-1] == "lr"
if insn.mnemonic in write_mnemos:
return insn.operands[0] == "pc"
return
def flag_register_to_human(self, val=None):
# http://www.botskool.com/user-pages/tutorials/electronics/arm-7-tutorial-part-1
if val is None:
reg = self.flag_register
val = get_register(reg)
return flags_to_human(val, self.flags_table)
def is_conditional_branch(self, insn):
conditions = {"eq", "ne", "lt", "le", "gt", "ge", "vs", "vc", "mi", "pl", "hi", "ls"}
return insn.mnemonic[-2:] in conditions
def is_branch_taken(self, insn):
mnemo = insn.mnemonic
# ref: http://www.davespace.co.uk/arm/introduction-to-arm/conditional.html
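        # invert flags_table: map flag name -> bit position in the flag register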
flags = dict((self.flags_table[k], k) for k in self.flags_table)
val = get_register(self.flag_register)
taken, reason = False, ""
if mnemo.endswith("eq"): taken, reason = bool(val&(1<<flags["zero"])), "Z"
elif mnemo.endswith("ne"): taken, reason = not val&(1<<flags["zero"]), "!Z"
elif mnemo.endswith("lt"):
taken, reason = bool(val&(1<<flags["negative"])) != bool(val&(1<<flags["overflow"])), "N!=V"
elif mnemo.endswith("le"):
taken, reason = val&(1<<flags["zero"]) or \
bool(val&(1<<flags["negative"])) != bool(val&(1<<flags["overflow"])), "Z || N!=V"
elif mnemo.endswith("gt"):
taken, reason = val&(1<<flags["zero"]) == 0 and \
bool(val&(1<<flags["negative"])) == bool(val&(1<<flags["overflow"])), "!Z && N==V"
elif mnemo.endswith("ge"):
taken, reason = bool(val&(1<<flags["negative"])) == bool(val&(1<<flags["overflow"])), "N==V"
elif mnemo.endswith("vs"): taken, reason = bool(val&(1<<flags["overflow"])), "V"
elif mnemo.endswith("vc"): taken, reason = not val&(1<<flags["overflow"]), "!V"
elif mnemo.endswith("mi"):
taken, reason = bool(val&(1<<flags["negative"])), "N"
elif mnemo.endswith("pl"):
taken, reason = not val&(1<<flags["negative"]), "N==0"
elif mnemo.endswith("hi"):
taken, reason = val&(1<<flags["carry"]) and not val&(1<<flags["zero"]), "C && !Z"
elif mnemo.endswith("ls"):
taken, reason = not val&(1<<flags["carry"]) or val&(1<<flags["zero"]), "!C || Z"
return taken, reason
def get_ra(self, insn, frame):
ra = None
if self.is_ret(insn):
# If it's a pop, we have to peek into the stack, otherwise use lr
if insn.mnemonic == "pop":
ra_addr = current_arch.sp + (len(insn.operands)-1) * get_memory_alignment()
ra = to_unsigned_long(dereference(ra_addr))
elif insn.mnemonic == "ldr":
return to_unsigned_long(dereference(current_arch.sp))
else: # 'bx lr' or 'add pc, lr, #0'
return get_register("$lr")
elif frame.older():
ra = frame.older().pc()
return ra
@classmethod
def mprotect_asm(cls, addr, size, perm):
_NR_mprotect = 125
insns = [
"push {r0-r2, r7}",
"mov r0, {:d}".format(addr),
"mov r1, {:d}".format(size),
"mov r2, {:d}".format(perm),
"mov r7, {:d}".format(_NR_mprotect),
"svc 0",
"pop {r0-r2, r7}",]
return "; ".join(insns)
class AARCH64(ARM):
arch = "ARM64"
mode = "ARM"
all_registers = [
"$x0", "$x1", "$x2", "$x3", "$x4", "$x5", "$x6", "$x7",
"$x8", "$x9", "$x10", "$x11", "$x12", "$x13", "$x14", "$x15",
"$x16", "$x17", "$x18", "$x19", "$x20", "$x21", "$x22", "$x23",
"$x24", "$x25", "$x26", "$x27", "$x28", "$x29", "$x30", "$sp",
"$pc", "$cpsr", "$fpsr", "$fpcr",]
return_register = "$x0"
flag_register = "$cpsr"
flags_table = {
31: "negative",
30: "zero",
29: "carry",
28: "overflow",
7: "interrupt",
6: "fast"
}
function_parameters = ["$x0", "$x1", "$x2", "$x3", "$x4", "$x5", "$x6", "$x7"]
syscall_register = "$x8"
syscall_instructions = ["svc $x0"]
def is_call(self, insn):
mnemo = insn.mnemonic
call_mnemos = {"bl", "blr"}
return mnemo in call_mnemos
def flag_register_to_human(self, val=None):
# http://events.linuxfoundation.org/sites/events/files/slides/KoreaLinuxForum-2014.pdf
reg = self.flag_register
if not val:
val = get_register(reg)
return flags_to_human(val, self.flags_table)
@classmethod
def mprotect_asm(cls, addr, size, perm):
raise OSError("Architecture {:s} not supported yet".format(cls.arch))
def is_conditional_branch(self, insn):
# https://www.element14.com/community/servlet/JiveServlet/previewBody/41836-102-1-229511/ARM.Reference_Manual.pdf
# sect. 5.1.1
mnemo = insn.mnemonic
branch_mnemos = {"cbnz", "cbz", "tbnz", "tbz"}
return mnemo.startswith("b.") or mnemo in branch_mnemos
def is_branch_taken(self, insn):
mnemo, operands = insn.mnemonic, insn.operands
flags = dict((self.flags_table[k], k) for k in self.flags_table)
val = get_register(self.flag_register)
taken, reason = False, ""
if mnemo in {"cbnz", "cbz", "tbnz", "tbz"}:
reg = operands[0]
op = get_register(reg)
if mnemo=="cbnz":
if op!=0: taken, reason = True, "{}!=0".format(reg)
else: taken, reason = False, "{}==0".format(reg)
elif mnemo=="cbz":
if op==0: taken, reason = True, "{}==0".format(reg)
else: taken, reason = False, "{}!=0".format(reg)
elif mnemo=="tbnz":
# operands[1] has one or more white spaces in front, then a #, then the number
# so we need to eliminate them
i = int(operands[1].strip().lstrip("#"))
if (op & 1<<i) != 0: taken, reason = True, "{}&1<<{}!=0".format(reg,i)
else: taken, reason = False, "{}&1<<{}==0".format(reg,i)
elif mnemo=="tbz":
# operands[1] has one or more white spaces in front, then a #, then the number
# so we need to eliminate them
i = int(operands[1].strip().lstrip("#"))
if (op & 1<<i) == 0: taken, reason = True, "{}&1<<{}==0".format(reg,i)
else: taken, reason = False, "{}&1<<{}!=0".format(reg,i)
if not reason:
taken, reason = super(AARCH64, self).is_branch_taken(insn)
return taken, reason
class X86(Architecture):
arch = "X86"
mode = "32"
nop_insn = b"\x90"
flag_register = "$eflags"
special_registers = ["$cs", "$ss", "$ds", "$es", "$fs", "$gs", ]
gpr_registers = ["$eax", "$ebx", "$ecx", "$edx", "$esp", "$ebp", "$esi", "$edi", "$eip", ]
all_registers = gpr_registers + [ flag_register, ] + special_registers
instruction_length = None
return_register = "$eax"
function_parameters = ["$esp", ]
flags_table = {
6: "zero",
0: "carry",
2: "parity",
4: "adjust",
7: "sign",
8: "trap",
9: "interrupt",
10: "direction",
11: "overflow",
16: "resume",
17: "virtualx86",
21: "identification",
}
syscall_register = "$eax"
syscall_instructions = ["sysenter", "int 0x80"]
def flag_register_to_human(self, val=None):
reg = self.flag_register
if not val:
val = get_register(reg)
return flags_to_human(val, self.flags_table)
def is_call(self, insn):
mnemo = insn.mnemonic
call_mnemos = {"call", "callq"}
return mnemo in call_mnemos
def is_ret(self, insn):
return insn.mnemonic == "ret"
def is_conditional_branch(self, insn):
mnemo = insn.mnemonic
branch_mnemos = {
"ja", "jnbe", "jae", "jnb", "jnc", "jb", "jc", "jnae", "jbe", "jna",
"jcxz", "jecxz", "jrcxz", "je", "jz", "jg", "jnle", "jge", "jnl",
"jl", "jnge", "jle", "jng", "jne", "jnz", "jno", "jnp", "jpo", "jns",
"jo", "jp", "jpe", "js"
}
return mnemo in branch_mnemos
def is_branch_taken(self, insn):
mnemo = insn.mnemonic
# all kudos to fG! (https://github.com/gdbinit/Gdbinit/blob/master/gdbinit#L1654)
flags = dict((self.flags_table[k], k) for k in self.flags_table)
val = get_register(self.flag_register)
taken, reason = False, ""
if mnemo in ("ja", "jnbe"):
taken, reason = not val&(1<<flags["carry"]) and not val&(1<<flags["zero"]), "!C && !Z"
elif mnemo in ("jae", "jnb", "jnc"):
taken, reason = not val&(1<<flags["carry"]), "!C"
elif mnemo in ("jb", "jc", "jnae"):
taken, reason = val&(1<<flags["carry"]), "C"
elif mnemo in ("jbe", "jna"):
taken, reason = val&(1<<flags["carry"]) or val&(1<<flags["zero"]), "C || Z"
elif mnemo in ("jcxz", "jecxz", "jrcxz"):
cx = get_register("$rcx") if self.mode == 64 else get_register("$ecx")
taken, reason = cx == 0, "!$CX"
elif mnemo in ("je", "jz"):
taken, reason = val&(1<<flags["zero"]), "Z"
elif mnemo in ("jne", "jnz"):
taken, reason = not val&(1<<flags["zero"]), "!Z"
elif mnemo in ("jg", "jnle"):
taken, reason = not val&(1<<flags["zero"]) and bool(val&(1<<flags["overflow"])) == bool(val&(1<<flags["sign"])), "!Z && S==O"
elif mnemo in ("jge", "jnl"):
taken, reason = bool(val&(1<<flags["sign"])) == bool(val&(1<<flags["overflow"])), "S==O"
elif mnemo in ("jl", "jnge"):
taken, reason = val&(1<<flags["overflow"]) != val&(1<<flags["sign"]), "S!=O"
elif mnemo in ("jle", "jng"):
taken, reason = val&(1<<flags["zero"]) or bool(val&(1<<flags["overflow"])) != bool(val&(1<<flags["sign"])), "Z || S!=O"
elif mnemo in ("jo",):
taken, reason = val&(1<<flags["overflow"]), "O"
elif mnemo in ("jno",):
taken, reason = not val&(1<<flags["overflow"]), "!O"
elif mnemo in ("jpe", "jp"):
taken, reason = val&(1<<flags["parity"]), "P"
elif mnemo in ("jnp", "jpo"):
taken, reason = not val&(1<<flags["parity"]), "!P"
elif mnemo in ("js",):
taken, reason = val&(1<<flags["sign"]), "S"
elif mnemo in ("jns",):
taken, reason = not val&(1<<flags["sign"]), "!S"
return taken, reason
def get_ra(self, insn, frame):
ra = None
if self.is_ret(insn):
ra = to_unsigned_long(dereference(current_arch.sp))
if frame.older():
ra = frame.older().pc()
return ra
@classmethod
def mprotect_asm(cls, addr, size, perm):
_NR_mprotect = 125
insns = [
"pushad",
"mov eax, {:d}".format(_NR_mprotect),
"mov ebx, {:d}".format(addr),
"mov ecx, {:d}".format(size),
"mov edx, {:d}".format(perm),
"int 0x80",
"popad",]
return "; ".join(insns)
def get_ith_parameter(self, i):
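        # 32-bit x86 cdecl-style convention: parameters are read off the stack, one machine word each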
sp = current_arch.sp
sz = current_arch.ptrsize
loc = sp + (i * sz)
val = read_int_from_memory(loc)
key = "[sp + {:#x}]".format(i * sz)
return key, val
class X86_64(X86):
arch = "X86"
mode = "64"
gpr_registers = [
"$rax", "$rbx", "$rcx", "$rdx", "$rsp", "$rbp", "$rsi", "$rdi", "$rip",
"$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", ]
all_registers = gpr_registers + [ X86.flag_register, ] + X86.special_registers
return_register = "$rax"
function_parameters = ["$rdi", "$rsi", "$rdx", "$rcx", "$r8", "$r9"]
syscall_register = "$rax"
syscall_instructions = ["syscall"]
# We don't want to inherit x86's stack based param getter
get_ith_parameter = Architecture.get_ith_parameter
@classmethod
def mprotect_asm(cls, addr, size, perm):
_NR_mprotect = 10
insns = ["push rax", "push rdi", "push rsi", "push rdx",
"mov rax, {:d}".format(_NR_mprotect),
"mov rdi, {:d}".format(addr),
"mov rsi, {:d}".format(size),
"mov rdx, {:d}".format(perm),
"syscall",
"pop rdx", "pop rsi", "pop rdi", "pop rax"]
return "; ".join(insns)
class PowerPC(Architecture):
arch = "PPC"
mode = "PPC32"
all_registers = [
"$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7",
"$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15",
"$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23",
"$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31",
"$pc", "$msr", "$cr", "$lr", "$ctr", "$xer", "$trap",]
instruction_length = 4
nop_insn = b"\x60\x00\x00\x00" # http://www.ibm.com/developerworks/library/l-ppc/index.html
return_register = "$r0"
flag_register = "$cr"
flags_table = {
3: "negative[0]",
2: "positive[0]",
1: "equal[0]",
0: "overflow[0]",
# cr7
31: "less[7]",
30: "greater[7]",
29: "equal[7]",
28: "overflow[7]",
}
function_parameters = ["$i0", "$i1", "$i2", "$i3", "$i4", "$i5"]
syscall_register = "$r0"
syscall_instructions = ["sc"]
def flag_register_to_human(self, val=None):
# http://www.cebix.net/downloads/bebox/pem32b.pdf (% 2.1.3)
if not val:
reg = self.flag_register
val = get_register(reg)
return flags_to_human(val, self.flags_table)
def is_call(self, insn):
return False
def is_ret(self, insn):
return insn.mnemonic == "blr"
def is_conditional_branch(self, insn):
mnemo = insn.mnemonic
branch_mnemos = {"beq", "bne", "ble", "blt", "bgt", "bge"}
return mnemo in branch_mnemos
def is_branch_taken(self, insn):
mnemo = insn.mnemonic
flags = dict((self.flags_table[k], k) for k in self.flags_table)
val = get_register(self.flag_register)
taken, reason = False, ""
if mnemo == "beq": taken, reason = val&(1<<flags["equal[7]"]), "E"
elif mnemo == "bne": taken, reason = val&(1<<flags["equal[7]"]) == 0, "!E"
elif mnemo == "ble": taken, reason = val&(1<<flags["equal[7]"]) or val&(1<<flags["less[7]"]), "E || L"
elif mnemo == "blt": taken, reason = val&(1<<flags["less[7]"]), "L"
elif mnemo == "bge": taken, reason = val&(1<<flags["equal[7]"]) or val&(1<<flags["greater[7]"]), "E || G"
elif mnemo == "bgt": taken, reason = val&(1<<flags["greater[7]"]), "G"
return taken, reason
def get_ra(self, insn, frame):
ra = None
if self.is_ret(insn):
ra = get_register("$lr")
elif frame.older():
ra = frame.older().pc()
return ra
@classmethod
def mprotect_asm(cls, addr, size, perm):
# Ref: http://www.ibm.com/developerworks/library/l-ppc/index.html
_NR_mprotect = 125
insns = ["addi 1, 1, -16", # 1 = r1 = sp
"stw 0, 0(1)", "stw 3, 4(1)", # r0 = syscall_code | r3, r4, r5 = args
"stw 4, 8(1)", "stw 5, 12(1)",
"li 0, {:d}".format(_NR_mprotect),
"lis 3, {:#x}@h".format(addr),
"ori 3, 3, {:#x}@l".format(addr),
"lis 4, {:#x}@h".format(size),
"ori 4, 4, {:#x}@l".format(size),
"li 5, {:d}".format(perm),
"sc",
"lwz 0, 0(1)", "lwz 3, 4(1)",
"lwz 4, 8(1)", "lwz 5, 12(1)",
"addi 1, 1, 16",]
return ";".join(insns)
class PowerPC64(PowerPC):
arch = "PPC"
mode = "PPC64"
class SPARC(Architecture):
""" Refs:
- http://www.cse.scu.edu/~atkinson/teaching/sp05/259/sparc.pdf
"""
arch = "SPARC"
mode = ""
all_registers = [
"$g0", "$g1", "$g2", "$g3", "$g4", "$g5", "$g6", "$g7",
"$o0", "$o1", "$o2", "$o3", "$o4", "$o5", "$o7",
"$l0", "$l1", "$l2", "$l3", "$l4", "$l5", "$l6", "$l7",
"$i0", "$i1", "$i2", "$i3", "$i4", "$i5", "$i7",
"$pc", "$npc","$sp ","$fp ","$psr",]
instruction_length = 4
nop_insn = b"\x00\x00\x00\x00" # sethi 0, %g0
return_register = "$i0"
flag_register = "$psr"
flags_table = {
23: "negative",
22: "zero",
21: "overflow",
20: "carry",
7: "supervisor",
5: "trap",
}
function_parameters = ["$o0 ", "$o1 ", "$o2 ", "$o3 ", "$o4 ", "$o5 ", "$o7 ",]
syscall_register = "%g1"
syscall_instructions = ["t 0x10"]
def flag_register_to_human(self, val=None):
# http://www.gaisler.com/doc/sparcv8.pdf
reg = self.flag_register
if not val:
val = get_register(reg)
return flags_to_human(val, self.flags_table)
def is_call(self, insn):
return False
def is_ret(self, insn):
# TODO: rett?
return insn.mnemonic == "ret"
def is_conditional_branch(self, insn):
mnemo = insn.mnemonic
# http://moss.csc.ncsu.edu/~mueller/codeopt/codeopt00/notes/condbranch.html
branch_mnemos = {
"be", "bne", "bg", "bge", "bgeu", "bgu", "bl", "ble", "blu", "bleu",
"bneg", "bpos", "bvs", "bvc", "bcs", "bcc"
}
return mnemo in branch_mnemos
def is_branch_taken(self, insn):
mnemo = insn.mnemonic
flags = dict((self.flags_table[k], k) for k in self.flags_table)
val = get_register(self.flag_register)
taken, reason = False, ""
if mnemo == "be": taken, reason = val&(1<<flags["zero"]), "Z"
elif mnemo == "bne": taken, reason = val&(1<<flags["zero"]) == 0, "!Z"
elif mnemo == "bg": taken, reason = val&(1<<flags["zero"]) == 0 and (val&(1<<flags["negative"]) == 0 or val&(1<<flags["overflow"]) == 0), "!Z && (!N || !O)"
elif mnemo == "bge": taken, reason = val&(1<<flags["negative"]) == 0 or val&(1<<flags["overflow"]) == 0, "!N || !O"
elif mnemo == "bgu": taken, reason = val&(1<<flags["carry"]) == 0 and val&(1<<flags["zero"]) == 0, "!C && !Z"
elif mnemo == "bgeu": taken, reason = val&(1<<flags["carry"]) == 0, "!C"
elif mnemo == "bl": taken, reason = val&(1<<flags["negative"]) and val&(1<<flags["overflow"]), "N && O"
elif mnemo == "blu": taken, reason = val&(1<<flags["carry"]), "C"
elif mnemo == "ble": taken, reason = val&(1<<flags["zero"]) or (val&(1<<flags["negative"]) or val&(1<<flags["overflow"])), "Z || (N || O)"
elif mnemo == "bleu": taken, reason = val&(1<<flags["carry"]) or val&(1<<flags["zero"]), "C || Z"
elif mnemo == "bneg": taken, reason = val&(1<<flags["negative"]), "N"
elif mnemo == "bpos": taken, reason = val&(1<<flags["negative"]) == 0, "!N"
elif mnemo == "bvs": taken, reason = val&(1<<flags["overflow"]), "O"
elif mnemo == "bvc": taken, reason = val&(1<<flags["overflow"]) == 0, "!O"
elif mnemo == "bcs": taken, reason = val&(1<<flags["carry"]), "C"
elif mnemo == "bcc": taken, reason = val&(1<<flags["carry"]) == 0, "!C"
return taken, reason
def get_ra(self, insn, frame):
ra = None
if self.is_ret(insn):
ra = get_register("$o7")
elif frame.older():
ra = frame.older().pc()
return ra
@classmethod
def mprotect_asm(cls, addr, size, perm):
hi = (addr & 0xffff0000) >> 16
lo = (addr & 0x0000ffff)
_NR_mprotect = 125
insns = ["add %sp, -16, %sp",
"st %g1, [ %sp ]", "st %o0, [ %sp + 4 ]",
"st %o1, [ %sp + 8 ]", "st %o2, [ %sp + 12 ]",
"sethi %hi({}), %o0".format(hi),
"or %o0, {}, %o0".format(lo),
"clr %o1",
"clr %o2",
"mov {}, %g1".format(_NR_mprotect),
"t 0x10",
"ld [ %sp ], %g1", "ld [ %sp + 4 ], %o0",
"ld [ %sp + 8 ], %o1", "ld [ %sp + 12 ], %o2",
"add %sp, 16, %sp",]
return "; ".join(insns)
class SPARC64(SPARC):
""" Refs:
- http://math-atlas.sourceforge.net/devel/assembly/abi_sysV_sparc.pdf
- https://cr.yp.to/2005-590/sparcv9.pdf
"""
arch = "SPARC"
mode = "V9"
all_registers = [
"$g0", "$g1", "$g2", "$g3", "$g4", "$g5", "$g6", "$g7",
"$o0", "$o1", "$o2", "$o3", "$o4", "$o5", "$o7",
"$l0", "$l1", "$l2", "$l3", "$l4", "$l5", "$l6", "$l7",
"$i0", "$i1", "$i2", "$i3", "$i4", "$i5", "$i7",
"$pc", "$npc", "$sp", "$fp", "$state", ]
flag_register = "$state" # sparcv9.pdf, 5.1.5.1 (ccr)
flags_table = {
35: "negative",
34: "zero",
33: "overflow",
32: "carry",
}
syscall_instructions = ["t 0x6d"]
@classmethod
def mprotect_asm(cls, addr, size, perm):
hi = (addr & 0xffff0000) >> 16
lo = (addr & 0x0000ffff)
_NR_mprotect = 125
insns = ["add %sp, -16, %sp",
"st %g1, [ %sp ]", "st %o0, [ %sp + 4 ]",
"st %o1, [ %sp + 8 ]", "st %o2, [ %sp + 12 ]",
"sethi %hi({}), %o0".format(hi),
"or %o0, {}, %o0".format(lo),
"clr %o1",
"clr %o2",
"mov {}, %g1".format(_NR_mprotect),
"t 0x6d",
"ld [ %sp ], %g1", "ld [ %sp + 4 ], %o0",
"ld [ %sp + 8 ], %o1", "ld [ %sp + 12 ], %o2",
"add %sp, 16, %sp",]
return "; ".join(insns)
class MIPS(Architecture):
arch = "MIPS"
mode = "MIPS32"
# http://vhouten.home.xs4all.nl/mipsel/r3000-isa.html
all_registers = [
"$zero", "$at", "$v0", "$v1", "$a0", "$a1", "$a2", "$a3",
"$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
"$s0", "$s1", "$s2", "$s3", "$s4", "$s5", "$s6", "$s7",
"$t8", "$t9", "$k0", "$k1", "$s8", "$pc", "$sp", "$hi",
"$lo", "$fir", "$ra", "$gp", ]
instruction_length = 4
nop_insn = b"\x00\x00\x00\x00" # sll $0,$0,0
return_register = "$v0"
flag_register = "$fcsr"
flags_table = {}
function_parameters = ["$a0", "$a1", "$a2", "$a3"]
syscall_register = "$v0"
syscall_instructions = ["syscall"]
def flag_register_to_human(self, val=None):
return Color.colorify("No flag register", "yellow underline")
def is_call(self, insn):
return False
def is_ret(self, insn):
return insn.mnemonic == "jr" and insn.operands[0] == "ra"
def is_conditional_branch(self, insn):
mnemo = insn.mnemonic
branch_mnemos = {"beq", "bne", "beqz", "bnez", "bgtz", "bgez", "bltz", "blez"}
return mnemo in branch_mnemos
def is_branch_taken(self, insn):
mnemo, ops = insn.mnemonic, insn.operands
taken, reason = False, ""
if mnemo == "beq":
taken, reason = get_register(ops[0]) == get_register(ops[1]), "{0[0]} == {0[1]}".format(ops)
elif mnemo == "bne":
taken, reason = get_register(ops[0]) != get_register(ops[1]), "{0[0]} != {0[1]}".format(ops)
elif mnemo == "beqz":
taken, reason = get_register(ops[0]) == 0, "{0[0]} == 0".format(ops)
elif mnemo == "bnez":
taken, reason = get_register(ops[0]) != 0, "{0[0]} != 0".format(ops)
elif mnemo == "bgtz":
taken, reason = get_register(ops[0]) > 0, "{0[0]} > 0".format(ops)
elif mnemo == "bgez":
taken, reason = get_register(ops[0]) >= 0, "{0[0]} >= 0".format(ops)
elif mnemo == "bltz":
taken, reason = get_register(ops[0]) < 0, "{0[0]} < 0".format(ops)
elif mnemo == "blez":
taken, reason = get_register(ops[0]) <= 0, "{0[0]} <= 0".format(ops)
return taken, reason
def get_ra(self, insn, frame):
ra = None
if self.is_ret(insn):
ra = get_register("$ra")
elif frame.older():
ra = frame.older().pc()
return ra
@classmethod
def mprotect_asm(cls, addr, size, perm):
_NR_mprotect = 4125
insns = ["addi $sp, $sp, -16",
"sw $v0, 0($sp)", "sw $a0, 4($sp)",
"sw $a3, 8($sp)", "sw $a3, 12($sp)",
"li $v0, {:d}".format(_NR_mprotect),
"li $a0, {:d}".format(addr),
"li $a1, {:d}".format(size),
"li $a2, {:d}".format(perm),
"syscall",
"lw $v0, 0($sp)", "lw $a1, 4($sp)",
"lw $a3, 8($sp)", "lw $a3, 12($sp)",
"addi $sp, $sp, 16",]
return "; ".join(insns)
def write_memory(address, buffer, length=0x10):
"""Write `buffer` at address `address`."""
if PYTHON_MAJOR == 2: buffer = str(buffer)
return gdb.selected_inferior().write_memory(address, buffer, length)
def read_memory(addr, length=0x10):
"""Return a `length` long byte array with the copy of the process memory at `addr`."""
if PYTHON_MAJOR == 2:
return gdb.selected_inferior().read_memory(addr, length)
return gdb.selected_inferior().read_memory(addr, length).tobytes()
def read_int_from_memory(addr):
"""Return an integer read from memory."""
sz = current_arch.ptrsize
mem = read_memory(addr, sz)
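    # "I" unpacks a 4-byte unsigned integer, "Q" an 8-byte one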
fmt = "{}{}".format(endian_str(), "I" if sz==4 else "Q")
return struct.unpack(fmt, mem)[0]
def read_cstring_from_memory(address, max_length=GEF_MAX_STRING_LENGTH, encoding=None):
"""Return a C-string read from memory."""
if not encoding:
encoding = "unicode_escape" if PYTHON_MAJOR==3 else "ascii"
char_ptr = cached_lookup_type("char").pointer()
try:
res = gdb.Value(address).cast(char_ptr).string(encoding=encoding).strip()
except gdb.error:
length = min(address|(DEFAULT_PAGE_SIZE-1), max_length+1)
mem = bytes(read_memory(address, length)).decode("utf-8")
res = mem.split("\x00", 1)[0]
ustr = res.replace("\n","\\n").replace("\r","\\r").replace("\t","\\t")
if max_length and len(res) > max_length:
return "{}[...]".format(ustr[:max_length])
return ustr
def read_ascii_string(address):
"""Read an ASCII string from memory"""
cstr = read_cstring_from_memory(address)
if isinstance(cstr, unicode) and cstr and all([x in string.printable for x in cstr]):
return cstr
return None
def is_ascii_string(address):
"""Helper function to determine if the buffer pointed by `address` is an ASCII string (in GDB)"""
try:
return read_ascii_string(address) is not None
except Exception:
return False
def is_alive():
"""Check if GDB is running."""
try:
return gdb.selected_inferior().pid > 0
except Exception:
return False
return False
def only_if_gdb_running(f):
"""Decorator wrapper to check if GDB is running."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if is_alive():
return f(*args, **kwargs)
else:
warn("No debugging session active")
return wrapper
def only_if_gdb_target_local(f):
"""Decorator wrapper to check if GDB is running locally (target not remote)."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
if not is_remote_debug():
return f(*args, **kwargs)
else:
warn("This command cannot work for remote sessions.")
return wrapper
def experimental_feature(f):
"""Decorator to add a warning when a feature is experimental."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
warn("This feature is under development, expect bugs and unstability...")
return f(*args, **kwargs)
return wrapper
def only_if_gdb_version_higher_than(required_gdb_version):
"""Decorator to check whether current GDB version requirements."""
def wrapper(f):
def inner_f(*args, **kwargs):
if GDB_VERSION >= required_gdb_version:
f(*args, **kwargs)
else:
reason = "GDB >= {} for this command".format(required_gdb_version)
raise EnvironmentError(reason)
return inner_f
return wrapper
def use_stdtype():
if is_elf32(): return "uint32_t"
elif is_elf64(): return "uint64_t"
return "uint16_t"
def use_default_type():
if is_elf32(): return "unsigned int"
elif is_elf64(): return "unsigned long"
return "unsigned short"
def use_golang_type():
if is_elf32(): return "uint32"
elif is_elf64(): return "uint64"
return "uint16"
def to_unsigned_long(v):
"""Cast a gdb.Value to unsigned long."""
mask = (1 << 64) - 1
return int(v.cast(gdb.Value(mask).type)) & mask
def get_register(regname):
"""Return a register's value."""
try:
value = gdb.parse_and_eval(regname)
return to_unsigned_long(value) if value.type.code == gdb.TYPE_CODE_INT else long(value)
except gdb.error:
value = gdb.selected_frame().read_register(regname)
return long(value)
def get_path_from_info_proc():
for x in gdb.execute("info proc", to_string=True).splitlines():
if x.startswith("exe = "):
return x.split(" = ")[1].replace("'", "")
return None
@lru_cache()
def get_os():
"""Return the current OS."""
return platform.system().lower()
@lru_cache()
def get_pid():
"""Return the PID of the debuggee process."""
return gdb.selected_inferior().pid
@lru_cache()
def get_filepath():
"""Return the local absolute path of the file currently debugged."""
filename = gdb.current_progspace().filename
if is_remote_debug():
# if no filename specified, try downloading target from /proc
if filename is None:
pid = get_pid()
if pid > 0:
return download_file("/proc/{:d}/exe".format(pid), use_cache=True)
return None
# if target is remote file, download
elif filename.startswith("target:"):
fname = filename[len("target:"):]
return download_file(fname, use_cache=True, local_name=fname)
elif __gef_remote__ is not None:
return "/tmp/gef/{:d}/{:s}".format(__gef_remote__, get_path_from_info_proc())
return filename
else:
if filename is not None:
return filename
# inferior probably did not have name, extract cmdline from info proc
return get_path_from_info_proc()
@lru_cache()
def get_filename():
"""Return the full filename of the file currently debugged."""
return os.path.basename(get_filepath())
def download_file(target, use_cache=False, local_name=None):
"""Download filename `target` inside the mirror tree inside the GEF_TEMP_DIR.
The tree architecture must be GEF_TEMP_DIR/gef/<local_pid>/<remote_filepath>.
This allow a "chroot-like" tree format."""
try:
local_root = os.path.sep.join([GEF_TEMP_DIR, str(get_pid())])
if local_name is None:
local_path = os.path.sep.join([local_root, os.path.dirname(target)])
local_name = os.path.sep.join([local_path, os.path.basename(target)])
else:
local_path = os.path.sep.join([local_root, os.path.dirname(local_name)])
local_name = os.path.sep.join([local_path, os.path.basename(local_name)])
if use_cache and os.access(local_name, os.R_OK):
return local_name
gef_makedirs(local_path)
gdb.execute("remote get {0:s} {1:s}".format(target, local_name))
except gdb.error:
# gdb-stub compat
with open(local_name, "w") as f:
if is_elf32():
f.write("00000000-ffffffff rwxp 00000000 00:00 0 {}\n".format(get_filepath()))
else:
f.write("0000000000000000-ffffffffffffffff rwxp 00000000 00:00 0 {}\n".format(get_filepath()))
except Exception as e:
err("download_file() failed: {}".format(str(e)))
local_name = None
return local_name
def open_file(path, use_cache=False):
"""Attempt to open the given file, if remote debugging is active, download
it first to the mirror in /tmp/."""
if is_remote_debug():
lpath = download_file(path, use_cache)
if not lpath:
raise IOError("cannot open remote path {:s}".format(path))
path = lpath
return open(path, "r")
def get_function_length(sym):
"""Attempt to get the length of the raw bytes of a function."""
dis = gdb.execute("disassemble {:s}".format(sym), to_string=True).splitlines()
start_addr = int(dis[1].split()[0], 16)
end_addr = int(dis[-2].split()[0], 16)
return end_addr - start_addr
def get_process_maps_linux(proc_map_file):
"""Parse the Linux process `/proc/pid/maps` file."""
for line in open_file(proc_map_file, use_cache=False):
line = line.strip()
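        # /proc/<pid>/maps columns: address range, perms, offset, dev, inode [, pathname]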
addr, perm, off, _, rest = line.split(" ", 4)
rest = rest.split(" ", 1)
if len(rest) == 1:
inode = rest[0]
pathname = ""
else:
inode = rest[0]
pathname = rest[1].lstrip()
addr_start, addr_end = list(map(lambda x: long(x, 16), addr.split("-")))
off = long(off, 16)
perm = Permission.from_process_maps(perm)
yield Section(page_start=addr_start,
page_end=addr_end,
offset=off,
permission=perm,
inode=inode,
path=pathname)
return
@lru_cache()
def get_process_maps():
"""Parse the `/proc/pid/maps` file."""
sections = []
try:
pid = get_pid()
fpath = "/proc/{:d}/maps".format(pid)
sections = get_process_maps_linux(fpath)
return list(sections)
except FileNotFoundError as e:
warn("Failed to read /proc/<PID>/maps, using GDB sections info: {}".format(e))
return list(get_info_sections())
@lru_cache()
def get_info_sections():
"""Retrieve the debuggee sections."""
stream = StringIO(gdb.execute("maintenance info sections", to_string=True))
for line in stream:
if not line:
break
try:
parts = [x.strip() for x in line.split()]
addr_start, addr_end = [long(x, 16) for x in parts[1].split("->")]
off = long(parts[3][:-1], 16)
path = parts[4]
inode = ""
perm = Permission.from_info_sections(parts[5:])
yield Section(page_start=addr_start,
page_end=addr_end,
offset=off,
permission=perm,
inode=inode,
path=path)
except IndexError:
continue
except ValueError:
continue
return
@lru_cache()
def get_info_files():
"""Retrieve all the files loaded by debuggee."""
lines = gdb.execute("info files", to_string=True).splitlines()
if len(lines) < len(__infos_files__):
return __infos_files__
for line in lines:
line = line.strip()
if not line:
break
if not line.startswith("0x"):
continue
blobs = [x.strip() for x in line.split(" ")]
addr_start = long(blobs[0], 16)
addr_end = long(blobs[2], 16)
section_name = blobs[4]
if len(blobs) == 7:
filename = blobs[6]
else:
filename = get_filepath()
info = Zone(section_name, addr_start, addr_end, filename)
__infos_files__.append(info)
return __infos_files__
def process_lookup_address(address):
"""Look up for an address in memory.
Return an Address object if found, None otherwise."""
if not is_alive():
err("Process is not running")
return None
if is_x86() :
if is_in_x86_kernel(address):
return None
for sect in get_process_maps():
if sect.page_start <= address < sect.page_end:
return sect
return None
def process_lookup_path(name, perm=Permission.ALL):
"""Look up for a path in the process memory mapping.
Return a Section object if found, None otherwise."""
if not is_alive():
err("Process is not running")
return None
for sect in get_process_maps():
if name in sect.path and sect.permission.value & perm:
return sect
return None
def file_lookup_name_path(name, path):
"""Look up a file by name and path.
Return a Zone object if found, None otherwise."""
for xfile in get_info_files():
if path == xfile.filename and name == xfile.name:
return xfile
return None
def file_lookup_address(address):
"""Look up for a file by its address.
Return a Zone object if found, None otherwise."""
for info in get_info_files():
if info.zone_start <= address < info.zone_end:
return info
return None
def lookup_address(address):
"""Try to find the address in the process address space.
Return an Address object, with validity flag set based on success."""
sect = process_lookup_address(address)
info = file_lookup_address(address)
if sect is None and info is None:
# i.e. there is no info on this address
return Address(value=address, valid=False)
return Address(value=address, section=sect, info=info)
def xor(data, key):
"""Return `data` xor-ed with `key`."""
key = key.lstrip("0x")
key = binascii.unhexlify(key)
if PYTHON_MAJOR == 2:
return b"".join([chr(ord(x) ^ ord(y)) for x, y in zip(data, itertools.cycle(key))])
return bytearray([x ^ y for x, y in zip(data, itertools.cycle(key))])
def is_hex(pattern):
"""Return whether provided string is a hexadecimal value."""
if not pattern.startswith("0x") and not pattern.startswith("0X"):
return False
return len(pattern)%2==0 and all(c in string.hexdigits for c in pattern[2:])
def ida_synchronize_handler(event):
gdb.execute("ida-interact sync", from_tty=True)
return
def continue_handler(event):
"""GDB event handler for new object continue cases."""
return
def hook_stop_handler(event):
"""GDB event handler for stop cases."""
reset_all_caches()
gdb.execute("context")
return
def new_objfile_handler(event):
"""GDB event handler for new object file cases."""
reset_all_caches()
set_arch()
return
def exit_handler(event):
"""GDB event handler for exit cases."""
global __gef_remote__, __gef_qemu_mode__
reset_all_caches()
__gef_qemu_mode__ = False
if __gef_remote__ and get_gef_setting("gef-remote.clean_on_exit") is True:
shutil.rmtree("/tmp/gef/{:d}".format(__gef_remote__))
__gef_remote__ = None
return
def get_terminal_size():
"""Return the current terminal size."""
if is_debug():
return 600, 100
try:
cmd = struct.unpack("hh", fcntl.ioctl(1, termios.TIOCGWINSZ, "1234"))
tty_rows, tty_columns = int(cmd[0]), int(cmd[1])
return tty_rows, tty_columns
except OSError:
return 600, 100
def get_generic_arch(module, prefix, arch, mode, big_endian, to_string=False):
"""
Retrieves architecture and mode from the arguments for use for the holy
{cap,key}stone/unicorn trinity.
"""
if to_string:
arch = "{:s}.{:s}_ARCH_{:s}".format(module.__name__, prefix, arch)
if mode:
mode = "{:s}.{:s}_MODE_{:s}".format(module.__name__, prefix, str(mode))
else:
mode = ""
        if big_endian:
mode += " + {:s}.{:s}_MODE_BIG_ENDIAN".format(module.__name__, prefix)
else:
mode += " + {:s}.{:s}_MODE_LITTLE_ENDIAN".format(module.__name__, prefix)
else:
arch = getattr(module, "{:s}_ARCH_{:s}".format(prefix, arch))
if mode:
mode = getattr(module, "{:s}_MODE_{:s}".format(prefix, mode))
else:
mode = 0
if big_endian:
mode |= getattr(module, "{:s}_MODE_BIG_ENDIAN".format(prefix))
else:
mode |= getattr(module, "{:s}_MODE_LITTLE_ENDIAN".format(prefix))
return arch, mode
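# Illustrative sketch (assuming the capstone module is importable): for an x86-64
# little-endian target, get_generic_arch(capstone, "CS", "X86", "64", False) resolves to
# (capstone.CS_ARCH_X86, capstone.CS_MODE_64 | capstone.CS_MODE_LITTLE_ENDIAN).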
def get_generic_running_arch(module, prefix, to_string=False):
"""
Retrieves architecture and mode from the current context.
"""
if not is_alive():
return None, None
if current_arch is not None:
arch, mode = current_arch.arch, current_arch.mode
else:
raise OSError("Emulation not supported for your OS")
return get_generic_arch(module, prefix, arch, mode, is_big_endian(), to_string)
def get_unicorn_arch(arch=None, mode=None, endian=None, to_string=False):
unicorn = sys.modules["unicorn"]
if (arch, mode, endian) == (None,None,None):
return get_generic_running_arch(unicorn, "UC", to_string)
return get_generic_arch(unicorn, "UC", arch, mode, endian, to_string)
def get_capstone_arch(arch=None, mode=None, endian=None, to_string=False):
capstone = sys.modules["capstone"]
# hacky patch to unify capstone/ppc syntax with keystone & unicorn:
# CS_MODE_PPC32 does not exist (but UC_MODE_32 & KS_MODE_32 do)
if is_arch(Elf.POWERPC64):
raise OSError("Capstone not supported for PPC64 yet.")
if is_alive() and is_arch(Elf.POWERPC):
arch = "PPC"
mode = "32"
endian = is_big_endian()
return get_generic_arch(capstone, "CS",
arch or current_arch.arch,
mode or current_arch.mode,
endian or is_big_endian(),
to_string)
if (arch, mode, endian) == (None,None,None):
return get_generic_running_arch(capstone, "CS", to_string)
return get_generic_arch(capstone, "CS",
arch or current_arch.arch,
mode or current_arch.mode,
endian or is_big_endian(),
to_string)
def get_keystone_arch(arch=None, mode=None, endian=None, to_string=False):
keystone = sys.modules["keystone"]
if (arch, mode, endian) == (None,None,None):
return get_generic_running_arch(keystone, "KS", to_string)
return get_generic_arch(keystone, "KS", arch, mode, endian, to_string)
def get_unicorn_registers(to_string=False):
"Return a dict matching the Unicorn identifier for a specific register."
unicorn = sys.modules["unicorn"]
regs = {}
if current_arch is not None:
arch = current_arch.arch.lower()
else:
raise OSError("Oops")
const = getattr(unicorn, "{}_const".format(arch))
for reg in current_arch.all_registers:
regname = "UC_{:s}_REG_{:s}".format(arch.upper(), reg[1:].upper())
if to_string:
regs[reg] = "{:s}.{:s}".format(const.__name__, regname)
else:
regs[reg] = getattr(const, regname)
return regs
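# Illustrative sketch (assuming an x86-64 debuggee and an importable unicorn module): the
# returned dict maps GDB register names to Unicorn constants, e.g. regs["$rax"] is
# unicorn.x86_const.UC_X86_REG_RAX, or the string "unicorn.x86_const.UC_X86_REG_RAX"
# when to_string=True.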
def keystone_assemble(code, arch, mode, *args, **kwargs):
"""Assembly encoding function based on keystone."""
keystone = sys.modules["keystone"]
code = gef_pybytes(code)
addr = kwargs.get("addr", 0x1000)
try:
ks = keystone.Ks(arch, mode)
enc, cnt = ks.asm(code, addr)
except keystone.KsError as e:
err("Keystone assembler error: {:s}".format(str(e)))
return None
if cnt==0:
return ""
enc = bytearray(enc)
if "raw" not in kwargs:
s = binascii.hexlify(enc)
enc = b"\\x" + b"\\x".join([s[i:i + 2] for i in range(0, len(s), 2)])
enc = enc.decode("utf-8")
return enc
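# Illustrative sketch (assuming the keystone module is importable): assembling a single
# x86-64 NOP with keystone_assemble("nop", keystone.KS_ARCH_X86, keystone.KS_MODE_64)
# returns the printable escape string "\x90"; passing raw=True yields the raw bytearray.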
@lru_cache()
def get_elf_headers(filename=None):
"""Return an Elf object with info from `filename`. If not provided, will return
the currently debugged file."""
if filename is None:
filename = get_filepath()
if filename.startswith("target:"):
warn("Your file is remote, you should try using `gef-remote` instead")
return
return Elf(filename)
@lru_cache()
def is_elf64(filename=None):
"""Checks if `filename` is an ELF64."""
elf = current_elf or get_elf_headers(filename)
return elf.e_class == Elf.ELF_64_BITS
@lru_cache()
def is_elf32(filename=None):
"""Checks if `filename` is an ELF32."""
elf = current_elf or get_elf_headers(filename)
return elf.e_class == Elf.ELF_32_BITS
@lru_cache()
def is_x86_64(filename=None):
"""Checks if `filename` is an x86-64 ELF."""
elf = current_elf or get_elf_headers(filename)
return elf.e_machine == Elf.X86_64
@lru_cache()
def is_x86_32(filename=None):
"""Checks if `filename` is an x86-32 ELF."""
elf = current_elf or get_elf_headers(filename)
return elf.e_machine == Elf.X86_32
@lru_cache()
def is_x86(filename=None):
return is_x86_32(filename) or is_x86_64(filename)
@lru_cache()
def is_arch(arch):
elf = current_elf or get_elf_headers()
return elf.e_machine == arch
def set_arch(arch=None, default=None):
"""Sets the current architecture.
If an arch is explicitly specified, use that one, otherwise try to parse it
out of the ELF header. If that fails, and default is specified, select and
set that arch.
Return the selected arch, or raise an OSError.
"""
arches = {
"ARM": ARM, Elf.ARM: ARM,
"AARCH64": AARCH64, "ARM64": AARCH64, Elf.AARCH64: AARCH64,
"X86": X86, Elf.X86_32: X86,
"X86_64": X86_64, Elf.X86_64: X86_64,
"PowerPC": PowerPC, "PPC": PowerPC, Elf.POWERPC: PowerPC,
"PowerPC64": PowerPC64, "PPC64": PowerPC64, Elf.POWERPC64: PowerPC64,
"RISCV": RISCV, Elf.RISCV: RISCV,
"SPARC": SPARC, Elf.SPARC: SPARC,
"SPARC64": SPARC64, Elf.SPARC64: SPARC64,
"MIPS": MIPS, Elf.MIPS: MIPS,
}
global current_arch, current_elf
if arch:
try:
current_arch = arches[arch.upper()]()
return current_arch
except KeyError:
raise OSError("Specified arch {:s} is not supported".format(arch.upper()))
current_elf = current_elf or get_elf_headers()
try:
current_arch = arches[current_elf.e_machine]()
except KeyError:
if default:
try:
current_arch = arches[default.upper()]()
except KeyError:
raise OSError("CPU not supported, neither is default {:s}".format(default.upper()))
else:
raise OSError("CPU type is currently not supported: {:s}".format(get_arch()))
return current_arch
@lru_cache()
def cached_lookup_type(_type):
try:
return gdb.lookup_type(_type).strip_typedefs()
except RuntimeError:
return None
@lru_cache()
def get_memory_alignment(in_bits=False):
"""Try to determine the size of a pointer on this system.
First, try to parse it out of the ELF header.
Next, use the size of `size_t`.
Finally, try the size of $pc.
If `in_bits` is set to True, the result is returned in bits, otherwise in
bytes."""
if is_elf32():
return 4 if not in_bits else 32
elif is_elf64():
return 8 if not in_bits else 64
res = cached_lookup_type("size_t")
if res is not None:
return res.sizeof if not in_bits else res.sizeof * 8
try:
return gdb.parse_and_eval("$pc").type.sizeof
except:
pass
raise EnvironmentError("GEF is running under an unsupported mode")
def clear_screen(tty=""):
"""Clear the screen."""
if not tty:
gdb.execute("shell clear")
return
with open(tty, "w") as f:
f.write("\x1b[H\x1b[J")
return
def format_address(addr):
"""Format the address according to its size."""
memalign_size = get_memory_alignment()
addr = align_address(addr)
if memalign_size == 4:
return "0x{:08x}".format(addr)
return "0x{:016x}".format(addr)
def format_address_spaces(addr, left=True):
"""Format the address according to its size, but with spaces instead of zeroes."""
width = get_memory_alignment() * 2 + 2
addr = align_address(addr)
if not left:
return "0x{:x}".format(addr).rjust(width)
return "0x{:x}".format(addr).ljust(width)
def align_address(address):
"""Align the provided address to the process's native length."""
if get_memory_alignment() == 4:
return address & 0xFFFFFFFF
return address & 0xFFFFFFFFFFFFFFFF
def align_address_to_size(address, align):
"""Align the address to the given size."""
return address + ((align - (address % align)) % align)
def align_address_to_page(address):
"""Align the address to a page."""
a = align_address(address) >> DEFAULT_PAGE_ALIGN_SHIFT
return a << DEFAULT_PAGE_ALIGN_SHIFT
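# For example (illustrative, not part of GEF): align_address_to_size(0x1003, 0x10) rounds
# up to 0x1010, while align_address_to_page(0x7ffff7dd1123) masks the low bits and, with
# the usual 4 KiB pages (DEFAULT_PAGE_ALIGN_SHIFT == 12), yields 0x7ffff7dd1000.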
def parse_address(address):
"""Parse an address and return it as an Integer."""
if is_hex(address):
return long(address, 16)
return to_unsigned_long(gdb.parse_and_eval(address))
def is_in_x86_kernel(address):
address = align_address(address)
memalign = get_memory_alignment(in_bits=True) - 1
return (address >> memalign) == 0xF
@lru_cache()
def endian_str():
elf = current_elf or get_elf_headers()
return "<" if elf.e_endianness == Elf.LITTLE_ENDIAN else ">"
@lru_cache()
def is_remote_debug():
""""Return True is the current debugging session is running through GDB remote session."""
return __gef_remote__ is not None or "remote" in gdb.execute("maintenance print target-stack", to_string=True)
def de_bruijn(alphabet, n):
"""De Bruijn sequence for alphabet and subsequences of length n (for compat. w/ pwnlib)."""
k = len(alphabet)
a = [0] * k * n
def db(t, p):
if t > n:
if n % p == 0:
for j in range(1, p + 1):
yield alphabet[a[j]]
else:
a[t] = a[t - p]
for c in db(t + 1, p):
yield c
for j in range(a[t - p] + 1, k):
a[t] = j
for c in db(t + 1, t):
yield c
return db(1,1)
def generate_cyclic_pattern(length):
"""Create a `length` byte bytearray of a de Bruijn cyclic pattern."""
charset = bytearray(b"abcdefghijklmnopqrstuvwxyz")
cycle = get_memory_alignment()
res = bytearray()
for i, c in enumerate(de_bruijn(charset, cycle)):
if i == length:
break
res.append(c)
return res
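# Illustrative sketch (assuming a 64-bit debuggee, i.e. get_memory_alignment() == 8):
# generate_cyclic_pattern(20) yields bytearray(b"aaaaaaaabaaaaaaacaaa"), a de Bruijn
# pattern in which every 8-byte window is unique, handy to locate offsets after a crash.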
def safe_parse_and_eval(value):
"""GEF wrapper for gdb.parse_and_eval(): this function returns None instead of raising
gdb.error if the eval failed."""
try:
return gdb.parse_and_eval(value)
except gdb.error:
return None
def dereference(addr):
"""GEF wrapper for gdb dereference function."""
try:
ulong_t = cached_lookup_type(use_stdtype()) or \
cached_lookup_type(use_default_type()) or \
cached_lookup_type(use_golang_type())
unsigned_long_type = ulong_t.pointer()
res = gdb.Value(addr).cast(unsigned_long_type).dereference()
# GDB does lazy fetch by default so we need to force access to the value
res.fetch_lazy()
return res
except gdb.MemoryError:
pass
return None
def dereference_as_long(addr):
derefed = dereference(addr)
return long(derefed.address) if derefed is not None else 0
def gef_convenience(value):
"""Defines a new convenience value."""
global __gef_convenience_vars_index__
var_name = "$_gef{:d}".format(__gef_convenience_vars_index__)
__gef_convenience_vars_index__ += 1
gdb.execute("""set {:s} = "{:s}" """.format(var_name, value))
return var_name
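# For instance (illustrative, assuming the global counter starts at zero): the first call
# gef_convenience("hello") defines the GDB convenience variable $_gef0 holding the string
# "hello" and returns the name "$_gef0".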
def parse_string_range(s):
"""Parses an address range (e.g. 0x400000-0x401000)"""
addrs = s.split("-")
return map(lambda x: int(x, 16), addrs)
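# For instance (illustrative, not part of GEF): list(parse_string_range("0x400000-0x401000"))
# evaluates to [0x400000, 0x401000]; note that on Python 3 the returned map object is lazy
# and must be consumed.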
@lru_cache()
def gef_get_auxiliary_values():
"""Retrieves the auxiliary values of the current execution. Returns None if not running, or a dict()
of values."""
if not is_alive():
return None
res = {}
for line in gdb.execute("info auxv", to_string=True).splitlines():
tmp = line.split()
_type = tmp[1]
if _type in ("AT_PLATFORM", "AT_EXECFN"):
idx = line[:-1].rfind('"') - 1
tmp = line[:idx].split()
res[_type] = int(tmp[-1], base=0)
return res
def gef_read_canary():
"""Read the canary of a running process using Auxiliary Vector. Return a tuple of (canary, location)
if found, None otherwise."""
auxval = gef_get_auxiliary_values()
if not auxval:
return None
canary_location = auxval["AT_RANDOM"]
canary = read_int_from_memory(canary_location)
canary &= ~0xff
return canary, canary_location
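# Quick reference (illustrative, for common glibc/little-endian targets): the canary is
# derived from the first pointer-sized chunk of AT_RANDOM's 16 random bytes, with its
# least significant byte zeroed as a NUL guard, hence the `canary &= ~0xff` above.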
def gef_get_pie_breakpoint(num):
global __pie_breakpoints__
return __pie_breakpoints__[num]
@lru_cache()
def gef_getpagesize():
"""Get the page size from auxiliary values."""
auxval = gef_get_auxiliary_values()
if not auxval:
return DEFAULT_PAGE_SIZE
return auxval["AT_PAGESZ"]
def only_if_events_supported(event_type):
"""Checks if GDB supports events without crashing."""
def wrap(f):
def wrapped_f(*args, **kwargs):
if getattr(gdb, "events") and getattr(gdb.events, event_type):
return f(*args, **kwargs)
warn("GDB events cannot be set")
return wrapped_f
return wrap
#
# Event hooking
#
@only_if_events_supported("cont")
def gef_on_continue_hook(func): return gdb.events.cont.connect(func)
@only_if_events_supported("cont")
def gef_on_continue_unhook(func): return gdb.events.cont.disconnect(func)
@only_if_events_supported("stop")
def gef_on_stop_hook(func): return gdb.events.stop.connect(func)
@only_if_events_supported("stop")
def gef_on_stop_unhook(func): return gdb.events.stop.disconnect(func)
@only_if_events_supported("exited")
def gef_on_exit_hook(func): return gdb.events.exited.connect(func)
@only_if_events_supported("exited")
def gef_on_exit_unhook(func): return gdb.events.exited.disconnect(func)
@only_if_events_supported("new_objfile")
def gef_on_new_hook(func): return gdb.events.new_objfile.connect(func)
@only_if_events_supported("new_objfile")
def gef_on_new_unhook(func): return gdb.events.new_objfile.disconnect(func)
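# Illustrative sketch (hypothetical handler, not part of GEF): any callable taking the GDB
# event object can be (un)registered through these helpers, e.g.:
#   def my_stop_handler(event): warn("stopped at {:#x}".format(current_arch.pc))
#   gef_on_stop_hook(my_stop_handler)     # start receiving stop events
#   gef_on_stop_unhook(my_stop_handler)   # stop receiving them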
#
# Virtual breakpoints
#
class PieVirtualBreakpoint(object):
"""PIE virtual breakpoint (not real breakpoint)."""
def __init__(self, set_func, vbp_num, addr):
# set_func(base): given a base address return a
# set breakpoint gdb command string
self.set_func = set_func
self.vbp_num = vbp_num
# breakpoint num, 0 represents not instantiated yet
self.bp_num = 0
self.bp_addr = 0
# this address might be a symbol, just to know where to break
if isinstance(addr, int):
self.addr = hex(addr)
else:
self.addr = addr
def instantiate(self, base):
if self.bp_num:
self.destroy()
try:
res = gdb.execute(self.set_func(base), to_string=True)
except gdb.error as e:
err(e)
return
if "Breakpoint" not in res:
err(res)
return
res_list = res.split()
# Breakpoint (no) at (addr)
self.bp_num = res_list[1]
self.bp_addr = res_list[3]
def destroy(self):
if not self.bp_num:
err("Destroy PIE breakpoint not even set")
return
gdb.execute("delete {}".format(self.bp_num))
self.bp_num = 0
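# Illustrative sketch (hypothetical values): a PIE virtual breakpoint stores a closure such
# as `lambda base: "b *{}".format(base + 0x1234)` and only turns into a real GDB breakpoint
# once instantiate(base) is called with the binary's load address.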
#
# Breakpoints
#
class FormatStringBreakpoint(gdb.Breakpoint):
"""Inspect stack for format string."""
def __init__(self, spec, num_args):
super(FormatStringBreakpoint, self).__init__(spec, type=gdb.BP_BREAKPOINT, internal=False)
self.num_args = num_args
self.enabled = True
return
def stop(self):
msg = []
ptr, addr = current_arch.get_ith_parameter(self.num_args)
addr = lookup_address(addr)
if not addr.valid:
return False
if addr.section.permission.value & Permission.WRITE:
content = read_cstring_from_memory(addr.value)
name = addr.info.name if addr.info else addr.section.path
msg.append(Color.colorify("Format string helper", "yellow bold"))
msg.append("Possible insecure format string: {:s}('{:s}' {:s} {:#x}: '{:s}')".format(self.location, ptr, RIGHT_ARROW, addr.value, content))
msg.append("Reason: Call to '{:s}()' with format string argument in position "
"#{:d} is in page {:#x} ({:s}) that has write permission".format(self.location, self.num_args, addr.section.page_start, name))
push_context_message("warn", "\n".join(msg))
return True
return False
class StubBreakpoint(gdb.Breakpoint):
"""Create a breakpoint to permanently disable a call (fork/alarm/signal/etc.)."""
def __init__(self, func, retval):
super(StubBreakpoint, self).__init__(func, gdb.BP_BREAKPOINT, internal=False)
self.func = func
self.retval = retval
m = "All calls to '{:s}' will be skipped".format(self.func)
if self.retval is not None:
m += " (with return value set to {:#x})".format(self.retval)
info(m)
return
def stop(self):
m = "Ignoring call to '{:s}' ".format(self.func)
m+= "(setting return value to {:#x})".format(self.retval)
gdb.execute("return (unsigned int){:#x}".format(self.retval))
ok(m)
return False
class ChangePermissionBreakpoint(gdb.Breakpoint):
"""When hit, this temporary breakpoint will restore the original code, and position
$pc correctly."""
def __init__(self, loc, code, pc):
super(ChangePermissionBreakpoint, self).__init__(loc, gdb.BP_BREAKPOINT, internal=False)
self.original_code = code
self.original_pc = pc
return
def stop(self):
info("Restoring original context")
write_memory(self.original_pc, self.original_code, len(self.original_code))
info("Restoring $pc")
gdb.execute("set $pc = {:#x}".format(self.original_pc))
return True
class TraceMallocBreakpoint(gdb.Breakpoint):
"""Track allocations done with malloc() or calloc()."""
def __init__(self, name):
super(TraceMallocBreakpoint, self).__init__(name, gdb.BP_BREAKPOINT, internal=True)
self.silent = True
self.name = name
return
def stop(self):
_, size = current_arch.get_ith_parameter(0)
self.retbp = TraceMallocRetBreakpoint(size, self.name)
return False
class TraceMallocRetBreakpoint(gdb.FinishBreakpoint):
"""Internal temporary breakpoint to retrieve the return value of malloc()."""
def __init__(self, size, name):
super(TraceMallocRetBreakpoint, self).__init__(gdb.newest_frame(), internal=True)
self.size = size
self.name = name
self.silent = True
return
def stop(self):
global __heap_uaf_watchpoints__, __heap_freed_list__, __heap_allocated_list__
if self.return_value:
loc = long(self.return_value)
else:
loc = to_unsigned_long(gdb.parse_and_eval(current_arch.return_register))
size = self.size
ok("{} - {}({})={:#x}".format(Color.colorify("Heap-Analysis", "yellow bold"), self.name, size, loc))
check_heap_overlap = get_gef_setting("heap-analysis-helper.check_heap_overlap")
# pop from free-ed list if it was in it
if __heap_freed_list__:
idx = 0
for item in __heap_freed_list__:
addr = item[0]
if addr==loc:
__heap_freed_list__.remove(item)
continue
idx+=1
# pop from uaf watchlist
if __heap_uaf_watchpoints__:
idx = 0
for wp in __heap_uaf_watchpoints__:
wp_addr = wp.address
if loc <= wp_addr < loc+size:
__heap_uaf_watchpoints__.remove(wp)
wp.enabled = False
continue
idx+=1
item = (loc, size)
if check_heap_overlap:
# seek all the currently allocated chunks, read their effective size and check for overlap
msg = []
align = get_memory_alignment()
for chunk_addr, _ in __heap_allocated_list__:
current_chunk = GlibcChunk(chunk_addr)
current_chunk_size = current_chunk.get_chunk_size()
if chunk_addr <= loc < chunk_addr + current_chunk_size:
offset = loc - chunk_addr - 2*align
if offset < 0: continue # false positive, discard
msg.append(Color.colorify("Heap-Analysis", "yellow bold"))
msg.append("Possible heap overlap detected")
msg.append("Reason {} new allocated chunk {:#x} (of size {:d}) overlaps in-used chunk {:#x} (of size {:#x})".format(RIGHT_ARROW, loc, size, chunk_addr, current_chunk_size))
msg.append("Writing {0:d} bytes from {1:#x} will reach chunk {2:#x}".format(offset, chunk_addr, loc))
msg.append("Payload example for chunk {1:#x} (to overwrite {0:#x} headers):".format(loc, chunk_addr))
msg.append(" data = 'A'*{0:d} + 'B'*{1:d} + 'C'*{1:d}".format(offset, align))
push_context_message("warn", "\n".join(msg))
return True
# add it to alloc-ed list
__heap_allocated_list__.append(item)
return False
class TraceReallocBreakpoint(gdb.Breakpoint):
"""Track re-allocations done with realloc()."""
def __init__(self):
super(TraceReallocBreakpoint, self).__init__("__libc_realloc", gdb.BP_BREAKPOINT, internal=True)
self.silent = True
return
def stop(self):
_, ptr = current_arch.get_ith_parameter(0)
_, size = current_arch.get_ith_parameter(1)
self.retbp = TraceReallocRetBreakpoint(ptr, size)
return False
class TraceReallocRetBreakpoint(gdb.FinishBreakpoint):
"""Internal temporary breakpoint to retrieve the return value of realloc()."""
def __init__(self, ptr, size):
super(TraceReallocRetBreakpoint, self).__init__(gdb.newest_frame(), internal=True)
self.ptr = ptr
self.size = size
self.silent = True
return
def stop(self):
global __heap_uaf_watchpoints__, __heap_freed_list__, __heap_allocated_list__
if self.return_value:
newloc = long(self.return_value)
else:
newloc = to_unsigned_long(gdb.parse_and_eval(current_arch.return_register))
        if newloc != self.ptr:
ok("{} - realloc({:#x}, {})={}".format(Color.colorify("Heap-Analysis", "yellow bold"),
self.ptr, self.size,
Color.colorify("{:#x}".format(newloc), "green"),))
else:
ok("{} - realloc({:#x}, {})={}".format(Color.colorify("Heap-Analysis", "yellow bold"),
self.ptr, self.size,
Color.colorify("{:#x}".format(newloc), "red"),))
item = (newloc, self.size)
try:
# check if item was in alloc-ed list
idx = [x for x,y in __heap_allocated_list__].index(self.ptr)
# if so pop it out
item = __heap_allocated_list__.pop(idx)
except ValueError:
if is_debug():
warn("Chunk {:#x} was not in tracking list".format(self.ptr))
finally:
# add new item to alloc-ed list
__heap_allocated_list__.append(item)
return False
class TraceFreeBreakpoint(gdb.Breakpoint):
"""Track calls to free() and attempts to detect inconsistencies."""
def __init__(self):
super(TraceFreeBreakpoint, self).__init__("__libc_free", gdb.BP_BREAKPOINT, internal=True)
self.silent = True
return
def stop(self):
_, addr = current_arch.get_ith_parameter(0)
msg = []
check_free_null = get_gef_setting("heap-analysis-helper.check_free_null")
check_double_free = get_gef_setting("heap-analysis-helper.check_double_free")
check_weird_free = get_gef_setting("heap-analysis-helper.check_weird_free")
check_uaf = get_gef_setting("heap-analysis-helper.check_uaf")
ok("{} - free({:#x})".format(Color.colorify("Heap-Analysis", "yellow bold"), addr))
if addr==0:
if check_free_null:
msg.append(Color.colorify("Heap-Analysis", "yellow bold"))
msg.append("Attempting to free(NULL) at {:#x}".format(current_arch.pc))
msg.append("Reason: if NULL page is allocatable, this can lead to code execution.")
push_context_message("warn", "\n".join(msg))
return True
return False
if addr in [x for (x,y) in __heap_freed_list__]:
if check_double_free:
msg.append(Color.colorify("Heap-Analysis", "yellow bold"))
msg.append("Double-free detected {} free({:#x}) is called at {:#x} but is already in the free-ed list".format(RIGHT_ARROW, addr, current_arch.pc))
msg.append("Execution will likely crash...")
push_context_message("warn", "\n".join(msg))
return True
return False
# if here, no error
# 1. move alloc-ed item to free list
try:
# pop from alloc-ed list
idx = [x for x,y in __heap_allocated_list__].index(addr)
item = __heap_allocated_list__.pop(idx)
except ValueError:
if check_weird_free:
msg.append(Color.colorify("Heap-Analysis", "yellow bold"))
msg.append("Heap inconsistency detected:")
msg.append("Attempting to free an unknown value: {:#x}".format(addr))
push_context_message("warn", "\n".join(msg))
return True
return False
# 2. add it to free-ed list
__heap_freed_list__.append(item)
self.retbp = None
if check_uaf:
# 3. (opt.) add a watchpoint on pointer
self.retbp = TraceFreeRetBreakpoint(addr)
return False
class TraceFreeRetBreakpoint(gdb.FinishBreakpoint):
"""Internal temporary breakpoint to track free()d values."""
def __init__(self, addr):
super(TraceFreeRetBreakpoint, self).__init__(gdb.newest_frame(), internal=True)
self.silent = True
self.addr = addr
return
def stop(self):
wp = UafWatchpoint(self.addr)
__heap_uaf_watchpoints__.append(wp)
ok("{} - watching {:#x}".format(Color.colorify("Heap-Analysis", "yellow bold"), self.addr))
return False
class UafWatchpoint(gdb.Breakpoint):
"""Custom watchpoints set TraceFreeBreakpoint() to monitor free()d pointers being used."""
def __init__(self, addr):
super(UafWatchpoint, self).__init__("*{:#x}".format(addr), gdb.BP_WATCHPOINT, internal=True)
self.address = addr
self.silent = True
self.enabled = True
return
def stop(self):
"""If this method is triggered, we likely have a UaF. Break the execution and report it."""
frame = gdb.selected_frame()
if frame.name() in ("_int_malloc", "malloc_consolidate", "__libc_calloc"):
# ignore when the watchpoint is raised by malloc() - due to reuse
return False
# software watchpoints stop after the next statement (see
# https://sourceware.org/gdb/onlinedocs/gdb/Set-Watchpoints.html)
pc = gdb_get_nth_previous_instruction_address(current_arch.pc, 2)
insn = gef_current_instruction(pc)
msg = []
msg.append(Color.colorify("Heap-Analysis", "yellow bold"))
msg.append("Possible Use-after-Free in '{:s}': pointer {:#x} was freed, but is attempted to be used at {:#x}"
.format(get_filepath(), self.address, pc))
msg.append("{:#x} {:s} {:s}".format(insn.address, insn.mnemonic, Color.yellowify(", ".join(insn.operands))))
push_context_message("warn", "\n".join(msg))
return True
class EntryBreakBreakpoint(gdb.Breakpoint):
"""Breakpoint used internally to stop execution at the most convenient entry point."""
def __init__(self, location):
super(EntryBreakBreakpoint, self).__init__(location, gdb.BP_BREAKPOINT, internal=True, temporary=True)
self.silent = True
return
def stop(self):
return True
class NamedBreakpoint(gdb.Breakpoint):
"""Breakpoint which shows a specified name, when hit."""
def __init__(self, location, name):
super(NamedBreakpoint, self).__init__(spec=location, type=gdb.BP_BREAKPOINT, internal=False, temporary=False)
self.name = name
self.loc = location
return
def stop(self):
push_context_message("info", "Hit breakpoint {} ({})".format(self.loc, Color.colorify(self.name, "red bold")))
return True
#
# Commands
#
def register_external_command(obj):
"""Registering function for new GEF (sub-)command to GDB."""
global __commands__, __gef__
cls = obj.__class__
__commands__.append(cls)
__gef__.load(initial=False)
__gef__.doc.add_command_to_doc((cls._cmdline_, cls, None))
__gef__.doc.refresh()
return cls
def register_command(cls):
"""Decorator for registering new GEF (sub-)command to GDB."""
global __commands__
__commands__.append(cls)
return cls
def register_priority_command(cls):
"""Decorator for registering new command with priority, meaning that it must
loaded before the other generic commands."""
global __commands__
__commands__.insert(0, cls)
return cls
def register_function(cls):
"""Decorator for registering a new convenience function to GDB."""
global __functions__
__functions__.append(cls)
return cls
class GenericCommand(gdb.Command):
"""This is an abstract class for invoking commands, should not be instantiated."""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
self.pre_load()
syntax = Color.yellowify("\nSyntax: ") + self._syntax_
example = Color.yellowify("\nExample: ") + self._example_ if self._example_ else ""
self.__doc__ = self.__doc__.replace(" "*4, "") + syntax + example
self.repeat = False
self.repeat_count = 0
self.__last_command = None
command_type = kwargs.setdefault("command", gdb.COMMAND_OBSCURE)
complete_type = kwargs.setdefault("complete", gdb.COMPLETE_NONE)
prefix = kwargs.setdefault("prefix", False)
super(GenericCommand, self).__init__(self._cmdline_, command_type, complete_type, prefix)
self.post_load()
return
def invoke(self, args, from_tty):
try:
argv = gdb.string_to_argv(args)
self.__set_repeat_count(argv, from_tty)
bufferize(self.do_invoke)(argv)
except Exception as e:
            # Note: since we intercept and clean up exceptions here, commands should preferably
            # catch specific exceptions rather than the generic Exception; this allows much cleaner handling.
if is_debug():
show_last_exception()
else:
err("Command '{:s}' failed to execute properly, reason: {:s}".format(self._cmdline_, str(e)))
return
def usage(self):
err("Syntax\n{}".format(self._syntax_))
return
@abc.abstractproperty
def _cmdline_(self): pass
@abc.abstractproperty
def _syntax_(self): pass
@abc.abstractproperty
def _example_(self): return ""
@abc.abstractmethod
def do_invoke(self, argv): pass
def pre_load(self): pass
def post_load(self): pass
def __get_setting_name(self, name):
def __sanitize_class_name(clsname):
if " " not in clsname:
return clsname
return "-".join(clsname.split())
class_name = __sanitize_class_name(self.__class__._cmdline_)
return "{:s}.{:s}".format(class_name, name)
@property
def settings(self):
"""Return the list of settings for this command."""
return [ x.split(".", 1)[1] for x in __config__
if x.startswith("{:s}.".format(self._cmdline_)) ]
def get_setting(self, name):
key = self.__get_setting_name(name)
setting = __config__[key]
return setting[1](setting[0])
def has_setting(self, name):
key = self.__get_setting_name(name)
return key in __config__
def add_setting(self, name, value, description=""):
key = self.__get_setting_name(name)
__config__[key] = [value, type(value), description]
return
def del_setting(self, name):
key = self.__get_setting_name(name)
del __config__[key]
return
def __set_repeat_count(self, argv, from_tty):
if not from_tty:
self.repeat = False
self.repeat_count = 0
return
command = gdb.execute("show commands", to_string=True).strip().split("\n")[-1]
self.repeat = self.__last_command == command
self.repeat_count = self.repeat_count + 1 if self.repeat else 0
self.__last_command = command
return
# Copy/paste this template for new command
# @register_command
# class TemplateCommand(GenericCommand):
# """TemplateCommand: description here will be seen in the help menu for the command."""
# _cmdline_ = "template-fake"
# _syntax_ = "{:s}".format(_cmdline_)
# _aliases_ = ["tpl-fk",]
# def __init__(self):
# super(TemplateCommand, self).__init__(complete=gdb.COMPLETE_FILENAME)
# return
# def do_invoke(self, argv):
# return
@register_command
class PrintFormatCommand(GenericCommand):
"""Print bytes format in high level languages."""
_cmdline_ = "print-format"
_syntax_ = "{:s} [-f FORMAT] [-b BITSIZE] [-l LENGTH] [-c] [-h] LOCATION".format(_cmdline_)
_aliases_ = ["pf",]
_example_ = "{0:s} -f py -b 8 -l 256 $rsp".format(_cmdline_)
bitformat = {8: "<B", 16: "<H", 32: "<I", 64: "<Q"}
c_type = {8: "char", 16: "short", 32: "int", 64: "long long"}
asm_type = {8: "db", 16: "dw", 32: "dd", 64: "dq"}
def __init__(self):
super(PrintFormatCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
def usage(self):
h = self._syntax_
h += "\n\t-f FORMAT specifies the output format for programming language, avaliable value is py, c, js, asm (default py).\n"
h += "\t-b BITSIZE sepecifies size of bit, avaliable values is 8, 16, 32, 64 (default is 8).\n"
h += "\t-l LENGTH specifies length of array (default is 256).\n"
h += "\t-c The result of data will copied to clipboard\n"
h += "\tLOCATION specifies where the address of bytes is stored."
info(h)
return
def clip(self, data):
if sys.platform == "linux":
xclip = which("xclip")
prog = [xclip, "-selection", "clipboard", "-i"] # For linux
elif sys.platform == "darwin":
pbcopy = which("pbcopy")
prog = [pbcopy] # For OSX
else:
warn("Can't copy to clipboard, platform not supported")
return False
try:
p = subprocess.Popen(prog, stdin=subprocess.PIPE)
except Exception:
warn("Can't copy to clipboard, Something went wrong while copying")
return False
p.stdin.write(data)
p.stdin.close()
p.wait()
return True
@only_if_gdb_running
def do_invoke(self, argv):
"""Default value for print-format command."""
lang = "py"
length = 256
bitlen = 8
copy_to_clipboard = False
supported_formats = ["py", "c", "js", "asm"]
opts, args = getopt.getopt(argv, "f:l:b:ch")
for o,a in opts:
if o == "-f": lang = a
elif o == "-l": length = long(gdb.parse_and_eval(a))
elif o == "-b": bitlen = long(a)
elif o == "-c": copy_to_clipboard = True
elif o == "-h":
self.usage()
return
if not args:
err("No address specified")
return
start_addr = long(gdb.parse_and_eval(args[0]))
if bitlen not in [8, 16, 32, 64]:
err("Size of bit must be in 8, 16, 32, or 64")
return
if lang not in supported_formats:
err("Language must be : {}".format(str(supported_formats)))
return
size = long(bitlen / 8)
end_addr = start_addr+length*size
bf = self.bitformat[bitlen]
data = []
out = ""
for address in range(start_addr, end_addr, size):
value = struct.unpack(bf, read_memory(address, size))[0]
data += [value]
sdata = ", ".join(map(hex, data))
if lang == "py":
out = "buf = [{}]".format(sdata)
elif lang == "c":
out = "unsigned {0} buf[{1}] = {{{2}}};".format(self.c_type[bitlen], length, sdata)
elif lang == "js":
out = "var buf = [{}]".format(sdata)
elif lang == "asm":
out += "buf {0} {1}".format(self.asm_type[bitlen], sdata)
if copy_to_clipboard:
if self.clip(bytes(out, "utf-8")):
info("Copied to clipboard")
else:
warn("There's a problem while copying")
print(out)
return
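# Illustrative usage (assuming a live session): `gef> print-format -f c -b 32 -l 4 $sp`
# emits a C array declaration `unsigned int buf[4] = {...};` built from the four 32-bit
# little-endian words read at $sp.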
@register_command
class PieCommand(GenericCommand):
"""PIE breakpoint support."""
_cmdline_ = "pie"
_syntax_ = "{:s} (breakpoint|info|delete|run|attach|remote)".format(_cmdline_)
def __init__(self):
super(PieCommand, self).__init__(prefix=True)
return
def do_invoke(self, argv):
if not argv:
self.usage()
return
@register_command
class PieBreakpointCommand(GenericCommand):
"""Set a PIE breakpoint."""
_cmdline_ = "pie breakpoint"
_syntax_ = "{:s} BREAKPOINT".format(_cmdline_)
def do_invoke(self, argv):
global __pie_counter__, __pie_breakpoints__
if len(argv) < 1:
self.usage()
return
bp_expr = " ".join(argv)
tmp_bp_expr = bp_expr
if bp_expr[0] == "*":
addr = long(gdb.parse_and_eval(bp_expr[1:]))
else:
addr = long(gdb.parse_and_eval("&{}".format(bp_expr))) # get address of symbol or function name
self.set_pie_breakpoint(lambda base: "b *{}".format(base + addr), addr)
# When the process is already on, set real breakpoints immediately
if is_alive():
vmmap = get_process_maps()
base_address = [x.page_start for x in vmmap if x.path == get_filepath()][0]
for bp_ins in __pie_breakpoints__.values():
bp_ins.instantiate(base_address)
@staticmethod
def set_pie_breakpoint(set_func, addr):
global __pie_counter__, __pie_breakpoints__
__pie_breakpoints__[__pie_counter__] = PieVirtualBreakpoint(set_func, __pie_counter__, addr)
__pie_counter__ += 1
@register_command
class PieInfoCommand(GenericCommand):
"""Display breakpoint info."""
_cmdline_ = "pie info"
_syntax_ = "{:s} BREAKPOINT".format(_cmdline_)
def do_invoke(self, argv):
global __pie_breakpoints__
if len(argv) < 1:
            # no argument provided, show all virtual breakpoints
bps = [__pie_breakpoints__[x] for x in __pie_breakpoints__]
else:
try:
bps = [__pie_breakpoints__[int(x)] for x in argv]
except ValueError:
err("Please give me breakpoint number")
return
lines = []
lines.append("VNum\tNum\tAddr")
lines += [
"{}\t{}\t{}".format(x.vbp_num, x.bp_num if x.bp_num else "N/A", x.addr) for x in bps
]
gef_print("\n".join(lines))
@register_command
class PieDeleteCommand(GenericCommand):
"""Delete a PIE breakpoint."""
_cmdline_ = "pie delete"
_syntax_ = "{:s} [BREAKPOINT]".format(_cmdline_)
def do_invoke(self, argv):
global __pie_breakpoints__
if len(argv) < 1:
# no arg, delete all
to_delete = [__pie_breakpoints__[x] for x in __pie_breakpoints__]
self.delete_bp(to_delete)
try:
self.delete_bp([__pie_breakpoints__[int(x)] for x in argv])
except ValueError:
err("Please input PIE virtual breakpoint number to delete")
@staticmethod
def delete_bp(breakpoints):
global __pie_breakpoints__
for bp in breakpoints:
# delete current real breakpoints if exists
if bp.bp_num:
gdb.execute("delete {}".format(bp.bp_num))
# delete virtual breakpoints
del __pie_breakpoints__[bp.vbp_num]
@register_command
class PieRunCommand(GenericCommand):
"""Run process with PIE breakpoint support."""
_cmdline_ = "pie run"
_syntax_ = _cmdline_
def do_invoke(self, argv):
global __pie_breakpoints__
fpath = get_filepath()
if fpath is None:
warn("No executable to debug, use `file` to load a binary")
return
if not os.access(fpath, os.X_OK):
warn("The file '{}' is not executable.".format(fpath))
return
if is_alive():
warn("gdb is already running. Restart process.")
# get base address
gdb.execute("set stop-on-solib-events 1")
hide_context()
gdb.execute("run {}".format(" ".join(argv)))
unhide_context()
gdb.execute("set stop-on-solib-events 0")
vmmap = get_process_maps()
base_address = [x.page_start for x in vmmap if x.path == get_filepath()][0]
info("base address {}".format(hex(base_address)))
# modify all breakpoints
for bp_ins in __pie_breakpoints__.values():
bp_ins.instantiate(base_address)
try:
gdb.execute("continue")
except gdb.error as e:
err(e)
gdb.execute("kill")
@register_command
class PieAttachCommand(GenericCommand):
"""Do attach with PIE breakpoint support."""
_cmdline_ = "pie attach"
_syntax_ = "{:s} PID".format(_cmdline_)
def do_invoke(self, argv):
try:
gdb.execute("attach {}".format(" ".join(argv)), to_string=True)
except gdb.error as e:
err(e)
return
# after attach, we are stopped so that we can
# get base address to modify our breakpoint
vmmap = get_process_maps()
base_address = [x.page_start for x in vmmap if x.path == get_filepath()][0]
for bp_ins in __pie_breakpoints__.values():
bp_ins.instantiate(base_address)
gdb.execute("context")
@register_command
class PieRemoteCommand(GenericCommand):
"""Attach to a remote connection with PIE breakpoint support."""
_cmdline_ = "pie remote"
_syntax_ = "{:s} REMOTE".format(_cmdline_)
def do_invoke(self, argv):
try:
gdb.execute("gef-remote {}".format(" ".join(argv)))
except gdb.error as e:
err(e)
return
# after remote attach, we are stopped so that we can
# get base address to modify our breakpoint
vmmap = get_process_maps()
base_address = [x.page_start for x in vmmap if x.realpath == get_filepath()][0]
for bp_ins in __pie_breakpoints__.values():
bp_ins.instantiate(base_address)
gdb.execute("context")
@register_command
class SmartEvalCommand(GenericCommand):
"""SmartEval: Smart eval (vague approach to mimic WinDBG `?`)."""
_cmdline_ = "$"
_syntax_ = "{0:s} EXPR\n{0:s} ADDRESS1 ADDRESS2".format(_cmdline_)
_example_ = "\n{0:s} $pc+1\n{0:s} 0x00007ffff7a10000 0x00007ffff7bce000".format(_cmdline_)
def do_invoke(self, argv):
argc = len(argv)
if argc==1:
self.evaluate(argv)
return
if argc==2:
self.distance(argv)
return
def evaluate(self, expr):
def show_as_int(i):
off = current_arch.ptrsize*8
def comp2_x(x): return "{:x}".format((x + (1 << off)) % (1 << off))
def comp2_b(x): return "{:b}".format((x + (1 << off)) % (1 << off))
try:
s_i = comp2_x(res)
s_i = s_i.rjust(len(s_i)+1, "0") if len(s_i)%2 else s_i
gef_print("{:d}".format(i))
gef_print("0x" + comp2_x(res))
gef_print("0b" + comp2_b(res))
gef_print("{}".format(binascii.unhexlify(s_i)))
gef_print("{}".format(binascii.unhexlify(s_i)[::-1]))
except:
pass
return
parsed_expr = []
for xp in expr:
try:
xp = gdb.parse_and_eval(xp)
xp = int(xp)
parsed_expr.append("{:d}".format(xp))
except gdb.error:
parsed_expr.append(str(xp))
try:
res = eval(" ".join(parsed_expr))
if type(res) is int:
show_as_int(res)
else:
gef_print("{}".format(res))
except SyntaxError:
gef_print(" ".join(parsed_expr))
return
def distance(self, args):
try:
x = int(args[0], 16) if is_hex(args[0]) else int(args[0])
y = int(args[1], 16) if is_hex(args[1]) else int(args[1])
gef_print("{}".format(abs(x-y)))
except ValueError:
warn("Distance requires 2 numbers: {} 0 0xffff".format(self._cmdline_))
return
@register_command
class CanaryCommand(GenericCommand):
"""Shows the canary value of the current process. Apply the techique detailed in
https://www.elttam.com.au/blog/playing-with-canaries/ to show the canary."""
_cmdline_ = "canary"
_syntax_ = _cmdline_
@only_if_gdb_running
def do_invoke(self, argv):
self.dont_repeat()
has_canary = checksec(get_filepath())["Canary"]
if not has_canary:
warn("This binary was not compiled with SSP.")
return
res = gef_read_canary()
if not res:
err("Failed to get the canary")
return
canary, location = res
info("Found AT_RANDOM at {:#x}, reading {} bytes".format(location, current_arch.ptrsize))
info("The canary of process {} is {:#x}".format(get_pid(), canary))
return
@register_command
class ProcessStatusCommand(GenericCommand):
"""Extends the info given by GDB `info proc`, by giving an exhaustive description of the
process status (file descriptors, ancestor, descendants, etc.). """
_cmdline_ = "process-status"
_syntax_ = _cmdline_
_aliases_ = ["status", ]
def __init__(self):
super(ProcessStatusCommand, self).__init__(complete=gdb.COMPLETE_NONE)
return
@only_if_gdb_running
@only_if_gdb_target_local
def do_invoke(self, argv):
self.show_info_proc()
self.show_ancestor()
self.show_descendants()
self.show_fds()
self.show_connections()
return
def get_state_of(self, pid):
res = {}
for line in open("/proc/{}/status".format(pid), "r"):
key, value = line.split(":", 1)
res[key.strip()] = value.strip()
return res
def get_cmdline_of(self, pid):
return open("/proc/{}/cmdline".format(pid), "r").read().replace("\x00", "\x20").strip()
def get_process_path_of(self, pid):
return os.readlink("/proc/{}/exe".format(pid))
def get_children_pids(self, pid):
ps = which("ps")
cmd = [ps, "-o", "pid", "--ppid","{}".format(pid), "--noheaders"]
try:
return [int(x) for x in gef_execute_external(cmd, as_list=True)]
except Exception:
return []
def show_info_proc(self):
info("Process Information")
pid = get_pid()
cmdline = self.get_cmdline_of(pid)
gef_print("\tPID {} {}".format(RIGHT_ARROW, pid))
gef_print("\tExecutable {} {}".format(RIGHT_ARROW, self.get_process_path_of(pid)))
gef_print("\tCommand line {} '{}'".format(RIGHT_ARROW, cmdline))
return
def show_ancestor(self):
info("Parent Process Information")
ppid = int(self.get_state_of(get_pid())["PPid"])
state = self.get_state_of(ppid)
cmdline = self.get_cmdline_of(ppid)
gef_print("\tParent PID {} {}".format(RIGHT_ARROW, state["Pid"]))
gef_print("\tCommand line {} '{}'".format(RIGHT_ARROW, cmdline))
return
def show_descendants(self):
info("Children Process Information")
children = self.get_children_pids(get_pid())
if not children:
gef_print("\tNo child process")
return
for child_pid in children:
state = self.get_state_of(child_pid)
pid = state["Pid"]
gef_print("\tPID {} {} (Name: '{}', CmdLine: '{}')".format(RIGHT_ARROW,
pid,
self.get_process_path_of(pid),
self.get_cmdline_of(pid)))
return
def show_fds(self):
pid = get_pid()
path = "/proc/{:d}/fd".format(pid)
info("File Descriptors:")
items = os.listdir(path)
if not items:
gef_print("\tNo FD opened")
return
for fname in items:
fullpath = os.path.join(path, fname)
if os.path.islink(fullpath):
gef_print("\t{:s} {:s} {:s}".format (fullpath, RIGHT_ARROW, os.readlink(fullpath)))
return
def list_sockets(self, pid):
sockets = []
path = "/proc/{:d}/fd".format(pid)
items = os.listdir(path)
for fname in items:
fullpath = os.path.join(path, fname)
if os.path.islink(fullpath) and os.readlink(fullpath).startswith("socket:"):
p = os.readlink(fullpath).replace("socket:", "")[1:-1]
sockets.append(int(p))
return sockets
def parse_ip_port(self, addr):
ip, port = addr.split(":")
return socket.inet_ntoa(struct.pack("<I", int(ip, 16))), int(port, 16)
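    # Illustrative note (not part of GEF): /proc/net/{tcp,udp} stores both fields in
    # little-endian hex, so parse_ip_port("0100007F:1F90") resolves to ("127.0.0.1", 8080).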
def show_connections(self):
# https://github.com/torvalds/linux/blob/v4.7/include/net/tcp_states.h#L16
tcp_states_str = {
0x01: "TCP_ESTABLISHED",
0x02: "TCP_SYN_SENT",
0x03: "TCP_SYN_RECV",
0x04: "TCP_FIN_WAIT1",
0x05: "TCP_FIN_WAIT2",
0x06: "TCP_TIME_WAIT",
0x07: "TCP_CLOSE",
0x08: "TCP_CLOSE_WAIT",
0x09: "TCP_LAST_ACK",
0x0a: "TCP_LISTEN",
0x0b: "TCP_CLOSING",
0x0c: "TCP_NEW_SYN_RECV",
}
udp_states_str = {
0x07: "UDP_LISTEN",
}
info("Network Connections")
pid = get_pid()
sockets = self.list_sockets(pid)
if not sockets:
gef_print("\tNo open connections")
return
entries = {}
entries["TCP"] = [x.split() for x in open("/proc/{:d}/net/tcp".format(pid), "r").readlines()[1:]]
entries["UDP"]= [x.split() for x in open("/proc/{:d}/net/udp".format(pid), "r").readlines()[1:]]
for proto in entries:
for entry in entries[proto]:
local, remote, state = entry[1:4]
inode = int(entry[9])
if inode in sockets:
local = self.parse_ip_port(local)
remote = self.parse_ip_port(remote)
state = int(state, 16)
state_str = tcp_states_str[state] if proto=="TCP" else udp_states_str[state]
gef_print("\t{}:{} {} {}:{} ({})".format(local[0], local[1],
RIGHT_ARROW,
remote[0], remote[1],
state_str))
return
@register_priority_command
class GefThemeCommand(GenericCommand):
"""Customize GEF appearance."""
_cmdline_ = "theme"
_syntax_ = "{:s} [KEY [VALUE]]".format(_cmdline_)
def __init__(self, *args, **kwargs):
super(GefThemeCommand, self).__init__(GefThemeCommand._cmdline_)
self.add_setting("context_title_line", "gray", "Color of the borders in context window")
self.add_setting("context_title_message", "cyan", "Color of the title in context window")
self.add_setting("default_title_line", "gray", "Default color of borders")
self.add_setting("default_title_message", "cyan", "Default color of title")
self.add_setting("table_heading", "blue", "Color of the column headings to tables (e.g. vmmap)")
self.add_setting("disassemble_current_instruction", "green", "Color to use to highlight the current $pc when disassembling")
self.add_setting("dereference_string", "yellow", "Color of dereferenced string")
self.add_setting("dereference_code", "gray", "Color of dereferenced code")
self.add_setting("dereference_base_address", "cyan", "Color of dereferenced address")
self.add_setting("dereference_register_value", "bold blue" , "Color of dereferenced register")
self.add_setting("registers_register_name", "blue", "Color of the register name in the register window")
self.add_setting("registers_value_changed", "bold red", "Color of the changed register in the register window")
self.add_setting("address_stack", "pink", "Color to use when a stack address is found")
self.add_setting("address_heap", "green", "Color to use when a heap address is found")
self.add_setting("address_code", "red", "Color to use when a code address is found")
self.add_setting("source_current_line", "green", "Color to use for the current code line in the source window")
return
def do_invoke(self, args):
self.dont_repeat()
argc = len(args)
if argc==0:
for setting in sorted(self.settings):
value = self.get_setting(setting)
value = Color.colorify(value, value)
gef_print("{:40s}: {:s}".format(setting, value))
return
setting = args[0]
if not self.has_setting(setting):
err("Invalid key")
return
if argc==1:
value = self.get_setting(setting)
value = Color.colorify(value, value)
gef_print("{:40s}: {:s}".format(setting, value))
return
val = [x for x in args[1:] if x in Color.colors]
self.add_setting(setting, " ".join(val))
return
@register_command
class PCustomCommand(GenericCommand):
"""Dump user defined structure.
    This command attempts to reproduce WinDBG's awesome `dt` command for GDB, and allows
to apply structures (from symbols or custom) directly to an address.
Custom structures can be defined in pure Python using ctypes, and should be stored
in a specific directory, whose path must be stored in the `pcustom.struct_path`
configuration setting."""
_cmdline_ = "pcustom"
_syntax_ = "{:s} [-l] [StructA [0xADDRESS] [-e]]".format(_cmdline_)
def __init__(self):
super(PCustomCommand, self).__init__(complete=gdb.COMPLETE_SYMBOL)
self.add_setting("struct_path", os.path.join(GEF_TEMP_DIR, "structs"),
"Path to store/load the structure ctypes files")
return
def do_invoke(self, argv):
argc = len(argv)
if argc == 0:
self.usage()
return
if argv[0] == "-l":
self.list_custom_structures()
return
modname, structname = argv[0].split(":", 1) if ":" in argv[0] else (argv[0], argv[0])
structname = structname.split(".", 1)[0] if "." in structname else structname
if argc == 1:
self.dump_structure(modname, structname)
return
if argv[1] == "-e":
self.create_or_edit_structure(modname, structname)
return
if not is_alive():
return
try:
address = long(gdb.parse_and_eval(argv[1]))
except gdb.error:
err("Failed to parse '{:s}'".format(argv[1]))
return
self.apply_structure_to_address(modname, structname, address)
return
def get_struct_path(self):
path = os.path.expanduser(self.get_setting("struct_path"))
path = os.path.realpath(path)
return path if os.path.isdir(path) else None
def pcustom_filepath(self, x):
p = self.get_struct_path()
if not p: return None
return os.path.join(p, "{}.py".format(x))
def is_valid_struct(self, x):
p = self.pcustom_filepath(x)
return os.access(p, os.R_OK) if p else None
def dump_structure(self, mod_name, struct_name):
# If it's a builtin or defined in the ELF use gdb's `ptype`
try:
gdb.execute("ptype struct {:s}".format(struct_name))
return
except gdb.error:
pass
self.dump_custom_structure(mod_name, struct_name)
return
def dump_custom_structure(self, mod_name, struct_name):
if not self.is_valid_struct(mod_name):
err("Invalid structure name '{:s}'".format(struct_name))
return
_class = self.get_class(mod_name, struct_name)
_offset = 0
for _name, _type in _class._fields_:
_size = ctypes.sizeof(_type)
gef_print("+{:04x} {:s} {:s} ({:#x})".format(_offset, _name, _type.__name__, _size))
_offset += _size
return
def deserialize(self, struct, data):
length = min(len(data), ctypes.sizeof(struct))
ctypes.memmove(ctypes.addressof(struct), data, length)
return
def get_module(self, modname):
_fullname = self.pcustom_filepath(modname)
return imp.load_source(modname, _fullname)
def get_class(self, modname, classname):
_mod = self.get_module(modname)
return getattr(_mod, classname)()
def list_all_structs(self, modname):
_mod = self.get_module(modname)
_invalid = set(["BigEndianStructure", "LittleEndianStructure", "Structure"])
_structs = set([x for x in dir(_mod) \
if inspect.isclass(getattr(_mod, x)) \
and issubclass(getattr(_mod, x), ctypes.Structure)])
return _structs - _invalid
def apply_structure_to_address(self, mod_name, struct_name, addr, depth=0):
if not self.is_valid_struct(mod_name):
err("Invalid structure name '{:s}'".format(struct_name))
return
try:
_class = self.get_class(mod_name, struct_name)
data = read_memory(addr, ctypes.sizeof(_class))
except gdb.MemoryError:
err("{}Cannot reach memory {:#x}".format(" "*depth, addr))
return
self.deserialize(_class, data)
_regsize = get_memory_alignment()
_offset = 0
for field in _class._fields_:
_name, _type = field
_size = ctypes.sizeof(_type)
_value = getattr(_class, _name)
if (_regsize == 4 and _type is ctypes.c_uint32) \
or (_regsize == 8 and _type is ctypes.c_uint64) \
or (_regsize == ctypes.sizeof(ctypes.c_void_p) and _type is ctypes.c_void_p):
# try to dereference pointers
_value = RIGHT_ARROW.join(DereferenceCommand.dereference_from(_value))
line = []
line += " "*depth
line += ("{:#x}+0x{:04x} {} : ".format(addr, _offset, _name)).ljust(40)
line += "{} ({})".format(_value, _type.__name__)
parsed_value = self.get_ctypes_value(_class, _name, _value)
if parsed_value:
line += " {} {}".format(RIGHT_ARROW, parsed_value)
gef_print("".join(line))
if issubclass(_type, ctypes.Structure):
self.apply_structure_to_address(mod_name, _type.__name__, addr + _offset, depth + 1)
_offset += ctypes.sizeof(_type)
else:
_offset += _size
return
def get_ctypes_value(self, struct, item, value):
if not hasattr(struct, "_values_"): return ""
values_list = getattr(struct, "_values_")
default = ""
for name, values in values_list:
if name != item: continue
if callable(values):
return values(value)
try:
for val, desc in values:
if value == val: return desc
if val is None: default = desc
except:
err("Error while trying to obtain values from _values_[\"{}\"]".format(name))
return default
def create_or_edit_structure(self, mod_name, struct_name):
path = self.get_struct_path()
if path is None:
err("Invalid struct path")
return
fullname = self.pcustom_filepath(mod_name)
if not self.is_valid_struct(mod_name):
info("Creating '{:s}' from template".format(fullname))
with open(fullname, "w") as f:
f.write(self.get_template(struct_name))
f.flush()
else:
info("Editing '{:s}'".format(fullname))
cmd = os.getenv("EDITOR").split() if os.getenv("EDITOR") else ["nano",]
cmd.append(fullname)
retcode = subprocess.call(cmd)
return retcode
def get_template(self, structname):
d = [
"from ctypes import *\n\n",
"class ", structname, "(Structure):\n",
" _fields_ = []\n"
]
return "".join(d)
def list_custom_structures(self):
path = self.get_struct_path()
if path is None:
err("Cannot open '{0}': check directory and/or `gef config {0}` "
"setting, currently: '{1}'".format("pcustom.struct_path", self.get_setting("struct_path")))
return
info("Listing custom structures from '{:s}'".format(path))
for filen in os.listdir(path):
name, ext = os.path.splitext(filen)
if ext != ".py": continue
_modz = self.list_all_structs(name)
ok("{:s} {:s} ({:s})".format(RIGHT_ARROW, name, ", ".join(_modz)))
return
@register_command
class ChangeFdCommand(GenericCommand):
"""ChangeFdCommand: redirect file descriptor during runtime."""
_cmdline_ = "hijack-fd"
_syntax_ = "{:s} FD_NUM NEW_OUTPUT".format(_cmdline_)
_example_ = "{:s} 2 /tmp/stderr_output.txt".format(_cmdline_)
@only_if_gdb_running
@only_if_gdb_target_local
def do_invoke(self, argv):
if len(argv)!=2:
self.usage()
return
if not os.access("/proc/{:d}/fd/{:s}".format(get_pid(), argv[0]), os.R_OK):
self.usage()
return
old_fd = int(argv[0])
new_output = argv[1]
if ":" in new_output:
address = socket.gethostbyname(new_output.split(":")[0])
port = int(new_output.split(":")[1])
# socket(int domain, int type, int protocol)
# AF_INET = 2, SOCK_STREAM = 1
res = gdb.execute("""call socket(2, 1, 0)""", to_string=True)
new_fd = self.get_fd_from_result(res)
# fill in memory with sockaddr_in struct contents
# we will do this in the stack, since connect() wants a pointer to a struct
vmmap = get_process_maps()
stack_addr = [entry.page_start for entry in vmmap if entry.path == "[stack]"][0]
original_contents = read_memory(stack_addr, 8)
write_memory(stack_addr, "\x02\x00", 2)
write_memory(stack_addr + 0x2, struct.pack("<H", socket.htons(port)), 2)
write_memory(stack_addr + 0x4, socket.inet_aton(address), 4)
info("Trying to connect to {}".format(new_output))
# connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen)
res = gdb.execute("""call connect({}, {}, {})""".format(new_fd, stack_addr, 16), to_string=True)
# recover stack state
write_memory(stack_addr, original_contents, 8)
res = self.get_fd_from_result(res)
if res == -1:
err("Failed to connect to {}:{}".format(address, port))
return
info("Connected to {}".format(new_output))
else:
res = gdb.execute("""call open("{:s}", 66, 0666)""".format(new_output), to_string=True)
new_fd = self.get_fd_from_result(res)
info("Opened '{:s}' as fd #{:d}".format(new_output, new_fd))
gdb.execute("""call dup2({:d}, {:d})""".format(new_fd, old_fd), to_string=True)
info("Duplicated fd #{:d}{:s}#{:d}".format(new_fd, RIGHT_ARROW, old_fd))
gdb.execute("""call close({:d})""".format(new_fd), to_string=True)
info("Closed extra fd #{:d}".format(new_fd))
ok("Success")
return
def get_fd_from_result(self, res):
# Output example: $1 = 3
return int(res.split()[2], 0)
@register_command
class IdaInteractCommand(GenericCommand):
"""IDA Interact: set of commands to interact with IDA via a XML RPC service
deployed via the IDA script `ida_gef.py`. It should be noted that this command
can also be used to interact with Binary Ninja (using the script `binja_gef.py`)
using the same interface."""
_cmdline_ = "ida-interact"
_syntax_ = "{:s} METHOD [ARGS]".format(_cmdline_)
_aliases_ = ["binaryninja-interact", "bn", "binja"]
_example_ = "\n{0:s} Jump $pc\n{0:s} SetColor $pc ff00ff".format(_cmdline_)
def __init__(self):
super(IdaInteractCommand, self).__init__(prefix=False)
host, port = "127.0.0.1", 1337
self.add_setting("host", host, "IP address to use connect to IDA/Binary Ninja script")
self.add_setting("port", port, "Port to use connect to IDA/Binary Ninja script")
self.add_setting("sync_cursor", False, "Enable real-time $pc synchronisation")
self.sock = None
self.version = ("", "")
self.old_bps = set()
return
def is_target_alive(self, host, port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((host, port))
s.close()
except socket.error:
return False
return True
def connect(self, host=None, port=None):
"""Connect to the XML-RPC service."""
host = host or self.get_setting("host")
port = port or self.get_setting("port")
try:
sock = xmlrpclib.ServerProxy("http://{:s}:{:d}".format(host, port))
gef_on_stop_hook(ida_synchronize_handler)
gef_on_continue_hook(ida_synchronize_handler)
self.version = sock.version()
except ConnectionRefusedError:
err("Failed to connect to '{:s}:{:d}'".format(host, port))
sock = None
self.sock = sock
return
def disconnect(self):
gef_on_stop_unhook(ida_synchronize_handler)
gef_on_continue_unhook(ida_synchronize_handler)
self.sock = None
return
def do_invoke(self, argv):
def parsed_arglist(arglist):
args = []
for arg in arglist:
try:
# try to solve the argument using gdb
argval = gdb.parse_and_eval(arg)
argval.fetch_lazy()
# check if value is addressable
argval = long(argval) if argval.address is None else long(argval.address)
                    # if the binary is PIE, we need to subtract the base address
is_pie = checksec(get_filepath())["PIE"]
if is_pie and main_base_address <= argval < main_end_address:
argval -= main_base_address
args.append("{:#x}".format(argval,))
except Exception:
# if gdb can't parse the value, let ida deal with it
args.append(arg)
return args
if self.sock is None:
# trying to reconnect
self.connect()
if self.sock is None:
self.disconnect()
return
if len(argv) == 0 or argv[0] in ("-h", "--help"):
method_name = argv[1] if len(argv)>1 else None
self.usage(method_name)
return
method_name = argv[0].lower()
if method_name == "version":
self.version = self.sock.version()
info("Enhancing {:s} with {:s} (v.{:s})".format(Color.greenify("gef"),
Color.redify(self.version[0]),
Color.yellowify(self.version[1])))
return
if not is_alive():
main_base_address = main_end_address = 0
else:
vmmap = get_process_maps()
main_base_address = min([x.page_start for x in vmmap if x.realpath == get_filepath()])
main_end_address = max([x.page_end for x in vmmap if x.realpath == get_filepath()])
try:
if method_name == "sync":
self.synchronize()
else:
method = getattr(self.sock, method_name)
if len(argv) > 1:
args = parsed_arglist(argv[1:])
res = method(*args)
else:
res = method()
if method_name in ("ImportStruct", "ImportStructs"):
self.import_structures(res)
else:
gef_print(str(res))
if self.get_setting("sync_cursor") is True:
jump = getattr(self.sock, "Jump")
jump(hex(current_arch.pc-main_base_address),)
except socket.error:
self.disconnect()
return
def synchronize(self):
"""Submit all active breakpoint addresses to IDA/BN."""
pc = current_arch.pc
vmmap = get_process_maps()
base_address = min([x.page_start for x in vmmap if x.path == get_filepath()])
end_address = max([x.page_end for x in vmmap if x.path == get_filepath()])
if not (base_address <= pc < end_address):
# do not sync in library
return
breakpoints = gdb.breakpoints() or []
gdb_bps = set()
for bp in breakpoints:
if bp.enabled and not bp.temporary:
if bp.location[0]=="*": # if it's an address i.e. location starts with "*"
addr = long(gdb.parse_and_eval(bp.location[1:]))
else: # it is a symbol
addr = long(gdb.parse_and_eval(bp.location).address)
if not (base_address <= addr < end_address):
continue
gdb_bps.add(addr-base_address)
added = gdb_bps - self.old_bps
removed = self.old_bps - gdb_bps
self.old_bps = gdb_bps
try:
# it is possible that the server was stopped between now and the last sync
rc = self.sock.Sync("{:#x}".format(pc-base_address), list(added), list(removed))
except ConnectionRefusedError:
self.disconnect()
return
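        # rc is a pair: (breakpoints newly set in IDA/BN, breakpoints removed in IDA/BN),
        # both expressed as offsets from the image base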
ida_added, ida_removed = rc
# add new bp from IDA
for new_bp in ida_added:
location = base_address+new_bp
gdb.Breakpoint("*{:#x}".format(location), type=gdb.BP_BREAKPOINT)
self.old_bps.add(location)
# and remove the old ones
breakpoints = gdb.breakpoints() or []
for bp in breakpoints:
if bp.enabled and not bp.temporary:
if bp.location[0]=="*": # if it's an address i.e. location starts with "*"
addr = long(gdb.parse_and_eval(bp.location[1:]))
else: # it is a symbol
addr = long(gdb.parse_and_eval(bp.location).address)
if not (base_address <= addr < end_address):
continue
if (addr-base_address) in ida_removed:
if (addr-base_address) in self.old_bps:
self.old_bps.remove((addr-base_address))
bp.delete()
return
def usage(self, meth=None):
if self.sock is None:
return
if meth is not None:
gef_print(titlify(meth))
gef_print(self.sock.system.methodHelp(meth))
return
info("Listing available methods and syntax examples: ")
for m in self.sock.system.listMethods():
if m.startswith("system."): continue
gef_print(titlify(m))
gef_print(self.sock.system.methodHelp(m))
return
def import_structures(self, structs):
if self.version[0] != "IDA Pro":
return
path = get_gef_setting("pcustom.struct_path")
if path is None:
return
if not os.path.isdir(path):
gef_makedirs(path)
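        # each structure is dumped as a small ctypes definition, e.g.:
        #   class my_struct(Structure):
        #       _fields_ = [("field", c_uint32), ...]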
for struct_name in structs:
fullpath = os.path.join(path, "{}.py".format(struct_name))
with open(fullpath, "w") as f:
f.write("from ctypes import *\n\n")
f.write("class ")
f.write(struct_name)
f.write("(Structure):\n")
f.write(" _fields_ = [\n")
for _, name, size in structs[struct_name]:
name = bytes(name, encoding="utf-8")
if size == 1: csize = "c_uint8"
elif size == 2: csize = "c_uint16"
elif size == 4: csize = "c_uint32"
elif size == 8: csize = "c_uint64"
else: csize = "c_byte * {}".format(size)
m = ' (\"{}\", {}),\n'.format(name, csize)
f.write(m)
f.write("]\n")
ok("Success, {:d} structure{:s} imported".format(len(structs),
"s" if len(structs)>1 else ""))
return
@register_command
class ScanSectionCommand(GenericCommand):
"""Search for addresses that are located in a memory mapping (haystack) that belonging
to another (needle)."""
_cmdline_ = "scan"
_syntax_ = "{:s} HAYSTACK NEEDLE".format(_cmdline_)
_aliases_ = ["lookup",]
_example_ = "\n{0:s} stack libc".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
if len(argv) != 2:
self.usage()
return
haystack = argv[0]
needle = argv[1]
info("Searching for addresses in '{:s}' that point to '{:s}'"
.format(Color.yellowify(haystack), Color.yellowify(needle)))
if haystack == "binary":
haystack = get_filepath()
if needle == "binary":
needle = get_filepath()
needle_sections = []
haystack_sections = []
if "0x" in haystack:
start, end = parse_string_range(haystack)
haystack_sections.append((start, end, ""))
if "0x" in needle:
start, end = parse_string_range(needle)
needle_sections.append((start, end))
for sect in get_process_maps():
if haystack in sect.path:
haystack_sections.append((sect.page_start, sect.page_end, os.path.basename(sect.path)))
if needle in sect.path:
needle_sections.append((sect.page_start, sect.page_end))
step = current_arch.ptrsize
fmt = "{}{}".format(endian_str(), "I" if step==4 else "Q")
for hstart, hend, hname in haystack_sections:
try:
mem = read_memory(hstart, hend - hstart)
except gdb.MemoryError:
continue
for i in range(0, len(mem), step):
target = struct.unpack(fmt, mem[i:i+step])[0]
for nstart, nend in needle_sections:
if target >= nstart and target < nend:
deref = DereferenceCommand.pprint_dereferenced(hstart, long(i / step))
if hname != "":
name = Color.colorify(hname, "yellow")
gef_print("{:s}: {:s}".format(name, deref))
else:
gef_print(" {:s}".format(deref))
return
@register_command
class SearchPatternCommand(GenericCommand):
"""SearchPatternCommand: search a pattern in memory. If given an hex value (starting with 0x)
the command will also try to look for upwards cross-references to this address."""
_cmdline_ = "search-pattern"
_syntax_ = "{:s} PATTERN [small|big]".format(_cmdline_)
_aliases_ = ["grep", "xref"]
_example_ = "\n{0:s} AAAAAAAA\n{0:s} 0x555555554000".format(_cmdline_)
def search_pattern_by_address(self, pattern, start_address, end_address):
"""Search a pattern within a range defined by arguments."""
pattern = gef_pybytes(pattern)
step = 0x400 * 0x1000
locations = []
for chunk_addr in range(start_address, end_address, step):
if chunk_addr + step > end_address:
chunk_size = end_address - chunk_addr
else:
chunk_size = step
mem = read_memory(chunk_addr, chunk_size)
for match in re.finditer(pattern, mem):
start = chunk_addr + match.start()
if is_ascii_string(start):
ustr = read_ascii_string(start)
end = start + len(ustr)
                else:
ustr = gef_pystring(pattern)+"[...]"
end = start + len(pattern)
locations.append((start, end, ustr))
del mem
return locations
def search_pattern(self, pattern, endian):
"""Search a pattern within the whole userland memory."""
if is_hex(pattern):
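            # e.g. "0xdeadbeef" becomes the escaped byte pattern "\xde\xad\xbe\xef" (big endian)
            # or "\xef\xbe\xad\xde" (little endian), which is then matched with re.finditer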
if endian == Elf.BIG_ENDIAN:
pattern = "".join(["\\x"+pattern[i:i+2] for i in range(2, len(pattern), 2)])
else:
pattern = "".join(["\\x"+pattern[i:i+2] for i in range(len(pattern)-2, 0, -2)])
for section in get_process_maps():
if not section.permission & Permission.READ: continue
if section.path == "[vvar]": continue
start = section.page_start
end = section.page_end - 1
old_section = None
for loc in self.search_pattern_by_address(pattern, start, end):
addr_loc_start = lookup_address(loc[0])
if addr_loc_start and addr_loc_start.section:
if old_section != addr_loc_start.section:
title = "In "
if addr_loc_start.section.path:
title += "'{}'".format(Color.blueify(addr_loc_start.section.path) )
title+= "({:#x}-{:#x})".format(addr_loc_start.section.page_start, addr_loc_start.section.page_end)
title+= ", permission={}".format(addr_loc_start.section.permission)
ok(title)
old_section = addr_loc_start.section
gef_print(""" {:#x} - {:#x} {} "{}" """.format(loc[0], loc[1], RIGHT_ARROW, Color.pinkify(loc[2]),))
return
@only_if_gdb_running
def do_invoke(self, argv):
argc = len(argv)
if argc < 1:
self.usage()
return
pattern = argv[0]
endian = get_endian()
if argc==2:
if argv[1]=="big": endian = Elf.BIG_ENDIAN
elif argv[1]=="small": endian = Elf.LITTLE_ENDIAN
info("Searching '{:s}' in memory".format(Color.yellowify(pattern)))
self.search_pattern(pattern, endian)
return
@register_command
class FlagsCommand(GenericCommand):
"""Edit flags in a human friendly way."""
_cmdline_ = "edit-flags"
_syntax_ = "{:s} [(+|-|~)FLAGNAME ...]".format(_cmdline_)
_aliases_ = ["flags",]
_example_ = "\n{0:s}\n{0:s} +zero # sets ZERO flag".format(_cmdline_)
def do_invoke(self, argv):
for flag in argv:
if len(flag)<2:
continue
action = flag[0]
name = flag[1:].lower()
if action not in ("+", "-", "~"):
err("Invalid action for flag '{:s}'".format(flag))
continue
if name not in current_arch.flags_table.values():
err("Invalid flag name '{:s}'".format(flag[1:]))
continue
for k in current_arch.flags_table:
if current_arch.flags_table[k] == name:
off = k
break
old_flag = get_register(current_arch.flag_register)
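            # "+" sets the flag bit, "-" clears it, "~" toggles it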
if action == "+":
new_flags = old_flag | (1 << off)
elif action == "-":
new_flags = old_flag & ~(1 << off)
else:
new_flags = old_flag ^ (1<<off)
gdb.execute("set ({:s}) = {:#x}".format(current_arch.flag_register, new_flags))
gef_print(current_arch.flag_register_to_human())
return
@register_command
class ChangePermissionCommand(GenericCommand):
"""Change a page permission. By default, it will change it to RWX."""
_cmdline_ = "set-permission"
_syntax_ = "{:s} LOCATION [PERMISSION]".format(_cmdline_)
_aliases_ = ["mprotect",]
_example_ = "{:s} $sp 7"
def __init__(self):
super(ChangePermissionCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
def pre_load(self):
try:
__import__("keystone")
except ImportError:
msg = "Missing `keystone-engine` package for Python{0}, install with: `pip{0} install keystone-engine`.".format(PYTHON_MAJOR)
raise ImportWarning(msg)
return
@only_if_gdb_running
def do_invoke(self, argv):
if len(argv) not in (1, 2):
err("Incorrect syntax")
self.usage()
return
if len(argv) == 2:
perm = int(argv[1])
else:
perm = Permission.READ | Permission.WRITE | Permission.EXECUTE
loc = safe_parse_and_eval(argv[0])
if loc is None:
err("Invalid address")
return
loc = long(loc)
sect = process_lookup_address(loc)
if sect is None:
err("Unmapped address")
return
size = sect.page_end - sect.page_start
original_pc = current_arch.pc
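        # strategy: overwrite the code at $pc with an mprotect syscall stub, set a breakpoint
        # right after the stub to restore the original bytes, then resume execution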
info("Generating sys_mprotect({:#x}, {:#x}, '{:s}') stub for arch {:s}"
.format(sect.page_start, size, str(Permission(value=perm)), get_arch()))
stub = self.get_stub_by_arch(sect.page_start, size, perm)
if stub is None:
err("Failed to generate mprotect opcodes")
return
info("Saving original code")
original_code = read_memory(original_pc, len(stub))
bp_loc = "*{:#x}".format(original_pc + len(stub))
info("Setting a restore breakpoint at {:s}".format(bp_loc))
ChangePermissionBreakpoint(bp_loc, original_code, original_pc)
info("Overwriting current memory at {:#x} ({:d} bytes)".format(loc, len(stub)))
write_memory(original_pc, stub, len(stub))
info("Resuming execution")
gdb.execute("continue")
return
def get_stub_by_arch(self, addr, size, perm):
code = current_arch.mprotect_asm(addr, size, perm)
arch, mode = get_keystone_arch()
raw_insns = keystone_assemble(code, arch, mode, raw=True)
return raw_insns
@register_command
class UnicornEmulateCommand(GenericCommand):
"""Use Unicorn-Engine to emulate the behavior of the binary, without affecting the GDB runtime.
By default the command will emulate only the next instruction, but location and number of
instruction can be changed via arguments to the command line. By default, it will emulate
the next instruction from current PC."""
_cmdline_ = "unicorn-emulate"
_syntax_ = "{:s} [-f LOCATION] [-t LOCATION] [-n NB_INSTRUCTION] [-s] [-o PATH] [-h]".format(_cmdline_)
_aliases_ = ["emulate",]
_example_ = "{0:s} -f $pc -n 10 -o /tmp/my-gef-emulation.py".format(_cmdline_)
def __init__(self):
super(UnicornEmulateCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
self.add_setting("verbose", False, "Set unicorn-engine in verbose mode")
self.add_setting("show_disassembly", False, "Show every instruction executed")
return
def help(self):
h = self._syntax_
h += "\n\t-f LOCATION specifies the start address of the emulated run (default $pc).\n"
h += "\t-t LOCATION specifies the end address of the emulated run.\n"
h += "\t-s Script-Only: do not execute the script once generated.\n"
h += "\t-o /PATH/TO/SCRIPT.py writes the persistent Unicorn script into this file.\n"
h += "\t-n NB_INSTRUCTION indicates the number of instructions to execute (mutually exclusive with `-t` and `-g`).\n"
h += "\t-g NB_GADGET indicates the number of gadgets to execute (mutually exclusive with `-t` and `-n`).\n"
h += "\nAdditional options can be setup via `gef config unicorn-emulate`\n"
info(h)
return
def pre_load(self):
try:
__import__("unicorn")
except ImportError:
msg = "Missing `unicorn` package for Python{0}. Install with `pip{0} install unicorn`.".format(PYTHON_MAJOR)
raise ImportWarning(msg)
try:
__import__("capstone")
except ImportError:
msg = "Missing `capstone` package for Python{0}. Install with `pip{0} install capstone`.".format(PYTHON_MAJOR)
raise ImportWarning(msg)
return
@only_if_gdb_running
def do_invoke(self, argv):
start_insn = None
end_insn = -1
nb_insn = -1
to_file = None
to_script_only = None
opts = getopt.getopt(argv, "f:t:n:so:h")[0]
for o,a in opts:
if o == "-f": start_insn = int(a, 16)
elif o == "-t":
end_insn = int(a, 16)
                nb_insn = -1
elif o == "-n":
nb_insn = int(a)
end_insn = -1
elif o == "-s":
to_script_only = True
elif o == "-o":
to_file = a
elif o == "-h":
self.help()
return
if start_insn is None:
start_insn = current_arch.pc
if end_insn < 0 and nb_insn < 0:
err("No stop condition (-t|-n) defined.")
return
if end_insn > 0:
self.run_unicorn(start_insn, end_insn, to_script_only=to_script_only, to_file=to_file)
elif nb_insn > 0:
end_insn = self.get_unicorn_end_addr(start_insn, nb_insn)
self.run_unicorn(start_insn, end_insn, to_script_only=to_script_only, to_file=to_file)
else:
raise Exception("Should never be here")
return
def get_unicorn_end_addr(self, start_addr, nb):
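        # disassemble nb+1 instructions from start_addr and return the address of the last one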
dis = list(gef_disassemble(start_addr, nb+1, True))
last_insn = dis[-1]
return last_insn.address
def run_unicorn(self, start_insn_addr, end_insn_addr, *args, **kwargs):
verbose = self.get_setting("verbose") or False
to_script_only = kwargs.get("to_script_only", False)
arch, mode = get_unicorn_arch(to_string=True)
unicorn_registers = get_unicorn_registers(to_string=True)
cs_arch, cs_mode = get_capstone_arch(to_string=True)
fname = get_filename()
to_file = kwargs.get("to_file", None)
if to_file:
tmp_filename = to_file
to_file = open(to_file, "w")
tmp_fd = to_file.fileno()
else:
tmp_fd, tmp_filename = tempfile.mkstemp(suffix=".py", prefix="gef-uc-")
if is_x86():
            # need to handle segmentation (and paging) via MSR
emulate_segmentation_block = """
# from https://github.com/unicorn-engine/unicorn/blob/master/tests/regress/x86_64_msr.py
SCRATCH_ADDR = 0xf000
SEGMENT_FS_ADDR = 0x5000
SEGMENT_GS_ADDR = 0x6000
FSMSR = 0xC0000100
GSMSR = 0xC0000101
def set_msr(uc, msr, value, scratch=SCRATCH_ADDR):
buf = b"\\x0f\\x30" # x86: wrmsr
uc.mem_map(scratch, 0x1000)
uc.mem_write(scratch, buf)
uc.reg_write(unicorn.x86_const.UC_X86_REG_RAX, value & 0xFFFFFFFF)
uc.reg_write(unicorn.x86_const.UC_X86_REG_RDX, (value >> 32) & 0xFFFFFFFF)
uc.reg_write(unicorn.x86_const.UC_X86_REG_RCX, msr & 0xFFFFFFFF)
uc.emu_start(scratch, scratch+len(buf), count=1)
uc.mem_unmap(scratch, 0x1000)
return
def set_gs(uc, addr): return set_msr(uc, GSMSR, addr)
def set_fs(uc, addr): return set_msr(uc, FSMSR, addr)
"""
context_segmentation_block = """
emu.mem_map(SEGMENT_FS_ADDR-0x1000, 0x3000)
set_fs(emu, SEGMENT_FS_ADDR)
set_gs(emu, SEGMENT_GS_ADDR)
"""
content = """#!/usr/bin/python -i
#
# Emulation script for "{fname}" from {start:#x} to {end:#x}
#
# Powered by gef, unicorn-engine, and capstone-engine
#
# @_hugsy_
#
from __future__ import print_function
import collections
import capstone, unicorn
registers = collections.OrderedDict(sorted({{{regs}}}.items(), key=lambda t: t[0]))
uc = None
verbose = {verbose}
syscall_register = "{syscall_reg}"
def disassemble(code, addr):
cs = capstone.Cs({cs_arch}, {cs_mode})
for i in cs.disasm(code, addr):
return i
def hook_code(emu, address, size, user_data):
code = emu.mem_read(address, size)
insn = disassemble(code, address)
print(">>> {{:#x}}: {{:s}} {{:s}}".format(insn.address, insn.mnemonic, insn.op_str))
return
def code_hook(emu, address, size, user_data):
code = emu.mem_read(address, size)
insn = disassemble(code, address)
print(">>> {{:#x}}: {{:s}} {{:s}}".format(insn.address, insn.mnemonic, insn.op_str))
return
def intr_hook(emu, intno, data):
print(" \\-> interrupt={{:d}}".format(intno))
return
def syscall_hook(emu, user_data):
sysno = emu.reg_read(registers[syscall_register])
print(" \\-> syscall={{:d}}".format(sysno))
return
def print_regs(emu, regs):
for i, r in enumerate(regs):
print("{{:7s}} = {{:#0{ptrsize}x}} ".format(r, emu.reg_read(regs[r])), end="")
if (i % 4 == 3) or (i == len(regs)-1): print("")
return
{emu_block}
def reset():
emu = unicorn.Uc({arch}, {mode})
{context_block}
""".format(fname=fname, start=start_insn_addr, end=end_insn_addr,
regs=",".join(["'%s': %s" % (k.strip(), unicorn_registers[k]) for k in unicorn_registers]),
verbose="True" if verbose else "False",
syscall_reg=current_arch.syscall_register,
cs_arch=cs_arch, cs_mode=cs_mode,
ptrsize=current_arch.ptrsize,
emu_block=emulate_segmentation_block if is_x86() else "",
arch=arch, mode=mode,
context_block=context_segmentation_block if is_x86() else "")
if verbose:
info("Duplicating registers")
for r in current_arch.all_registers:
gregval = get_register(r)
content += " emu.reg_write({}, {:#x})\n".format(unicorn_registers[r], gregval)
vmmap = get_process_maps()
if not vmmap:
warn("An error occured when reading memory map.")
return
if verbose:
info("Duplicating memory map")
for sect in vmmap:
if sect.path == "[vvar]":
# this section is for GDB only, skip it
continue
page_start = sect.page_start
page_end = sect.page_end
size = sect.size
perm = sect.permission
content += " # Mapping {}: {:#x}-{:#x}\n".format(sect.path, page_start, page_end)
content += " emu.mem_map({:#x}, {:#x}, {})\n".format(page_start, size, oct(perm.value))
if perm & Permission.READ:
code = read_memory(page_start, size)
loc = "/tmp/gef-{}-{:#x}.raw".format(fname, page_start)
with open(loc, "wb") as f:
f.write(bytes(code))
content += " emu.mem_write({:#x}, open('{}', 'rb').read())\n".format(page_start, loc)
content += "\n"
content += " emu.hook_add(unicorn.UC_HOOK_CODE, code_hook)\n"
content += " emu.hook_add(unicorn.UC_HOOK_INTR, intr_hook)\n"
if is_x86_64():
content += " emu.hook_add(unicorn.UC_HOOK_INSN, syscall_hook, None, 1, 0, unicorn.x86_const.UC_X86_INS_SYSCALL)\n"
content += " return emu\n"
content += """
def emulate(emu, start_addr, end_addr):
print("========================= Initial registers =========================")
print_regs(emu, registers)
try:
print("========================= Starting emulation =========================")
emu.emu_start(start_addr, end_addr)
except Exception as e:
emu.emu_stop()
print("========================= Emulation failed =========================")
print("[!] Error: {{}}".format(e))
print("========================= Final registers =========================")
print_regs(emu, registers)
return
uc = reset()
emulate(uc, {start:#x}, {end:#x})
# unicorn-engine script generated by gef
""".format(start=start_insn_addr, end=end_insn_addr)
os.write(tmp_fd, gef_pybytes(content))
os.close(tmp_fd)
if kwargs.get("to_file", None):
info("Unicorn script generated as '{}'".format(tmp_filename))
os.chmod(tmp_filename, 0o700)
if to_script_only:
return
ok("Starting emulation: {:#x} {} {:#x}".format(start_insn_addr, RIGHT_ARROW, end_insn_addr))
pythonbin = "python{}".format(PYTHON_MAJOR)
res = gef_execute_external([pythonbin, tmp_filename], as_list=True)
gef_print("\n".join(res))
if not kwargs.get("to_file", None):
os.unlink(tmp_filename)
return
@register_command
class RemoteCommand(GenericCommand):
"""gef wrapper for the `target remote` command. This command will automatically
download the target binary in the local temporary directory (defaut /tmp) and then
source it. Additionally, it will fetch all the /proc/PID/maps and loads all its
information."""
_cmdline_ = "gef-remote"
_syntax_ = "{:s} [OPTIONS] TARGET".format(_cmdline_)
_example_ = "\n{0:s} -p 6789 localhost:1234\n{0:s} -q localhost:4444 # when using qemu-user".format(_cmdline_)
def __init__(self):
super(RemoteCommand, self).__init__(prefix=False)
self.handler_connected = False
self.add_setting("clean_on_exit", False, "Clean the temporary data downloaded when the session exits.")
return
def do_invoke(self, argv):
global __gef_remote__
if __gef_remote__ is not None:
err("You already are in remote session. Close it first before opening a new one...")
return
target = None
rpid = -1
update_solib = False
self.download_all_libs = False
download_lib = None
is_extended_remote = False
qemu_gdb_mode = False
opts, args = getopt.getopt(argv, "p:UD:qAEh")
for o,a in opts:
if o == "-U": update_solib = True
elif o == "-D": download_lib = a
elif o == "-A": self.download_all_libs = True
elif o == "-E": is_extended_remote = True
elif o == "-p": rpid = int(a)
elif o == "-q": qemu_gdb_mode = True
elif o == "-h":
self.help()
return
if not args or ":" not in args[0]:
err("A target (HOST:PORT) must always be provided.")
return
if qemu_gdb_mode:
# compat layer for qemu-user
self.prepare_qemu_stub(args[0])
return
# lazily install handler on first use
if not self.handler_connected:
gef_on_new_hook(self.new_objfile_handler)
self.handler_connected = True
target = args[0]
if self.connect_target(target, is_extended_remote) is False:
return
# if extended-remote, need to attach
if is_extended_remote:
ok("Attaching to {:d}".format(rpid))
hide_context()
gdb.execute("attach {:d}".format(rpid))
unhide_context()
else:
rpid = get_pid()
ok("Targeting PID={:d}".format(rpid))
self.add_setting("target", target, "Remote target to connect to")
self.setup_remote_environment(rpid, update_solib)
if not is_remote_debug():
err("Failed to establish remote target environment.")
return
if self.download_all_libs is True:
vmmap = get_process_maps()
success = 0
for sect in vmmap:
if sect.path.startswith("/"):
_file = download_file(sect.path)
if _file is None:
err("Failed to download {:s}".format(sect.path))
else:
success += 1
ok("Downloaded {:d} files".format(success))
elif download_lib is not None:
_file = download_file(download_lib)
if _file is None:
err("Failed to download remote file")
return
ok("Download success: {:s} {:s} {:s}".format(download_lib, RIGHT_ARROW, _file))
if update_solib:
self.refresh_shared_library_path()
set_arch()
__gef_remote__ = rpid
return
def new_objfile_handler(self, event):
"""Hook that handles new_objfile events, will update remote environment accordingly."""
if not is_remote_debug():
return
if self.download_all_libs and event.new_objfile.filename.startswith("target:"):
lib = event.new_objfile.filename[len("target:"):]
llib = download_file(lib, use_cache=True)
if llib:
ok("Download success: {:s} {:s} {:s}".format(lib, RIGHT_ARROW, llib))
return
def setup_remote_environment(self, pid, update_solib=False):
"""Clone the remote environment locally in the temporary directory.
The command will duplicate the entries in the /proc/<pid> locally and then
source those information into the current gdb context to allow gef to use
all the extra commands as it was local debugging."""
gdb.execute("reset-cache")
infos = {}
for i in ("maps", "environ", "cmdline",):
infos[i] = self.load_from_remote_proc(pid, i)
if infos[i] is None:
err("Failed to load memory map of '{:s}'".format(i))
return
exepath = get_path_from_info_proc()
infos["exe"] = download_file("/proc/{:d}/exe".format(pid), use_cache=False, local_name=exepath)
if not os.access(infos["exe"], os.R_OK):
err("Source binary is not readable")
return
directory = os.path.sep.join([GEF_TEMP_DIR, str(get_pid())])
# gdb.execute("file {:s}".format(infos["exe"]))
self.add_setting("root", directory, "Path to store the remote data")
ok("Remote information loaded to temporary path '{:s}'".format(directory))
return
def connect_target(self, target, is_extended_remote):
"""Connect to remote target and get symbols. To prevent `gef` from requesting information
not fetched just yet, we disable the context disable when connection was successful."""
hide_context()
try:
cmd = "target {} {}".format("extended-remote" if is_extended_remote else "remote", target)
gdb.execute(cmd)
ok("Connected to '{}'".format(target))
ret = True
except Exception as e:
err("Failed to connect to {:s}: {:s}".format(target, str(e)))
ret = False
unhide_context()
return ret
def load_from_remote_proc(self, pid, info):
"""Download one item from /proc/pid."""
remote_name = "/proc/{:d}/{:s}".format(pid, info)
return download_file(remote_name, use_cache=False)
def refresh_shared_library_path(self):
dirs = [r for r, d, f in os.walk(self.get_setting("root"))]
path = ":".join(dirs)
gdb.execute("set solib-search-path {:s}".format(path,))
return
def help(self):
h = self._syntax_
h += "\n\t TARGET (mandatory) specifies the host:port, serial port or tty to connect to.\n"
h += "\t-U will update gdb `solib-search-path` attribute to include the files downloaded from server (default: False).\n"
h += "\t-A will download *ALL* the remote shared libraries and store them in the new environment. " \
"This command can take a few minutes to complete (default: False).\n"
h += "\t-D LIB will download the remote library called LIB.\n"
h += "\t-E Use 'extended-remote' to connect to the target.\n"
h += "\t-p PID (mandatory if -E is used) specifies PID of the debugged process on gdbserver's end.\n"
h += "\t-q Uses this option when connecting to a Qemu GDBserver.\n"
info(h)
return
def prepare_qemu_stub(self, target):
global current_arch, current_elf, __gef_qemu_mode__
reset_all_caches()
arch = get_arch()
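        # map the architecture string reported by the QEMU gdb stub to gef's arch and ELF constants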
current_elf = Elf(minimalist=True)
if arch.startswith("arm"):
current_elf.e_machine = Elf.ARM
current_arch = ARM()
elif arch.startswith("aarch64"):
current_elf.e_machine = Elf.AARCH64
current_arch = AARCH64()
elif arch.startswith("i386:intel"):
current_elf.e_machine = Elf.X86_32
current_arch = X86()
elif arch.startswith("i386:x86-64"):
current_elf.e_machine = Elf.X86_64
current_elf.e_class = Elf.ELF_64_BITS
current_arch = X86_64()
elif arch.startswith("mips"):
current_elf.e_machine = Elf.MIPS
current_arch = MIPS()
elif arch.startswith("powerpc"):
current_elf.e_machine = Elf.POWERPC
current_arch = PowerPC()
elif arch.startswith("sparc"):
current_elf.e_machine = Elf.SPARC
current_arch = SPARC()
else:
raise RuntimeError("unsupported architecture: {}".format(arch))
ok("Setting QEMU-stub for '{}' (memory mapping may be wrong)".format(current_arch.arch))
gdb.execute("target remote {}".format(target))
__gef_qemu_mode__ = True
return
@register_command
class NopCommand(GenericCommand):
"""Patch the instruction(s) pointed by parameters with NOP. Note: this command is architecture
aware."""
_cmdline_ = "nop"
_syntax_ = "{:s} [-b NUM_BYTES] [-h] [LOCATION]".format(_cmdline_)
_example_ = "{:s} $pc".format(_cmdline_)
def __init__(self):
super(NopCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
def get_insn_size(self, addr):
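        # compute the size of the instruction at `addr` as the distance to the following
        # instruction's address (works for variable-length ISAs)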
cur_insn = gef_current_instruction(addr)
next_insn = gef_instruction_n(addr, 2)
return next_insn.address - cur_insn.address
def do_invoke(self, argv):
opts, args = getopt.getopt(argv, "b:h")
num_bytes = 0
for o, a in opts:
if o == "-b":
num_bytes = long(a, 0)
elif o == "-h":
self.help()
return
if args:
loc = parse_address(args[0])
else:
loc = current_arch.pc
self.nop_bytes(loc, num_bytes)
return
def help(self):
m = self._syntax_
m += "\n LOCATION\taddress/symbol to patch\n"
m += " -b NUM_BYTES\tInstead of writing one instruction, patch the specified number of bytes\n"
m += " -h \t\tprint this help\n"
info(m)
return
@only_if_gdb_running
def nop_bytes(self, loc, num_bytes):
if num_bytes == 0:
size = self.get_insn_size(loc)
else:
size = num_bytes
nops = current_arch.nop_insn
if len(nops) > size:
m = "Cannot patch instruction at {:#x} (nop_size is:{:d},insn_size is:{:d})".format(loc, len(nops), size)
err(m)
return
while len(nops) < size:
nops += current_arch.nop_insn
if len(nops) != size:
err("Cannot patch instruction at {:#x} (nop instruction does not evenly fit in requested size)"
.format(loc))
return
ok("Patching {:d} bytes from {:s}".format(size, format_address(loc)))
write_memory(loc, nops, size)
return
@register_command
class StubCommand(GenericCommand):
"""Stub out the specified function. This function is useful when needing to skip one
function to be called and disrupt your runtime flow (ex. fork)."""
_cmdline_ = "stub"
_syntax_ = """{:s} [-r RETVAL] [-h] [LOCATION]
\tLOCATION\taddress/symbol to stub out
\t-r RETVAL\tSet the return value""".format(_cmdline_)
_example_ = "{:s} -r 0 fork"
def __init__(self):
super(StubCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
try:
opts, args = getopt.getopt(argv, "r:")
retval = 0
for o, a in opts:
if o == "-r":
retval = long(a, 0)
except getopt.GetoptError:
self.usage()
return
loc = args[0] if args else "*{:#x}".format(current_arch.pc)
StubBreakpoint(loc, retval)
return
@register_command
class CapstoneDisassembleCommand(GenericCommand):
"""Use capstone disassembly framework to disassemble code."""
_cmdline_ = "capstone-disassemble"
_syntax_ = "{:s} [LOCATION] [[length=LENGTH] [option=VALUE]] ".format(_cmdline_)
_aliases_ = ["cs-dis",]
_example_ = "{:s} $pc length=50".format(_cmdline_)
def pre_load(self):
try:
__import__("capstone")
except ImportError:
msg = "Missing `capstone` package for Python{0}. Install with `pip{0} install capstone`.".format(PYTHON_MAJOR)
raise ImportWarning(msg)
return
def __init__(self):
super(CapstoneDisassembleCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
location = None
kwargs = {}
for arg in argv:
if "=" in arg:
key, value = arg.split("=", 1)
kwargs[key] = value
elif location is None:
location = parse_address(arg)
location = location or current_arch.pc
length = int(kwargs.get("length", get_gef_setting("context.nb_lines_code")))
for insn in capstone_disassemble(location, length, skip=length*self.repeat_count, **kwargs):
text_insn = str(insn)
msg = ""
if insn.address == current_arch.pc:
msg = Color.colorify("{} {}".format(RIGHT_ARROW, text_insn), "bold red")
reason = self.capstone_analyze_pc(insn, length)[0]
if reason:
gef_print(msg)
gef_print(reason)
break
else:
msg = "{} {}".format(" "*5, text_insn)
gef_print(msg)
return
def capstone_analyze_pc(self, insn, nb_insn):
if current_arch.is_conditional_branch(insn):
is_taken, reason = current_arch.is_branch_taken(insn)
if is_taken:
reason = "[Reason: {:s}]".format(reason) if reason else ""
msg = Color.colorify("\tTAKEN {:s}".format(reason), "bold green")
else:
reason = "[Reason: !({:s})]".format(reason) if reason else ""
msg = Color.colorify("\tNOT taken {:s}".format(reason), "bold red")
return (is_taken, msg)
if current_arch.is_call(insn):
target_address = int(insn.operands[-1].split()[0], 16)
msg = []
for i, new_insn in enumerate(capstone_disassemble(target_address, nb_insn)):
msg.append(" {} {}".format (DOWN_ARROW if i==0 else " ", str(new_insn)))
return (True, "\n".join(msg))
return (False, "")
@register_command
class GlibcHeapCommand(GenericCommand):
"""Base command to get information about the Glibc heap structure."""
_cmdline_ = "heap"
_syntax_ = "{:s} (chunk|chunks|bins|arenas)".format(_cmdline_)
def __init__(self):
super(GlibcHeapCommand, self).__init__(prefix=True)
return
@only_if_gdb_running
def do_invoke(self, argv):
self.usage()
return
@register_command
class GlibcHeapSetArenaCommand(GenericCommand):
"""Display information on a heap chunk."""
_cmdline_ = "heap set-arena"
_syntax_ = "{:s} LOCATION".format(_cmdline_)
_example_ = "{:s} 0x001337001337".format(_cmdline_)
def __init__(self):
super(GlibcHeapSetArenaCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
global __gef_default_main_arena__
if not argv:
ok("Current main_arena set to: '{}'".format(__gef_default_main_arena__))
return
new_arena = safe_parse_and_eval(argv[0])
if new_arena is None:
err("Invalid location")
return
if argv[0].startswith("0x"):
new_arena = Address(value=to_unsigned_long(new_arena))
if new_arena is None or not new_arena.valid:
err("Invalid location")
return
__gef_default_main_arena__ = "*{:s}".format(format_address(new_arena.value))
else:
__gef_default_main_arena__ = argv[0]
return
@register_command
class GlibcHeapArenaCommand(GenericCommand):
"""Display information on a heap chunk."""
_cmdline_ = "heap arenas"
_syntax_ = _cmdline_
@only_if_gdb_running
def do_invoke(self, argv):
try:
arena = GlibcArena(__gef_default_main_arena__)
except gdb.error:
err("Could not find Glibc main arena")
return
while True:
gef_print("{}".format(arena))
arena = arena.get_next()
if arena is None:
break
return
@register_command
class GlibcHeapChunkCommand(GenericCommand):
"""Display information on a heap chunk.
See https://github.com/sploitfun/lsploits/blob/master/glibc/malloc/malloc.c#L1123."""
_cmdline_ = "heap chunk"
_syntax_ = "{:s} LOCATION".format(_cmdline_)
def __init__(self):
super(GlibcHeapChunkCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
if not argv:
err("Missing chunk address")
self.usage()
return
if get_main_arena() is None:
return
addr = to_unsigned_long(gdb.parse_and_eval(argv[0]))
chunk = GlibcChunk(addr)
gef_print(chunk.psprint())
return
@register_command
class GlibcHeapChunksCommand(GenericCommand):
"""Display information all chunks from main_arena heap. If a location is passed,
it must correspond to the base address of the first chunk."""
_cmdline_ = "heap chunks"
_syntax_ = "{0} [LOCATION]".format(_cmdline_)
_example_ = "\n{0}\n{0} 0x555555775000".format(_cmdline_)
def __init__(self):
super(GlibcHeapChunksCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
self.add_setting("peek_nb_byte", 16, "Hexdump N first byte(s) inside the chunk data (0 to disable)")
return
@only_if_gdb_running
def do_invoke(self, argv):
if not argv:
heap_section = [x for x in get_process_maps() if x.path == "[heap]"]
if not heap_section:
err("No heap section")
return
heap_section = heap_section[0].page_start
else:
heap_section = int(argv[0], 0)
arena = get_main_arena()
if arena is None:
err("No valid arena")
return
nb = self.get_setting("peek_nb_byte")
current_chunk = GlibcChunk(heap_section, from_base=True)
while True:
if current_chunk.chunk_base_address == arena.top:
gef_print("{} {} {}".format(str(current_chunk), LEFT_ARROW, Color.greenify("top chunk")))
break
if current_chunk.chunk_base_address > arena.top:
break
if current_chunk.size == 0:
# EOF
break
line = str(current_chunk)
if nb:
line += "\n [" + hexdump(read_memory(current_chunk.address, nb), nb, base=current_chunk.address) + "]"
gef_print(line)
next_chunk = current_chunk.get_next_chunk()
if next_chunk is None:
break
next_chunk_addr = Address(value=next_chunk.address)
if not next_chunk_addr.valid:
# corrupted
break
current_chunk = next_chunk
return
@register_command
class GlibcHeapBinsCommand(GenericCommand):
"""Display information on the bins on an arena (default: main_arena).
See https://github.com/sploitfun/lsploits/blob/master/glibc/malloc/malloc.c#L1123."""
_bin_types_ = ["tcache", "fast", "unsorted", "small", "large"]
_cmdline_ = "heap bins"
_syntax_ = "{:s} [{:s}]".format(_cmdline_, "|".join(_bin_types_))
def __init__(self):
super(GlibcHeapBinsCommand, self).__init__(prefix=True, complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
if not argv:
for bin_t in GlibcHeapBinsCommand._bin_types_:
gdb.execute("heap bins {:s}".format(bin_t))
return
bin_t = argv[0]
if bin_t not in GlibcHeapBinsCommand._bin_types_:
self.usage()
return
gdb.execute("heap bins {}".format(bin_t))
return
@staticmethod
def pprint_bin(arena_addr, index, _type=""):
arena = GlibcArena(arena_addr)
fw, bk = arena.bin(index)
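        # the bin is a circular doubly-linked list; walk it forward (via fd) until we loop back to the head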
if bk==0x00 and fw==0x00:
warn("Invalid backward and forward bin pointers(fw==bk==NULL)")
return -1
nb_chunk = 0
head = GlibcChunk(bk, from_base=True).fwd
if fw == head:
return nb_chunk
ok("{}bins[{:d}]: fw={:#x}, bk={:#x}".format(_type, index, fw, bk))
m = []
while fw != head:
chunk = GlibcChunk(fw, from_base=True)
m.append("{:s} {:s}".format(RIGHT_ARROW, str(chunk)))
fw = chunk.fwd
nb_chunk += 1
if m:
gef_print(" ".join(m))
return nb_chunk
@register_command
class GlibcHeapTcachebinsCommand(GenericCommand):
"""Display information on the Tcachebins on an arena (default: main_arena).
See https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=d5c3fafc4307c9b7a4c7d5cb381fcdbfad340bcc."""
_cmdline_ = "heap bins tcache"
_syntax_ = "{:s} [ARENA_ADDRESS]".format(_cmdline_)
def __init__(self):
super(GlibcHeapTcachebinsCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
# Determine if we are using libc with tcache built in (2.26+)
if get_libc_version() < (2, 26):
info("No Tcache in this version of libc")
return
arena = GlibcArena("*{:s}".format(argv[0])) if len(argv) == 1 else get_main_arena()
if arena is None:
err("Invalid Glibc arena")
return
# Get tcache_perthread_struct for this arena
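        # the tcache_perthread_struct is the first allocation on the heap; the +0x10 below skips
        # the chunk header preceding it (note: this offset assumes a 64-bit malloc_chunk layout)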
addr = HeapBaseFunction.heap_base() + 0x10
gef_print(titlify("Tcachebins for arena {:#x}".format(int(arena))))
for i in range(GlibcArena.TCACHE_MAX_BINS):
count = ord(read_memory(addr + i, 1))
chunk = arena.tcachebin(i)
chunks = set()
m = []
# Only print the entry if there are valid chunks. Don't trust count
while True:
if chunk is None:
break
try:
m.append("{:s} {:s} ".format(LEFT_ARROW, str(chunk)))
if chunk.address in chunks:
m.append("{:s} [loop detected]".format(RIGHT_ARROW))
break
chunks.add(chunk.address)
next_chunk = chunk.get_fwd_ptr()
if next_chunk == 0:
break
chunk = GlibcChunk(next_chunk)
except gdb.MemoryError:
m.append("{:s} [Corrupted chunk at {:#x}]".format(LEFT_ARROW, chunk.address))
break
if m:
gef_print("Tcachebins[idx={:d}, size={:#x}] count={:d} ".format(i, (i+1)*(current_arch.ptrsize)*2, count), end="")
gef_print("".join(m))
return
@register_command
class GlibcHeapFastbinsYCommand(GenericCommand):
"""Display information on the fastbinsY on an arena (default: main_arena).
See https://github.com/sploitfun/lsploits/blob/master/glibc/malloc/malloc.c#L1123."""
_cmdline_ = "heap bins fast"
_syntax_ = "{:s} [ARENA_ADDRESS]".format(_cmdline_)
def __init__(self):
super(GlibcHeapFastbinsYCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
def fastbin_index(sz):
return (sz >> 4) - 2 if SIZE_SZ == 8 else (sz >> 3) - 2
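        # mirror glibc's fastbin sizing: SIZE_SZ, MAX_FAST_SIZE and NFASTBINS determine how many fastbins to walk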
SIZE_SZ = current_arch.ptrsize
MAX_FAST_SIZE = (80 * SIZE_SZ // 4)
NFASTBINS = fastbin_index(MAX_FAST_SIZE) - 1
arena = GlibcArena("*{:s}".format(argv[0])) if len(argv) == 1 else get_main_arena()
if arena is None:
err("Invalid Glibc arena")
return
gef_print(titlify("Fastbins for arena {:#x}".format(int(arena))))
for i in range(NFASTBINS):
gef_print("Fastbins[idx={:d}, size={:#x}] ".format(i, (i+1)*SIZE_SZ*2), end="")
chunk = arena.fastbin(i)
chunks = set()
while True:
if chunk is None:
gef_print("0x00", end="")
break
try:
gef_print("{:s} {:s} ".format(LEFT_ARROW, str(chunk)), end="")
if chunk.address in chunks:
gef_print("{:s} [loop detected]".format(RIGHT_ARROW), end="")
break
if fastbin_index(chunk.get_chunk_size()) != i:
gef_print("[incorrect fastbin_index] ", end="")
chunks.add(chunk.address)
next_chunk = chunk.get_fwd_ptr()
if next_chunk == 0:
break
chunk = GlibcChunk(next_chunk, from_base=True)
except gdb.MemoryError:
gef_print("{:s} [Corrupted chunk at {:#x}]".format(LEFT_ARROW, chunk.address), end="")
break
gef_print()
return
@register_command
class GlibcHeapUnsortedBinsCommand(GenericCommand):
"""Display information on the Unsorted Bins of an arena (default: main_arena).
See: https://github.com/sploitfun/lsploits/blob/master/glibc/malloc/malloc.c#L1689."""
_cmdline_ = "heap bins unsorted"
_syntax_ = "{:s} [ARENA_ADDRESS]".format(_cmdline_)
def __init__(self):
super(GlibcHeapUnsortedBinsCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
if get_main_arena() is None:
err("Invalid Glibc arena")
return
arena_addr = "*{:s}".format(argv[0]) if len(argv) == 1 else __gef_default_main_arena__
gef_print(titlify("Unsorted Bin for arena '{:s}'".format(arena_addr)))
nb_chunk = GlibcHeapBinsCommand.pprint_bin(arena_addr, 0, "unsorted_")
if nb_chunk >= 0:
info("Found {:d} chunks in unsorted bin.".format(nb_chunk))
return
@register_command
class GlibcHeapSmallBinsCommand(GenericCommand):
"""Convenience command for viewing small bins."""
_cmdline_ = "heap bins small"
_syntax_ = "{:s} [ARENA_ADDRESS]".format(_cmdline_)
def __init__(self):
super(GlibcHeapSmallBinsCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
if get_main_arena() is None:
err("Invalid Glibc arena")
return
arena_addr = "*{:s}".format(argv[0]) if len(argv) == 1 else __gef_default_main_arena__
gef_print(titlify("Small Bins for arena '{:s}'".format(arena_addr)))
bins = {}
for i in range(1, 63):
nb_chunk = GlibcHeapBinsCommand.pprint_bin(arena_addr, i, "small_")
if nb_chunk < 0:
break
if nb_chunk > 0:
bins[i] = nb_chunk
info("Found {:d} chunks in {:d} small non-empty bins.".format(sum(bins.values()), len(bins)))
return
@register_command
class GlibcHeapLargeBinsCommand(GenericCommand):
"""Convenience command for viewing large bins."""
_cmdline_ = "heap bins large"
_syntax_ = "{:s} [ARENA_ADDRESS]".format(_cmdline_)
def __init__(self):
super(GlibcHeapLargeBinsCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke(self, argv):
if get_main_arena() is None:
err("Invalid Glibc arena")
return
arena_addr = "*{:s}".format(argv[0]) if len(argv) == 1 else __gef_default_main_arena__
gef_print(titlify("Large Bins for arena '{:s}'".format(arena_addr)))
bins = {}
for i in range(63, 126):
nb_chunk = GlibcHeapBinsCommand.pprint_bin(arena_addr, i, "large_")
            if nb_chunk < 0:
break
if nb_chunk > 0:
bins[i] = nb_chunk
info("Found {:d} chunks in {:d} large non-empty bins.".format(sum(bins.values()), len(bins)))
return
@register_command
class SolveKernelSymbolCommand(GenericCommand):
"""Solve kernel symbols from kallsyms table."""
_cmdline_ = "ksymaddr"
_syntax_ = "{:s} SymbolToSearch".format(_cmdline_)
_example_ = "{:s} prepare_creds".format(_cmdline_)
def do_invoke(self, argv):
if len(argv) != 1:
self.usage()
return
found = False
sym = argv[0]
with open("/proc/kallsyms", "r") as f:
for line in f:
try:
symaddr, symtype, symname = line.strip().split(" ", 3)
symaddr = long(symaddr, 16)
if symname == sym:
ok("Found matching symbol for '{:s}' at {:#x} (type={:s})".format(sym, symaddr, symtype))
found = True
if sym in symname:
warn("Found partial match for '{:s}' at {:#x} (type={:s}): {:s}".format(sym, symaddr, symtype, symname))
found = True
except ValueError:
pass
if not found:
err("No match for '{:s}'".format(sym))
return
@register_command
class DetailRegistersCommand(GenericCommand):
"""Display full details on one, many or all registers value from current architecture."""
_cmdline_ = "registers"
_syntax_ = "{:s} [[Register1][Register2] ... [RegisterN]]".format(_cmdline_)
_example_ = "\n{0:s}\n{0:s} $eax $eip $esp".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
unchanged_color = get_gef_setting("theme.registers_register_name")
changed_color = get_gef_setting("theme.registers_value_changed")
string_color = get_gef_setting("theme.dereference_string")
if argv:
regs = [reg for reg in current_arch.all_registers if reg in argv]
if not regs:
warn("No matching registers found")
else:
regs = current_arch.all_registers
memsize = current_arch.ptrsize
endian = endian_str()
charset = string.printable
widest = max(map(len, current_arch.all_registers))
special_line = ""
for regname in regs:
reg = gdb.parse_and_eval(regname)
if reg.type.code == gdb.TYPE_CODE_VOID:
continue
padreg = regname.ljust(widest, " ")
if str(reg) == "<unavailable>":
line = "{}: ".format(Color.colorify(padreg, unchanged_color))
line += Color.colorify("no value", "yellow underline")
gef_print(line)
continue
value = align_address(long(reg))
old_value = ContextCommand.old_registers.get(regname, 0)
if value == old_value:
color = unchanged_color
else:
color = changed_color
# Special (e.g. segment) registers go on their own line
if regname in current_arch.special_registers:
special_line += "{}: ".format(Color.colorify(regname, color))
special_line += "0x{:04x} ".format(get_register(regname))
continue
line = "{}: ".format(Color.colorify(padreg, color))
if regname == current_arch.flag_register:
line += current_arch.flag_register_to_human()
gef_print(line)
continue
addr = lookup_address(align_address(long(value)))
if addr.valid:
line += str(addr)
else:
line += format_address_spaces(value)
addrs = DereferenceCommand.dereference_from(value)
if len(addrs) > 1:
sep = " {:s} ".format(RIGHT_ARROW)
line += sep
line += sep.join(addrs[1:])
# check to see if reg value is ascii
try:
fmt = "{}{}".format(endian, "I" if memsize==4 else "Q")
last_addr = int(addrs[-1],16)
val = gef_pystring(struct.pack(fmt, last_addr))
if all([_ in charset for _ in val]):
line += ' ("{:s}"?)'.format(Color.colorify(val, string_color))
except ValueError:
pass
gef_print(line)
if special_line:
gef_print(special_line)
return
@register_command
class ShellcodeCommand(GenericCommand):
"""ShellcodeCommand uses @JonathanSalwan simple-yet-awesome shellcode API to
download shellcodes."""
_cmdline_ = "shellcode"
_syntax_ = "{:s} (search|get)".format(_cmdline_)
def __init__(self):
super(ShellcodeCommand, self).__init__(prefix=True)
return
def do_invoke(self, argv):
err("Missing sub-command (search|get)")
self.usage()
return
@register_command
class ShellcodeSearchCommand(GenericCommand):
"""Search pattern in shell-storm's shellcode database."""
_cmdline_ = "shellcode search"
_syntax_ = "{:s} PATTERN1 PATTERN2".format(_cmdline_)
_aliases_ = ["sc-search",]
api_base = "http://shell-storm.org"
search_url = "{}/api/?s=".format(api_base)
def do_invoke(self, argv):
if not argv:
err("Missing pattern to search")
self.usage()
return
self.search_shellcode(argv)
return
def search_shellcode(self, search_options):
# API : http://shell-storm.org/shellcode/
args = "*".join(search_options)
res = http_get(self.search_url + args)
if res is None:
err("Could not query search page")
return
ret = gef_pystring(res)
# format: [author, OS/arch, cmd, id, link]
lines = ret.split("\\n")
refs = [line.split("::::") for line in lines]
if refs:
info("Showing matching shellcodes")
info("\t".join(["Id", "Platform", "Description"]))
for ref in refs:
try:
_, arch, cmd, sid, _ = ref
gef_print("\t".join([sid, arch, cmd]))
except ValueError:
continue
info("Use `shellcode get <id>` to fetch shellcode")
return
@register_command
class ShellcodeGetCommand(GenericCommand):
"""Download shellcode from shell-storm's shellcode database."""
_cmdline_ = "shellcode get"
_syntax_ = "{:s} SHELLCODE_ID".format(_cmdline_)
_aliases_ = ["sc-get",]
api_base = "http://shell-storm.org"
get_url = "{}/shellcode/files/shellcode-{{:d}}.php".format(api_base)
def do_invoke(self, argv):
if len(argv) != 1:
err("Missing ID to download")
self.usage()
return
if not argv[0].isdigit():
err("ID is not a number")
self.usage()
return
self.get_shellcode(long(argv[0]))
return
def get_shellcode(self, sid):
res = http_get(self.get_url.format(sid))
if res is None:
err("Failed to fetch shellcode #{:d}".format(sid))
return
ret = gef_pystring(res)
info("Downloading shellcode id={:d}".format(sid))
fd, fname = tempfile.mkstemp(suffix=".txt", prefix="sc-", text=True, dir="/tmp")
data = ret.split("\\n")[7:-11]
buf = "\n".join(data)
buf = HTMLParser().unescape(buf)
os.write(fd, gef_pybytes(buf))
os.close(fd)
info("Shellcode written to '{:s}'".format(fname))
return
@register_command
class RopperCommand(GenericCommand):
"""Ropper (http://scoding.de/ropper) plugin."""
_cmdline_ = "ropper"
_syntax_ = "{:s} [ROPPER_OPTIONS]".format(_cmdline_)
def __init__(self):
super(RopperCommand, self).__init__(complete=gdb.COMPLETE_NONE)
return
def pre_load(self):
try:
__import__("ropper")
except ImportError:
msg = "Missing `ropper` package for Python{0}, install with: `pip{0} install ropper`.".format(PYTHON_MAJOR)
raise ImportWarning(msg)
return
@only_if_gdb_running
def do_invoke(self, argv):
ropper = sys.modules["ropper"]
if "--file" not in argv:
path = get_filepath()
sect = next(filter(lambda x: x.path == path, get_process_maps()))
argv.append("--file")
argv.append(path)
argv.append("-I")
argv.append("{:#x}".format(sect.page_start))
import readline
# ropper set up own autocompleter after which gdb/gef autocomplete don't work
old_completer_delims = readline.get_completer_delims()
old_completer = readline.get_completer()
ropper.start(argv)
readline.set_completer(old_completer)
readline.set_completer_delims(old_completer_delims)
return
@register_command
class AssembleCommand(GenericCommand):
"""Inline code assemble. Architecture can be set in GEF runtime config (default x86-32). """
_cmdline_ = "assemble"
_syntax_ = "{:s} [-a ARCH] [-m MODE] [-e] [-s] [-l LOCATION] instruction;[instruction;...instruction;])".format(_cmdline_)
_aliases_ = ["asm",]
_example_ = "\n{0:s} -a x86 -m 32 nop ; nop ; inc eax ; int3\n{0:s} -a arm -m arm add r0, r0, 1".format(_cmdline_)
def __init__(self, *args, **kwargs):
super(AssembleCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
self.valid_arch_modes = {
"ARM" : ["ARM", "THUMB"],
"ARM64" : ["ARM", "THUMB", "V5", "V8", ],
"MIPS" : ["MICRO", "MIPS3", "MIPS32", "MIPS32R6", "MIPS64",],
"PPC" : ["PPC32", "PPC64", "QPX",],
"SPARC" : ["SPARC32", "SPARC64", "V9",],
"SYSTEMZ" : ["32",],
"X86" : ["16", "32", "64"],
}
return
def pre_load(self):
try:
__import__("keystone")
except ImportError:
msg = "Missing `keystone-engine` package for Python{0}, install with: `pip{0} install keystone-engine`.".format(PYTHON_MAJOR)
raise ImportWarning(msg)
return
def usage(self):
super(AssembleCommand, self).usage()
gef_print("\nAvailable architectures/modes:")
# for updates, see https://github.com/keystone-engine/keystone/blob/master/include/keystone/keystone.h
for arch in self.valid_arch_modes:
gef_print(" - {} ".format(arch))
gef_print(" * {}".format(" / ".join(self.valid_arch_modes[arch])))
return
def do_invoke(self, argv):
arch_s, mode_s, big_endian, as_shellcode, write_to_location = None, None, False, False, None
opts, args = getopt.getopt(argv, "a:m:l:esh")
for o,a in opts:
if o == "-a": arch_s = a.upper()
if o == "-m": mode_s = a.upper()
if o == "-e": big_endian = True
if o == "-s": as_shellcode = True
if o == "-l": write_to_location = long(gdb.parse_and_eval(a))
if o == "-h":
self.usage()
return
if not args:
return
if (arch_s, mode_s) == (None, None):
if is_alive():
arch_s, mode_s = current_arch.arch, current_arch.mode
endian_s = "big" if is_big_endian() else "little"
arch, mode = get_keystone_arch(arch=arch_s, mode=mode_s, endian=is_big_endian())
else:
# if not alive, defaults to x86-32
arch_s = "X86"
mode_s = "32"
endian_s = "little"
arch, mode = get_keystone_arch(arch=arch_s, mode=mode_s, endian=False)
elif not arch_s:
err("An architecture (-a) must be provided")
return
elif not mode_s:
err("A mode (-m) must be provided")
return
else:
arch, mode = get_keystone_arch(arch=arch_s, mode=mode_s, endian=big_endian)
endian_s = "big" if big_endian else "little"
insns = " ".join(args)
insns = [x.strip() for x in insns.split(";") if x is not None]
info("Assembling {} instruction{} for {} ({} endian)".format(len(insns),
"s" if len(insns)>1 else "",
":".join([arch_s, mode_s]),
endian_s))
if as_shellcode:
gef_print("""sc="" """)
raw = b""
for insn in insns:
res = keystone_assemble(insn, arch, mode, raw=True)
if res is None:
gef_print("(Invalid)")
continue
if write_to_location:
raw += res
continue
s = binascii.hexlify(res)
res = b"\\x" + b"\\x".join([s[i:i + 2] for i in range(0, len(s), 2)])
res = res.decode("utf-8")
if as_shellcode:
res = """sc+="{0:s}" """.format(res)
gef_print("{0:60s} # {1}".format(res, insn))
if write_to_location:
l = len(raw)
info("Overwriting {:d} bytes at {:s}".format(l, format_address(write_to_location)))
write_memory(write_to_location, raw, l)
return
@register_command
class ProcessListingCommand(GenericCommand):
"""List and filter process. If a PATTERN is given as argument, results shown will be grepped
by this pattern."""
_cmdline_ = "process-search"
_syntax_ = "{:s} [PATTERN]".format(_cmdline_)
_aliases_ = ["ps",]
_example_ = "{:s} gdb".format(_cmdline_)
def __init__(self):
super(ProcessListingCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
self.add_setting("ps_command", "/bin/ps auxww", "`ps` command to get process information")
return
def do_invoke(self, argv):
do_attach = False
smart_scan = False
opts, args = getopt.getopt(argv, "as")
for o, _ in opts:
if o == "-a": do_attach = True
if o == "-s": smart_scan = True
pattern = re.compile("^.*$") if not args else re.compile(args[0])
for process in self.get_processes():
pid = int(process["pid"])
command = process["command"]
if not re.search(pattern, command):
continue
if smart_scan:
if command.startswith("[") and command.endswith("]"): continue
if command.startswith("socat "): continue
if command.startswith("grep "): continue
if command.startswith("gdb "): continue
if args and do_attach:
ok("Attaching to process='{:s}' pid={:d}".format(process["command"], pid))
gdb.execute("attach {:d}".format(pid))
return None
line = [process[i] for i in ("pid", "user", "cpu", "mem", "tty", "command")]
gef_print("\t\t".join(line))
return None
def get_processes(self):
output = gef_execute_external(self.get_setting("ps_command").split(), True)
names = [x.lower().replace("%", "") for x in output[0].split()]
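        # the last column (the command line) may contain spaces, so re-join the remaining fields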
for line in output[1:]:
fields = line.split()
t = {}
for i, name in enumerate(names):
if i == len(names) - 1:
t[name] = " ".join(fields[i:])
else:
t[name] = fields[i]
yield t
return
@register_command
class ElfInfoCommand(GenericCommand):
"""Display a limited subset of ELF header information. If no argument is provided, the command will
show information about the current ELF being debugged."""
_cmdline_ = "elf-info"
_syntax_ = "{:s} [FILE]".format(_cmdline_)
_example_ = "{:s} /bin/ls".format(_cmdline_)
def __init__(self, *args, **kwargs):
super(ElfInfoCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
def do_invoke(self, argv):
# http://www.sco.com/developers/gabi/latest/ch4.eheader.html
classes = {0x01: "32-bit",
0x02: "64-bit",}
endianness = {0x01: "Little-Endian",
0x02: "Big-Endian",}
osabi = {
0x00: "System V",
0x01: "HP-UX",
0x02: "NetBSD",
0x03: "Linux",
0x06: "Solaris",
0x07: "AIX",
0x08: "IRIX",
0x09: "FreeBSD",
0x0C: "OpenBSD",
}
types = {
0x01: "Relocatable",
0x02: "Executable",
0x03: "Shared",
0x04: "Core"
}
machines = {
0x02: "SPARC",
0x03: "x86",
0x08: "MIPS",
0x12: "SPARC64",
0x14: "PowerPC",
0x15: "PowerPC64",
0x28: "ARM",
0x32: "IA-64",
0x3E: "x86-64",
0xB7: "AArch64",
}
filename = argv[0] if argv else get_filepath()
if filename is None:
return
elf = get_elf_headers(filename)
if elf is None:
return
data = [
("Magic", "{0!s}".format(hexdump(struct.pack(">I",elf.e_magic), show_raw=True))),
("Class", "{0:#x} - {1}".format(elf.e_class, classes[elf.e_class])),
("Endianness", "{0:#x} - {1}".format(elf.e_endianness, endianness[elf.e_endianness])),
("Version", "{:#x}".format(elf.e_eiversion)),
("OS ABI", "{0:#x} - {1}".format(elf.e_osabi, osabi[elf.e_osabi])),
("ABI Version", "{:#x}".format(elf.e_abiversion)),
("Type", "{0:#x} - {1}".format(elf.e_type, types[elf.e_type])),
("Machine", "{0:#x} - {1}".format(elf.e_machine, machines[elf.e_machine])),
("Program Header Table" , "{}".format(format_address(elf.e_phoff))),
("Section Header Table" , "{}".format(format_address(elf.e_shoff))),
("Header Table" , "{}".format(format_address(elf.e_phoff))),
("ELF Version", "{:#x}".format(elf.e_version)),
("Header size" , "{0} ({0:#x})".format(elf.e_ehsize)),
("Entry point", "{}".format(format_address(elf.e_entry))),
]
for title, content in data:
gef_print("{}: {}".format(Color.boldify("{:<22}".format(title)), content))
return
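# Usage sketch (hypothetical session): `gef> elf-info /bin/ls` prints one "<field>: <value>" line
# per entry of the `data` list above (Magic, Class, Endianness, ...); the actual values depend on
# the ELF being inspected.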
@register_command
class EntryPointBreakCommand(GenericCommand):
"""Tries to find best entry point and sets a temporary breakpoint on it. The command will test for
well-known symbols for entry points, such as `main`, `_main`, `__libc_start_main`, etc. defined by
the setting `entrypoint_symbols`."""
_cmdline_ = "entry-break"
_syntax_ = _cmdline_
_aliases_ = ["start",]
def __init__(self, *args, **kwargs):
super(EntryPointBreakCommand, self).__init__()
self.add_setting("entrypoint_symbols", "main _main __libc_start_main __uClibc_main start _start", "Possible symbols for entry points")
return
def do_invoke(self, argv):
fpath = get_filepath()
if fpath is None:
warn("No executable to debug, use `file` to load a binary")
return
if not os.access(fpath, os.X_OK):
warn("The file '{}' is not executable.".format(fpath))
return
if is_alive() and not __gef_qemu_mode__:
warn("gdb is already running")
return
bp = None
entrypoints = self.get_setting("entrypoint_symbols").split()
for sym in entrypoints:
try:
value = gdb.parse_and_eval(sym)
info("Breaking at '{:s}'".format(str(value)))
bp = EntryBreakBreakpoint(sym)
gdb.execute("run {}".format(" ".join(argv)))
return
except gdb.error as gdb_error:
if 'The "remote" target does not support "run".' in str(gdb_error):
# this case can happen when doing remote debugging
gdb.execute("continue")
return
continue
# if here, clear the breakpoint if any set
if bp:
bp.delete()
# break at entry point
elf = get_elf_headers()
if elf is None:
return
if self.is_pie(fpath):
self.set_init_tbreak_pie(elf.e_entry, argv)
gdb.execute("continue")
return
self.set_init_tbreak(elf.e_entry)
gdb.execute("run {}".format(" ".join(argv)))
return
def set_init_tbreak(self, addr):
info("Breaking at entry-point: {:#x}".format(addr))
bp = EntryBreakBreakpoint("*{:#x}".format(addr))
return bp
def set_init_tbreak_pie(self, addr, argv):
warn("PIC binary detected, retrieving text base address")
gdb.execute("set stop-on-solib-events 1")
hide_context()
gdb.execute("run {}".format(" ".join(argv)))
unhide_context()
gdb.execute("set stop-on-solib-events 0")
vmmap = get_process_maps()
base_address = [x.page_start for x in vmmap if x.path == get_filepath()][0]
return self.set_init_tbreak(base_address + addr)
def is_pie(self, fpath):
return checksec(fpath)["PIE"]
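# Note on the PIE path above: since a PIE binary's load address is only known once the loader has
# mapped it, set_init_tbreak_pie() temporarily enables `stop-on-solib-events`, runs until the
# first shared-library event, reads the text base address from the process maps, and finally
# breaks at base_address + elf.e_entry.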
@register_command
class NamedBreakpointCommand(GenericCommand):
"""Sets a breakpoint and assigns a name to it, which will be shown, when it's hit."""
_cmdline_ = "name-break"
_syntax_ = "{:s} NAME [LOCATION]".format(_cmdline_)
_aliases_ = ["nb",]
_example = "{:s} main *0x4008a9"
def __init__(self, *args, **kwargs):
super(NamedBreakpointCommand, self).__init__()
return
def do_invoke(self, argv):
if not argv:
err("Missing name for breakpoint")
self.usage()
return
name = argv[0]
location = argv[1] if len(argv) > 1 else "*{}".format(hex(current_arch.pc))
NamedBreakpoint(location, name)
return
@register_command
class ContextCommand(GenericCommand):
"""Displays a comprehensive and modular summary of runtime context. Unless setting `enable` is
set to False, this command will be spawned automatically every time GDB hits a breakpoint, a
watchpoint, or any kind of interrupt. By default, it will show panes that contain the register
states, the stack, and the disassembly code around $pc."""
_cmdline_ = "context"
_syntax_ = "{:s} [legend|regs|stack|code|args|memory|source|trace|threads|extra]".format(_cmdline_)
_aliases_ = ["ctx",]
old_registers = {}
def __init__(self):
super(ContextCommand, self).__init__()
self.add_setting("enable", True, "Enable/disable printing the context when breaking")
self.add_setting("show_stack_raw", False, "Show the stack pane as raw hexdump (no dereference)")
self.add_setting("show_registers_raw", False, "Show the registers pane with raw values (no dereference)")
self.add_setting("peek_calls", True, "Peek into calls")
self.add_setting("peek_ret", True, "Peek at return address")
self.add_setting("nb_lines_stack", 8, "Number of line in the stack pane")
self.add_setting("grow_stack_down", False, "Order of stack downward starts at largest down to stack pointer")
self.add_setting("nb_lines_backtrace", 10, "Number of line in the backtrace pane")
self.add_setting("nb_lines_threads", -1, "Number of line in the threads pane")
self.add_setting("nb_lines_code", 6, "Number of instruction after $pc")
self.add_setting("nb_lines_code_prev", 3, "Number of instruction before $pc")
self.add_setting("ignore_registers", "", "Space-separated list of registers not to display (e.g. '$cs $ds $gs')")
self.add_setting("clear_screen", False, "Clear the screen before printing the context")
self.add_setting("layout", "legend regs stack code args source memory threads trace extra", "Change the order/presence of the context sections")
self.add_setting("redirect", "", "Redirect the context information to another TTY")
if "capstone" in list(sys.modules.keys()):
self.add_setting("use_capstone", False, "Use capstone as disassembler in the code pane (instead of GDB)")
self.layout_mapping = {
"legend": self.show_legend,
"regs": self.context_regs,
"stack": self.context_stack,
"code": self.context_code,
"args": self.context_args,
"memory": self.context_memory,
"source": self.context_source,
"trace": self.context_trace,
"threads": self.context_threads,
"extra": self.context_additional_information,
}
return
def post_load(self):
gef_on_continue_hook(self.update_registers)
gef_on_continue_hook(self.empty_extra_messages)
return
def show_legend(self):
if get_gef_setting("gef.disable_color")!=True:
str_color = get_gef_setting("theme.dereference_string")
code_addr_color = get_gef_setting("theme.address_code")
stack_addr_color = get_gef_setting("theme.address_stack")
heap_addr_color = get_gef_setting("theme.address_heap")
changed_register_color = get_gef_setting("theme.registers_value_changed")
gef_print("[ Legend: {} | {} | {} | {} | {} ]".format(Color.colorify("Modified register", changed_register_color),
Color.colorify("Code", code_addr_color),
Color.colorify("Heap", heap_addr_color),
Color.colorify("Stack", stack_addr_color),
Color.colorify("String", str_color)
))
return
@only_if_gdb_running
def do_invoke(self, argv):
if not self.get_setting("enable") or context_hidden:
return
if not all(_ in self.layout_mapping for _ in argv):
self.usage()
return
if len(argv) > 0:
current_layout = argv
else:
current_layout = self.get_setting("layout").strip().split()
if not current_layout:
return
self.tty_rows, self.tty_columns = get_terminal_size()
redirect = self.get_setting("redirect")
if redirect and os.access(redirect, os.W_OK):
enable_redirect_output(to_file=redirect)
if self.get_setting("clear_screen") and len(argv) == 0:
clear_screen(redirect)
for section in current_layout:
if section[0] == "-":
continue
try:
self.layout_mapping[section]()
except gdb.MemoryError as e:
# a MemoryError will happen when $pc is corrupted (invalid address)
err(str(e))
self.context_title("")
if redirect and os.access(redirect, os.W_OK):
disable_redirect_output()
return
def context_title(self, m):
line_color= get_gef_setting("theme.context_title_line")
msg_color = get_gef_setting("theme.context_title_message")
if not m:
gef_print(Color.colorify(HORIZONTAL_LINE * self.tty_columns, line_color))
return
trail_len = len(m) + 6
title = ""
title += Color.colorify("{:{padd}<{width}} ".format("",
width=max(self.tty_columns - trail_len, 0),
padd=HORIZONTAL_LINE),
line_color)
title += Color.colorify(m, msg_color)
title += Color.colorify(" {:{padd}<4}".format("", padd=HORIZONTAL_LINE),
line_color)
gef_print(title)
return
def context_regs(self):
self.context_title("registers")
ignored_registers = set(self.get_setting("ignore_registers").split())
if self.get_setting("show_registers_raw") is False:
regs = set(current_arch.all_registers)
printable_registers = " ".join(list(regs - ignored_registers))
gdb.execute("registers {}".format(printable_registers))
return
widest = l = max(map(len, current_arch.all_registers))
l += 5
l += current_arch.ptrsize * 2
nb = get_terminal_size()[1]//l
i = 1
line = ""
changed_color = get_gef_setting("theme.registers_value_changed")
regname_color = get_gef_setting("theme.registers_register_name")
for reg in current_arch.all_registers:
if reg in ignored_registers:
continue
try:
r = gdb.parse_and_eval(reg)
if r.type.code == gdb.TYPE_CODE_VOID:
continue
new_value_type_flag = (r.type.code == gdb.TYPE_CODE_FLAGS)
new_value = long(r)
except (gdb.MemoryError, gdb.error):
# If this exception is triggered, it means that the current register
# is corrupted. Just use the register "raw" value (not eval-ed)
new_value = get_register(reg)
new_value_type_flag = False
except Exception:
new_value = 0
old_value = self.old_registers.get(reg, 0)
padreg = reg.ljust(widest, " ")
value = align_address(new_value)
old_value = align_address(old_value)
if value == old_value:
line += "{}: ".format(Color.colorify(padreg, regname_color))
else:
line += "{}: ".format(Color.colorify(padreg, changed_color))
if new_value_type_flag:
line += "{:s} ".format(format_address_spaces(value))
else:
addr = lookup_address(align_address(long(value)))
if addr.valid:
line += "{:s} ".format(str(addr))
else:
line += "{:s} ".format(format_address_spaces(value))
if i % nb == 0 :
gef_print(line)
line = ""
i += 1
if line:
gef_print(line)
gef_print("Flags: {:s}".format(current_arch.flag_register_to_human()))
return
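# Layout note for context_regs() above: each cell is rendered as "<name padded to `widest`>: <value> ",
# i.e. roughly widest + 5 + 2*ptrsize characters, and `nb` is how many such cells fit on one
# terminal line; registers whose value changed since the previous stop are colored with the
# theme.registers_value_changed setting.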
def context_stack(self):
self.context_title("stack")
show_raw = self.get_setting("show_stack_raw")
nb_lines = self.get_setting("nb_lines_stack")
try:
sp = current_arch.sp
if show_raw is True:
mem = read_memory(sp, 0x10 * nb_lines)
gef_print(hexdump(mem, base=sp))
else:
gdb.execute("dereference {:#x} l{:d}".format(sp, nb_lines))
except gdb.MemoryError:
err("Cannot read memory from $SP (corrupted stack pointer?)")
return
def context_code(self):
nb_insn = self.get_setting("nb_lines_code")
nb_insn_prev = self.get_setting("nb_lines_code_prev")
use_capstone = self.has_setting("use_capstone") and self.get_setting("use_capstone")
cur_insn_color = get_gef_setting("theme.disassemble_current_instruction")
pc = current_arch.pc
frame = gdb.selected_frame()
arch = frame.architecture()
arch_name = "{}:{}".format(current_arch.arch.lower(), current_arch.mode)
self.context_title("code:{}".format(arch_name))
try:
instruction_iterator = capstone_disassemble if use_capstone else gef_disassemble
for insn in instruction_iterator(pc, nb_insn, nb_prev=nb_insn_prev):
line = []
is_taken = False
target = None
text = str(insn)
if insn.address < pc:
line += Color.grayify(" {}".format(text))
elif insn.address == pc:
line += Color.colorify("{:s}{:s}".format(RIGHT_ARROW, text), cur_insn_color)
if current_arch.is_conditional_branch(insn):
is_taken, reason = current_arch.is_branch_taken(insn)
if is_taken:
target = insn.operands[-1].split()[0]
reason = "[Reason: {:s}]".format(reason) if reason else ""
line += Color.colorify("\tTAKEN {:s}".format(reason), "bold green")
else:
reason = "[Reason: !({:s})]".format(reason) if reason else ""
line += Color.colorify("\tNOT taken {:s}".format(reason), "bold red")
elif current_arch.is_call(insn) and self.get_setting("peek_calls") is True:
target = insn.operands[-1].split()[0]
elif current_arch.is_ret(insn) and self.get_setting("peek_ret") is True:
target = current_arch.get_ra(insn, frame)
else:
line += " {}".format(text)
gef_print("".join(line))
if target:
try:
target = int(target, 0)
except TypeError: # Already an int
pass
except ValueError:
# If the operand isn't an address right now we can't parse it
continue
for i, tinsn in enumerate(instruction_iterator(target, nb_insn)):
text= " {} {}".format (DOWN_ARROW if i==0 else " ", str(tinsn))
gef_print(text)
break
except gdb.MemoryError:
err("Cannot disassemble from $PC")
return
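# Note on the code pane above: when the instruction at $pc is a taken conditional branch, a call
# (with `peek_calls` enabled) or a return (with `peek_ret` enabled), the resolved target is also
# disassembled and the first preview line is prefixed with a down arrow.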
def context_args(self):
insn = gef_current_instruction(current_arch.pc)
if not current_arch.is_call(insn):
return
self.size2type = {
1: "BYTE",
2: "WORD",
4: "DWORD",
8: "QWORD",
}
if insn.operands[-1].startswith(self.size2type[current_arch.ptrsize]+" PTR"):
target = "*" + insn.operands[-1].split()[-1]
elif "$"+insn.operands[0] in current_arch.all_registers:
target = "*{:#x}".format(get_register("$"+insn.operands[0]))
else:
# is there a symbol?
ops = " ".join(insn.operands)
if "<" in ops and ">" in ops:
# extract it
target = re.sub(r".*<([^\(> ]*).*", r"\1", ops)
else:
# it's an address, just use as is
target = re.sub(r".*(0x[a-fA-F0-9]*).*", r"\1", ops)
sym = gdb.lookup_global_symbol(target)
if sym is None:
self.print_guessed_arguments(target)
return
if sym.type.code != gdb.TYPE_CODE_FUNC:
err("Symbol '{}' is not a function: type={}".format(target, sym.type.code))
return
self.print_arguments_from_symbol(target, sym)
return
def print_arguments_from_symbol(self, function_name, symbol):
"""If symbols were found, parse them and print the argument adequately."""
args = []
for i, f in enumerate(symbol.type.fields()):
_value = current_arch.get_ith_parameter(i)[1]
_value = RIGHT_ARROW.join(DereferenceCommand.dereference_from(_value))
_name = f.name or "var_{}".format(i)
_type = f.type.name or self.size2type[f.type.sizeof]
args.append("{} {} = {}".format(_type, _name, _value))
self.context_title("arguments")
if not args:
gef_print("{} (<void>)".format(function_name))
return
gef_print("{} (".format(function_name))
gef_print(" " + ",\n ".join(args))
gef_print(")")
return
def print_guessed_arguments(self, function_name):
"""When no symbol, read the current basic block and look for "interesting" instructions."""
def __get_current_block_start_address():
pc = current_arch.pc
try:
block_start = gdb.block_for_pc(pc).start
except RuntimeError:
# if stripped, let's roll back 5 instructions
block_start = gdb_get_nth_previous_instruction_address(pc, 5)
return block_start
parameter_set = set()
pc = current_arch.pc
block_start = __get_current_block_start_address()
use_capstone = self.has_setting("use_capstone") and self.get_setting("use_capstone")
instruction_iterator = capstone_disassemble if use_capstone else gef_disassemble
function_parameters = current_arch.function_parameters
arg_key_color = get_gef_setting("theme.registers_register_name")
for insn in instruction_iterator(block_start, pc-block_start):
if not insn.operands:
continue
if is_x86_32():
if insn.mnemonic == "push":
parameter_set.add(insn.operands[0])
else:
op = "$"+insn.operands[0]
if op in function_parameters:
parameter_set.add(op)
if is_x86_64():
# also consider extended registers
extended_registers = {"$rdi": ["$edi", "$di"],
"$rsi": ["$esi", "$si"],
"$rdx": ["$edx", "$dx"],
"$rcx": ["$ecx", "$cx"],
}
for exreg in extended_registers:
if op in extended_registers[exreg]:
parameter_set.add(exreg)
if is_x86_32():
nb_argument = len(parameter_set)
else:
nb_argument = 0
for p in parameter_set:
nb_argument = max(nb_argument, function_parameters.index(p)+1)
args = []
for i in range(nb_argument):
_key, _value = current_arch.get_ith_parameter(i)
_value = RIGHT_ARROW.join(DereferenceCommand.dereference_from(_value))
args.append("{} = {}".format(Color.colorify(_key, arg_key_color), _value))
self.context_title("arguments (guessed)")
gef_print("{} (".format(function_name))
if args:
gef_print(" "+",\n ".join(args))
gef_print(")")
return
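# Heuristic used by print_guessed_arguments() above: the current basic block is scanned for
# writes to the ABI argument registers (or for `push` instructions on 32-bit x86), and the
# highest-indexed argument register seen determines how many arguments are displayed; this is a
# best-effort guess, not ground truth.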
def context_source(self):
try:
pc = current_arch.pc
symtabline = gdb.find_pc_line(pc)
symtab = symtabline.symtab
line_num = symtabline.line - 1 # we subtract one because line numbers returned by gdb start at 1
if not symtab.is_valid():
return
fpath = symtab.fullname()
with open(fpath, "r") as f:
lines = [l.rstrip() for l in f.readlines()]
except Exception:
return
nb_line = self.get_setting("nb_lines_code")
fn = symtab.filename
if len(fn) > 20:
fn = "{}[...]{}".format(fn[:15], os.path.splitext(fn)[1])
title = "source:{}+{}".format(fn, line_num + 1)
cur_line_color = get_gef_setting("theme.source_current_line")
self.context_title(title)
for i in range(line_num - nb_line + 1, line_num + nb_line):
if i < 0:
continue
if i < line_num:
gef_print(Color.grayify(" {:4d}\t {:s}".format(i + 1, lines[i],)))
if i == line_num:
extra_info = self.get_pc_context_info(pc, lines[i])
prefix = "{}{:4d}\t ".format(RIGHT_ARROW, i + 1)
leading = len(lines[i]) - len(lines[i].lstrip())
if extra_info:
gef_print("{}{}".format(" "*(len(prefix) + leading), extra_info))
gef_print(Color.colorify("{}{:s}".format(prefix, lines[i]), cur_line_color))
if i > line_num:
try:
gef_print(" {:4d}\t {:s}".format(i + 1, lines[i],))
except IndexError:
break
return
def get_pc_context_info(self, pc, line):
try:
current_block = gdb.block_for_pc(pc)
if not current_block.is_valid(): return ""
m = collections.OrderedDict()
while current_block and not current_block.is_static:
for sym in current_block:
symbol = sym.name
if not sym.is_function and re.search(r"\W{}\W".format(symbol), line):
val = gdb.parse_and_eval(symbol)
if val.type.code in (gdb.TYPE_CODE_PTR, gdb.TYPE_CODE_ARRAY):
addr = long(val.address)
addrs = DereferenceCommand.dereference_from(addr)
if len(addrs) > 2:
addrs = [addrs[0], "[...]", addrs[-1]]
f = " {:s} ".format(RIGHT_ARROW)
val = f.join(addrs)
elif val.type.code == gdb.TYPE_CODE_INT:
val = hex(long(val))
else:
continue
if symbol not in m:
m[symbol] = val
current_block = current_block.superblock
if m:
return "// " + ", ".join(["{}={}".format(Color.yellowify(a), b) for a, b in m.items()])
except Exception:
pass
return ""
def context_trace(self):
self.context_title("trace")
nb_backtrace = self.get_setting("nb_lines_backtrace")
if nb_backtrace <= 0:
return
orig_frame = current_frame = gdb.selected_frame()
i = 0
# backward compat for gdb (gdb < 7.10)
if not hasattr(gdb, "FrameDecorator"):
gdb.execute("backtrace {:d}".format(nb_backtrace))
return
while current_frame:
current_frame.select()
if not current_frame.is_valid():
continue
pc = current_frame.pc()
name = current_frame.name()
items = []
items.append("{:#x}".format(pc))
if name:
frame_args = gdb.FrameDecorator.FrameDecorator(current_frame).frame_args() or []
m = "{}({})".format(Color.greenify(name),
", ".join(["{}={!s}".format(Color.yellowify(x.sym),
x.sym.value(current_frame)) for x in frame_args]))
items.append(m)
else:
try:
insn = next(gef_disassemble(pc, 1))
except gdb.MemoryError:
break
items.append(Color.redify("{} {}".format(insn.mnemonic, ", ".join(insn.operands))))
gef_print("[{}] {}".format(Color.colorify("#{}".format(i), "bold pink"),
RIGHT_ARROW.join(items)))
current_frame = current_frame.older()
i += 1
nb_backtrace -= 1
if nb_backtrace == 0:
break
orig_frame.select()
return
def context_threads(self):
def reason():
res = gdb.execute("info program", to_string=True).splitlines()
if not res:
return "NOT RUNNING"
for line in res:
line = line.strip()
if line.startswith("It stopped with signal "):
return line.replace("It stopped with signal ", "").split(",", 1)[0]
if line == "The program being debugged is not being run.":
return "NOT RUNNING"
if line == "It stopped at a breakpoint that has since been deleted.":
return "TEMPORARY BREAKPOINT"
if line.startswith("It stopped at breakpoint "):
return "BREAKPOINT"
if line == "It stopped after being stepped.":
return "SINGLE STEP"
return "STOPPED"
self.context_title("threads")
threads = gdb.selected_inferior().threads()[::-1]
idx = self.get_setting("nb_lines_threads")
if idx > 0:
threads = threads[0:idx]
if idx==0:
return
if not threads:
err("No thread selected")
return
for i, thread in enumerate(threads):
line = """[{:s}] Id {:d}, Name: "{:s}", """.format(Color.colorify("#{:d}".format(i), "bold pink"),
thread.num, thread.name or "")
if thread.is_running():
line += Color.colorify("running", "bold green")
elif thread.is_stopped():
line += Color.colorify("stopped", "bold red")
line += ", reason: {}".format(Color.colorify(reason(), "bold pink"))
elif thread.is_exited():
line += Color.colorify("exited", "bold yellow")
gef_print(line)
i += 1
return
def context_additional_information(self):
if not __context_messages__:
return
self.context_title("extra")
for level, text in __context_messages__:
if level=="error": err(text)
elif level=="warn": warn(text)
elif level=="success": ok(text)
else: info(text)
return
def context_memory(self):
global __watches__
for address, opt in sorted(__watches__.items()):
self.context_title("memory:{:#x}".format(address))
gdb.execute("hexdump {fmt:s} {address:d} {size:d}".format(
address=address,
size=opt[0],
fmt=opt[1]
))
@classmethod
def update_registers(cls, event):
for reg in current_arch.all_registers:
try:
cls.old_registers[reg] = get_register(reg)
except Exception:
cls.old_registers[reg] = 0
return
def empty_extra_messages(self, event):
global __context_messages__
__context_messages__ = []
return
@register_command
class MemoryCommand(GenericCommand):
"""Add or remove address ranges to the memory view."""
_cmdline_ = "memory"
_syntax_ = "{:s} (watch|unwatch|reset|list)".format(_cmdline_)
def __init__(self):
super(MemoryCommand, self).__init__(prefix=True)
return
@only_if_gdb_running
def do_invoke(self, argv):
self.usage()
return
@register_command
class MemoryWatchCommand(GenericCommand):
"""Adds address ranges to the memory view."""
_cmdline_ = "memory watch"
_syntax_ = "{:s} ADDRESS [SIZE] [(qword|dword|word|byte)]".format(_cmdline_)
_example_ = "\n\t{0:s} 0x603000 0x100 byte\n\t{0:s} $sp".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
global __watches__
if len(argv) not in (1, 2, 3):
self.usage()
return
address = to_unsigned_long(gdb.parse_and_eval(argv[0]))
size = to_unsigned_long(gdb.parse_and_eval(argv[1])) if len(argv) > 1 else 0x10
group = "byte"
if len(argv) == 3:
group = argv[2].lower()
if group not in ("qword", "dword", "word", "byte"):
warn("Unexpected grouping '{}'".format(group))
self.usage()
return
else:
if current_arch.ptrsize == 4:
group = "dword"
elif current_arch.ptrsize == 8:
group = "qword"
__watches__[address] = (size, group)
ok("Adding memwatch to {:#x}".format(address))
return
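# Hypothetical usage: `gef> memory watch $sp 0x40 dword` registers the range in __watches__ so
# the `context` command renders it as a "memory:0x..." pane on every stop; when no grouping is
# given, the architecture pointer size selects dword (32-bit) or qword (64-bit).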
@register_command
class MemoryUnwatchCommand(GenericCommand):
"""Removes address ranges to the memory view."""
_cmdline_ = "memory unwatch"
_syntax_ = "{:s} ADDRESS".format(_cmdline_)
_example_ = "\n\t{0:s} 0x603000\n\t{0:s} $sp".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
global __watches__
if not argv:
self.usage()
return
address = to_unsigned_long(gdb.parse_and_eval(argv[0]))
res = __watches__.pop(address, None)
if not res:
warn("You weren't watching {:#x}".format(address))
else:
ok("Removed memwatch of {:#x}".format(address))
return
@register_command
class MemoryWatchResetCommand(GenericCommand):
"""Removes all watchpoints."""
_cmdline_ = "memory reset"
_syntax_ = "{:s}".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
global __watches__
__watches__.clear()
ok("Memory watches cleared")
return
@register_command
class MemoryWatchListCommand(GenericCommand):
"""Lists all watchpoints to display in context layout."""
_cmdline_ = "memory list"
_syntax_ = "{:s}".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
global __watches__
if not __watches__:
info("No memory watches")
return
info("Memory watches:")
for address, opt in sorted(__watches__.items()):
gef_print("- {:#x} ({}, {})".format(address, opt[0], opt[1]))
return
@register_command
class HexdumpCommand(GenericCommand):
"""Display SIZE lines of hexdump from the memory location pointed by ADDRESS. """
_cmdline_ = "hexdump"
_syntax_ = "{:s} (qword|dword|word|byte) ADDRESS [[L][SIZE]] [UP|DOWN] [S]".format(_cmdline_)
_example_ = "{:s} byte $rsp L16 DOWN".format(_cmdline_)
def __init__(self):
super(HexdumpCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
self.add_setting("always_show_ascii", False, "If true, hexdump will always display the ASCII dump")
return
@only_if_gdb_running
def do_invoke(self, argv):
argc = len(argv)
if argc < 2:
self.usage()
return
arg0, argv = argv[0].lower(), argv[1:]
valid_formats = ["byte", "word", "dword", "qword"]
fmt = None
for valid_format in valid_formats:
if valid_format.startswith(arg0):
fmt = valid_format
break
if not fmt:
self.usage()
return
start_addr = to_unsigned_long(gdb.parse_and_eval(argv[0]))
read_from = align_address(start_addr)
read_len = 0x40 if fmt=="byte" else 0x10
up_to_down = True
if argc >= 2:
for arg in argv[1:]:
arg = arg.lower()
if arg.startswith("l"):
arg = arg[1:]
try:
read_len = long(arg, 0)
continue
except ValueError:
pass
if arg in {"up", "u"}:
up_to_down = True
continue
elif arg in {"down", "d"}:
up_to_down = False
continue
if fmt == "byte":
read_from += self.repeat_count * read_len
mem = read_memory(read_from, read_len)
lines = hexdump(mem, base=read_from).splitlines()
else:
lines = self._hexdump(read_from, read_len, fmt, self.repeat_count * read_len)
if not up_to_down:
lines.reverse()
gef_print("\n".join(lines))
return
def _hexdump(self, start_addr, length, arrange_as, offset=0):
elf = get_elf_headers()
if elf is None:
return
endianness = endian_str()
base_address_color = get_gef_setting("theme.dereference_base_address")
show_ascii = self.get_setting("always_show_ascii")
formats = {
"qword": ("Q", 8),
"dword": ("I", 4),
"word": ("H", 2),
}
r, l = formats[arrange_as]
fmt_str = "{{base}}{v}+{{offset:#06x}} {{sym}}{{val:#0{prec}x}} {{text}}".format(v=VERTICAL_LINE, prec=l*2+2)
fmt_pack = endianness + r
lines = []
i = 0
text = ""
while i < length:
cur_addr = start_addr + (i + offset) * l
sym = gdb_get_location_from_symbol(cur_addr)
sym = "<{:s}+{:04x}> ".format(*sym) if sym else ""
mem = read_memory(cur_addr, l)
val = struct.unpack(fmt_pack, mem)[0]
if show_ascii:
text = "".join([chr(b) if 0x20 <= b < 0x7F else "." for b in mem])
lines.append(fmt_str.format(base=Color.colorify(format_address(cur_addr), base_address_color),
offset=(i + offset) * l, sym=sym, val=val, text=text))
i += 1
return lines
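# Hypothetical usage: `gef> hexdump qword $rsp L8 DOWN` unpacks eight 8-byte values with the
# target endianness and prints them highest address first (the DOWN keyword simply reverses the
# generated lines).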
@register_command
class PatchCommand(GenericCommand):
"""Write specified values to the specified address."""
_cmdline_ = "patch"
_syntax_ = ("{0:s} (qword|dword|word|byte) LOCATION VALUES\n"
"{0:s} string LOCATION \"double-escaped string\"".format(_cmdline_))
SUPPORTED_SIZES = {
"qword": (8, "Q"),
"dword": (4, "L"),
"word": (2, "H"),
"byte": (1, "B"),
}
def __init__(self):
super(PatchCommand, self).__init__(complete=gdb.COMPLETE_LOCATION, prefix=True)
return
@only_if_gdb_running
def do_invoke(self, argv):
argc = len(argv)
if argc < 3:
self.usage()
return
fmt, location, values = argv[0].lower(), argv[1], argv[2:]
if fmt not in self.SUPPORTED_SIZES:
self.usage()
return
addr = align_address(long(gdb.parse_and_eval(location)))
size, fcode = self.SUPPORTED_SIZES[fmt]
d = "<" if is_little_endian() else ">"
for value in values:
value = parse_address(value) & ((1 << size * 8) - 1)
vstr = struct.pack(d + fcode, value)
write_memory(addr, vstr, length=size)
addr += size
return
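# Hypothetical usage: `gef> patch dword $pc 0x90909090` packs each VALUE at the chosen width with
# the target endianness and writes the results back to back starting at LOCATION.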
@register_command
class PatchStringCommand(GenericCommand):
"""Write specified string to the specified memory location pointed by ADDRESS."""
_cmdline_ = "patch string"
_syntax_ = "{:s} ADDRESS \"double backslash-escaped string\"".format(_cmdline_)
_example_ = "{:s} $sp \"GEFROCKS\"".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
argc = len(argv)
if argc != 2:
self.usage()
return
location, s = argv[0:2]
addr = align_address(long(gdb.parse_and_eval(location)))
try:
s = codecs.escape_decode(s)[0]
except binascii.Error:
gef_print("Could not decode '\\xXX' encoded string \"{}\"".format(s))
return
write_memory(addr, s, len(s))
return
@register_command
class DereferenceCommand(GenericCommand):
"""Dereference recursively from an address and display information. This acts like WinDBG `dps`
command."""
_cmdline_ = "dereference"
_syntax_ = "{:s} [LOCATION] [l[NB]]".format(_cmdline_)
_aliases_ = ["telescope", ]
_example_ = "{:s} $sp l20".format(_cmdline_)
def __init__(self):
super(DereferenceCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
self.add_setting("max_recursion", 7, "Maximum level of pointer recursion")
return
@staticmethod
def pprint_dereferenced(addr, off):
base_address_color = get_gef_setting("theme.dereference_base_address")
registers_color = get_gef_setting("theme.dereference_register_value")
regs = [(k, get_register(k)) for k in current_arch.all_registers]
sep = " {:s} ".format(RIGHT_ARROW)
memalign = current_arch.ptrsize
offset = off * memalign
current_address = align_address(addr + offset)
addrs = DereferenceCommand.dereference_from(current_address)
l = ""
addr_l = format_address(long(addrs[0], 16))
l += "{:s}{:s}+{:#06x}: {:{ma}s}".format(Color.colorify(addr_l, base_address_color),
VERTICAL_LINE, offset,
sep.join(addrs[1:]), ma=(memalign*2 + 2))
register_hints = []
for regname, regvalue in regs:
if current_address == regvalue:
register_hints.append(regname)
if register_hints:
m = "\t{:s}{:s}".format(LEFT_ARROW, ", ".join(list(register_hints)))
l += Color.colorify(m, registers_color)
offset += memalign
return l
@only_if_gdb_running
def do_invoke(self, argv):
argc = len(argv)
if argc < 1:
err("Missing location.")
return
nb = 10
if argc==2 and argv[1][0] in ("l", "L") and argv[1][1:].isdigit():
nb = int(argv[1][1:])
elif argc == 2 and argv[1].isdigit():
nb = int(argv[1])
addr = safe_parse_and_eval(argv[0])
if addr is None:
err("Invalid address")
return
addr = long(addr)
if process_lookup_address(addr) is None:
err("Unmapped address")
return
if get_gef_setting("context.grow_stack_down") is True:
from_insnum = nb * (self.repeat_count + 1) - 1
to_insnum = self.repeat_count * nb - 1
insnum_step = -1
else:
from_insnum = 0 + self.repeat_count * nb
to_insnum = nb * (self.repeat_count + 1)
insnum_step = 1
start_address = align_address(addr)
for i in range(from_insnum, to_insnum, insnum_step):
gef_print(DereferenceCommand.pprint_dereferenced(start_address, i))
return
@staticmethod
def dereference_from(addr):
if not is_alive():
return [format_address(addr),]
code_color = get_gef_setting("theme.dereference_code")
string_color = get_gef_setting("theme.dereference_string")
max_recursion = get_gef_setting("dereference.max_recursion") or 10
addr = lookup_address(align_address(long(addr)))
msg = [format_address(addr.value),]
seen_addrs = set()
while addr.section and max_recursion:
if addr.value in seen_addrs:
msg.append("[loop detected]")
break
seen_addrs.add(addr.value)
max_recursion -= 1
# Is this value a pointer or a value?
# -- If it's a pointer, dereference
deref = addr.dereference()
if deref is None:
# if here, dereferencing addr has triggered a MemoryError, no need to go further
msg.append(str(addr))
break
new_addr = lookup_address(deref)
if new_addr.valid:
addr = new_addr
msg.append(str(addr))
continue
# -- Otherwise try to parse the value
if addr.section:
if addr.section.is_executable() and addr.is_in_text_segment() and not is_ascii_string(addr.value):
insn = gef_current_instruction(addr.value)
insn_str = "{} {} {}".format(insn.location, insn.mnemonic, ", ".join(insn.operands))
msg.append(Color.colorify(insn_str, code_color))
break
elif addr.section.permission.value & Permission.READ:
if is_ascii_string(addr.value):
s = read_cstring_from_memory(addr.value)
if len(s) < get_memory_alignment():
txt = '{:s} ("{:s}"?)'.format(format_address(deref), Color.colorify(s, string_color))
elif len(s) > 50:
txt = Color.colorify('"{:s}[...]"'.format(s[:50]), string_color)
else:
txt = Color.colorify('"{:s}"'.format(s), string_color)
msg.append(txt)
break
# if not able to parse cleanly, simply display and break
val = "{:#0{ma}x}".format(long(deref & 0xFFFFFFFFFFFFFFFF), ma=(current_arch.ptrsize * 2 + 2))
msg.append(val)
break
return msg
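# Note on dereference_from() above: the pointer chain stops as soon as a value is not mapped,
# once an entry is rendered as code or as a string, when a loop is detected, or after
# `dereference.max_recursion` hops.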
@register_command
class ASLRCommand(GenericCommand):
"""View/modify the ASLR setting of GDB. By default, GDB will disable ASLR when it starts the process. (i.e. not
attached). This command allows to change that setting."""
_cmdline_ = "aslr"
_syntax_ = "{:s} (on|off)".format(_cmdline_)
def do_invoke(self, argv):
argc = len(argv)
if argc == 0:
ret = gdb.execute("show disable-randomization", to_string=True)
i = ret.find("virtual address space is ")
if i < 0:
return
msg = "ASLR is currently "
if ret[i + 25:].strip() == "on.":
msg += Color.redify("disabled")
else:
msg += Color.greenify("enabled")
gef_print(msg)
return
elif argc == 1:
if argv[0] == "on":
info("Enabling ASLR")
gdb.execute("set disable-randomization off")
return
elif argv[0] == "off":
info("Disabling ASLR")
gdb.execute("set disable-randomization on")
return
warn("Invalid command")
self.usage()
return
@register_command
class ResetCacheCommand(GenericCommand):
"""Reset cache of all stored data. This command is here for debugging and test purposes, GEF
handles properly the cache reset under "normal" scenario."""
_cmdline_ = "reset-cache"
_syntax_ = _cmdline_
def do_invoke(self, argv):
reset_all_caches()
return
@register_command
class VMMapCommand(GenericCommand):
"""Display a comprehensive layout of the virtual memory mapping. If a filter argument, GEF will
filter out the mapping whose pathname do not match that filter."""
_cmdline_ = "vmmap"
_syntax_ = "{:s} [FILTER]".format(_cmdline_)
_example_ = "{:s} libc".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
vmmap = get_process_maps()
if not vmmap:
err("No address mapping information found")
return
color = get_gef_setting("theme.table_heading")
headers = ["Start", "End", "Offset", "Perm", "Path"]
gef_print(Color.colorify("{:<{w}s}{:<{w}s}{:<{w}s}{:<4s} {:s}".format(*headers, w=get_memory_alignment()*2+3), color))
for entry in vmmap:
if argv and not argv[0] in entry.path:
continue
l = []
l.append(format_address(entry.page_start))
l.append(format_address(entry.page_end))
l.append(format_address(entry.offset))
if entry.permission.value == (Permission.READ|Permission.WRITE|Permission.EXECUTE) :
l.append(Color.colorify(str(entry.permission), "bold red"))
else:
l.append(str(entry.permission))
l.append(entry.path)
gef_print(" ".join(l))
return
@register_command
class XFilesCommand(GenericCommand):
"""Shows all libraries (and sections) loaded by binary. This command extends the GDB command
`info files`, by retrieving more information from extra sources, and providing a better
display. If an argument FILE is given, the output will grep information related to only that file.
If an argument name is also given, the output will grep to the name within FILE."""
_cmdline_ = "xfiles"
_syntax_ = "{:s} [FILE [NAME]]".format(_cmdline_)
_example_ = "\n{0:s} libc\n{0:s} libc IO_vtables".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
color = get_gef_setting("theme.table_heading")
headers = ["Start", "End", "Name", "File"]
gef_print(Color.colorify("{:<{w}s}{:<{w}s}{:<21s} {:s}".format(*headers, w=get_memory_alignment()*2+3), color))
filter_by_file = argv[0] if argv and argv[0] else None
filter_by_name = argv[1] if len(argv) > 1 and argv[1] else None
for xfile in get_info_files():
if filter_by_file:
if filter_by_file not in xfile.filename:
continue
if filter_by_name and filter_by_name not in xfile.name:
continue
l = []
l.append(format_address(xfile.zone_start))
l.append(format_address(xfile.zone_end))
l.append("{:<21s}".format(xfile.name))
l.append(xfile.filename)
gef_print(" ".join(l))
return
@register_command
class XAddressInfoCommand(GenericCommand):
"""Retrieve and display runtime information for the location(s) given as parameter."""
_cmdline_ = "xinfo"
_syntax_ = "{:s} LOCATION".format(_cmdline_)
_example_ = "{:s} $pc".format(_cmdline_)
def __init__(self):
super(XAddressInfoCommand, self).__init__(complete=gdb.COMPLETE_LOCATION)
return
@only_if_gdb_running
def do_invoke (self, argv):
if not argv:
err ("At least one valid address must be specified")
self.usage()
return
for sym in argv:
try:
addr = align_address(parse_address(sym))
gef_print(titlify("xinfo: {:#x}".format(addr)))
self.infos(addr)
except gdb.error as gdb_err:
err("{:s}".format(str(gdb_err)))
return
def infos(self, address):
addr = lookup_address(address)
if not addr.valid:
warn("Cannot reach {:#x} in memory space".format(address))
return
sect = addr.section
info = addr.info
if sect:
gef_print("Page: {:s} {:s} {:s} (size={:#x})".format(format_address(sect.page_start),
RIGHT_ARROW,
format_address(sect.page_end),
sect.page_end-sect.page_start))
gef_print("Permissions: {}".format(sect.permission))
gef_print("Pathname: {:s}".format(sect.path))
gef_print("Offset (from page): {:#x}".format(addr.value-sect.page_start))
gef_print("Inode: {:s}".format(sect.inode))
if info:
gef_print("Segment: {:s} ({:s}-{:s})".format(info.name,
format_address(info.zone_start),
format_address(info.zone_end)))
sym = gdb_get_location_from_symbol(address)
if sym:
name, offset = sym
msg = "Symbol: {:s}".format(name)
if offset:
msg+= "+{:d}".format(offset)
gef_print(msg)
return
@register_command
class XorMemoryCommand(GenericCommand):
"""XOR a block of memory. The command allows to simply display the result, or patch it
runtime at runtime."""
_cmdline_ = "xor-memory"
_syntax_ = "{:s} (display|patch) ADDRESS SIZE KEY".format(_cmdline_)
def __init__(self):
super(XorMemoryCommand, self).__init__(prefix=True)
return
def do_invoke(self, argv):
self.usage()
return
@register_command
class XorMemoryDisplayCommand(GenericCommand):
"""Display a block of memory pointed by ADDRESS by xor-ing each byte with KEY. The key must be
provided in hexadecimal format."""
_cmdline_ = "xor-memory display"
_syntax_ = "{:s} ADDRESS SIZE KEY".format(_cmdline_)
_example_ = "{:s} $sp 16 41414141".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
if len(argv) != 3:
self.usage()
return
address = long(gdb.parse_and_eval(argv[0]))
length = long(argv[1], 0)
key = argv[2]
block = read_memory(address, length)
info("Displaying XOR-ing {:#x}-{:#x} with {:s}".format(address, address + len(block), repr(key)))
gef_print(titlify("Original block"))
gef_print(hexdump(block, base=address))
gef_print(titlify("XOR-ed block"))
gef_print(hexdump(xor(block, key), base=address))
return
@register_command
class XorMemoryPatchCommand(GenericCommand):
"""Patch a block of memory pointed by ADDRESS by xor-ing each byte with KEY. The key must be
provided in hexadecimal format."""
_cmdline_ = "xor-memory patch"
_syntax_ = "{:s} ADDRESS SIZE KEY".format(_cmdline_)
_example_ = "{:s} $sp 16 41414141".format(_cmdline_)
@only_if_gdb_running
def do_invoke(self, argv):
if len(argv) != 3:
self.usage()
return
address = parse_address(argv[0])
length = long(argv[1], 0)
key = argv[2]
block = read_memory(address, length)
info("Patching XOR-ing {:#x}-{:#x} with '{:s}'".format(address, address + len(block), key))
xored_block = xor(block, key)
write_memory(address, xored_block, length)
return
@register_command
class TraceRunCommand(GenericCommand):
"""Create a runtime trace of all instructions executed from $pc to LOCATION specified. The
trace is stored in a text file that can be next imported in IDA Pro to visualize the runtime
path."""
_cmdline_ = "trace-run"
_syntax_ = "{:s} LOCATION [MAX_CALL_DEPTH]".format(_cmdline_)
_example_ = "{:s} 0x555555554610".format(_cmdline_)
def __init__(self):
super(TraceRunCommand, self).__init__(self._cmdline_, complete=gdb.COMPLETE_LOCATION)
self.add_setting("max_tracing_recursion", 1, "Maximum depth of tracing")
self.add_setting("tracefile_prefix", "./gef-trace-", "Specify the tracing output file prefix")
return
@only_if_gdb_running
def do_invoke(self, argv):
if len(argv) not in (1, 2):
self.usage()
return
if len(argv) == 2 and argv[1].isdigit():
depth = long(argv[1])
else:
depth = 1
try:
loc_start = current_arch.pc
loc_end = long(gdb.parse_and_eval(argv[0]))
except gdb.error as e:
err("Invalid location: {:s}".format(e))
return
self.trace(loc_start, loc_end, depth)
return
def get_frames_size(self):
n = 0
f = gdb.newest_frame()
while f:
n += 1
f = f.older()
return n
def trace(self, loc_start, loc_end, depth):
info("Tracing from {:#x} to {:#x} (max depth={:d})".format(loc_start, loc_end,depth))
logfile = "{:s}{:#x}-{:#x}.txt".format(self.get_setting("tracefile_prefix"), loc_start, loc_end)
enable_redirect_output(to_file=logfile)
hide_context()
self.start_tracing(loc_start, loc_end, depth)
unhide_context()
disable_redirect_output()
ok("Done, logfile stored as '{:s}'".format(logfile))
info("Hint: import logfile with `ida_color_gdb_trace.py` script in IDA to visualize path")
return
def start_tracing(self, loc_start, loc_end, depth):
loc_cur = loc_start
frame_count_init = self.get_frames_size()
gef_print("#")
gef_print("# Execution tracing of {:s}".format(get_filepath()))
gef_print("# Start address: {:s}".format(format_address(loc_start)))
gef_print("# End address: {:s}".format(format_address(loc_end)))
gef_print("# Recursion level: {:d}".format(depth))
gef_print("# automatically generated by gef.py")
gef_print("#\n")
while loc_cur != loc_end:
try:
delta = self.get_frames_size() - frame_count_init
if delta <= depth :
gdb.execute("stepi")
else:
gdb.execute("finish")
loc_cur = current_arch.pc
gdb.flush()
except gdb.error as e:
gef_print("#")
gef_print("# Execution interrupted at address {:s}".format(format_address(loc_cur)))
gef_print("# Exception: {:s}".format(e))
gef_print("#\n")
break
return
@register_command
class PatternCommand(GenericCommand):
"""This command will create or search a De Bruijn cyclic pattern to facilitate
determining the offset in memory. The algorithm used is the same as the one
used by pwntools, and can therefore be used in conjunction."""
_cmdline_ = "pattern"
_syntax_ = "{:s} (create|search) ARGS".format(_cmdline_)
def __init__(self, *args, **kwargs):
super(PatternCommand, self).__init__(prefix=True)
self.add_setting("length", 1024, "Initial length of a cyclic buffer to generate")
return
def do_invoke(self, argv):
self.usage()
return
@register_command
class PatternCreateCommand(GenericCommand):
"""Generate a de Bruijn cyclic pattern. It will generate a pattern long of SIZE,
incrementally varying of one byte at each generation. The length of each block is
equal to sizeof(void*).
Note: This algorithm is the same than the one used by pwntools library."""
_cmdline_ = "pattern create"
_syntax_ = "{:s} [SIZE]".format(_cmdline_)
def do_invoke(self, argv):
if len(argv) == 1:
if not argv[0].isdigit():
err("Invalid size")
return
set_gef_setting("pattern.length", long(argv[0]))
elif len(argv) > 1:
err("Invalid syntax")
return
size = get_gef_setting("pattern.length")
info("Generating a pattern of {:d} bytes".format(size))
pattern_str = gef_pystring(generate_cyclic_pattern(size))
gef_print(pattern_str)
ok("Saved as '{:s}'".format(gef_convenience(pattern_str)))
return
@register_command
class PatternSearchCommand(GenericCommand):
"""Search for the cyclic de Bruijn pattern generated by the `pattern create` command. The
PATTERN argument can be a GDB symbol (such as a register name) or a hexadecimal value."""
_cmdline_ = "pattern search"
_syntax_ = "{:s} PATTERN [SIZE]".format(_cmdline_)
_example_ = "\n{0:s} $pc\n{0:s} 0x61616164\n{0:s} aaab".format(_cmdline_)
_aliases_ = ["pattern offset",]
@only_if_gdb_running
def do_invoke(self, argv):
argc = len(argv)
if argc not in (1, 2):
self.usage()
return
if argc==2:
if not argv[1].isdigit():
err("Invalid size")
return
size = long(argv[1])
else:
size = get_gef_setting("pattern.length")
pattern = argv[0]
info("Searching '{:s}'".format(pattern))
self.search(pattern, size)
return
def search(self, pattern, size):
pattern_be, pattern_le = None, None
# 1. check if it's a symbol (like "$sp" or "0x1337")
symbol = safe_parse_and_eval(pattern)
if symbol:
addr = long(symbol)
dereferenced_value = dereference(addr)
# 1-bis. try to dereference
if dereferenced_value:
addr = long(dereferenced_value)
if current_arch.ptrsize == 4:
pattern_be = struct.pack(">I", addr)
pattern_le = struct.pack("<I", addr)
else:
pattern_be = struct.pack(">Q", addr)
pattern_le = struct.pack("<Q", addr)
else:
# 2. assume it's a plain string
pattern_be = gef_pybytes(pattern)
pattern_le = gef_pybytes(pattern[::-1])
cyclic_pattern = generate_cyclic_pattern(size)
found = False
off = cyclic_pattern.find(pattern_le)
if off >= 0:
ok("Found at offset {:d} (little-endian search) {:s}".format(off, Color.colorify("likely", "bold red") if is_little_endian() else ""))
found = True
off = cyclic_pattern.find(pattern_be)
if off >= 0:
ok("Found at offset {:d} (big-endian search) {:s}".format(off, Color.colorify("likely", "bold green") if is_big_endian() else ""))
found = True
if not found:
err("Pattern '{}' not found".format(pattern))
return
@register_command
class ChecksecCommand(GenericCommand):
"""Checksec the security properties of the current executable or passed as argument. The
command checks for the following protections:
- PIE
- NX
- RelRO
- Glibc Stack Canaries
- Fortify Source"""
_cmdline_ = "checksec"
_syntax_ = "{:s} [FILENAME]".format(_cmdline_)
_example_ = "{} /bin/ls".format(_cmdline_)
def __init__(self):
super(ChecksecCommand, self).__init__(complete=gdb.COMPLETE_FILENAME)
return
def pre_load(self):
which("readelf")
return
def do_invoke(self, argv):
argc = len(argv)
if argc == 0:
filename = get_filepath()
if filename is None:
warn("No executable/library specified")
return
elif argc == 1:
filename = os.path.realpath(os.path.expanduser(argv[0]))
if not os.access(filename, os.R_OK):
err("Invalid filename")
return
else:
self.usage()
return
info("{:s} for '{:s}'".format(self._cmdline_, filename))
self.print_security_properties(filename)
return
def print_security_properties(self, filename):
sec = checksec(filename)
for prop in sec:
if prop in ("Partial RelRO", "Full RelRO"): continue
val = sec[prop]
msg = Color.greenify("Yes") if val is True else Color.redify("No")
if val and prop=="Canary" and is_alive():
canary = gef_read_canary()[0]
msg+= "{} value: {:#x}".format(RIGHT_ARROW, canary)
gef_print("{:<30s}: {:s}".format(prop, msg))
if sec["Full RelRO"]:
gef_print("{:<30s}: {:s}".format("RelRO", Color.greenify("Full")))
elif sec["Partial RelRO"]:
gef_print("{:<30s}: {:s}".format("RelRO", Color.yellowify("Partial")))
else:
gef_print("{:<30s}: {:s}".format("RelRO", Color.redify("No")))
return
@register_command
class GotCommand(GenericCommand):
"""Display current status of the got inside the process."""
_cmdline_ = "got"
_syntax_ = "{:s} [FUNCTION_NAME ...] ".format(_cmdline_)
_example_ = "got read printf exit"
def __init__(self, *args, **kwargs):
super(GotCommand, self).__init__()
self.add_setting("function_resolved", "green", "Line color of the got command output if the function has "
"been resolved")
self.add_setting("function_not_resolved", "yellow", "Line color of the got command output if the function has "
"not been resolved")
return
def get_jmp_slots(self, readelf, filename):
output = []
cmd = [readelf, "--relocs", filename]
lines = gef_execute_external(cmd, as_list=True)
for line in lines:
if "JUMP" in line:
output.append(line)
return output
@only_if_gdb_running
def do_invoke(self, argv):
try:
readelf = which("readelf")
except IOError:
err("Missing `readelf`")
return
# get the filtering parameter.
func_names_filter = []
if argv:
func_names_filter = argv
# getting vmmap to understand the boundaries of the main binary
# we will use this info to understand if a function has been resolved or not.
vmmap = get_process_maps()
base_address = min([x.page_start for x in vmmap if x.path == get_filepath()])
end_address = max([x.page_end for x in vmmap if x.path == get_filepath()])
# get the checksec output.
checksec_status = checksec(get_filepath())
relro_status = "Full RelRO"
full_relro = checksec_status["Full RelRO"]
pie = checksec_status["PIE"] # if pie we will have offset instead of abs address.
if not full_relro:
relro_status = "Partial RelRO"
partial_relro = checksec_status["Partial RelRO"]
if not partial_relro:
relro_status = "No RelRO"
# retrieve jump slots using readelf
jmpslots = self.get_jmp_slots(readelf, get_filepath())
gef_print("\nGOT protection: {} | GOT functions: {}\n ".format(relro_status, len(jmpslots)))
for line in jmpslots:
address, _, _, _, name = line.split()[:5]
# if we have a filter let's skip the entries that are not requested.
if func_names_filter:
if not any(map(lambda x: x in name, func_names_filter)):
continue
address_val = int(address, 16)
# address_val is an offset from the base_address if we have PIE.
if pie:
address_val = base_address + address_val
# read the address of the function.
got_address = read_int_from_memory(address_val)
# for the swag: different colors if the function has been resolved or not.
if base_address < got_address < end_address:
color = self.get_setting("function_not_resolved") # function hasn't already been resolved
else:
color = self.get_setting("function_resolved") # function has already been resolved
line = "[{}] ".format(hex(address_val))
line += Color.colorify("{} {} {}".format(name, RIGHT_ARROW, hex(got_address)), color)
gef_print(line)
return
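# Resolution heuristic used above: a GOT slot whose value still points inside the main binary's
# own mapping is considered not yet resolved (it still targets the PLT stub), while a value
# pointing outside that range is considered resolved; the two cases only differ by line color.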
@register_command
class HighlightCommand(GenericCommand):
"""
This command highlights user defined text matches which modifies GEF output universally.
"""
_cmdline_ = "highlight"
_syntax_ = "{} (add|remove|list|clear)".format(_cmdline_)
_aliases_ = ["hl"]
def __init__(self):
super(HighlightCommand, self).__init__(prefix=True)
self.add_setting("regex", False, "Enable regex highlighting")
def do_invoke(self, argv):
return self.usage()
@register_command
class HighlightListCommand(GenericCommand):
"""Show the current highlight table with matches to colors."""
_cmdline_ = "highlight list"
_aliases_ = ["highlight ls", "hll"]
_syntax_ = _cmdline_
def print_highlight_table(self):
if not highlight_table:
return err("no matches found")
left_pad = max(map(len, highlight_table.keys()))
for match, color in sorted(highlight_table.items()):
print("{} | {}".format(Color.colorify(match.ljust(left_pad), color),
Color.colorify(color, color)))
return
def do_invoke(self, argv):
return self.print_highlight_table()
@register_command
class HighlightClearCommand(GenericCommand):
"""Clear the highlight table, remove all matches."""
_cmdline_ = "highlight clear"
_aliases_ = ["hlc"]
_syntax_ = _cmdline_
def do_invoke(self, argv):
return highlight_table.clear()
@register_command
class HighlightAddCommand(GenericCommand):
"""Add a match to the highlight table."""
_cmdline_ = "highlight add"
_syntax_ = "{} MATCH COLOR".format(_cmdline_)
_aliases_ = ["highlight set", "hla"]
_example_ = "{} 41414141 yellow".format(_cmdline_)
def do_invoke(self, argv):
if len(argv) < 2:
return self.usage()
match, color = argv
highlight_table[match] = color
return
@register_command
class HighlightRemoveCommand(GenericCommand):
"""Remove a match in the highlight table."""
_cmdline_ = "highlight remove"
_syntax_ = "{} MATCH".format(_cmdline_)
_aliases_ = [
"highlight delete",
"highlight del",
"highlight unset",
"highlight rm",
"hlr"
]
_example_ = "{} remove 41414141".format(_cmdline_)
def do_invoke(self, argv):
if not argv:
return self.usage()
highlight_table.pop(argv[0], None)
return
@register_command
class FormatStringSearchCommand(GenericCommand):
"""Exploitable format-string helper: this command will set up specific breakpoints
at well-known dangerous functions (printf, snprintf, etc.), and check if the pointer
holding the format string is writable, and therefore susceptible to format string
attacks if an attacker can control its content."""
_cmdline_ = "format-string-helper"
_syntax_ = _cmdline_
_aliases_ = ["fmtstr-helper",]
def do_invoke(self, argv):
dangerous_functions = {
"printf": 0,
"sprintf": 1,
"fprintf": 1,
"snprintf": 2,
"vsnprintf": 2,
}
enable_redirect_output("/dev/null")
for func_name, num_arg in dangerous_functions.items():
FormatStringBreakpoint(func_name, num_arg)
disable_redirect_output()
ok("Enabled {:d} FormatStringBreakpoint".format(len(dangerous_functions)))
return
@register_command
class HeapAnalysisCommand(GenericCommand):
"""Heap vulnerability analysis helper: this command aims to track dynamic heap allocation
done through malloc()/free() to provide some insights on possible heap vulnerabilities. The
following vulnerabilities are checked:
- NULL free
- Use-after-Free
- Double Free
- Heap overlap"""
_cmdline_ = "heap-analysis-helper"
_syntax_ = _cmdline_
def __init__(self, *args, **kwargs):
super(HeapAnalysisCommand, self).__init__(complete=gdb.COMPLETE_NONE)
self.add_setting("check_free_null", False, "Break execution when a free(NULL) is encountered")
self.add_setting("check_double_free", True, "Break execution when a double free is encountered")
self.add_setting("check_weird_free", True, "Break execution when free() is called against a non-tracked pointer")
self.add_setting("check_uaf", True, "Break execution when a possible Use-after-Free condition is found")
self.add_setting("check_heap_overlap", True, "Break execution when a possible overlap in allocation is found")
self.bp_malloc, self.bp_calloc, self.bp_free, self.bp_realloc = None, None, None, None
return
@only_if_gdb_running
@experimental_feature
def do_invoke(self, argv):
if not argv:
self.setup()
return
if argv[0]=="show":
self.dump_tracked_allocations()
return
def setup(self):
ok("Tracking malloc() & calloc()")
self.bp_malloc = TraceMallocBreakpoint("__libc_malloc")
self.bp_calloc = TraceMallocBreakpoint("__libc_calloc")
ok("Tracking free()")
self.bp_free = TraceFreeBreakpoint()
ok("Tracking realloc()")
self.bp_realloc = TraceReallocBreakpoint()
ok("Disabling hardware watchpoints (this may increase the latency)")
gdb.execute("set can-use-hw-watchpoints 0")
info("Dynamic breakpoints correctly setup, GEF will break execution if a possible vulnerabity is found.")
warn("{}: The heap analysis slows down the execution noticeably.".format(
Color.colorify("Note", "bold underline yellow")))
# when inferior quits, we need to clean everything for a next execution
gef_on_exit_hook(self.clean)
return
def dump_tracked_allocations(self):
global __heap_allocated_list__, __heap_freed_list__, __heap_uaf_watchpoints__
if __heap_allocated_list__:
ok("Tracked as in-use chunks:")
for addr, sz in __heap_allocated_list__: gef_print("{} malloc({:d}) = {:#x}".format(CROSS, sz, addr))
else:
ok("No malloc() chunk tracked")
if __heap_freed_list__:
ok("Tracked as free-ed chunks:")
for addr, sz in __heap_freed_list__: gef_print("{} free({:d}) = {:#x}".format(TICK, sz, addr))
else:
ok("No free() chunk tracked")
return
def clean(self, event):
global __heap_allocated_list__, __heap_freed_list__, __heap_uaf_watchpoints__
ok("{} - Cleaning up".format(Color.colorify("Heap-Analysis", "yellow bold"),))
for bp in [self.bp_malloc, self.bp_calloc, self.bp_free, self.bp_realloc]:
if hasattr(bp, "retbp") and bp.retbp:
bp.retbp.delete()
bp.delete()
for wp in __heap_uaf_watchpoints__:
wp.delete()
__heap_allocated_list__ = []
__heap_freed_list__ = []
__heap_uaf_watchpoints__ = []
ok("{} - Re-enabling hardware watchpoints".format(Color.colorify("Heap-Analysis", "yellow bold"),))
gdb.execute("set can-use-hw-watchpoints 1")
gef_on_exit_unhook(self.clean)
return
@register_command
class IsSyscallCommand(GenericCommand):
"""
Tells whether the next instruction is a system call."""
_cmdline_ = "is-syscall"
_syntax_ = _cmdline_
def do_invoke(self, argv):
insn = gef_current_instruction(current_arch.pc)
ok("Current instruction is{}a syscall".format(" " if self.is_syscall(current_arch, insn) else " not "))
return
def is_syscall(self, arch, instruction):
insn_str = instruction.mnemonic + " " + ", ".join(instruction.operands)
return insn_str.strip() in arch.syscall_instructions
@register_command
class SyscallArgsCommand(GenericCommand):
"""Gets the syscall name and arguments based on the register values in the current state."""
_cmdline_ = "syscall-args"
_syntax_ = _cmdline_
def __init__(self):
super(SyscallArgsCommand, self).__init__()
self.add_setting("path", os.path.join(GEF_TEMP_DIR, "syscall-tables"),
"Path to store/load the syscall tables files")
return
def do_invoke(self, argv):
color = get_gef_setting("theme.table_heading")
path = self.get_settings_path()
if path is None:
err("Cannot open '{0}': check directory and/or `gef config {0}` setting, "
"currently: '{1}'".format("syscall-args.path", self.get_setting("path")))
return
arch = current_arch.__class__.__name__
syscall_table = self.get_syscall_table(arch)
reg_value = get_register(current_arch.syscall_register)
if reg_value not in syscall_table:
warn("There is no system call for {:#x}".format(reg_value))
return
syscall_entry = syscall_table[reg_value]
values = []
for param in syscall_entry.params:
values.append(get_register(param.reg))
parameters = [s.param for s in syscall_entry.params]
registers = [s.reg for s in syscall_entry.params]
info("Detected syscall {}".format(Color.colorify(syscall_entry.name, color)))
gef_print(" {}({})".format(syscall_entry.name, ", ".join(parameters)))
headers = ["Parameter", "Register", "Value"]
param_names = [re.split(r" |\*", p)[-1] for p in parameters]
info(Color.colorify("{:<28} {:<28} {}".format(*headers), color))
for name, register, value in zip(param_names, registers, values):
line = " {:<15} {:<15} 0x{:x}".format(name, register, value)
addrs = DereferenceCommand.dereference_from(value)
if len(addrs) > 1:
sep = " {:s} ".format(RIGHT_ARROW)
line += sep
line += sep.join(addrs[1:])
gef_print(line)
return
def get_filepath(self, x):
p = self.get_settings_path()
if not p: return None
return os.path.join(p, "{}.py".format(x))
def get_module(self, modname):
_fullname = self.get_filepath(modname)
return imp.load_source(modname, _fullname)
def get_syscall_table(self, modname):
_mod = self.get_module(modname)
return getattr(_mod, "syscall_table")
def get_settings_path(self):
path = os.path.expanduser(self.get_setting("path"))
path = os.path.realpath(path)
return path if os.path.isdir(path) else None
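# Illustrative sketch (assumption, not part of upstream GEF): the shape of the
# per-architecture table module that SyscallArgsCommand.get_module() loads.
# Judging from do_invoke() above, the module must expose a module-level
# `syscall_table` dict keyed by syscall number, where each entry has `.name`
# and `.params`, and each param has `.reg` and `.param`. A minimal,
# hypothetical "X86_64.py" could therefore look like the definitions below
# (private names are used here so nothing in gef itself is shadowed).
import collections as _collections_example
_ExampleParam = _collections_example.namedtuple("Param", ["param", "reg"])
_ExampleEntry = _collections_example.namedtuple("Entry", ["name", "params"])
_EXAMPLE_SYSCALL_TABLE = {  # a real table module would name this `syscall_table`
    0x01: _ExampleEntry("write", [_ExampleParam("unsigned int fd", "$rdi"),
                                  _ExampleParam("const char *buf", "$rsi"),
                                  _ExampleParam("size_t count", "$rdx")]),
    0x3c: _ExampleEntry("exit", [_ExampleParam("int error_code", "$rdi")]),
}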
@lru_cache()
def get_section_base_address(name):
section = process_lookup_path(name)
if section:
return section.page_start
return None
@lru_cache()
def get_zone_base_address(name):
zone = file_lookup_name_path(name, get_filepath())
if zone:
return zone.zone_start
return None
class GenericFunction(gdb.Function):
"""This is an abstract class for invoking convenience functions, should not be instantiated."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def _function_(self): pass
@property
def _syntax_(self):
return "${}([offset])".format(self._function_)
def __init__ (self):
super(GenericFunction, self).__init__(self._function_)
def invoke(self, *args):
if not is_alive():
raise gdb.GdbError("No debugging session active")
return long(self.do_invoke(args))
def arg_to_long(self, args, index, default=0):
try:
addr = args[index]
return long(addr) if addr.address is None else long(addr.address)
except IndexError:
return default
@abc.abstractmethod
def do_invoke(self, args): pass
@register_function
class StackOffsetFunction(GenericFunction):
"""Return the current stack base address plus an optional offset."""
_function_ = "_stack"
def do_invoke(self, args):
return self.arg_to_long(args, 0) + get_section_base_address("[stack]")
@register_function
class HeapBaseFunction(GenericFunction):
"""Return the current heap base address plus an optional offset."""
_function_ = "_heap"
def do_invoke(self, args):
base = HeapBaseFunction.heap_base()
if not base:
raise gdb.GdbError("Heap not found")
return self.arg_to_long(args, 0) + base
@staticmethod
def heap_base():
try:
base = long(gdb.parse_and_eval("mp_->sbrk_base"))
if base != 0:
return base
except gdb.error:
pass
return get_section_base_address("[heap]")
@register_function
class PieBaseFunction(GenericFunction):
"""Return the current pie base address plus an optional offset."""
_function_ = "_pie"
def do_invoke(self, args):
return self.arg_to_long(args, 0) + get_section_base_address(get_filepath())
@register_function
class BssBaseFunction(GenericFunction):
"""Return the current bss base address plus the given offset."""
_function_ = "_bss"
def do_invoke(self, args):
return self.arg_to_long(args, 0) + get_zone_base_address(".bss")
@register_function
class GotBaseFunction(GenericFunction):
"""Return the current bss base address plus the given offset."""
_function_ = "_got"
def do_invoke(self, args):
return self.arg_to_long(args, 0) + get_zone_base_address(".got")
@register_command
class GefFunctionsCommand(GenericCommand):
"""List the convenience functions provided by GEF."""
_cmdline_ = "functions"
_syntax_ = _cmdline_
def __init__(self):
super(GefFunctionsCommand, self).__init__()
self.docs = []
self.setup()
return
def setup(self):
global __gef__
for function in __gef__.loaded_functions:
self.add_function_to_doc(function)
self.__doc__ = "\n".join(sorted(self.docs))
return
def add_function_to_doc(self, function):
"""Add function to documentation."""
doc = getattr(function, "__doc__", "").lstrip()
doc = "\n ".join(doc.split("\n"))
syntax = getattr(function, "_syntax_", "").lstrip()
msg = "{syntax:<25s} -- {help:s}".format(syntax=syntax, help=Color.greenify(doc))
self.docs.append(msg)
return
def do_invoke(self, argv):
self.dont_repeat()
gef_print(titlify("GEF - Convenience Functions"))
gef_print("These functions can be used as arguments to other "
"commands to dynamically calculate values, eg: {:s}\n"
.format(Color.colorify("deref $_heap(0x20)", "yellow")))
gef_print(self.__doc__)
return
class GefCommand(gdb.Command):
"""GEF main command: view all new commands by typing `gef`."""
_cmdline_ = "gef"
_syntax_ = "{:s} (missing|config|save|restore|set|run)".format(_cmdline_)
def __init__(self):
super(GefCommand, self).__init__(GefCommand._cmdline_,
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE,
True)
set_gef_setting("gef.follow_child", True, bool, "Automatically set GDB to follow child when forking")
set_gef_setting("gef.readline_compat", False, bool, "Workaround for readline SOH/ETX issue (SEGV)")
set_gef_setting("gef.debug", False, bool, "Enable debug mode for gef")
set_gef_setting("gef.autosave_breakpoints_file", "", str, "Automatically save and restore breakpoints")
set_gef_setting("gef.extra_plugins_dir", "", str, "Autoload additional GEF commands from external directory")
set_gef_setting("gef.disable_color", False, bool, "Disable all colors in GEF")
self.loaded_commands = []
self.loaded_functions = []
self.missing_commands = {}
return
def setup(self):
self.load(initial=True)
# loading GEF sub-commands
self.doc = GefHelpCommand(self.loaded_commands)
self.cfg = GefConfigCommand(self.loaded_command_names)
GefSaveCommand()
GefRestoreCommand()
GefMissingCommand()
GefSetCommand()
GefRunCommand()
# load the saved settings
gdb.execute("gef restore")
# restore the autosave/autoreload breakpoints policy (if any)
self.__reload_auto_breakpoints()
# load plugins from `extra_plugins_dir`
if self.__load_extra_plugins() > 0:
# if here, at least one extra plugin was loaded, so we need to restore
# the settings once more
gdb.execute("gef restore quiet")
return
def __reload_auto_breakpoints(self):
bkp_fname = __config__.get("gef.autosave_breakpoints_file", None)
bkp_fname = bkp_fname[0] if bkp_fname else None
if bkp_fname:
# restore if existing
if os.access(bkp_fname, os.R_OK):
gdb.execute("source {:s}".format(bkp_fname))
# add hook for autosave breakpoints on quit command
source = [
"define hook-quit",
" save breakpoints {:s}".format(bkp_fname),
"end"
]
gef_execute_gdb_script("\n".join(source) + "\n")
return
def __load_extra_plugins(self):
nb_added = -1
try:
nb_inital = len(self.loaded_commands)
directories = get_gef_setting("gef.extra_plugins_dir")
if directories:
for directory in directories.split(";"):
directory = os.path.realpath(os.path.expanduser(directory))
if os.path.isdir(directory):
sys.path.append(directory)
for fname in os.listdir(directory):
if not fname.endswith(".py"): continue
fpath = "{:s}/{:s}".format(directory, fname)
if os.path.isfile(fpath):
gdb.execute("source {:s}".format(fpath))
nb_added = len(self.loaded_commands) - nb_inital
if nb_added > 0:
ok("{:s} extra commands added from '{:s}'".format(Color.colorify(nb_added, "bold green"),
Color.colorify(directory, "bold blue")))
except gdb.error as e:
err("failed: {}".format(str(e)))
return nb_added
@property
def loaded_command_names(self):
return [x[0] for x in self.loaded_commands]
def invoke(self, args, from_tty):
self.dont_repeat()
gdb.execute("gef help")
return
def load(self, initial=False):
"""Load all the commands and functions defined by GEF into GDB."""
nb_missing = 0
self.commands = [(x._cmdline_, x) for x in __commands__]
# load all of the functions
for function_class_name in __functions__:
self.loaded_functions.append(function_class_name())
def is_loaded(x):
return any(filter(lambda u: x == u[0], self.loaded_commands))
for cmd, class_name in self.commands:
if is_loaded(cmd):
continue
try:
self.loaded_commands.append((cmd, class_name, class_name()))
if hasattr(class_name, "_aliases_"):
aliases = getattr(class_name, "_aliases_")
for alias in aliases:
GefAlias(alias, cmd)
except Exception as reason:
self.missing_commands[cmd] = reason
nb_missing += 1
# sort by command name
self.loaded_commands = sorted(self.loaded_commands, key=lambda x: x[1]._cmdline_)
if initial:
gef_print("{:s} for {:s} ready, type `{:s}' to start, `{:s}' to configure"
.format(Color.greenify("GEF"), get_os(),
Color.colorify("gef","underline yellow"),
Color.colorify("gef config", "underline pink")))
ver = "{:d}.{:d}".format(sys.version_info.major, sys.version_info.minor)
nb_cmds = len(self.loaded_commands)
gef_print("{:s} commands loaded for GDB {:s} using Python engine {:s}"
.format(Color.colorify(nb_cmds, "bold green"),
Color.colorify(gdb.VERSION, "bold yellow"),
Color.colorify(ver, "bold red")))
if nb_missing:
warn("{:s} command{} could not be loaded, run `{:s}` to know why."
.format(Color.colorify(nb_missing, "bold red"),
"s" if nb_missing > 1 else "",
Color.colorify("gef missing", "underline pink")))
return
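# Illustrative sketch (assumption, not part of upstream GEF): what a file placed
# in `gef.extra_plugins_dir` can contain. __load_extra_plugins() simply sources
# every *.py in that directory, so adding a command only requires the usual
# @register_command decorator (kept commented out here so nothing extra is registered):
#
#   @register_command
#   class HelloWorldCommand(GenericCommand):
#       """Toy command demonstrating the extra-plugins mechanism."""
#       _cmdline_ = "hello-world"
#       _syntax_ = _cmdline_
#       def do_invoke(self, argv):
#           ok("Hello from an extra plugin")
#           return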
class GefHelpCommand(gdb.Command):
"""GEF help sub-command."""
_cmdline_ = "gef help"
_syntax_ = _cmdline_
def __init__(self, commands, *args, **kwargs):
super(GefHelpCommand, self).__init__(GefHelpCommand._cmdline_,
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE,
False)
self.docs = []
self.generate_help(commands)
self.refresh()
return
def invoke(self, args, from_tty):
self.dont_repeat()
gef_print(titlify("GEF - GDB Enhanced Features"))
gef_print(self.__doc__)
return
def generate_help(self, commands):
"""Generate builtin commands documentation."""
for command in commands:
self.add_command_to_doc(command)
return
def add_command_to_doc(self, command):
"""Add command to GEF documentation."""
cmd, class_name, _ = command
if " " in cmd:
# do not print subcommands in gef help
return
doc = getattr(class_name, "__doc__", "").lstrip()
doc = "\n ".join(doc.split("\n"))
aliases = " (alias: {:s})".format(", ".join(class_name._aliases_)) if hasattr(class_name, "_aliases_") else ""
msg = "{cmd:<25s} -- {help:s}{aliases:s}".format(cmd=cmd, help=Color.greenify(doc), aliases=aliases)
self.docs.append(msg)
return
def refresh(self):
"""Refresh the documentation."""
self.__doc__ = "\n".join(sorted(self.docs))
return
class GefConfigCommand(gdb.Command):
"""GEF configuration sub-command
This command will help set/view GEF settings for the current debugging session.
It is possible to make those changes permanent by running `gef save` (refer
to this command help), and/or restore previously saved settings by running
`gef restore` (refer help).
"""
_cmdline_ = "gef config"
_syntax_ = "{:s} [setting_name] [setting_value]".format(_cmdline_)
def __init__(self, loaded_commands, *args, **kwargs):
super(GefConfigCommand, self).__init__(GefConfigCommand._cmdline_, gdb.COMMAND_NONE, prefix=False)
self.loaded_commands = loaded_commands
return
def invoke(self, args, from_tty):
self.dont_repeat()
argv = gdb.string_to_argv(args)
argc = len(argv)
if not (0 <= argc <= 2):
err("Invalid number of arguments")
return
if argc == 0:
gef_print(titlify("GEF configuration settings"))
self.print_settings()
return
if argc == 1:
prefix = argv[0]
names = list(filter(lambda x: x.startswith(prefix), __config__.keys()))
if names:
if len(names)==1:
gef_print(titlify("GEF configuration setting: {:s}".format(names[0])))
self.print_setting(names[0], verbose=True)
else:
gef_print(titlify("GEF configuration settings matching '{:s}'".format(argv[0])))
for name in names: self.print_setting(name)
return
self.set_setting(argc, argv)
return
def print_setting(self, plugin_name, verbose=False):
res = __config__.get(plugin_name)
string_color = get_gef_setting("theme.dereference_string")
misc_color = get_gef_setting("theme.dereference_base_address")
if not res:
return
_value, _type, _desc = res
_setting = Color.colorify(plugin_name, "green")
_type = _type.__name__
if _type == "str":
_value = '"{:s}"'.format(Color.colorify(_value, string_color))
else:
_value = Color.colorify(_value, misc_color)
gef_print("{:s} ({:s}) = {:s}".format(_setting, _type, _value))
if verbose:
gef_print(Color.colorify("\nDescription:", "bold underline"))
gef_print("\t{:s}".format(_desc))
return
def print_settings(self):
for x in sorted(__config__):
self.print_setting(x)
return
def set_setting(self, argc, argv):
global __gef__
if "." not in argv[0]:
err("Invalid command format")
return
loaded_commands = [ x[0] for x in __gef__.loaded_commands ] + ["gef"]
plugin_name = argv[0].split(".", 1)[0]
if plugin_name not in loaded_commands:
err("Unknown plugin '{:s}'".format(plugin_name))
return
_type = __config__.get(argv[0], [None, None, None])[1]
if _type is None:
err("Failed to get '{:s}' config setting".format(argv[0],))
return
try:
if _type == bool:
_newval = True if argv[1].upper() in ("TRUE", "T", "1") else False
else:
_newval = _type(argv[1])
except Exception:
err("{} expects type '{}'".format(argv[0], _type.__name__))
return
reset_all_caches()
__config__[argv[0]][0] = _newval
return
def complete(self, text, word):
settings = sorted(__config__)
if text=="":
# no prefix: example: `gef config TAB`
return [s for s in settings if word in s]
if "." not in text:
# if looking for possible prefix
return [s for s in settings if s.startswith(text.strip())]
# finally, look for possible values for given prefix
return [s.split(".", 1)[1] for s in settings if s.startswith(text.strip())]
class GefSaveCommand(gdb.Command):
"""GEF save sub-command.
Saves the current configuration of GEF to disk (by default in file '~/.gef.rc')."""
_cmdline_ = "gef save"
_syntax_ = _cmdline_
def __init__(self, *args, **kwargs):
super(GefSaveCommand, self).__init__(GefSaveCommand._cmdline_, gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE, False)
return
def invoke(self, args, from_tty):
self.dont_repeat()
cfg = configparser.RawConfigParser()
old_sect = None
# save the configuration
for key in sorted(__config__):
sect, optname = key.split(".", 1)
value = __config__.get(key, None)
value = value[0] if value else None
if old_sect != sect:
cfg.add_section(sect)
old_sect = sect
cfg.set(sect, optname, value)
# save the aliases
cfg.add_section("aliases")
for alias in __aliases__:
cfg.set("aliases", alias._alias, alias._command)
with open(GEF_RC, "w") as fd:
cfg.write(fd)
ok("Configuration saved to '{:s}'".format(GEF_RC))
return
class GefRestoreCommand(gdb.Command):
"""GEF restore sub-command.
Loads settings from file '~/.gef.rc' and applies them to the configuration of GEF."""
_cmdline_ = "gef restore"
_syntax_ = _cmdline_
def __init__(self, *args, **kwargs):
super(GefRestoreCommand, self).__init__(GefRestoreCommand._cmdline_,
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE,
False)
return
def invoke(self, args, from_tty):
self.dont_repeat()
if not os.access(GEF_RC, os.R_OK):
return
quiet = args.lower() == "quiet"
cfg = configparser.ConfigParser()
cfg.read(GEF_RC)
for section in cfg.sections():
if section == "aliases":
# load the aliases
for key in cfg.options(section):
GefAlias(key, cfg.get(section, key))
continue
# load the other options
for optname in cfg.options(section):
try:
key = "{:s}.{:s}".format(section, optname)
_type = __config__.get(key)[1]
new_value = cfg.get(section, optname)
if _type == bool:
new_value = True if new_value == "True" else False
else:
new_value = _type(new_value)
__config__[key][0] = new_value
except Exception:
pass
if not quiet:
ok("Configuration from '{:s}' restored".format(Color.colorify(GEF_RC, "bold blue")))
return
class GefMissingCommand(gdb.Command):
"""GEF missing sub-command
Display the GEF commands that could not be loaded, along with the reason why
they could not be loaded.
"""
_cmdline_ = "gef missing"
_syntax_ = _cmdline_
def __init__(self, *args, **kwargs):
super(GefMissingCommand, self).__init__(GefMissingCommand._cmdline_,
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_NONE,
False)
return
def invoke(self, args, from_tty):
self.dont_repeat()
missing_commands = __gef__.missing_commands.keys()
if not missing_commands:
ok("No missing command")
return
for missing_command in missing_commands:
reason = __gef__.missing_commands[missing_command]
warn("Command `{}` is missing, reason {} {}".format(missing_command, RIGHT_ARROW, reason))
return
class GefSetCommand(gdb.Command):
"""Override GDB set commands with the context from GEF.
"""
_cmdline_ = "gef set"
_syntax_ = "{:s} [GDB_SET_ARGUMENTS]".format(_cmdline_)
def __init__(self, *args, **kwargs):
super(GefSetCommand, self).__init__(GefSetCommand._cmdline_,
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_SYMBOL,
False)
return
def invoke(self, args, from_tty):
self.dont_repeat()
args = args.split()
cmd = ["set", args[0],]
for p in args[1:]:
if p.startswith("$_gef"):
c = gdb.parse_and_eval(p)
cmd.append(c.string())
else:
cmd.append(p)
gdb.execute(" ".join(cmd))
return
class GefRunCommand(gdb.Command):
"""Override GDB run commands with the context from GEF.
Simple wrapper for GDB run command to use arguments set from `gef set args`. """
_cmdline_ = "gef run"
_syntax_ = "{:s} [GDB_RUN_ARGUMENTS]".format(_cmdline_)
def __init__(self, *args, **kwargs):
super(GefRunCommand, self).__init__(GefRunCommand._cmdline_,
gdb.COMMAND_SUPPORT,
gdb.COMPLETE_FILENAME,
False)
return
def invoke(self, args, from_tty):
self.dont_repeat()
if is_alive():
gdb.execute("continue")
return
argv = args.split()
gdb.execute("gef set args {:s}".format(" ".join(argv)))
gdb.execute("run")
return
class GefAlias(gdb.Command):
"""Simple aliasing wrapper because GDB doesn't do what it should.
"""
def __init__(self, alias, command, completer_class=gdb.COMPLETE_NONE, command_class=gdb.COMMAND_NONE):
p = command.split()
if not p:
return
if list(filter(lambda x: x._alias == alias, __aliases__)):
return
self._command = command
self._alias = alias
c = command.split()[0]
r = self.lookup_command(c)
self.__doc__ = "Alias for '{}'".format(Color.greenify(command))
if r is not None:
_instance = r[2]
self.__doc__ += ": {}".format(_instance.__doc__)
if hasattr(_instance, "complete"):
self.complete = _instance.complete
super(GefAlias, self).__init__(alias, command_class, completer_class=completer_class)
__aliases__.append(self)
return
def invoke(self, args, from_tty):
gdb.execute("{} {}".format(self._command, args), from_tty=from_tty)
return
def lookup_command(self, cmd):
global __gef__
for _name, _class, _instance in __gef__.loaded_commands:
if cmd == _name:
return _name, _class, _instance
return None
class GefAliases(gdb.Command):
"""List all custom aliases."""
def __init__(self):
super(GefAliases, self).__init__("aliases", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE)
return
def invoke(self, args, from_tty):
self.dont_repeat()
ok("Aliases defined:")
for _alias in __aliases__:
gef_print("{:30s} {} {}".format(_alias._alias, RIGHT_ARROW, _alias._command))
return
class GefTmuxSetup(gdb.Command):
"""Setup a confortable tmux debugging environment."""
def __init__(self):
super(GefTmuxSetup, self).__init__("tmux-setup", gdb.COMMAND_NONE, gdb.COMPLETE_NONE)
GefAlias("screen-setup", "tmux-setup")
return
def invoke(self, args, from_tty):
self.dont_repeat()
tmux = os.getenv("TMUX")
if tmux:
self.tmux_setup()
return
screen = os.getenv("TERM")
if screen is not None and screen == "screen":
self.screen_setup()
return
warn("Not in a tmux/screen session")
return
def tmux_setup(self):
"""Prepare the tmux environment by vertically splitting the current pane, and
forcing the context to be redirected there."""
tmux = which("tmux")
ok("tmux session found, splitting window...")
old_ptses = set(os.listdir("/dev/pts"))
gdb.execute("! {} split-window -h 'clear ; cat'".format(tmux))
gdb.execute("! {} select-pane -L".format(tmux))
new_ptses = set(os.listdir("/dev/pts"))
pty = list(new_ptses - old_ptses)[0]
pty = "/dev/pts/{}".format(pty)
ok("Setting `context.redirect` to '{}'...".format(pty))
gdb.execute("gef config context.redirect {}".format(pty))
ok("Done!")
return
def screen_setup(self):
"""Hackish equivalent of the tmux_setup() function for screen."""
screen = which("screen")
sty = os.getenv("STY")
ok("screen session found, splitting window...")
fd_script, script_path = tempfile.mkstemp()
fd_tty, tty_path = tempfile.mkstemp()
os.close(fd_tty)
with os.fdopen(fd_script, "w") as f:
f.write("startup_message off\n")
f.write("split -v\n")
f.write("focus right\n")
f.write("screen /bin/bash -c 'tty > {}; clear; cat'\n".format(tty_path))
f.write("focus left\n")
gdb.execute("""! {} -r {} -m -d -X source {}""".format(screen, sty, script_path))
# artificial delay to make sure `tty_path` is populated
time.sleep(0.25)
with open(tty_path, "r") as f:
pty = f.read().strip()
ok("Setting `context.redirect` to '{}'...".format(pty))
gdb.execute("gef config context.redirect {}".format(pty))
ok("Done!")
os.unlink(script_path)
os.unlink(tty_path)
return
def __gef_prompt__(current_prompt):
"""GEF custom prompt function."""
if get_gef_setting("gef.readline_compat") is True: return GEF_PROMPT
if get_gef_setting("gef.disable_color") is True: return GEF_PROMPT
if is_alive(): return GEF_PROMPT_ON
return GEF_PROMPT_OFF
if __name__ == "__main__":
if GDB_VERSION < GDB_MIN_VERSION:
err("You're using an old version of GDB. GEF will not work correctly. "
"Consider updating to GDB {} or higher.".format(".".join(map(str, GDB_MIN_VERSION))))
else:
try:
pyenv = which("pyenv")
PYENV_ROOT = gef_pystring(subprocess.check_output([pyenv, "root"]).strip())
PYENV_VERSION = gef_pystring(subprocess.check_output([pyenv, "version-name"]).strip())
site_packages_dir = os.path.join(PYENV_ROOT, "versions", PYENV_VERSION, "lib",
"python{}".format(PYENV_VERSION[:3]), "site-packages")
site.addsitedir(site_packages_dir)
except FileNotFoundError:
pass
# setup prompt
gdb.prompt_hook = __gef_prompt__
# setup config
gdb.execute("set confirm off")
gdb.execute("set verbose off")
gdb.execute("set pagination off")
gdb.execute("set step-mode on")
gdb.execute("set print elements 0")
# gdb history
gdb.execute("set history save on")
gdb.execute("set history filename ~/.gdb_history")
# gdb input and output bases
gdb.execute("set output-radix 0x10")
# pretty print
gdb.execute("set print pretty on")
try:
# this will raise a gdb.error unless we're on x86
gdb.execute("set disassembly-flavor intel")
except gdb.error:
# we can safely ignore this
pass
# SIGALRM will simply display a message, but gdb won't forward the signal to the process
gdb.execute("handle SIGALRM print nopass")
# saving GDB indexes in GEF tempdir
gef_makedirs(GEF_TEMP_DIR)
gdb.execute("save gdb-index {}".format(GEF_TEMP_DIR))
# load GEF
__gef__ = GefCommand()
__gef__.setup()
# gdb events configuration
gef_on_continue_hook(continue_handler)
gef_on_stop_hook(hook_stop_handler)
gef_on_new_hook(new_objfile_handler)
gef_on_exit_hook(exit_handler)
if gdb.current_progspace().filename is not None:
# if here, we are sourcing gef from a gdb session already attached
# we must force a call to the new_objfile handler (see issue #278)
new_objfile_handler(None)
GefAliases()
GefTmuxSetup()
|
[] |
[] |
["EDITOR", "TMUX", "TERM", "HOME", "STY", "PATH"] |
[]
|
["EDITOR", "TMUX", "TERM", "HOME", "STY", "PATH"]
|
python
| 6 | 0 | |
selfdrive/manager/manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
# update system time from panda
set_time(cloudlog)
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params = [
("OpenpilotEnabledToggle", "1"),
("CommunityFeaturesToggle", "1"),
("IsMetric", "1"),
# HKG
("UseClusterSpeed", "1"),
("LongControlEnabled", "0"),
("MadModeEnabled", "1"),
("IsLdwsCar", "0"),
("LaneChangeEnabled", "0"),
("AutoLaneChangeEnabled", "0"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("StockNaviDecelEnabled", "0"),
("ShowDebugUI", "0"),
("CustomLeadMark", "0")
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", version)
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_git_commit(default=""))
params.put("GitBranch", get_git_branch(default=""))
params.put("GitRemote", get_git_remote(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
crash.init()
crash.bind_user(id=dongle_id)
crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
device=HARDWARE.get_device_type())
def manager_prepare():
for p in managed_processes.values():
p.prepare()
def manager_cleanup():
for p in managed_processes.values():
p.stop()
cloudlog.info("everything is dead")
def manager_thread():
if EON:
Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
system("am startservice com.neokii.optool/.MainService")
Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start()
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
ignore = []
if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
if os.getenv("BLOCK") is not None:
ignore += os.getenv("BLOCK").split(",")
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
if sm['deviceState'].freeSpacePercent < 5:
not_run.append("loggerd")
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc]
cloudlog.debug(' '.join(running_list))
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# TODO: let UI handle this
# Exit main loop when uninstall is needed
if params.get_bool("DoUninstall"):
break
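# Illustrative sketch (assumption, not part of openpilot): the NOBOARD/BLOCK
# handling at the top of manager_thread() boils down to building an ignore
# list, e.g. running `NOBOARD=1 BLOCK=loggerd,uploader ./manager.py` keeps
# pandad, loggerd and uploader out of ensure_running().
def _example_ignore_list(environ):
    ignore = []
    if environ.get("NOBOARD") is not None:
        ignore.append("pandad")
    if environ.get("BLOCK") is not None:
        ignore += environ.get("BLOCK").split(",")
    return ignore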
def main():
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
manager_cleanup()
if Params().get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
[] |
[] |
["CLEAN", "NOBOARD", "BLOCK", "NOCRASH", "PREPAREONLY", "PASSIVE", "NOLOG", "DONGLE_ID"] |
[]
|
["CLEAN", "NOBOARD", "BLOCK", "NOCRASH", "PREPAREONLY", "PASSIVE", "NOLOG", "DONGLE_ID"]
|
python
| 8 | 0 | |
azure-samples/src/main/java/com/microsoft/azure/management/sql/samples/ManageSqlDatabase.java
|
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for
* license information.
*/
package com.microsoft.azure.management.sql.samples;
import com.microsoft.azure.AzureEnvironment;
import com.microsoft.azure.AzureResponseBuilder;
import com.microsoft.azure.credentials.ApplicationTokenCredentials;
import com.microsoft.azure.management.Azure;
import com.microsoft.azure.management.resources.fluentcore.arm.Region;
import com.microsoft.azure.management.samples.Utils;
import com.microsoft.azure.management.sql.DatabaseEditions;
import com.microsoft.azure.management.sql.DatabaseMetric;
import com.microsoft.azure.management.sql.ServiceObjectiveName;
import com.microsoft.azure.management.sql.SqlDatabase;
import com.microsoft.azure.management.sql.SqlFirewallRule;
import com.microsoft.azure.management.sql.SqlServer;
import com.microsoft.azure.serializer.AzureJacksonAdapter;
import com.microsoft.rest.LogLevel;
import com.microsoft.rest.RestClient;
import java.io.File;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* Azure SQL sample for managing SQL Database -
* - Create a SQL Server along with 2 firewalls.
* - Create a database in SQL server
* - Change performance level (SKU) of SQL Database
* - List and delete firewalls.
* - Create another firewall in the SQL Server
* - Delete database, firewall and SQL Server
*/
public final class ManageSqlDatabase {
/**
* Main function which runs the actual sample.
* @param azure instance of the azure client
* @return true if sample runs successfully
*/
public static boolean runSample(Azure azure) {
final String sqlServerName = Utils.createRandomName("sqlserver");
final String rgName = Utils.createRandomName("rgRSDSI");
final String administratorLogin = "sqladmin3423";
final String administratorPassword = "myS3cureP@ssword";
final String firewallRuleIPAddress = "10.0.0.1";
final String firewallRuleStartIPAddress = "10.2.0.1";
final String firewallRuleEndIPAddress = "10.2.0.10";
final String databaseName = "mydatabase";
try {
// ============================================================
// Create a SQL Server, with 2 firewall rules.
SqlServer sqlServer = azure.sqlServers().define(sqlServerName)
.withRegion(Region.US_EAST)
.withNewResourceGroup(rgName)
.withAdministratorLogin(administratorLogin)
.withAdministratorPassword(administratorPassword)
.withNewFirewallRule(firewallRuleIPAddress)
.withNewFirewallRule(firewallRuleStartIPAddress, firewallRuleEndIPAddress)
.create();
Utils.print(sqlServer);
// ============================================================
// Create a Database in SQL server created above.
System.out.println("Creating a database");
SqlDatabase database = sqlServer.databases()
.define(databaseName)
.create();
Utils.print(database);
// ============================================================
// Update the edition of database.
System.out.println("Updating a database");
database = database.update()
.withEdition(DatabaseEditions.STANDARD)
.withServiceObjective(ServiceObjectiveName.S3)
.apply();
Utils.print(database);
// ============================================================
// List and delete all firewall rules.
System.out.println("Listing all firewall rules");
List<SqlFirewallRule> firewallRules = sqlServer.firewallRules().list();
for (SqlFirewallRule firewallRule: firewallRules) {
// Print information of the firewall rule.
Utils.print(firewallRule);
// Delete the firewall rule.
System.out.println("Deleting a firewall rule");
firewallRule.delete();
}
// ============================================================
// Add new firewall rules.
System.out.println("Creating a firewall rule for SQL Server");
SqlFirewallRule firewallRule = sqlServer.firewallRules().define("myFirewallRule")
.withIPAddress("10.10.10.10")
.create();
Utils.print(firewallRule);
List<DatabaseMetric> usages = database.listUsages();
// Delete the database.
System.out.println("Deleting a database");
database.delete();
// Delete the SQL Server.
System.out.println("Deleting a Sql Server");
azure.sqlServers().deleteById(sqlServer.id());
return true;
} catch (Exception f) {
System.out.println(f.getMessage());
f.printStackTrace();
} finally {
try {
System.out.println("Deleting Resource Group: " + rgName);
azure.resourceGroups().deleteByName(rgName);
System.out.println("Deleted Resource Group: " + rgName);
}
catch (Exception e) {
System.out.println("Did not create any resources in Azure. No clean up is necessary");
}
}
return false;
}
/**
* Main entry point.
* @param args the parameters
*/
public static void main(String[] args) {
try {
final File credFile = new File(System.getenv("AZURE_AUTH_LOCATION"));
ApplicationTokenCredentials credentials = ApplicationTokenCredentials.fromFile(credFile);
RestClient restClient = new RestClient.Builder()
.withBaseUrl(AzureEnvironment.AZURE, AzureEnvironment.Endpoint.RESOURCE_MANAGER)
.withSerializerAdapter(new AzureJacksonAdapter())
.withReadTimeout(150, TimeUnit.SECONDS)
.withLogLevel(LogLevel.BODY)
.withResponseBuilderFactory(new AzureResponseBuilder.Factory())
.withCredentials(credentials).build();
Azure azure = Azure.authenticate(restClient, credentials.domain(), credentials.defaultSubscriptionId()).withDefaultSubscription();
// Print selected subscription
System.out.println("Selected subscription: " + azure.subscriptionId());
runSample(azure);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private ManageSqlDatabase() {
}
}
|
["\"AZURE_AUTH_LOCATION\""] |
[] |
["AZURE_AUTH_LOCATION"] |
[]
|
["AZURE_AUTH_LOCATION"]
|
java
| 1 | 0 | |
untitled2/asgi.py
|
"""
ASGI config for untitled2 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'untitled2.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/process/config/config.go
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package config
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
model "github.com/DataDog/agent-payload/v5/process"
sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config"
"github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/config/settings"
oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config"
"github.com/DataDog/datadog-agent/pkg/process/util"
apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo"
"github.com/DataDog/datadog-agent/pkg/util/fargate"
ddgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc"
"github.com/DataDog/datadog-agent/pkg/util/hostname/validate"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-agent/pkg/util/profiling"
"google.golang.org/grpc"
)
const (
// defaultProxyPort is the default port used for proxies.
// This mirrors the configuration for the infrastructure agent.
defaultProxyPort = 3128
defaultGRPCConnectionTimeout = 60 * time.Second
)
// Name for check performed by process-agent or system-probe
const (
ProcessCheckName = "process"
RTProcessCheckName = "rtprocess"
ContainerCheckName = "container"
RTContainerCheckName = "rtcontainer"
ConnectionsCheckName = "connections"
PodCheckName = "pod"
DiscoveryCheckName = "process_discovery"
NetworkCheckName = "Network"
OOMKillCheckName = "OOM Kill"
TCPQueueLengthCheckName = "TCP queue length"
ProcessModuleCheckName = "Process Module"
ProcessCheckDefaultInterval = 10 * time.Second
RTProcessCheckDefaultInterval = 2 * time.Second
ContainerCheckDefaultInterval = 10 * time.Second
RTContainerCheckDefaultInterval = 2 * time.Second
ConnectionsCheckDefaultInterval = 30 * time.Second
PodCheckDefaultInterval = 10 * time.Second
ProcessDiscoveryCheckDefaultInterval = 4 * time.Hour
)
var (
processChecks = []string{ProcessCheckName, RTProcessCheckName}
containerChecks = []string{ContainerCheckName, RTContainerCheckName}
moduleCheckMap = map[sysconfig.ModuleName][]string{
sysconfig.NetworkTracerModule: {ConnectionsCheckName, NetworkCheckName},
sysconfig.OOMKillProbeModule: {OOMKillCheckName},
sysconfig.TCPQueueLengthTracerModule: {TCPQueueLengthCheckName},
sysconfig.ProcessModule: {ProcessModuleCheckName},
}
)
type proxyFunc func(*http.Request) (*url.URL, error)
type cmdFunc = func(name string, arg ...string) *exec.Cmd
// WindowsConfig stores all windows-specific configuration for the process-agent and system-probe.
type WindowsConfig struct {
// Number of checks runs between refreshes of command-line arguments
ArgsRefreshInterval int
// Controls getting process arguments immediately when a new process is discovered
AddNewArgs bool
// UsePerfCounters enables new process check using performance counters for process collection
UsePerfCounters bool
}
// AgentConfig is the global config for the process-agent. This information
// is sourced from config files and the environment variables.
type AgentConfig struct {
Enabled bool
HostName string
APIEndpoints []apicfg.Endpoint
LogFile string
LogLevel string
LogToConsole bool
QueueSize int // The number of items allowed in each delivery queue.
RTQueueSize int // the number of items allowed in real-time delivery queue
ProcessQueueBytes int // The total number of bytes that can be enqueued for delivery to the process intake endpoint
Blacklist []*regexp.Regexp
Scrubber *DataScrubber
MaxPerMessage int
MaxCtrProcessesPerMessage int // The maximum number of processes that belong to a container for a given message
MaxConnsPerMessage int
AllowRealTime bool
Transport *http.Transport `json:"-"`
DDAgentBin string
StatsdHost string
StatsdPort int
ProcessExpVarPort int
// profiling settings, or nil if profiling is not enabled
ProfilingSettings *profiling.Settings
// host type of the agent, used to populate container payload with additional host information
ContainerHostType model.ContainerHostType
// System probe collection configuration
EnableSystemProbe bool
SystemProbeAddress string
// Orchestrator config
Orchestrator *oconfig.OrchestratorConfig
// Check config
EnabledChecks []string
CheckIntervals map[string]time.Duration
// Internal store of a proxy used for generating the Transport
proxy proxyFunc
// Windows-specific config
Windows WindowsConfig
grpcConnectionTimeout time.Duration
}
// CheckIsEnabled returns a bool indicating if the given check name is enabled.
func (a AgentConfig) CheckIsEnabled(checkName string) bool {
return util.StringInSlice(a.EnabledChecks, checkName)
}
// CheckInterval returns the interval for the given check name, defaulting to 10s if not found.
func (a AgentConfig) CheckInterval(checkName string) time.Duration {
d, ok := a.CheckIntervals[checkName]
if !ok {
log.Errorf("missing check interval for '%s', you must set a default", checkName)
d = 10 * time.Second
}
return d
}
const (
defaultProcessEndpoint = "https://process.datadoghq.com"
maxMessageBatch = 100
defaultMaxCtrProcsMessageBatch = 10000
maxCtrProcsMessageBatch = 30000
)
// NewDefaultTransport provides a http transport configuration with sane default timeouts
func NewDefaultTransport() *http.Transport {
return &http.Transport{
MaxIdleConns: 5,
IdleConnTimeout: 90 * time.Second,
Dial: (&net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 10 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
ResponseHeaderTimeout: 5 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
// NewDefaultAgentConfig returns an AgentConfig with defaults initialized
func NewDefaultAgentConfig(canAccessContainers bool) *AgentConfig {
processEndpoint, err := url.Parse(defaultProcessEndpoint)
if err != nil {
// This is a hardcoded URL so parsing it should not fail
panic(err)
}
var enabledChecks []string
if canAccessContainers {
enabledChecks = containerChecks
}
ac := &AgentConfig{
Enabled: canAccessContainers, // We'll always run inside of a container.
APIEndpoints: []apicfg.Endpoint{{Endpoint: processEndpoint}},
LogFile: defaultLogFilePath,
LogLevel: "info",
LogToConsole: false,
// Allow buffering up to 60 megabytes of payload data in total
ProcessQueueBytes: 60 * 1000 * 1000,
// This can be fairly high as the input should get throttled by queue bytes first.
// Assuming we generate ~8 checks/minute (for process/network), this should allow buffering of ~30 minutes of data assuming it fits within the queue bytes memory budget
QueueSize: 256,
RTQueueSize: 5, // We set a small queue size for real-time message queue because they get staled very quickly, thus we only keep the latest several payloads
MaxPerMessage: maxMessageBatch,
MaxCtrProcessesPerMessage: defaultMaxCtrProcsMessageBatch,
MaxConnsPerMessage: 600,
AllowRealTime: true,
HostName: "",
Transport: NewDefaultTransport(),
ProcessExpVarPort: 6062,
ContainerHostType: model.ContainerHostType_notSpecified,
// Statsd for internal instrumentation
StatsdHost: "127.0.0.1",
StatsdPort: 8125,
// System probe collection configuration
EnableSystemProbe: false,
SystemProbeAddress: defaultSystemProbeAddress,
// Orchestrator config
Orchestrator: oconfig.NewDefaultOrchestratorConfig(),
// Check config
EnabledChecks: enabledChecks,
CheckIntervals: map[string]time.Duration{
ProcessCheckName: ProcessCheckDefaultInterval,
RTProcessCheckName: RTProcessCheckDefaultInterval,
ContainerCheckName: ContainerCheckDefaultInterval,
RTContainerCheckName: RTContainerCheckDefaultInterval,
ConnectionsCheckName: ConnectionsCheckDefaultInterval,
PodCheckName: PodCheckDefaultInterval,
DiscoveryCheckName: ProcessDiscoveryCheckDefaultInterval,
},
// DataScrubber to hide command line sensitive words
Scrubber: NewDefaultDataScrubber(),
Blacklist: make([]*regexp.Regexp, 0),
// Windows process config
Windows: WindowsConfig{
ArgsRefreshInterval: 15, // with default 20s check interval we refresh every 5m
AddNewArgs: true,
},
grpcConnectionTimeout: defaultGRPCConnectionTimeout,
}
// Set default values for proc/sys paths if unset.
// Don't set these if /host is not mounted, so the container's own context is used.
// Generally only applicable for container-only cases like Fargate.
if config.IsContainerized() && util.PathExists("/host") {
if v := os.Getenv("HOST_PROC"); v == "" {
os.Setenv("HOST_PROC", "/host/proc")
}
if v := os.Getenv("HOST_SYS"); v == "" {
os.Setenv("HOST_SYS", "/host/sys")
}
}
return ac
}
// LoadConfigIfExists takes a path to either a directory containing datadog.yaml or a direct path to a datadog.yaml file
// and loads it into ddconfig.Datadog. It does this silently, and does not produce any logs.
func LoadConfigIfExists(path string) error {
if path != "" {
if util.PathExists(path) {
config.Datadog.AddConfigPath(path)
if strings.HasSuffix(path, ".yaml") { // If they set a config file directly, let's try to honor that
config.Datadog.SetConfigFile(path)
}
if _, err := config.LoadWithoutSecret(); err != nil {
return err
}
} else {
log.Infof("no config exists at %s, ignoring...", path)
}
}
return nil
}
// NewAgentConfig returns an AgentConfig using a configuration file. It can be nil
// if there is no file available. In this case we'll configure only via environment.
func NewAgentConfig(loggerName config.LoggerName, yamlPath, netYamlPath string) (*AgentConfig, error) {
var err error
// For Agent 6 we will have a YAML config file to use.
if err := LoadConfigIfExists(yamlPath); err != nil {
return nil, err
}
// Note: This only considers container sources that are already setup. It's possible that container sources may
// need a few minutes to be ready on newly provisioned hosts.
_, err = util.GetContainers()
canAccessContainers := err == nil
cfg := NewDefaultAgentConfig(canAccessContainers)
if err := cfg.LoadProcessYamlConfig(yamlPath); err != nil {
return nil, err
}
if err := cfg.Orchestrator.Load(); err != nil {
return nil, err
}
// (Re)configure the logging from our configuration
if err := setupLogger(loggerName, cfg.LogFile, cfg); err != nil {
log.Errorf("failed to setup configured logger: %s", err)
return nil, err
}
// For system probe, there is an additional config file that is shared with the system-probe
syscfg, err := sysconfig.Merge(netYamlPath)
if err != nil {
return nil, err
}
if syscfg.Enabled {
cfg.EnableSystemProbe = true
cfg.MaxConnsPerMessage = syscfg.MaxConnsPerMessage
cfg.SystemProbeAddress = syscfg.SocketAddress
// enable corresponding checks to system-probe modules
for mod := range syscfg.EnabledModules {
if checks, ok := moduleCheckMap[mod]; ok {
cfg.EnabledChecks = append(cfg.EnabledChecks, checks...)
}
}
if !cfg.Enabled {
log.Info("enabling process-agent for connections check as the system-probe is enabled")
cfg.Enabled = true
}
}
// TODO: Once proxies have been moved to common config util, remove this
if cfg.proxy, err = proxyFromEnv(cfg.proxy); err != nil {
log.Errorf("error parsing environment proxy settings, not using a proxy: %s", err)
cfg.proxy = nil
}
// Python-style log level has WARNING vs WARN
if strings.ToLower(cfg.LogLevel) == "warning" {
cfg.LogLevel = "warn"
}
if err := validate.ValidHostname(cfg.HostName); err != nil {
// lookup hostname if there is no config override or if the override is invalid
if hostname, err := getHostname(context.TODO(), cfg.DDAgentBin, cfg.grpcConnectionTimeout); err == nil {
cfg.HostName = hostname
} else {
log.Errorf("Cannot get hostname: %v", err)
}
}
cfg.ContainerHostType = getContainerHostType()
if cfg.proxy != nil {
cfg.Transport.Proxy = cfg.proxy
}
// sanity check. This element is used with the modulo operator (%), so it can't be zero.
// if it is, log the error, and assume the config was attempting to disable
if cfg.Windows.ArgsRefreshInterval == 0 {
log.Warnf("invalid configuration: windows_collect_skip_new_args was set to 0. Disabling argument collection")
cfg.Windows.ArgsRefreshInterval = -1
}
// activate the pod collection if enabled and we have the cluster name set
if cfg.Orchestrator.OrchestrationCollectionEnabled {
if cfg.Orchestrator.KubeClusterName != "" {
cfg.EnabledChecks = append(cfg.EnabledChecks, PodCheckName)
} else {
log.Warnf("Failed to auto-detect a Kubernetes cluster name. Pod collection will not start. To fix this, set it manually via the cluster_name config option")
}
}
initRuntimeSettings()
return cfg, nil
}
// initRuntimeSettings registers settings to be added to the runtime config.
func initRuntimeSettings() {
// NOTE: Any settings you want to register should simply be added here
var processRuntimeSettings = []settings.RuntimeSetting{
settings.LogLevelRuntimeSetting{},
}
// Before we begin listening, register runtime settings
for _, setting := range processRuntimeSettings {
err := settings.RegisterRuntimeSetting(setting)
if err != nil {
_ = log.Warnf("cannot initialize the runtime setting %s: %v", setting.Name(), err)
}
}
}
// getContainerHostType uses the fargate library to detect container environment and returns the protobuf version of it
func getContainerHostType() model.ContainerHostType {
switch fargate.GetOrchestrator(context.TODO()) {
case fargate.ECS:
return model.ContainerHostType_fargateECS
case fargate.EKS:
return model.ContainerHostType_fargateEKS
}
return model.ContainerHostType_notSpecified
}
func loadEnvVariables() {
// The following environment variables will be loaded in the order listed, meaning variables
// further down the list may override prior variables.
for _, variable := range []struct{ env, cfg string }{
{"DD_PROCESS_AGENT_CONTAINER_SOURCE", "process_config.container_source"},
{"DD_SCRUB_ARGS", "process_config.scrub_args"},
{"DD_STRIP_PROCESS_ARGS", "process_config.strip_proc_arguments"},
{"DD_PROCESS_AGENT_URL", "process_config.process_dd_url"},
{"DD_PROCESS_AGENT_INTERNAL_PROFILING_ENABLED", "process_config.internal_profiling.enabled"},
{"DD_PROCESS_AGENT_REMOTE_TAGGER", "process_config.remote_tagger"},
{"DD_PROCESS_AGENT_MAX_PER_MESSAGE", "process_config.max_per_message"},
{"DD_PROCESS_AGENT_MAX_CTR_PROCS_PER_MESSAGE", "process_config.max_ctr_procs_per_message"},
{"DD_PROCESS_AGENT_CMD_PORT", "process_config.cmd_port"},
{"DD_PROCESS_AGENT_WINDOWS_USE_PERF_COUNTERS", "process_config.windows.use_perf_counters"},
{"DD_PROCESS_AGENT_DISCOVERY_ENABLED", "process_config.process_discovery.enabled"},
{"DD_ORCHESTRATOR_URL", "orchestrator_explorer.orchestrator_dd_url"},
{"DD_HOSTNAME", "hostname"},
{"DD_DOGSTATSD_PORT", "dogstatsd_port"},
{"DD_BIND_HOST", "bind_host"},
{"HTTPS_PROXY", "proxy.https"},
{"DD_PROXY_HTTPS", "proxy.https"},
{"DD_LOGS_STDOUT", "log_to_console"},
{"LOG_TO_CONSOLE", "log_to_console"},
{"DD_LOG_TO_CONSOLE", "log_to_console"},
{"LOG_LEVEL", "log_level"}, // Support LOG_LEVEL and DD_LOG_LEVEL but prefer DD_LOG_LEVEL
{"DD_LOG_LEVEL", "log_level"},
} {
if v, ok := os.LookupEnv(variable.env); ok {
config.Datadog.Set(variable.cfg, v)
}
}
// Support API_KEY and DD_API_KEY but prefer DD_API_KEY.
apiKey, envKey := os.Getenv("DD_API_KEY"), "DD_API_KEY"
if apiKey == "" {
apiKey, envKey = os.Getenv("API_KEY"), "API_KEY"
}
if apiKey != "" { // We don't want to overwrite the API KEY provided as an environment variable
log.Infof("overriding API key from env %s value", envKey)
config.Datadog.Set("api_key", config.SanitizeAPIKey(strings.Split(apiKey, ",")[0]))
}
if v := os.Getenv("DD_CUSTOM_SENSITIVE_WORDS"); v != "" {
config.Datadog.Set("process_config.custom_sensitive_words", strings.Split(v, ","))
}
if v := os.Getenv("DD_PROCESS_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_PROCESS_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("process_config.additional_endpoints", endpoints)
}
}
if v := os.Getenv("DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("orchestrator_explorer.orchestrator_additional_endpoints", endpoints)
}
}
}
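// Illustrative sketch (assumption, not part of the agent): the JSON shape that
// DD_PROCESS_ADDITIONAL_ENDPOINTS and DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS are
// unmarshalled into above -- a map from intake URL to the API keys used for it.
var exampleAdditionalEndpoints = map[string][]string{
	"https://process.agent.datadoghq.com": {"apikey1", "apikey2"},
}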
// IsBlacklisted returns a boolean indicating if the given command is blacklisted by our config.
func IsBlacklisted(cmdline []string, blacklist []*regexp.Regexp) bool {
cmd := strings.Join(cmdline, " ")
for _, b := range blacklist {
if b.MatchString(cmd) {
return true
}
}
return false
}
func isAffirmative(value string) (bool, error) {
if value == "" {
return false, fmt.Errorf("value is empty")
}
v := strings.ToLower(value)
return v == "true" || v == "yes" || v == "1", nil
}
// getHostname attempts to resolve the hostname in the following order: the main datadog agent via grpc, the main agent
// via cli and lastly falling back to os.Hostname() if it is unavailable
func getHostname(ctx context.Context, ddAgentBin string, grpcConnectionTimeout time.Duration) (string, error) {
// Fargate is handled as an exceptional case (there is no concept of a host, so we use the ARN in-place).
if fargate.IsFargateInstance(ctx) {
hostname, err := fargate.GetFargateHost(ctx)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get Fargate host: %v", err)
}
// Get the hostname via gRPC from the main agent if a hostname has not been set either from config/fargate
hostname, err := getHostnameFromGRPC(ctx, ddgrpc.GetDDAgentClient, grpcConnectionTimeout)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get hostname from grpc: %v", err)
// If the hostname is not set then we fallback to use the agent binary
hostname, err = getHostnameFromCmd(ddAgentBin, exec.Command)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get hostname from cmd: %v", err)
return os.Hostname()
}
// getHostnameFromCmd shells out to obtain the hostname used by the infra agent
func getHostnameFromCmd(ddAgentBin string, cmdFn cmdFunc) (string, error) {
cmd := cmdFn(ddAgentBin, "hostname")
// Copying all environment variables to child process
// Windows: Required, so the child process can load DLLs, etc.
// Linux: Optional, but will make use of DD_HOSTNAME and DOCKER_DD_AGENT if they exist
cmd.Env = append(cmd.Env, os.Environ()...)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return "", err
}
hostname := strings.TrimSpace(stdout.String())
if hostname == "" {
return "", fmt.Errorf("error retrieving dd-agent hostname %s", stderr.String())
}
return hostname, nil
}
// getHostnameFromGRPC retrieves the hostname from the main datadog agent via GRPC
func getHostnameFromGRPC(ctx context.Context, grpcClientFn func(ctx context.Context, opts ...grpc.DialOption) (pb.AgentClient, error), grpcConnectionTimeout time.Duration) (string, error) {
ctx, cancel := context.WithTimeout(ctx, grpcConnectionTimeout)
defer cancel()
ddAgentClient, err := grpcClientFn(ctx)
if err != nil {
return "", fmt.Errorf("cannot connect to datadog agent via grpc: %w", err)
}
reply, err := ddAgentClient.GetHostname(ctx, &pb.HostnameRequest{})
if err != nil {
return "", fmt.Errorf("cannot get hostname from datadog agent via grpc: %w", err)
}
log.Debugf("retrieved hostname:%s from datadog agent via grpc", reply.Hostname)
return reply.Hostname, nil
}
// proxyFromEnv parses out the proxy configuration from the ENV variables in a
// similar way to getProxySettings and, if enough values are available, returns
// a new proxy URL value. If the environment is not set for this then the
// `defaultVal` is returned.
func proxyFromEnv(defaultVal proxyFunc) (proxyFunc, error) {
var host string
scheme := "http"
if v := os.Getenv("PROXY_HOST"); v != "" {
// accept either http://myproxy.com or myproxy.com
if i := strings.Index(v, "://"); i != -1 {
// when available, parse the scheme from the url
scheme = v[0:i]
host = v[i+3:]
} else {
host = v
}
}
if host == "" {
return defaultVal, nil
}
port := defaultProxyPort
if v := os.Getenv("PROXY_PORT"); v != "" {
port, _ = strconv.Atoi(v)
}
var user, password string
if v := os.Getenv("PROXY_USER"); v != "" {
user = v
}
if v := os.Getenv("PROXY_PASSWORD"); v != "" {
password = v
}
return constructProxy(host, scheme, port, user, password)
}
// constructProxy constructs a proxy function from the given parts of a proxy URL (scheme, host, port, credentials).
// Note that we assume we have at least a non-empty host for this call but
// all other values can be their defaults (empty string or 0).
func constructProxy(host, scheme string, port int, user, password string) (proxyFunc, error) {
var userpass *url.Userinfo
if user != "" {
if password != "" {
userpass = url.UserPassword(user, password)
} else {
userpass = url.User(user)
}
}
var path string
if userpass != nil {
path = fmt.Sprintf("%s@%s:%v", userpass.String(), host, port)
} else {
path = fmt.Sprintf("%s:%v", host, port)
}
if scheme != "" {
path = fmt.Sprintf("%s://%s", scheme, path)
}
u, err := url.Parse(path)
if err != nil {
return nil, err
}
return http.ProxyURL(u), nil
}
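// Illustrative sketch (assumption, not part of the agent): how the PROXY_*
// variables read in proxyFromEnv() end up on the HTTP transport. With only
// PROXY_HOST=myproxy.com set, the resulting proxy URL is
// http://myproxy.com:3128 (defaultProxyPort).
func exampleProxyFromEnvUsage() {
	proxy, err := proxyFromEnv(nil)
	if err != nil || proxy == nil {
		return // no proxy configured, or parsing failed
	}
	transport := NewDefaultTransport()
	transport.Proxy = proxy
	req, _ := http.NewRequest("GET", defaultProcessEndpoint, nil)
	if u, perr := proxy(req); perr == nil && u != nil {
		log.Infof("requests to %s would be proxied through %s", req.URL, u)
	}
}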
func setupLogger(loggerName config.LoggerName, logFile string, cfg *AgentConfig) error {
return config.SetupLogger(
loggerName,
cfg.LogLevel,
logFile,
config.GetSyslogURI(),
config.Datadog.GetBool("syslog_rfc"),
config.Datadog.GetBool("log_to_console"),
config.Datadog.GetBool("log_format_json"),
)
}
|
["\"HOST_PROC\"", "\"HOST_SYS\"", "\"DD_API_KEY\"", "\"API_KEY\"", "\"DD_CUSTOM_SENSITIVE_WORDS\"", "\"DD_PROCESS_ADDITIONAL_ENDPOINTS\"", "\"DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS\"", "\"PROXY_HOST\"", "\"PROXY_PORT\"", "\"PROXY_USER\"", "\"PROXY_PASSWORD\""] |
[] |
["HOST_SYS", "PROXY_PASSWORD", "PROXY_HOST", "API_KEY", "HOST_PROC", "PROXY_USER", "PROXY_PORT", "DD_PROCESS_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_API_KEY", "DD_CUSTOM_SENSITIVE_WORDS"] |
[]
|
["HOST_SYS", "PROXY_PASSWORD", "PROXY_HOST", "API_KEY", "HOST_PROC", "PROXY_USER", "PROXY_PORT", "DD_PROCESS_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_API_KEY", "DD_CUSTOM_SENSITIVE_WORDS"]
|
go
| 11 | 0 | |
app.py
|
import requests
from flask import Flask, request, jsonify, send_from_directory
app = Flask(__name__)
import pandas as pd
import quandl
import math
import random
import os
import numpy as np
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.linear_model import LinearRegression
if 'ON_HEROKU' in os.environ:
@app.route('/')
def index():
return send_from_directory('client/build','index.html')
@app.route('/index.html')
def index2():
return send_from_directory('client/build','index.html')
@app.route('/static/css/<filename>')
def index_css(filename):
return send_from_directory('client/build/static/css',filename)
@app.route('/static/js/<filename>')
def index_js(filename):
return send_from_directory('client/build/static/js',filename)
@app.route('/service-worker.js')
def index_service_worker():
return send_from_directory('client/build', 'service-worker.js')
@app.route('/manifest.json')
def index_manifest():
return send_from_directory('client/build', 'manifest.json')
@app.route('/favicon-16x16.png')
def index_favicon16():
return send_from_directory('client/build', 'favicon-16x16.png')
@app.route('/favicon-32x32.png')
def index_favicon32():
return send_from_directory('client/build', 'favicon-32x32.png')
@app.route('/favicon-96x96.png')
def index_favicon96():
return send_from_directory('client/build', 'favicon-96x96.png')
@app.route('/getstockdata/')
def getStockData():
stock = request.args.get('stock', default=None, type=None)
quandl.ApiConfig.api_key = "qWcicxSctVxrP9PhyneG"
allData = quandl.get('WIKI/'+stock)
dataLength = 251
allDataLength = len(allData)
firstDataElem = math.floor(random.random()*(allDataLength-dataLength))
mlData = allData[0:firstDataElem+dataLength]
def FormatForModel(dataArray):
dataArray = dataArray[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
dataArray['HL_PCT'] = (dataArray['Adj. High'] - dataArray['Adj. Close']) / dataArray['Adj. Close'] * 100.0
dataArray['PCT_change'] = (dataArray['Adj. Close'] - dataArray['Adj. Open']) / dataArray['Adj. Open'] * 100.0
dataArray = dataArray[['Adj. Close', 'HL_PCT', 'PCT_change','Adj. Volume']]
dataArray.fillna(-99999, inplace=True)
return dataArray
mlData = FormatForModel(mlData)
forecast_col = 'Adj. Close'
forecast_out = int(math.ceil(0.12*dataLength))
mlData['label'] = mlData[forecast_col].shift(-forecast_out)
mlData.dropna(inplace=True)
X = np.array(mlData.drop(columns=['label']))
X = preprocessing.scale(X)
X_data = X[-dataLength:]
X = X[:-dataLength]
data = mlData[-dataLength:]
mlData = mlData[:-dataLength]
y = np.array(mlData['label'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.001)
clf = LinearRegression()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
prediction = clf.predict(X_data)
data = data[['Adj. Close']]
data = data.rename(columns={'Adj. Close':'EOD'})
data['prediction'] = prediction[:]
data = data.to_json(orient='table')
return jsonify(data)
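# Hedged note (added for illustration, not part of the original app): with
# dataLength = 251 the label column is shifted by forecast_out =
# ceil(0.12 * 251) = 31 rows, so the regression effectively predicts the
# adjusted close roughly 31 trading days ahead, and the final 251 rows
# (X_data) are the ones returned as the 'prediction' series.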
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cityscapesScripts/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py
|
#!/usr/bin/python
#
# The evaluation script for pixel-level semantic labeling.
# We use this script to evaluate your approach on the test set.
# You can use the script to evaluate on the validation set.
#
# Please check the description of the "getPrediction" method below
# and set the required environment variables as needed, such that
# this script can locate your results.
# If the default implementation of the method works, then it's most likely
# that our evaluation server will be able to process your results as well.
#
# Note that the script is a lot faster if you enable cython support.
# WARNING: Cython has only been tested on Ubuntu 64bit OS.
# To enable cython, run
# setup.py build_ext --inplace
#
# To run this script, make sure that your results are images,
# where pixels encode the class IDs as defined in labels.py.
# Note that the regular ID is used, not the train ID.
# Further note that many classes are ignored from evaluation.
# Thus, authors are not expected to predict these classes and all
# pixels with a ground truth label that is ignored are ignored in
# evaluation.
# python imports
from __future__ import print_function
import os, sys
import platform
import fnmatch
try:
from itertools import izip
except ImportError:
izip = zip
# Cityscapes imports
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
from csHelpers import *
# C Support
# Enable the cython support for faster evaluation
# Only tested for Ubuntu 64bit OS
CSUPPORT = True
# Check if C-Support is available for better performance
if CSUPPORT:
try:
import addToConfusionMatrix
except:
CSUPPORT = False
###################################
# PLEASE READ THESE INSTRUCTIONS!!!
###################################
# Provide the prediction file for the given ground truth file.
#
# The current implementation expects the results to be in a certain root folder.
# This folder is one of the following with decreasing priority:
# - environment variable CITYSCAPES_RESULTS
# - environment variable CITYSCAPES_DATASET/results
# - ../../results/"
#
# Within the root folder, a matching prediction file is recursively searched.
# A file matches if the filename follows the pattern
# <city>_123456_123456*.png
# for a ground truth filename
# <city>_123456_123456_gtFine_labelIds.png
def getPrediction( args, groundTruthFile ):
# determine the prediction path the first time the method is called
if not args.predictionPath:
rootPath = None
if 'CITYSCAPES_RESULTS' in os.environ:
rootPath = os.environ['CITYSCAPES_RESULTS']
elif 'CITYSCAPES_DATASET' in os.environ:
rootPath = os.path.join( os.environ['CITYSCAPES_DATASET'] , "results" )
else:
rootPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..','results')
if not os.path.isdir(rootPath):
printError("Could not find a result root folder. Please read the instructions of this method.")
args.predictionPath = rootPath
# walk the prediction path, if that has not happened yet
if not args.predictionWalk:
walk = []
for root, dirnames, filenames in os.walk(args.predictionPath):
walk.append( (root,filenames) )
args.predictionWalk = walk
csFile = getCsFileInfo(groundTruthFile)
filePattern = "{}_{}_{}*.png".format( csFile.city , csFile.sequenceNb , csFile.frameNb )
predictionFile = None
for root, filenames in args.predictionWalk:
for filename in fnmatch.filter(filenames, filePattern):
if not predictionFile:
predictionFile = os.path.join(root, filename)
else:
printError("Found multiple predictions for ground truth {}".format(groundTruthFile))
if not predictionFile:
printError("Found no prediction for ground truth {}".format(groundTruthFile))
return predictionFile
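# Hedged usage sketch (added for illustration; the paths and file names below
# are hypothetical):
#   export CITYSCAPES_DATASET=/data/cityscapes
# makes the result root resolve to /data/cityscapes/results, and for the
# ground truth file frankfurt_000000_000294_gtFine_labelIds.png the recursive
# search accepts any file matching frankfurt_000000_000294*.png, e.g.
# /data/cityscapes/results/frankfurt_000000_000294_mymethod.png.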
######################
# Parameters
######################
# A dummy class to collect a bunch of data
class CArgs(object):
pass
# And a global object of that class
args = CArgs()
# Where to look for Cityscapes
if 'CITYSCAPES_DATASET' in os.environ:
args.cityscapesPath = os.environ['CITYSCAPES_DATASET']
else:
args.cityscapesPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..')
if 'CITYSCAPES_EXPORT_DIR' in os.environ:
export_dir = os.environ['CITYSCAPES_EXPORT_DIR']
if not os.path.isdir(export_dir):
raise ValueError("CITYSCAPES_EXPORT_DIR {} is not a directory".format(export_dir))
args.exportFile = "{}/resultPixelLevelSemanticLabeling.json".format(export_dir)
else:
args.exportFile = os.path.join(args.cityscapesPath, "evaluationResults", "resultPixelLevelSemanticLabeling.json")
# Parameters that should be modified by user
args.groundTruthSearch = os.path.join( args.cityscapesPath , "gtFine" , "val" , "*", "*_gtFine_labelIds.png" )
# Remaining params
args.evalInstLevelScore = True
args.evalPixelAccuracy = False
args.evalLabels = []
args.printRow = 5
args.normalized = True
args.colorized = hasattr(sys.stderr, "isatty") and sys.stderr.isatty() and platform.system()=='Linux'
args.bold = colors.BOLD if args.colorized else ""
args.nocol = colors.ENDC if args.colorized else ""
args.JSONOutput = True
args.quiet = False
args.avgClassSize = {
"bicycle" : 4672.3249222261 ,
"caravan" : 36771.8241758242 ,
"motorcycle" : 6298.7200839748 ,
"rider" : 3930.4788056518 ,
"bus" : 35732.1511111111 ,
"train" : 67583.7075812274 ,
"car" : 12794.0202738185 ,
"person" : 3462.4756337644 ,
"truck" : 27855.1264367816 ,
"trailer" : 16926.9763313609 ,
}
# store some parameters for finding predictions in the args variable
# the values are filled when the method getPrediction is first called
args.predictionPath = None
args.predictionWalk = None
#########################
# Methods
#########################
# Generate empty confusion matrix and create list of relevant labels
def generateMatrix(args):
args.evalLabels = []
for label in labels:
if (label.id < 0):
continue
# we append all found labels, regardless of being ignored
args.evalLabels.append(label.id)
maxId = max(args.evalLabels)
# We use longlong type to be sure that there are no overflows
return np.zeros(shape=(maxId+1, maxId+1),dtype=np.ulonglong)
def generateInstanceStats(args):
instanceStats = {}
instanceStats["classes" ] = {}
instanceStats["categories"] = {}
for label in labels:
if label.hasInstances and not label.ignoreInEval:
instanceStats["classes"][label.name] = {}
instanceStats["classes"][label.name]["tp"] = 0.0
instanceStats["classes"][label.name]["tpWeighted"] = 0.0
instanceStats["classes"][label.name]["fn"] = 0.0
instanceStats["classes"][label.name]["fnWeighted"] = 0.0
for category in category2labels:
labelIds = []
allInstances = True
for label in category2labels[category]:
if label.id < 0:
continue
if not label.hasInstances:
allInstances = False
break
labelIds.append(label.id)
if not allInstances:
continue
instanceStats["categories"][category] = {}
instanceStats["categories"][category]["tp"] = 0.0
instanceStats["categories"][category]["tpWeighted"] = 0.0
instanceStats["categories"][category]["fn"] = 0.0
instanceStats["categories"][category]["fnWeighted"] = 0.0
instanceStats["categories"][category]["labelIds"] = labelIds
return instanceStats
# Get absolute or normalized value from field in confusion matrix.
def getMatrixFieldValue(confMatrix, i, j, args):
if args.normalized:
rowSum = confMatrix[i].sum()
if (rowSum == 0):
return float('nan')
return float(confMatrix[i][j]) / rowSum
else:
return confMatrix[i][j]
# Calculate and return IOU score for a particular label
def getIouScoreForLabel(label, confMatrix, args):
if id2label[label].ignoreInEval:
return float('nan')
# the number of true positive pixels for this label
# the entry on the diagonal of the confusion matrix
tp = np.longlong(confMatrix[label,label])
# the number of false negative pixels for this label
# the row sum of the matching row in the confusion matrix
# minus the diagonal entry
fn = np.longlong(confMatrix[label,:].sum()) - tp
# the number of false positive pixels for this label
# only pixels whose ground truth label is not ignored are counted:
# the column sum of the corresponding column in the confusion matrix
# without the ignored rows and without the actual label of interest
notIgnored = [l for l in args.evalLabels if not id2label[l].ignoreInEval and not l==label]
fp = np.longlong(confMatrix[notIgnored,label].sum())
# the denominator of the IOU score
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
# return IOU
return float(tp) / denom
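# Hedged worked example (added for illustration; the numbers are made up): if
# for some evaluated label the diagonal entry is tp = 80, the rest of its row
# sums to fn = 20, and the non-ignored off-diagonal entries of its column sum
# to fp = 10, then IoU = 80 / (80 + 10 + 20) = 0.727.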
# Calculate and return IOU score for a particular label
def getInstanceIouScoreForLabel(label, confMatrix, instStats, args):
if id2label[label].ignoreInEval:
return float('nan')
labelName = id2label[label].name
if not labelName in instStats["classes"]:
return float('nan')
tp = instStats["classes"][labelName]["tpWeighted"]
fn = instStats["classes"][labelName]["fnWeighted"]
# false positives, computed as above
notIgnored = [l for l in args.evalLabels if not id2label[l].ignoreInEval and not l==label]
fp = np.longlong(confMatrix[notIgnored,label].sum())
# the denominator of the IOU score
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
# return IOU
return float(tp) / denom
# Calculate prior for a particular class id.
def getPrior(label, confMatrix):
return float(confMatrix[label,:].sum()) / confMatrix.sum()
# Get average of scores.
# Only computes the average over valid entries.
def getScoreAverage(scoreList, args):
validScores = 0
scoreSum = 0.0
for score in scoreList:
if not math.isnan(scoreList[score]):
validScores += 1
scoreSum += scoreList[score]
if validScores == 0:
return float('nan')
return scoreSum / validScores
# Calculate and return IOU score for a particular category
def getIouScoreForCategory(category, confMatrix, args):
# All labels in this category
labels = category2labels[category]
# The IDs of all valid labels in this category
labelIds = [label.id for label in labels if not label.ignoreInEval and label.id in args.evalLabels]
# If there are no valid labels, then return NaN
if not labelIds:
return float('nan')
# the number of true positive pixels for this category
# this is the sum of all entries in the confusion matrix
# where row and column belong to a label ID of this category
tp = np.longlong(confMatrix[labelIds,:][:,labelIds].sum())
# the number of false negative pixels for this category
# that is the sum of all rows of labels within this category
# minus the number of true positive pixels
fn = np.longlong(confMatrix[labelIds,:].sum()) - tp
# the number of false positive pixels for this category
# we count the column sum of all labels within this category
# while skipping the rows of ignored labels and of labels within this category
notIgnoredAndNotInCategory = [l for l in args.evalLabels if not id2label[l].ignoreInEval and id2label[l].category != category]
fp = np.longlong(confMatrix[notIgnoredAndNotInCategory,:][:,labelIds].sum())
# the denominator of the IOU score
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
# return IOU
return float(tp) / denom
# Calculate and return IOU score for a particular category
def getInstanceIouScoreForCategory(category, confMatrix, instStats, args):
if not category in instStats["categories"]:
return float('nan')
labelIds = instStats["categories"][category]["labelIds"]
tp = instStats["categories"][category]["tpWeighted"]
fn = instStats["categories"][category]["fnWeighted"]
# the number of false positive pixels for this category
# same as above
notIgnoredAndNotInCategory = [l for l in args.evalLabels if not id2label[l].ignoreInEval and id2label[l].category != category]
fp = np.longlong(confMatrix[notIgnoredAndNotInCategory,:][:,labelIds].sum())
# the denominator of the IOU score
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
# return IOU
return float(tp) / denom
# create a dictionary containing all relevant results
def createResultDict( confMatrix, classScores, classInstScores, categoryScores, categoryInstScores, perImageStats, args ):
# write JSON result file
wholeData = {}
wholeData["confMatrix"] = confMatrix.tolist()
wholeData["priors"] = {}
wholeData["labels"] = {}
for label in args.evalLabels:
wholeData["priors"][id2label[label].name] = getPrior(label, confMatrix)
wholeData["labels"][id2label[label].name] = label
wholeData["classScores"] = classScores
wholeData["classInstScores"] = classInstScores
wholeData["categoryScores"] = categoryScores
wholeData["categoryInstScores"] = categoryInstScores
wholeData["averageScoreClasses"] = getScoreAverage(classScores, args)
wholeData["averageScoreInstClasses"] = getScoreAverage(classInstScores, args)
wholeData["averageScoreCategories"] = getScoreAverage(categoryScores, args)
wholeData["averageScoreInstCategories"] = getScoreAverage(categoryInstScores, args)
if perImageStats:
wholeData["perImageScores"] = perImageStats
return wholeData
def writeJSONFile(wholeData, args):
path = os.path.dirname(args.exportFile)
ensurePath(path)
writeDict2JSON(wholeData, args.exportFile)
# Print confusion matrix
def printConfMatrix(confMatrix, args):
# print line
print("\b{text:{fill}>{width}}".format(width=15, fill='-', text=" "), end=' ')
for label in args.evalLabels:
print("\b{text:{fill}>{width}}".format(width=args.printRow + 2, fill='-', text=" "), end=' ')
print("\b{text:{fill}>{width}}".format(width=args.printRow + 3, fill='-', text=" "))
# print label names
print("\b{text:>{width}} |".format(width=13, text=""), end=' ')
for label in args.evalLabels:
print("\b{text:^{width}} |".format(width=args.printRow, text=id2label[label].name[0]), end=' ')
print("\b{text:>{width}} |".format(width=6, text="Prior"))
# print line
print("\b{text:{fill}>{width}}".format(width=15, fill='-', text=" "), end=' ')
for label in args.evalLabels:
print("\b{text:{fill}>{width}}".format(width=args.printRow + 2, fill='-', text=" "), end=' ')
print("\b{text:{fill}>{width}}".format(width=args.printRow + 3, fill='-', text=" "))
# print matrix
for x in range(0, confMatrix.shape[0]):
if (not x in args.evalLabels):
continue
# get prior of this label
prior = getPrior(x, confMatrix)
# skip if label does not exist in ground truth
if prior < 1e-9:
continue
# print name
name = id2label[x].name
if len(name) > 13:
name = name[:13]
print("\b{text:>{width}} |".format(width=13,text=name), end=' ')
# print matrix content
for y in range(0, len(confMatrix[x])):
if (not y in args.evalLabels):
continue
matrixFieldValue = getMatrixFieldValue(confMatrix, x, y, args)
print(getColorEntry(matrixFieldValue, args) + "\b{text:>{width}.2f} ".format(width=args.printRow, text=matrixFieldValue) + args.nocol, end=' ')
# print prior
print(getColorEntry(prior, args) + "\b{text:>{width}.4f} ".format(width=6, text=prior) + args.nocol)
# print line
print("\b{text:{fill}>{width}}".format(width=15, fill='-', text=" "), end=' ')
for label in args.evalLabels:
print("\b{text:{fill}>{width}}".format(width=args.printRow + 2, fill='-', text=" "), end=' ')
print("\b{text:{fill}>{width}}".format(width=args.printRow + 3, fill='-', text=" "), end=' ')
# Print intersection-over-union scores for all classes.
def printClassScores(scoreList, instScoreList, args):
if (args.quiet):
return
print(args.bold + "classes IoU nIoU" + args.nocol)
print("--------------------------------")
for label in args.evalLabels:
if (id2label[label].ignoreInEval):
continue
labelName = str(id2label[label].name)
iouStr = getColorEntry(scoreList[labelName], args) + "{val:>5.3f}".format(val=scoreList[labelName]) + args.nocol
niouStr = getColorEntry(instScoreList[labelName], args) + "{val:>5.3f}".format(val=instScoreList[labelName]) + args.nocol
print("{:<14}: ".format(labelName) + iouStr + " " + niouStr)
# Print intersection-over-union scores for all categories.
def printCategoryScores(scoreDict, instScoreDict, args):
if (args.quiet):
return
print(args.bold + "categories IoU nIoU" + args.nocol)
print("--------------------------------")
for categoryName in scoreDict:
if all( label.ignoreInEval for label in category2labels[categoryName] ):
continue
iouStr = getColorEntry(scoreDict[categoryName], args) + "{val:>5.3f}".format(val=scoreDict[categoryName]) + args.nocol
niouStr = getColorEntry(instScoreDict[categoryName], args) + "{val:>5.3f}".format(val=instScoreDict[categoryName]) + args.nocol
print("{:<14}: ".format(categoryName) + iouStr + " " + niouStr)
# Evaluate image lists pairwise.
def evaluateImgLists(predictionImgList, groundTruthImgList, args):
if len(predictionImgList) != len(groundTruthImgList):
printError("List of images for prediction and groundtruth are not of equal size.")
confMatrix = generateMatrix(args)
instStats = generateInstanceStats(args)
perImageStats = {}
nbPixels = 0
if not args.quiet:
print("Evaluating {} pairs of images...".format(len(predictionImgList)))
# Evaluate all pairs of images and save them into a matrix
for i in range(len(predictionImgList)):
predictionImgFileName = predictionImgList[i]
groundTruthImgFileName = groundTruthImgList[i]
#print "Evaluate ", predictionImgFileName, "<>", groundTruthImgFileName
nbPixels += evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix, instStats, perImageStats, args)
# sanity check
if confMatrix.sum() != nbPixels:
printError('Number of analyzed pixels and entries in confusion matrix disagree: confMatrix {}, pixels {}'.format(confMatrix.sum(),nbPixels))
if not args.quiet:
print("\rImages Processed: {}".format(i+1), end=' ')
sys.stdout.flush()
if not args.quiet:
print("\n")
# sanity check
if confMatrix.sum() != nbPixels:
printError('Number of analyzed pixels and entries in confusion matrix disagree: confMatrix {}, pixels {}'.format(confMatrix.sum(),nbPixels))
# print confusion matrix
if (not args.quiet):
printConfMatrix(confMatrix, args)
# Calculate IOU scores on class level from matrix
classScoreList = {}
for label in args.evalLabels:
labelName = id2label[label].name
classScoreList[labelName] = getIouScoreForLabel(label, confMatrix, args)
# Calculate instance IOU scores on class level from matrix
classInstScoreList = {}
for label in args.evalLabels:
labelName = id2label[label].name
classInstScoreList[labelName] = getInstanceIouScoreForLabel(label, confMatrix, instStats, args)
# Print IOU scores
if (not args.quiet):
print("")
print("")
printClassScores(classScoreList, classInstScoreList, args)
iouAvgStr = getColorEntry(getScoreAverage(classScoreList, args), args) + "{avg:5.3f}".format(avg=getScoreAverage(classScoreList, args)) + args.nocol
niouAvgStr = getColorEntry(getScoreAverage(classInstScoreList , args), args) + "{avg:5.3f}".format(avg=getScoreAverage(classInstScoreList , args)) + args.nocol
print("--------------------------------")
print("Score Average : " + iouAvgStr + " " + niouAvgStr)
print("--------------------------------")
print("")
# Calculate IOU scores on category level from matrix
categoryScoreList = {}
for category in category2labels.keys():
categoryScoreList[category] = getIouScoreForCategory(category,confMatrix,args)
# Calculate instance IOU scores on category level from matrix
categoryInstScoreList = {}
for category in category2labels.keys():
categoryInstScoreList[category] = getInstanceIouScoreForCategory(category,confMatrix,instStats,args)
# Print IOU scores
if (not args.quiet):
print("")
printCategoryScores(categoryScoreList, categoryInstScoreList, args)
iouAvgStr = getColorEntry(getScoreAverage(categoryScoreList, args), args) + "{avg:5.3f}".format(avg=getScoreAverage(categoryScoreList, args)) + args.nocol
niouAvgStr = getColorEntry(getScoreAverage(categoryInstScoreList, args), args) + "{avg:5.3f}".format(avg=getScoreAverage(categoryInstScoreList, args)) + args.nocol
print("--------------------------------")
print("Score Average : " + iouAvgStr + " " + niouAvgStr)
print("--------------------------------")
print("")
# write result file
allResultsDict = createResultDict( confMatrix, classScoreList, classInstScoreList, categoryScoreList, categoryInstScoreList, perImageStats, args )
writeJSONFile( allResultsDict, args)
# return confusion matrix
return allResultsDict
# Main evaluation method. Evaluates pairs of prediction and ground truth
# images which are passed as arguments.
def evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix, instanceStats, perImageStats, args):
# Loading all resources for evaluation.
try:
predictionImg = Image.open(predictionImgFileName)
predictionNp = np.array(predictionImg)
except:
printError("Unable to load " + predictionImgFileName)
try:
groundTruthImg = Image.open(groundTruthImgFileName)
groundTruthNp = np.array(groundTruthImg)
except:
printError("Unable to load " + groundTruthImgFileName)
# load ground truth instances, if needed
if args.evalInstLevelScore:
groundTruthInstanceImgFileName = groundTruthImgFileName.replace("labelIds","instanceIds")
try:
instanceImg = Image.open(groundTruthInstanceImgFileName)
instanceNp = np.array(instanceImg)
except:
printError("Unable to load " + groundTruthInstanceImgFileName)
# Check for equal image sizes
if (predictionImg.size[0] != groundTruthImg.size[0]):
printError("Image widths of " + predictionImgFileName + " and " + groundTruthImgFileName + " are not equal.")
if (predictionImg.size[1] != groundTruthImg.size[1]):
printError("Image heights of " + predictionImgFileName + " and " + groundTruthImgFileName + " are not equal.")
if ( len(predictionNp.shape) != 2 ):
printError("Predicted image has multiple channels.")
imgWidth = predictionImg.size[0]
imgHeight = predictionImg.size[1]
nbPixels = imgWidth*imgHeight
# Evaluate images
if (CSUPPORT):
# using cython
confMatrix = addToConfusionMatrix.cEvaluatePair(predictionNp, groundTruthNp, confMatrix, args.evalLabels)
else:
# the slower python way
for (groundTruthImgPixel,predictionImgPixel) in izip(groundTruthImg.getdata(),predictionImg.getdata()):
if (not groundTruthImgPixel in args.evalLabels):
printError("Unknown label with id {:}".format(groundTruthImgPixel))
confMatrix[groundTruthImgPixel][predictionImgPixel] += 1
if args.evalInstLevelScore:
# Generate category masks
categoryMasks = {}
for category in instanceStats["categories"]:
categoryMasks[category] = np.in1d( predictionNp , instanceStats["categories"][category]["labelIds"] ).reshape(predictionNp.shape)
instList = np.unique(instanceNp[instanceNp > 1000])
for instId in instList:
labelId = int(instId/1000)
label = id2label[ labelId ]
if label.ignoreInEval:
continue
mask = instanceNp==instId
instSize = np.count_nonzero( mask )
tp = np.count_nonzero( predictionNp[mask] == labelId )
fn = instSize - tp
weight = args.avgClassSize[label.name] / float(instSize)
tpWeighted = float(tp) * weight
fnWeighted = float(fn) * weight
instanceStats["classes"][label.name]["tp"] += tp
instanceStats["classes"][label.name]["fn"] += fn
instanceStats["classes"][label.name]["tpWeighted"] += tpWeighted
instanceStats["classes"][label.name]["fnWeighted"] += fnWeighted
category = label.category
if category in instanceStats["categories"]:
catTp = 0
catTp = np.count_nonzero( np.logical_and( mask , categoryMasks[category] ) )
catFn = instSize - catTp
catTpWeighted = float(catTp) * weight
catFnWeighted = float(catFn) * weight
instanceStats["categories"][category]["tp"] += catTp
instanceStats["categories"][category]["fn"] += catFn
instanceStats["categories"][category]["tpWeighted"] += catTpWeighted
instanceStats["categories"][category]["fnWeighted"] += catFnWeighted
if args.evalPixelAccuracy:
notIgnoredLabels = [l for l in args.evalLabels if not id2label[l].ignoreInEval]
notIgnoredPixels = np.in1d( groundTruthNp , notIgnoredLabels , invert=True ).reshape(groundTruthNp.shape)
erroneousPixels = np.logical_and( notIgnoredPixels , ( predictionNp != groundTruthNp ) )
perImageStats[predictionImgFileName] = {}
perImageStats[predictionImgFileName]["nbNotIgnoredPixels"] = np.count_nonzero(notIgnoredPixels)
perImageStats[predictionImgFileName]["nbCorrectPixels"] = np.count_nonzero(erroneousPixels)
return nbPixels
# The main method
def main(argv):
global args
predictionImgList = []
groundTruthImgList = []
# the image lists can either be provided as arguments
if (len(argv) > 3):
for arg in argv:
if ("gt" in arg or "groundtruth" in arg):
groundTruthImgList.append(arg)
elif ("pred" in arg):
predictionImgList.append(arg)
# however, the no-argument way is preferred
elif len(argv) == 0:
# use the ground truth search string specified above
groundTruthImgList = glob.glob(args.groundTruthSearch)
if not groundTruthImgList:
printError("Cannot find any ground truth images to use for evaluation. Searched for: {}".format(args.groundTruthSearch))
# get the corresponding prediction for each ground truth image
for gt in groundTruthImgList:
predictionImgList.append( getPrediction(args,gt) )
# evaluate
evaluateImgLists(predictionImgList, groundTruthImgList, args)
return
# call the main method
if __name__ == "__main__":
main(sys.argv[1:])
|
[] |
[] |
[
"CITYSCAPES_EXPORT_DIR",
"CITYSCAPES_DATASET",
"CITYSCAPES_RESULTS"
] |
[]
|
["CITYSCAPES_EXPORT_DIR", "CITYSCAPES_DATASET", "CITYSCAPES_RESULTS"]
|
python
| 3 | 0 | |
vendor/github.com/hyperledger/fabric-chaincode-go/shim/internal/config.go
|
// Copyright the Hyperledger Fabric contributors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package internal
import (
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"time"
"github.com/cetcxinlian/cryptogm/tls"
"github.com/cetcxinlian/cryptogm/x509"
"google.golang.org/grpc/keepalive"
)
// Config contains chaincode's configuration
type Config struct {
ChaincodeName string
TLS *tls.Config
KaOpts keepalive.ClientParameters
}
// LoadConfig loads the chaincode configuration
func LoadConfig() (Config, error) {
var err error
tlsEnabled, err := strconv.ParseBool(os.Getenv("CORE_PEER_TLS_ENABLED"))
if err != nil {
return Config{}, errors.New("'CORE_PEER_TLS_ENABLED' must be set to 'true' or 'false'")
}
conf := Config{
ChaincodeName: os.Getenv("CORE_CHAINCODE_ID_NAME"),
// hardcode to match chaincode server
KaOpts: keepalive.ClientParameters{
Time: 1 * time.Minute,
Timeout: 20 * time.Second,
PermitWithoutStream: true,
},
}
if !tlsEnabled {
return conf, nil
}
var key []byte
path, set := os.LookupEnv("CORE_TLS_CLIENT_KEY_FILE")
if set {
key, err = ioutil.ReadFile(path)
if err != nil {
return Config{}, fmt.Errorf("failed to read private key file: %s", err)
}
} else {
data, err := ioutil.ReadFile(os.Getenv("CORE_TLS_CLIENT_KEY_PATH"))
if err != nil {
return Config{}, fmt.Errorf("failed to read private key file: %s", err)
}
key, err = base64.StdEncoding.DecodeString(string(data))
if err != nil {
return Config{}, fmt.Errorf("failed to decode private key file: %s", err)
}
}
var cert []byte
path, set = os.LookupEnv("CORE_TLS_CLIENT_CERT_FILE")
if set {
cert, err = ioutil.ReadFile(path)
if err != nil {
return Config{}, fmt.Errorf("failed to read public key file: %s", err)
}
} else {
data, err := ioutil.ReadFile(os.Getenv("CORE_TLS_CLIENT_CERT_PATH"))
if err != nil {
return Config{}, fmt.Errorf("failed to read public key file: %s", err)
}
cert, err = base64.StdEncoding.DecodeString(string(data))
if err != nil {
return Config{}, fmt.Errorf("failed to decode public key file: %s", err)
}
}
root, err := ioutil.ReadFile(os.Getenv("CORE_PEER_TLS_ROOTCERT_FILE"))
if err != nil {
return Config{}, fmt.Errorf("failed to read root cert file: %s", err)
}
tlscfg, err := LoadTLSConfig(false, key, cert, root)
if err != nil {
return Config{}, err
}
conf.TLS = tlscfg
return conf, nil
}
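// Hedged illustration (added for clarity; the values are placeholders, not
// defaults of this package): an environment under which LoadConfig succeeds
// with TLS enabled might look like
//
//	CORE_CHAINCODE_ID_NAME=mycc:1.0
//	CORE_PEER_TLS_ENABLED=true
//	CORE_TLS_CLIENT_KEY_PATH=/etc/hyperledger/client.key    (base64-encoded key material)
//	CORE_TLS_CLIENT_CERT_PATH=/etc/hyperledger/client.crt   (base64-encoded cert material)
//	CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/peer-root.pem
//
// or, when CORE_TLS_CLIENT_KEY_FILE / CORE_TLS_CLIENT_CERT_FILE are set, the
// key and cert are read directly from those paths without base64 decoding.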
// LoadTLSConfig loads the TLS configuration for the chaincode
func LoadTLSConfig(isserver bool, key, cert, root []byte) (*tls.Config, error) {
if key == nil {
return nil, fmt.Errorf("key not provided")
}
if cert == nil {
return nil, fmt.Errorf("cert not provided")
}
if !isserver && root == nil {
return nil, fmt.Errorf("root cert not provided")
}
cccert, err := tls.X509KeyPair(cert, key)
if err != nil {
return nil, errors.New("failed to parse client key pair")
}
var rootCertPool *x509.CertPool
if root != nil {
rootCertPool = x509.NewCertPool()
if ok := rootCertPool.AppendCertsFromPEM(root); !ok {
return nil, errors.New("failed to load root cert file")
}
}
tlscfg := &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: []tls.Certificate{cccert},
}
//follow Peer's server default config properties
if isserver {
tlscfg.ClientCAs = rootCertPool
tlscfg.SessionTicketsDisabled = true
tlscfg.CipherSuites = []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
}
if rootCertPool != nil {
tlscfg.ClientAuth = tls.RequireAndVerifyClientCert
}
} else {
tlscfg.RootCAs = rootCertPool
}
return tlscfg, nil
}
|
[
"\"CORE_PEER_TLS_ENABLED\"",
"\"CORE_CHAINCODE_ID_NAME\"",
"\"CORE_TLS_CLIENT_KEY_PATH\"",
"\"CORE_TLS_CLIENT_CERT_PATH\"",
"\"CORE_PEER_TLS_ROOTCERT_FILE\""
] |
[] |
[
"CORE_PEER_TLS_ENABLED",
"CORE_PEER_TLS_ROOTCERT_FILE",
"CORE_CHAINCODE_ID_NAME",
"CORE_TLS_CLIENT_KEY_PATH",
"CORE_TLS_CLIENT_CERT_PATH"
] |
[]
|
["CORE_PEER_TLS_ENABLED", "CORE_PEER_TLS_ROOTCERT_FILE", "CORE_CHAINCODE_ID_NAME", "CORE_TLS_CLIENT_KEY_PATH", "CORE_TLS_CLIENT_CERT_PATH"]
|
go
| 5 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ[
"DJANGO_SETTINGS_MODULE"] = "facebook_login.tests.runserver_settings"
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
face_enhancer/enhance.py
|
import model
import dataset
import cv2
from trainer import Trainer
import os
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
from skimage.io import imsave
from imageio import get_writer
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
image_transforms = transforms.Compose([
Image.fromarray,
transforms.ToTensor(),
transforms.Normalize([.5, .5, .5], [.5, .5, .5]),
])
device = torch.device('cuda')
def load_models(directory):
generator = model.GlobalGenerator(n_downsampling=2, n_blocks=6)
gen_name = os.path.join(directory, '40000_generator.pth')
if os.path.isfile(gen_name):
gen_dict = torch.load(gen_name)
generator.load_state_dict(gen_dict)
return generator.to(device)
def torch2numpy(tensor):
generated = tensor.detach().cpu().permute(1, 2, 0).numpy()
generated[generated < -1] = -1
generated[generated > 1] = 1
generated = (generated + 1) / 2 * 255
return generated.astype(np.uint8)
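# Hedged example (added for illustration; _demo_torch2numpy is a hypothetical
# helper, not part of the original script): a generator output of zeros, i.e.
# mid-range in [-1, 1], maps to a uint8 image of 127s with channels moved last.
def _demo_torch2numpy():
    t = torch.zeros(3, 8, 8)       # CHW tensor in the generator's value range
    out = torch2numpy(t)           # HWC uint8 array, here shape (8, 8, 3), all 127
    return out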
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
dataset_dir = '../results/target/face' # test_sync results are saved in this folder
pose_name = '../data/source/pose_source_norm.npy' # per-frame head coordinates
ckpt_dir = '../checkpoints/face'
result_dir = './results'
save_dir = dataset_dir+'/full_fake/'
if not os.path.exists(save_dir):
print('generating %s' % save_dir)
os.mkdir(save_dir)
else:
print(save_dir, 'already exists...')
image_folder = dataset.ImageFolderDataset(dataset_dir, cache=os.path.join(dataset_dir, 'local.db'), is_test=True)
face_dataset = dataset.FaceCropDataset(image_folder, pose_name, image_transforms, crop_size=48)
length = len(face_dataset)
print('Number of pictures:', length)
generator = load_models(os.path.join(ckpt_dir))
for i in tqdm(range(length)):
_, fake_head, top, bottom, left, right, real_full, fake_full \
= face_dataset.get_full_sample(i)
with torch.no_grad():
fake_head.unsqueeze_(0)
fake_head = fake_head.to(device)
residual = generator(fake_head)
enhanced = fake_head + residual
enhanced.squeeze_()
enhanced = torch2numpy(enhanced)
fake_full_old = fake_full.copy()
fake_full[top: bottom, left: right, :] = enhanced
b, g, r = cv2.split(fake_full)
fake_full = cv2.merge([r, g, b])
cv2.imwrite(save_dir+ '{:05}.png'.format(i),fake_full)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
test/run_test.py
|
#!/usr/bin/env python3
import argparse
import copy
from datetime import datetime
import json
import modulefinder
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import torch
from torch.utils import cpp_extension
from torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA
from torch.testing._internal.framework_utils import calculate_shards
import torch.distributed as dist
from typing import Dict, Optional, Tuple, List, Any
from typing_extensions import TypedDict
try:
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."))
from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)
except ImportError:
print("Unable to import s3_stat_parser from tools. Running without S3 stats...")
HAVE_BOTO3 = False
TESTS = [
'test_import_time',
'test_public_bindings',
'test_type_hints',
'test_autograd',
'benchmark_utils/test_benchmark_utils',
'test_binary_ufuncs',
'test_bundled_inputs',
'test_complex',
'test_cpp_api_parity',
'test_cpp_extensions_aot_no_ninja',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_jit',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_cuda',
'test_jit_cuda_fuser',
'test_cuda_primary_ctx',
'test_dataloader',
'test_datapipe',
'distributed/test_data_parallel',
'distributed/test_distributed_fork',
'distributed/test_distributed_spawn',
'distributions/test_constraints',
'distributions/test_distributions',
'test_dispatch',
'test_expecttest',
'test_foreach',
'test_indexing',
'test_jit',
'test_linalg',
'test_logging',
'test_mkldnn',
'test_model_dump',
'test_module_init',
'test_multiprocessing',
'test_multiprocessing_spawn',
'distributed/test_nccl',
'test_native_functions',
'test_numba_integration',
'test_nn',
'test_ops',
'test_optim',
'test_pytree',
'test_mobile_optimizer',
'test_set_default_mobile_cpu_allocator',
'test_xnnpack_integration',
'test_vulkan',
'test_sparse',
'test_sparse_csr',
'test_quantization',
'test_pruning_op',
'test_spectral_ops',
'test_serialization',
'test_shape_ops',
'test_show_pickle',
'test_sort_and_select',
'test_tensor_creation_ops',
'test_testing',
'test_torch',
'test_type_info',
'test_unary_ufuncs',
'test_utils',
'test_view_ops',
'test_vmap',
'test_namedtuple_return_api',
'test_numpy_interop',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
'test_tensorboard',
'test_namedtensor',
'test_reductions',
'test_type_promotion',
'test_jit_disabled',
'test_function_schema',
'test_op_aliases',
'test_overrides',
'test_jit_fuser_te',
'test_tensorexpr',
'test_tensorexpr_pybind',
'test_openmp',
'test_profiler',
"distributed/test_launcher",
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_futures',
'test_fx',
'test_fx_experimental',
'test_functional_autograd_benchmark',
'test_package',
'test_license',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
'distributed/elastic/timer/api_test',
'distributed/elastic/timer/local_timer_example',
'distributed/elastic/timer/local_timer_test',
'distributed/elastic/events/lib_test',
'distributed/elastic/metrics/api_test',
'distributed/elastic/utils/logging_test',
'distributed/elastic/utils/util_test',
'distributed/elastic/utils/distributed_test',
'distributed/elastic/multiprocessing/api_test',
]
# Tests need to be run with pytest.
USE_PYTEST_LIST = [
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributions/test_constraints',
'distributions/test_transforms',
'distributions/test_utils',
'test_typing',
"distributed/elastic/events/lib_test",
"distributed/elastic/agent/server/test/api_test",
]
WINDOWS_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/test_distributed_fork',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
'distributed/optim/test_zero_redundancy_optimizer',
"distributed/elastic/agent/server/test/api_test",
'distributed/elastic/multiprocessing/api_test',
]
ROCM_BLOCKLIST = [
'distributed/nn/jit/test_instantiator',
'distributed/rpc/test_faulty_agent',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'test_determination',
'test_multiprocessing',
'test_jit_legacy',
'test_type_hints',
'test_openmp',
]
RUN_PARALLEL_BLOCKLIST = [
'test_cpp_extensions_jit',
'test_expecttest',
'test_jit_disabled',
'test_mobile_optimizer',
'test_multiprocessing',
'test_multiprocessing_spawn',
'test_namedtuple_return_api',
'test_overrides',
'test_show_pickle',
'test_tensorexpr',
'test_cuda_primary_ctx',
] + [test for test in TESTS if test.startswith('distributed/')]
WINDOWS_COVERAGE_BLOCKLIST = [
]
# These tests are slow enough that it's worth calculating whether the patch
# touched any related files first. This list was manually generated, but for every
# run with --determine-from, we use another generated list based on this one and the
# previous test stats.
TARGET_DET_LIST = [
'distributions/test_distributions',
'test_nn',
'test_autograd',
'test_cpp_extensions_jit',
'test_jit_legacy',
'test_dataloader',
'test_overrides',
'test_linalg',
'test_jit',
'test_jit_profiling',
'test_torch',
'test_binary_ufuncs',
'test_numpy_interop',
'test_reductions',
'test_shape_ops',
'test_sort_and_select',
'test_testing',
'test_view_ops',
'distributed/nn/jit/test_instantiator',
'distributed/test_distributed_fork',
'distributed/rpc/test_process_group_agent',
'distributed/rpc/cuda/test_process_group_agent',
'distributed/rpc/test_tensorpipe_agent',
'distributed/rpc/cuda/test_tensorpipe_agent',
'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',
'distributed/test_distributed_spawn',
'test_cuda',
'test_cuda_primary_ctx',
'test_cpp_extensions_aot_ninja',
'test_cpp_extensions_aot_no_ninja',
'test_serialization',
'test_optim',
'test_utils',
'test_multiprocessing',
'test_tensorboard',
'distributed/test_c10d_common',
'distributed/test_c10d_gloo',
'distributed/test_c10d_nccl',
'distributed/test_jit_c10d',
'distributed/test_c10d_spawn_gloo',
'distributed/test_c10d_spawn_nccl',
'test_quantization',
'test_pruning_op',
'test_determination',
'test_futures',
'distributed/pipeline/sync/skip/test_api',
'distributed/pipeline/sync/skip/test_gpipe',
'distributed/pipeline/sync/skip/test_inspect_skip_layout',
'distributed/pipeline/sync/skip/test_leak',
'distributed/pipeline/sync/skip/test_portal',
'distributed/pipeline/sync/skip/test_stash_pop',
'distributed/pipeline/sync/skip/test_tracker',
'distributed/pipeline/sync/skip/test_verify_skippables',
'distributed/pipeline/sync/test_balance',
'distributed/pipeline/sync/test_bugs',
'distributed/pipeline/sync/test_checkpoint',
'distributed/pipeline/sync/test_copy',
'distributed/pipeline/sync/test_deferred_batch_norm',
'distributed/pipeline/sync/test_dependency',
'distributed/pipeline/sync/test_inplace',
'distributed/pipeline/sync/test_microbatch',
'distributed/pipeline/sync/test_phony',
'distributed/pipeline/sync/test_pipe',
'distributed/pipeline/sync/test_pipeline',
'distributed/pipeline/sync/test_stream',
'distributed/pipeline/sync/test_transparency',
'distributed/pipeline/sync/test_worker',
]
# the JSON file to store the S3 test stats
TEST_TIMES_FILE = '.pytorch-test-times'
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
_DEP_MODULES_CACHE: Dict[str, set] = {}
DISTRIBUTED_TESTS_CONFIG = {}
if dist.is_available():
DISTRIBUTED_TESTS_CONFIG['test'] = {
'WORLD_SIZE': '1'
}
if not TEST_WITH_ROCM and dist.is_mpi_available():
DISTRIBUTED_TESTS_CONFIG['mpi'] = {
'WORLD_SIZE': '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'
}
if dist.is_nccl_available():
DISTRIBUTED_TESTS_CONFIG['nccl'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'
}
if dist.is_gloo_available():
DISTRIBUTED_TESTS_CONFIG['gloo'] = {
'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',
'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'
}
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)
if n.startswith('SIG') and '_' not in n}
CPP_EXTENSIONS_ERROR = """
Ninja (https://ninja-build.org) is required for some of the C++ extensions
tests, but it could not be found. Install ninja with `pip install ninja`
or `conda install ninja`. Alternatively, disable said tests with
`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.
"""
PYTORCH_COLLECT_COVERAGE = bool(os.environ.get("PYTORCH_COLLECT_COVERAGE"))
JIT_EXECUTOR_TESTS = [
'test_jit_cuda_fuser',
'test_jit_profiling',
'test_jit_legacy',
'test_jit_fuser_legacy',
]
def print_to_stderr(message):
print(message, file=sys.stderr)
# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1
def get_stripped_CI_job() -> str:
job = os.environ.get("CIRCLE_JOB", "").rstrip('0123456789')
if job.endswith('_slow_test'):
job = job[:len(job) - len('_slow_test')]
elif job.endswith('_test'):
job = job[:len(job) - len('_test')]
elif job.endswith('_build'):
job = job[:len(job) - len('_build')]
return job
def calculate_job_times(reports: List["Report"]) -> Dict[str, float]:
# an entry will be like ("test_file_name" -> (current_avg, # values))
jobs_to_times: Dict[str, Tuple[float, int]] = dict()
for report in reports:
assert report.get('format_version') == 2, "S3 format currently handled is version 2 only"
files: Dict[str, Any] = report['files']
for name, test_file in files.items():
if name not in jobs_to_times:
jobs_to_times[name] = (test_file['total_seconds'], 1)
else:
curr_avg, curr_count = jobs_to_times[name]
new_count = curr_count + 1
new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count
jobs_to_times[name] = (new_avg, new_count)
# if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'
# and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since
# test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that
# both use the test_cpp_extensions_aot.py file.
if 'test_cpp_extensions_aot' in jobs_to_times:
jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']
jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']
return {job: time for job, (time, _) in jobs_to_times.items()}
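# Hedged example (added for illustration; the report dicts below are made up
# and _example_calculate_job_times is not part of the original file): two
# version-2 reports for the same test file average their total_seconds.
def _example_calculate_job_times() -> Dict[str, float]:
    reports = [
        {'format_version': 2, 'files': {'test_nn': {'total_seconds': 100.0}}},
        {'format_version': 2, 'files': {'test_nn': {'total_seconds': 200.0}}},
    ]
    return calculate_job_times(reports)   # -> {'test_nn': 150.0}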
def pull_job_times_from_S3() -> Dict[str, float]:
if HAVE_BOTO3:
ci_job_prefix = get_stripped_CI_job()
s3_reports: List["Report"] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)
else:
print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')
print('If not installed, please install boto3 for automatic sharding and test categorization.')
s3_reports = []
if len(s3_reports) == 0:
print('Gathered no reports from S3. Proceeding without them.')
return dict()
return calculate_job_times(s3_reports)
def get_past_job_times() -> Dict[str, float]:
if os.path.exists(TEST_TIMES_FILE):
with open(TEST_TIMES_FILE) as file:
test_times_json: JobTimeJSON = json.load(file)
curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip()
file_commit = test_times_json.get('commit', '')
curr_ci_job = get_stripped_CI_job()
file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')
if curr_commit != file_commit:
print(f'Current test times file is from different commit {file_commit}.')
elif curr_ci_job != file_ci_job:
print(f'Current test times file is for different CI job {file_ci_job}.')
else:
print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. Proceeding with those values.')
return test_times_json.get('job_times', {})
# Found file, but commit or CI job in JSON doesn't match
print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')
job_times = pull_job_times_from_S3()
print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')
export_S3_test_times(TEST_TIMES_FILE, job_times)
return job_times
class JobTimeJSON(TypedDict):
commit: str
job_times: Dict[str, float]
def get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:
return {
'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding="ascii").strip(),
'CIRCLE_JOB': get_stripped_CI_job(),
'job_times': job_times,
}
def get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:
jobs_to_times = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. Proceeding with default sharding plan.')
return tests[which_shard - 1 :: num_shards]
shards = calculate_shards(num_shards, tests, jobs_to_times)
_, tests_from_shard = shards[which_shard - 1]
return tests_from_shard
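# Hedged note (added for illustration): without S3 stats the fallback above is
# a simple round-robin, e.g. with num_shards=2 shard 1 gets tests[0::2] (the
# even-indexed tests) and shard 2 gets tests[1::2]; with stats available,
# calculate_shards balances the shards by the recorded per-file runtimes instead.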
def get_slow_tests_based_on_S3() -> List[str]:
jobs_to_times: Dict[str, float] = get_past_job_times()
# Got no stats from S3, returning early to save runtime
if len(jobs_to_times) == 0:
print('Gathered no stats from S3. No new slow tests calculated.')
return []
slow_tests: List[str] = []
for test in TESTS:
if test in jobs_to_times and test not in TARGET_DET_LIST:
if jobs_to_times[test] > SLOW_TEST_THRESHOLD:
slow_tests.append(test)
return slow_tests
def get_executable_command(options, allow_pytest, disable_coverage=False):
if options.coverage and not disable_coverage:
executable = ['coverage', 'run', '--parallel-mode', '--source=torch']
else:
executable = [sys.executable]
if options.pytest:
if allow_pytest:
executable += ['-m', 'pytest']
else:
print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')
return executable
def run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):
unittest_args = options.additional_unittest_args.copy()
if options.verbose:
unittest_args.append(f'-{"v"*options.verbose}') # in case of pytest
if test_module in RUN_PARALLEL_BLOCKLIST:
unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]
if extra_unittest_args:
assert isinstance(extra_unittest_args, list)
unittest_args.extend(extra_unittest_args)
# If using pytest, replace -f with equivalent -x
if options.pytest:
unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
# Can't call `python -m unittest test_*` here because it doesn't run code
# in `if __name__ == '__main__': `. So call `python test_*.py` instead.
argv = [test_module + '.py'] + unittest_args
# Multiprocessing related tests cannot run with coverage.
# Tracking issue: https://github.com/pytorch/pytorch/issues/50661
disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST
# Extra arguments are not supported with pytest
executable = get_executable_command(options, allow_pytest=not extra_unittest_args,
disable_coverage=disable_coverage)
command = (launcher_cmd or []) + executable + argv
print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
return shell(command, test_directory)
def test_cuda_primary_ctx(test_module, test_directory, options):
return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])
def _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):
if use_ninja:
try:
cpp_extension.verify_ninja_availability()
except RuntimeError:
print(CPP_EXTENSIONS_ERROR)
return 1
# Wipe the build folder, if it exists already
cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')
cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')
if os.path.exists(cpp_extensions_test_build_dir):
shutil.rmtree(cpp_extensions_test_build_dir)
# Build the test cpp extensions modules
shell_env = os.environ.copy()
shell_env['USE_NINJA'] = str(1 if use_ninja else 0)
cmd = [sys.executable, 'setup.py', 'install', '--root', './install']
return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)
if return_code != 0:
return return_code
if sys.platform != 'win32':
return_code = shell(cmd,
cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),
env=shell_env)
if return_code != 0:
return return_code
# "install" the test modules and run tests
python_path = os.environ.get('PYTHONPATH', '')
try:
cpp_extensions = os.path.join(test_directory, 'cpp_extensions')
install_directory = ''
# install directory is the one that is named site-packages
for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):
for directory in directories:
if '-packages' in directory:
install_directory = os.path.join(root, directory)
assert install_directory, 'install_directory must not be empty'
os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])
return run_test(test_module, test_directory, options)
finally:
os.environ['PYTHONPATH'] = python_path
def test_cpp_extensions_aot_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,
options, use_ninja=True)
def test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):
return _test_cpp_extensions_aot('test_cpp_extensions_aot',
test_directory, options, use_ninja=False)
def test_distributed(test_module, test_directory, options):
# MPI tests are broken with Python-3.9
mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)
if options.verbose and not mpi_available:
print_to_stderr(
'MPI not available -- MPI backend tests will be skipped')
config = DISTRIBUTED_TESTS_CONFIG
for backend, env_vars in config.items():
if sys.platform == 'win32' and backend != 'gloo':
continue
if backend == 'mpi' and not mpi_available:
continue
for with_init_file in {True, False}:
if sys.platform == 'win32' and not with_init_file:
continue
tmp_dir = tempfile.mkdtemp()
if options.verbose:
init_str = "with {} init_method"
with_init = init_str.format("file" if with_init_file else "env")
print_to_stderr(
'Running distributed tests for the {} backend {}'.format(
backend, with_init))
os.environ['TEMP_DIR'] = tmp_dir
os.environ['BACKEND'] = backend
os.environ['INIT_METHOD'] = 'env://'
os.environ.update(env_vars)
if with_init_file:
if test_module in ["test_distributed_fork", "test_distributed_spawn"]:
init_method = f'{FILE_SCHEMA}{tmp_dir}/'
else:
init_method = f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'
os.environ['INIT_METHOD'] = init_method
try:
os.mkdir(os.path.join(tmp_dir, 'barrier'))
os.mkdir(os.path.join(tmp_dir, 'test_dir'))
if backend == 'mpi':
# probe mpiexec for --allow-run-as-root and --noprefix support
with open(os.devnull, 'w') as devnull:
allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(
'mpiexec --allow-run-as-root -n 1 bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
noprefix_opt = '--noprefix' if subprocess.call(
f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c ""', shell=True,
stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''
mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]
return_code = run_test(test_module, test_directory, options,
launcher_cmd=mpiexec)
else:
return_code = run_test(test_module, test_directory, options)
if return_code != 0:
return return_code
finally:
shutil.rmtree(tmp_dir)
return 0
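# Illustrative note (not part of the original script): each distributed run above is
# parameterized purely through environment variables (TEMP_DIR, BACKEND, INIT_METHOD plus
# the per-backend env_vars), so the launched test module is expected to read its
# configuration from os.environ rather than from command-line flags.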
CUSTOM_HANDLERS = {
'test_cuda_primary_ctx': test_cuda_primary_ctx,
'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,
'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,
'distributed/test_distributed_fork': test_distributed,
'distributed/test_distributed_spawn': test_distributed,
}
def parse_test_module(test):
return test.split('.')[0]
class TestChoices(list):
def __init__(self, *args, **kwargs):
super(TestChoices, self).__init__(args[0])
def __contains__(self, item):
return list.__contains__(self, parse_test_module(item))
def parse_args():
parser = argparse.ArgumentParser(
description='Run the PyTorch unit test suite',
epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
parser.add_argument(
'-v',
'--verbose',
action='count',
default=0,
help='print verbose information and test-by-test results')
parser.add_argument(
'--jit',
action='store_true',
help='run all jit tests')
parser.add_argument(
'-pt', '--pytest', action='store_true',
help='If true, use `pytest` to execute the tests. E.g., this runs '
'TestTorch with pytest in verbose and coverage mode: '
'python run_test.py -vci torch -pt')
parser.add_argument(
'-c', '--coverage', action='store_true', help='enable coverage',
default=PYTORCH_COLLECT_COVERAGE)
parser.add_argument(
'-i',
'--include',
nargs='+',
choices=TestChoices(TESTS),
default=TESTS,
metavar='TESTS',
help='select a set of tests to include (defaults to ALL tests).'
' tests can be specified with module name, module.TestClass'
' or module.TestClass.test_method')
parser.add_argument(
'-x',
'--exclude',
nargs='+',
choices=TESTS,
metavar='TESTS',
default=[],
help='select a set of tests to exclude')
parser.add_argument(
'-f',
'--first',
choices=TESTS,
metavar='TESTS',
help='select the test to start from (excludes previous tests)')
parser.add_argument(
'-l',
'--last',
choices=TESTS,
metavar='TESTS',
help='select the last test to run (excludes following tests)')
parser.add_argument(
'--bring-to-front',
nargs='+',
choices=TestChoices(TESTS),
default=[],
metavar='TESTS',
help='select a set of tests to run first. This can be used in situations'
' where you want to run all tests, but care more about some set, '
'e.g. after making a change to a specific component')
parser.add_argument(
'--ignore-win-blocklist',
action='store_true',
help='always run blocklisted windows tests')
parser.add_argument(
'--determine-from',
help='File of affected source filenames to determine which tests to run.')
parser.add_argument(
'--continue-through-error',
action='store_true',
help='Runs the full test suite despite one of the tests failing')
parser.add_argument(
'additional_unittest_args',
nargs='*',
help='additional arguments passed through to unittest, e.g., '
'python run_test.py -i sparse -- TestSparse.test_factory_size_check')
parser.add_argument(
'--export-past-test-times',
nargs='?',
type=str,
const=TEST_TIMES_FILE,
help='dumps test times from previous S3 stats into a file, format JSON',
)
parser.add_argument(
'--shard',
nargs=2,
type=int,
help='runs a shard of the tests (taking into account other selections), e.g., '
'--shard 2 3 will break up the selected tests into 3 shards and run the tests '
'in the 2nd shard (the first number should not exceed the second)',
)
parser.add_argument(
'--exclude-jit-executor',
action='store_true',
help='exclude tests that are run for a specific jit config'
)
return parser.parse_args()
def find_test_index(test, selected_tests, find_last_index=False):
"""Find the index of the first or last occurrence of a given test/test module in the list of selected tests.
This function is used to determine the indices when slicing the list of selected tests when
``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.
:attr:`selected_tests` can be a list that contains multiple consecutive occurrences of tests
as part of the same test module, e.g.:
```
selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',
'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']
```
If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.
If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.
Args:
test (str): Name of the test to look up
selected_tests (list): List of tests
find_last_index (bool, optional): whether to look up the index of the first or the last
occurrence (first is the default)
Returns:
index of the first or last occurrence of the given test
"""
idx = 0
found_idx = -1
for t in selected_tests:
if t.startswith(test):
found_idx = idx
if not find_last_index:
break
idx += 1
return found_idx
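# Illustrative sketch (not part of the original script): how find_test_index drives the
# --first/--last slicing in get_selected_tests below. The sample list is hypothetical and
# mirrors the docstring example.
#
#   sample = ['autograd', 'cuda', 'torch.TestTorch.test_acos',
#             'torch.TestTorch.test_tan', 'torch.TestTorch.test_add', 'utils']
#   find_test_index('torch', sample)                         # -> 2
#   find_test_index('torch', sample, find_last_index=True)   # -> 4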
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
for exclude_test in exclude_list:
tests_copy = selected_tests[:]
for test in tests_copy:
if test.startswith(exclude_test):
if exclude_message is not None:
print_to_stderr('Excluding {} {}'.format(test, exclude_message))
selected_tests.remove(test)
return selected_tests
def get_selected_tests(options):
selected_tests = options.include
if options.bring_to_front:
to_front = set(options.bring_to_front)
selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,
selected_tests))
if options.first:
first_index = find_test_index(options.first, selected_tests)
selected_tests = selected_tests[first_index:]
if options.last:
last_index = find_test_index(options.last, selected_tests, find_last_index=True)
selected_tests = selected_tests[:last_index + 1]
if options.shard:
assert len(options.shard) == 2, "Unexpected shard format"
assert min(options.shard) > 0, "Shards must be positive numbers"
which_shard, num_shards = options.shard
assert which_shard <= num_shards, "Selected shard must be less than or equal to the total number of shards"
assert num_shards <= len(selected_tests), f"Number of shards must be less than or equal to {len(selected_tests)}"
selected_tests = get_shard(which_shard, num_shards, selected_tests)
if options.exclude_jit_executor:
options.exclude.extend(JIT_EXECUTOR_TESTS)
selected_tests = exclude_tests(options.exclude, selected_tests)
if sys.platform == 'win32' and not options.ignore_win_blocklist:
target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')
if target_arch != 'x64':
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')
WINDOWS_BLOCKLIST.append('cpp_extensions_jit')
WINDOWS_BLOCKLIST.append('jit')
WINDOWS_BLOCKLIST.append('jit_fuser')
selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')
elif TEST_WITH_ROCM:
selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')
return selected_tests
def test_impact_of_file(filename):
"""Determine what class of impact this file has on test runs.
Possible values:
TORCH - torch python code
CAFFE2 - caffe2 python code
TEST - torch test code
UNKNOWN - may affect all tests
NONE - known to have no effect on test outcome
CI - CI configuration files
"""
parts = filename.split(os.sep)
if parts[0] in ['.jenkins', '.circleci']:
return 'CI'
if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:
return 'NONE'
elif parts[0] == 'torch':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TORCH'
elif parts[0] == 'caffe2':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'CAFFE2'
elif parts[0] == 'test':
if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):
return 'TEST'
return 'UNKNOWN'
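# Illustrative sketch (not part of the original script): a few hypothetical paths and the
# impact class that test_impact_of_file above would assign to them (assuming a POSIX os.sep).
#
#   test_impact_of_file('.circleci/config.yml')        # -> 'CI'
#   test_impact_of_file('docs/source/notes/faq.rst')   # -> 'NONE'
#   test_impact_of_file('torch/nn/functional.py')      # -> 'TORCH'
#   test_impact_of_file('caffe2/python/core.py')       # -> 'CAFFE2'
#   test_impact_of_file('test/test_torch.py')          # -> 'TEST'
#   test_impact_of_file('aten/src/ATen/native/README') # -> 'UNKNOWN'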
def log_test_reason(file_type, filename, test, options):
if options.verbose:
print_to_stderr(
'Determination found {} file {} -- running {}'.format(
file_type,
filename,
test,
)
)
def get_dep_modules(test):
# Cache results in case of repetition
if test in _DEP_MODULES_CACHE:
return _DEP_MODULES_CACHE[test]
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_location = os.path.join(repo_root, 'test', test + '.py')
finder = modulefinder.ModuleFinder(
# Ideally exclude all third party modules, to speed up calculation.
excludes=[
'scipy',
'numpy',
'numba',
'multiprocessing',
'sklearn',
'setuptools',
'hypothesis',
'llvmlite',
'joblib',
'email',
'importlib',
'unittest',
'urllib',
'json',
'collections',
# Modules below are excluded because they are hitting https://bugs.python.org/issue40350
# Trigger AttributeError: 'NoneType' object has no attribute 'is_package'
'mpl_toolkits',
'google',
'onnx',
# Triggers RecursionError
'mypy'
],
)
# HACK: some platforms default to ascii, so we can't just run_script :(
with open(test_location, 'r', encoding='utf-8') as fp:
finder.load_module('__main__', fp, test_location, ('', 'r', 1))
dep_modules = set(finder.modules.keys())
_DEP_MODULES_CACHE[test] = dep_modules
return dep_modules
def determine_target(target_det_list, test, touched_files, options):
test = parse_test_module(test)
# Some tests are faster to execute than to determine.
if test not in target_det_list:
if options.verbose:
print_to_stderr(f'Running {test} without determination')
return True
# HACK: "no_ninja" is not a real module
if test.endswith('_no_ninja'):
test = test[:(-1 * len('_no_ninja'))]
if test.endswith('_ninja'):
test = test[:(-1 * len('_ninja'))]
dep_modules = get_dep_modules(test)
for touched_file in touched_files:
file_type = test_impact_of_file(touched_file)
if file_type == 'NONE':
continue
elif file_type == 'CI':
# Force all tests to run if any change is made to the CI
# configurations.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type == 'UNKNOWN':
# Assume uncategorized source files can affect every test.
log_test_reason(file_type, touched_file, test, options)
return True
elif file_type in ['TORCH', 'CAFFE2', 'TEST']:
parts = os.path.splitext(touched_file)[0].split(os.sep)
touched_module = ".".join(parts)
# test/ path does not have a "test." namespace
if touched_module.startswith('test.'):
touched_module = touched_module.split('test.')[1]
if (
touched_module in dep_modules
or touched_module == test.replace('/', '.')
):
log_test_reason(file_type, touched_file, test, options)
return True
# If nothing has determined that the test should run, don't run the test.
if options.verbose:
print_to_stderr(f'Determination is skipping {test}')
return False
def run_test_module(test: str, test_directory: str, options) -> Optional[str]:
test_module = parse_test_module(test)
# Printing the date here can help diagnose which tests are slow
print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
handler = CUSTOM_HANDLERS.get(test_module, run_test)
return_code = handler(test_module, test_directory, options)
assert isinstance(return_code, int) and not isinstance(
return_code, bool), 'Return code should be an integer'
if return_code == 0:
return None
message = f'{test} failed!'
if return_code < 0:
# subprocess.Popen returns the child process' exit signal as
# return code -N, where N is the signal number.
signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
message += f' Received signal: {signal_name}'
return message
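# Illustrative note (not part of the original script): subprocesses killed by a signal
# surface here as negative return codes, e.g. on Linux a segfaulted test child returns
# -11 and the failure message would end with ' Received signal: SIGSEGV'.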
def export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:
if os.path.exists(test_times_filename):
print(f'Overwriting existing file: {test_times_filename}')
with open(test_times_filename, 'w+') as file:
job_times_json = get_job_times_json(test_times)
json.dump(job_times_json, file, indent=' ', separators=(',', ': '))
file.write('\n')
def query_changed_test_files() -> List[str]:
cmd = ["git", "diff", "--name-only", "origin/master", "HEAD"]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
lines = proc.stdout.decode().strip().split("\n")
lines = [line.strip() for line in lines]
return lines
def reorder_tests(tests: List[str]) -> List[str]:
try:
changed_files = query_changed_test_files()
except Exception:
# If unable to get changed files from git, quit without doing any sorting
return tests
prefix = f"test{os.path.sep}"
changed_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(".py")]
changed_tests = [f[len(prefix):] for f in changed_tests]
changed_tests = [f[:-len(".py")] for f in changed_tests]
bring_to_front = []
the_rest = []
for test in tests:
if test in changed_tests:
bring_to_front.append(test)
else:
the_rest.append(test)
sorted_tests = bring_to_front + the_rest
if len(sorted_tests) != len(tests):
# Something went wrong, bail out without doing any sorting
return tests
return sorted_tests
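# Illustrative sketch (not part of the original script): on a POSIX system, if the git
# diff contains the hypothetical path test/test_nn.py, then
# reorder_tests(['test_torch', 'test_nn']) would return ['test_nn', 'test_torch'],
# running the touched test file first.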
def main():
options = parse_args()
test_times_filename = options.export_past_test_times
if test_times_filename:
print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')
export_S3_test_times(test_times_filename, pull_job_times_from_S3())
return
test_directory = os.path.dirname(os.path.abspath(__file__))
selected_tests = get_selected_tests(options)
if options.verbose:
print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))
if options.coverage and not PYTORCH_COLLECT_COVERAGE:
shell(['coverage', 'erase'])
if options.jit:
selected_tests = filter(lambda test_name: "jit" in test_name, TESTS)
if options.determine_from is not None and os.path.exists(options.determine_from):
slow_tests = get_slow_tests_based_on_S3()
print('Added the following tests to target_det tests as calculated based on S3:')
print(slow_tests)
with open(options.determine_from, 'r') as fh:
touched_files = [
os.path.normpath(name.strip()) for name in fh.read().split('\n')
if len(name.strip()) > 0
]
# HACK: Ensure the 'test' paths can be traversed by Modulefinder
sys.path.append('test')
selected_tests = [
test for test in selected_tests
if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)
]
sys.path.remove('test')
selected_tests = reorder_tests(selected_tests)
has_failed = False
failure_messages = []
try:
for test in selected_tests:
options_clone = copy.deepcopy(options)
if test in USE_PYTEST_LIST:
options_clone.pytest = True
err_message = run_test_module(test, test_directory, options_clone)
if err_message is None:
continue
has_failed = True
failure_messages.append(err_message)
if not options_clone.continue_through_error:
raise RuntimeError(err_message)
print_to_stderr(err_message)
finally:
if options.coverage:
from coverage import Coverage
test_dir = os.path.dirname(os.path.abspath(__file__))
with set_cwd(test_dir):
cov = Coverage()
if PYTORCH_COLLECT_COVERAGE:
cov.load()
cov.combine(strict=False)
cov.save()
if not PYTORCH_COLLECT_COVERAGE:
cov.html_report()
if options.continue_through_error and has_failed:
for err in failure_messages:
print_to_stderr(err)
sys.exit(1)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"TEMP_DIR",
"INIT_METHOD",
"CIRCLE_JOB",
"BACKEND",
"VSCMD_ARG_TGT_ARCH",
"PYTORCH_COLLECT_COVERAGE",
"PYTHONPATH"
] |
[]
|
["TEMP_DIR", "INIT_METHOD", "CIRCLE_JOB", "BACKEND", "VSCMD_ARG_TGT_ARCH", "PYTORCH_COLLECT_COVERAGE", "PYTHONPATH"]
|
python
| 7 | 0 | |
msticpy/data/azure/sentinel_analytics.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Mixin Classes for Sentinel Analytics Features."""
from uuid import UUID, uuid4
import pandas as pd
import httpx
from IPython.display import display
from azure.common.exceptions import CloudError
from ..._version import VERSION
from ...common.exceptions import MsticpyUserError
from .azure_data import get_api_headers
from .sentinel_utils import _build_sent_data
__version__ = VERSION
__author__ = "Pete Bryan"
class SentinelHuntingMixin:
"""Mixin class for Sentinel Hunting feature integrations."""
def list_hunting_queries(self) -> pd.DataFrame:
"""
Return all hunting queries in a Microsoft Sentinel workspace.
Returns
-------
pd.DataFrame
A table of the hunting queries.
"""
saved_query_df = self._list_items( # type: ignore
item_type="alert_rules", api_version="2017-04-26-preview"
)
return saved_query_df[
saved_query_df["properties.Category"] == "Hunting Queries"
]
get_hunting_queries = list_hunting_queries
class SentinelAnalyticsMixin:
"""Mixin class for Sentinel Analytics feature integrations."""
def list_alert_rules(self) -> pd.DataFrame:
"""
Return all Microsoft Sentinel alert rules for a workspace.
Returns
-------
pd.DataFrame
A table of the workspace's alert rules.
"""
return self._list_items(item_type="alert_rules") # type: ignore
def _get_template_id(
self,
template: str,
) -> str:
"""
Get an analytic template ID.
Parameters
----------
template : str
Template ID or Name
Returns
-------
str
Template ID
Raises
------
MsticpyUserError
If template not found or multiple templates found.
"""
try:
UUID(template)
return template
except ValueError as template_name:
templates = self.list_analytic_templates()
template_details = templates[
templates["properties.displayName"].str.contains(template)
]
if len(template_details) > 1:
display(template_details[["name", "properties.displayName"]])
raise MsticpyUserError(
"More than one template found, please specify by GUID"
) from template_name
if not isinstance(template_details, pd.DataFrame) or template_details.empty:
raise MsticpyUserError(
f"Template {template_details} not found"
) from template_name
return template_details["name"].iloc[0]
def create_analytic_rule( # pylint: disable=too-many-arguments, too-many-locals
self,
template: str = None,
name: str = None,
enabled: bool = True,
query: str = None,
query_frequency: str = "PT5H",
query_period: str = "PT5H",
severity: str = "Medium",
suppression_duration: str = "PT1H",
suppression_enabled: bool = False,
trigger_operator: str = "GreaterThan",
trigger_threshold: int = 0,
description: str = None,
tactics: list = None,
):
"""
Create a Sentinel Analytics Rule.
Parameters
----------
template : str, optional
The GUID or name of a template to create the analytic from, by default None
name : str, optional
The name to give the analytic, by default None
enabled : bool, optional
Whether you want the analytic to be enabled once deployed, by default True
query : str, optional
The KQL query string to use in the analytic, by default None
query_frequency : str, optional
How often the query should run in ISO8601 format, by default "PT5H"
query_period : str, optional
How far back the query should look in ISO8601 format, by default "PT5H"
severity : str, optional
The severity to raise incidents as, by default "Medium"
Options are: Informational, Low, Medium, or High
suppression_duration : str, optional
How long to suppress duplicate alerts in ISO8601 format, by default "PT1H"
suppression_enabled : bool, optional
Whether you want to suppress duplicates, by default False
trigger_operator : str, optional
The operator for the trigger, by default "GreaterThan"
trigger_threshold : int, optional
The threshold of events required to create the incident, by default 0
description : str, optional
A description of the analytic, by default None
tactics : list, optional
A list of MITRE ATT&CK tactics related to the analytic, by default None
Raises
------
MsticpyUserError
If template provided isn't found.
CloudError
If the API returns an error.
"""
if template:
template_id = self._get_template_id(template)
templates = self.list_analytic_templates()
template_details = templates[templates["name"] == template_id].iloc[0]
name = template_details["properties.displayName"]
query = template_details["properties.query"]
query_frequency = template_details["properties.queryFrequency"]
query_period = template_details["properties.queryPeriod"]
severity = template_details["properties.severity"]
trigger_operator = template_details["properties.triggerOperator"]
trigger_threshold = template_details["properties.triggerThreshold"]
description = template_details["properties.description"]
tactics = (
template_details["properties.tactics"]
if not pd.isna(template_details["properties.tactics"])
else []
)
if not tactics:
tactics = []
if not name:
raise MsticpyUserError(
"Please specify either a template ID or analytic details."
)
rule_id = uuid4()
analytic_url = self.sent_urls["alert_rules"] + f"/{rule_id}" # type: ignore
data_items = {
"displayName": name,
"query": query,
"queryFrequency": query_frequency,
"queryPeriod": query_period,
"severity": severity,
"suppressionDuration": suppression_duration,
"suppressionEnabled": str(suppression_enabled).lower(),
"triggerOperator": trigger_operator,
"triggerThreshold": trigger_threshold,
"description": description,
"tactics": tactics,
"enabled": str(enabled).lower(),
}
data = _build_sent_data(data_items, props=True)
data["kind"] = "Scheduled"
params = {"api-version": "2020-01-01"}
response = httpx.put(
analytic_url,
headers=get_api_headers(self.token), # type: ignore
params=params,
content=str(data),
)
if response.status_code != 201:
raise CloudError(response=response)
print("Analytic Created.")
def _get_analytic_id(self, analytic: str) -> str:
"""
Get the GUID of an analytic rule.
Parameters
----------
analytic : str
The GUID or name of the analytic
Returns
-------
str
The analytic GUID
Raises
------
MsticpyUserError
If analytic not found or multiple matching analytics found
"""
try:
UUID(analytic)
return analytic
except ValueError as analytic_name:
analytics = self.list_analytic_rules()
analytic_details = analytics[
analytics["properties.displayName"].str.contains(analytic)
]
if len(analytic_details) > 1:
display(analytic_details[["name", "properties.displayName"]])
raise MsticpyUserError(
"More than one analytic found, please specify by GUID"
) from analytic_name
if not isinstance(analytic_details, pd.DataFrame) or analytic_details.empty:
raise MsticpyUserError(
f"Analytic {analytic_details} not found"
) from analytic_name
return analytic_details["name"].iloc[0]
def delete_analytic_rule(
self,
analytic_rule: str,
):
"""
Delete a deployed Analytic rule from a Sentinel workspace.
Parameters
----------
analytic_rule : str
The GUID or name of the analytic.
Raises
------
CloudError
If the API returns an error.
"""
analytic_id = self._get_analytic_id(analytic_rule)
analytic_url = self.sent_urls["alert_rules"] + f"/{analytic_id}" # type: ignore
params = {"api-version": "2020-01-01"}
response = httpx.delete(
analytic_url,
headers=get_api_headers(self.token), # type: ignore
params=params,
)
if response.status_code != 200:
raise CloudError(response=response)
print("Analytic Deleted.")
def list_analytic_templates(self) -> pd.DataFrame:
"""
List Analytic Templates.
Returns
-------
pd.DataFrame
A DataFrame containing the analytics templates
Raises
------
CloudError
If a valid result is not returned.
"""
return self._list_items(item_type="alert_template") # type: ignore
get_alert_rules = list_alert_rules
list_analytic_rules = list_alert_rules
get_analytic_rules = list_alert_rules
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
azuredevops/internal/acceptancetests/data_client_config_test.go
|
// +build all core
package acceptancetests
import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/terraform-providers/terraform-provider-azuredevops/azuredevops/internal/acceptancetests/testutils"
)
// Verifies that the client config data source loads the configured AzDO org
func TestAccClientConfig_LoadsCorrectProperties(t *testing.T) {
tfNode := "data.azuredevops_client_config.c"
resource.Test(t, resource.TestCase{
PreCheck: func() { testutils.PreCheck(t, nil) },
Providers: testutils.GetProviders(),
Steps: []resource.TestStep{
{
Config: `data "azuredevops_client_config" "c" {}`,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet(tfNode, "id"),
resource.TestCheckResourceAttr(tfNode, "organization_url", os.Getenv("AZDO_ORG_SERVICE_URL")),
),
},
},
})
}
|
[
"\"AZDO_ORG_SERVICE_URL\""
] |
[] |
[
"AZDO_ORG_SERVICE_URL"
] |
[]
|
["AZDO_ORG_SERVICE_URL"]
|
go
| 1 | 0 | |
bsp/lpc408x/rtconfig.py
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'D:/xxx'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'D:/Keil'
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
print('Not support iar yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=' + CPU + ' -mthumb '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T drivers/linker_scripts/link.lds'
CXXFLAGS = CFLAGS
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu ' + CPU + '.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "drivers/linker_scripts/link.sct" --info sizes --info totals --info unused --info veneers --list rtthread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'
CXXFLAGS = CFLAGS
EXEC_PATH += '/arm/armcc/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
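# Illustrative note (not part of the original file): RT-Thread BSPs are normally built
# with scons, which imports this rtconfig.py; the toolchain is selected through the
# RTT_CC and RTT_EXEC_PATH environment variables, e.g. (paths are hypothetical):
#
#   export RTT_CC=gcc
#   export RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin
#   scons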
|
[] |
[] |
[
"RTT_CC",
"RTT_EXEC_PATH"
] |
[]
|
["RTT_CC", "RTT_EXEC_PATH"]
|
python
| 2 | 0 | |
Tests/Marketplace/upload_packs.py
|
import json
import os
import sys
import argparse
import shutil
import uuid
import prettytable
import glob
import requests
import logging
from datetime import datetime
from google.cloud.storage import Bucket
from zipfile import ZipFile
from typing import Any, Tuple, Union, Optional
from Tests.Marketplace.marketplace_services import init_storage_client, Pack, \
load_json, get_content_git_client, get_recent_commits_data, store_successful_and_failed_packs_in_ci_artifacts, \
json_write
from Tests.Marketplace.marketplace_statistics import StatisticsHandler
from Tests.Marketplace.marketplace_constants import PackStatus, Metadata, GCPConfig, BucketUploadFlow, \
CONTENT_ROOT_PATH, PACKS_FOLDER, PACKS_FULL_PATH, IGNORED_FILES, IGNORED_PATHS, LANDING_PAGE_SECTIONS_PATH
from demisto_sdk.commands.common.tools import run_command, str2bool
from Tests.scripts.utils.log_util import install_logging
def get_packs_names(target_packs: str, previous_commit_hash: str = "HEAD^") -> set:
"""Detects and returns packs names to upload.
In case `Modified` is passed as the target_packs input, checks the git difference between two commits,
the current and the previous one, and greps only the paths with the Packs/ prefix.
By default this function receives `All` as target_packs and returns all pack names from the content repo.
Args:
target_packs (str): csv packs names or `All` for all available packs in content
or `Modified` for only modified packs (currently not in use).
previous_commit_hash (str): the previous commit to diff with.
Returns:
set: unique collection of packs names to upload.
"""
if target_packs.lower() == "all":
if os.path.exists(PACKS_FULL_PATH):
all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(all_packs)}")
# return all available packs names
return all_packs
else:
logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}")
sys.exit(1)
elif target_packs.lower() == "modified":
cmd = f"git diff --name-only HEAD..{previous_commit_hash} | grep 'Packs/'"
modified_packs_path = run_command(cmd).splitlines()
modified_packs = {p.split('/')[1] for p in modified_packs_path if p not in IGNORED_PATHS}
logging.info(f"Number of modified packs is: {len(modified_packs)}")
# return only modified packs between two commits
return modified_packs
elif target_packs and isinstance(target_packs, str):
modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES}
logging.info(f"Number of selected packs to upload is: {len(modified_packs)}")
# return only packs from csv list
return modified_packs
else:
logging.critical("Not correct usage of flag -p. Please check help section of upload packs script.")
sys.exit(1)
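# Illustrative usage sketch (not part of the original script); the pack names below are
# hypothetical:
#
#   get_packs_names("All")                        # every folder under Packs/ except ignored files
#   get_packs_names("Base, CommonScripts")        # -> {"Base", "CommonScripts"}
#   get_packs_names("Modified", "origin/master")  # packs changed since the given commit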
def extract_packs_artifacts(packs_artifacts_path: str, extract_destination_path: str):
"""Extracts all packs from content pack artifact zip.
Args:
packs_artifacts_path (str): full path to content artifacts zip file.
extract_destination_path (str): full path to directory where to extract the packs.
"""
with ZipFile(packs_artifacts_path) as packs_artifacts:
packs_artifacts.extractall(extract_destination_path)
logging.info("Finished extracting packs artifacts")
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
"""Downloads and extracts index zip from cloud storage.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
extract_destination_path (str): the full path of extract folder.
Returns:
str: extracted index folder full path.
Blob: google cloud storage object that represents index.zip blob.
int: downloaded index generation.
"""
if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
else:
index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
index_blob = storage_bucket.blob(index_storage_path)
index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
index_generation = 0 # Setting to 0 makes the operation succeed only if there are no live versions of the blob
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
if not index_blob.exists():
os.mkdir(index_folder_path)
logging.error(f"{storage_bucket.name} index blob does not exists")
return index_folder_path, index_blob, index_generation
index_blob.reload()
index_generation = index_blob.generation
index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
if os.path.exists(download_index_path):
with ZipFile(download_index_path, 'r') as index_zip:
index_zip.extractall(extract_destination_path)
if not os.path.exists(index_folder_path):
logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
sys.exit(1)
os.remove(download_index_path)
logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
f"{extract_destination_path}")
return index_folder_path, index_blob, index_generation
else:
logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
sys.exit(1)
def update_index_folder(index_folder_path: str, pack_name: str, pack_path: str, pack_version: str = '',
hidden_pack: bool = False) -> bool:
"""
Copies pack folder into index folder.
Args:
index_folder_path (str): full path to index folder.
pack_name (str): pack folder name to copy.
pack_path (str): pack folder full path.
pack_version (str): pack latest version.
hidden_pack (bool): whether pack is hidden/internal or regular pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
index_folder_subdirectories = [d for d in os.listdir(index_folder_path) if
os.path.isdir(os.path.join(index_folder_path, d))]
index_pack_path = os.path.join(index_folder_path, pack_name)
metadata_files_in_index = glob.glob(f"{index_pack_path}/metadata-*.json")
new_metadata_path = os.path.join(index_pack_path, f"metadata-{pack_version}.json")
if pack_version:
# Update the latest metadata
if new_metadata_path in metadata_files_in_index:
metadata_files_in_index.remove(new_metadata_path)
# Remove old files but keep metadata files
if pack_name in index_folder_subdirectories:
for d in os.scandir(index_pack_path):
if d.path not in metadata_files_in_index:
os.remove(d.path)
# skipping index update in case hidden is set to True
if hidden_pack:
if os.path.exists(index_pack_path):
shutil.rmtree(index_pack_path) # remove pack folder inside index in case that it exists
logging.warning(f"Skipping updating {pack_name} pack files to index")
task_status = True
return True
# Copy new files and add metadata for latest version
for d in os.scandir(pack_path):
if not os.path.exists(index_pack_path):
os.mkdir(index_pack_path)
logging.info(f"Created {pack_name} pack folder in {GCPConfig.INDEX_NAME}")
shutil.copy(d.path, index_pack_path)
if pack_version and Pack.METADATA == d.name:
shutil.copy(d.path, new_metadata_path)
task_status = True
except Exception:
logging.exception(f"Failed in updating index folder for {pack_name} pack.")
finally:
return task_status
def clean_non_existing_packs(index_folder_path: str, private_packs: list, storage_bucket: Any) -> bool:
""" Detects packs that are not part of content repo or from private packs bucket.
In case such packs were detected, problematic pack is deleted from index and from content/packs/{target_pack} path.
Args:
index_folder_path (str): full path to downloaded index folder.
private_packs (list): priced packs from private bucket.
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
Returns:
bool: whether cleanup was skipped or not.
"""
if ('CI' not in os.environ) or (
os.environ.get('CI_COMMIT_BRANCH') != 'master' and storage_bucket.name == GCPConfig.PRODUCTION_BUCKET) or (
os.environ.get('CI_COMMIT_BRANCH') == 'master' and storage_bucket.name not in
(GCPConfig.PRODUCTION_BUCKET, GCPConfig.CI_BUILD_BUCKET)):
logging.info("Skipping cleanup of packs in gcs.") # skipping execution of cleanup in gcs bucket
return True
public_packs_names = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
private_packs_names = {p.get('id', '') for p in private_packs}
valid_packs_names = public_packs_names.union(private_packs_names)
# search for invalid packs folder inside index
invalid_packs_names = {(entry.name, entry.path) for entry in os.scandir(index_folder_path) if
entry.name not in valid_packs_names and entry.is_dir()}
if invalid_packs_names:
try:
logging.warning(f"Detected {len(invalid_packs_names)} non existing pack inside index, starting cleanup.")
for invalid_pack in invalid_packs_names:
invalid_pack_name = invalid_pack[0]
invalid_pack_path = invalid_pack[1]
# remove pack from index
shutil.rmtree(invalid_pack_path)
logging.warning(f"Deleted {invalid_pack_name} pack from {GCPConfig.INDEX_NAME} folder")
# important to add trailing slash at the end of path in order to avoid packs with same prefix
invalid_pack_gcs_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, invalid_pack_name, "") # by design
for invalid_blob in [b for b in storage_bucket.list_blobs(prefix=invalid_pack_gcs_path)]:
logging.warning(f"Deleted invalid {invalid_pack_name} pack under url {invalid_blob.public_url}")
invalid_blob.delete() # delete invalid pack in gcs
except Exception:
logging.exception("Failed to cleanup non existing packs.")
else:
logging.info(f"No invalid packs detected inside {GCPConfig.INDEX_NAME} folder")
return False
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None, landing_page_sections: dict = None,
artifacts_dir: Optional[str] = None,
storage_bucket: Optional[Bucket] = None,
):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: CI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:param landing_page_sections: landingPage sections.
:param artifacts_dir: The CI artifacts directory to upload the index.json to.
:param storage_bucket: The storage bucket object
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
logging.info('Force upload flow - Index commit hash should not be changed')
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
logging.info('Updating production index commit hash to master last commit hash')
if not landing_page_sections:
landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)
logging.debug(f'commit hash is: {commit}')
index_json_path = os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json')
logging.info(f'index json path: {index_json_path}')
logging.info(f'Private packs are: {private_packs}')
with open(index_json_path, "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit,
'landingPage': {'sections': landing_page_sections.get('sections', [])}
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
logging.info(f'index zip path: {index_zip_path}')
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
if is_private or current_index_generation == index_generation:
# we upload both index.json and the index.zip to allow usage of index.json without having to unzip
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation.")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
if artifacts_dir:
# Store index.json in CircleCI artifacts
shutil.copyfile(
os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json'),
os.path.join(artifacts_dir, f'{GCPConfig.INDEX_NAME}.json'),
)
shutil.rmtree(index_folder_path)
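# Illustrative note (not part of the original script): the generation comparison above is
# an optimistic-concurrency guard -- download_and_extract_index records the index.zip blob
# generation at download time, and for public buckets the upload only proceeds if the blob
# generation has not changed in the meantime.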
def create_corepacks_config(storage_bucket: Any, build_number: str, index_folder_path: str,
artifacts_dir: Optional[str]):
"""Create corepacks.json file to artifacts dir. Corepacks file includes core packs for server installation.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
build_number (str): circleCI build number.
index_folder_path (str): The index folder path.
artifacts_dir: The CI artifacts directory to upload the corepacks.json to.
"""
core_packs_public_urls = []
found_core_packs = set()
for pack in os.scandir(index_folder_path):
if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
if not os.path.exists(pack_metadata_path):
logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
sys.exit(1)
with open(pack_metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
pack_current_version, f"{pack.name}.zip")
core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
if not storage_bucket.blob(core_pack_relative_path).exists():
logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
sys.exit(1)
core_packs_public_urls.append(core_pack_public_url)
found_core_packs.add(pack.name)
if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
logging.critical(f"Number of defined core packs are: {len(GCPConfig.CORE_PACKS_LIST)}")
logging.critical(f"Actual number of found core packs are: {len(found_core_packs)}")
logging.critical(f"Missing core packs are: {missing_core_packs}")
sys.exit(1)
corepacks_json_path = os.path.join(artifacts_dir, GCPConfig.CORE_PACK_FILE_NAME)
core_packs_data = {
'corePacks': core_packs_public_urls,
'buildNumber': build_number
}
json_write(corepacks_json_path, core_packs_data)
logging.success(f"Finished copying {GCPConfig.CORE_PACK_FILE_NAME} to artifacts.")
def _build_summary_table(packs_input_list: list, include_pack_status: bool = False) -> Any:
"""Build summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether pack includes status
Returns:
PrettyTable: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Aggregated Pack Versions"]
if include_pack_status:
table_fields.append("Status")
table = prettytable.PrettyTable()
table.field_names = table_fields
for index, pack in enumerate(packs_input_list, start=1):
pack_status_message = PackStatus[pack.status].value
row = [index, pack.name, pack.display_name, pack.latest_version,
pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"]
if include_pack_status:
row.append(pack_status_message)
table.add_row(row)
return table
def build_summary_table_md(packs_input_list: list, include_pack_status: bool = False) -> str:
"""Build markdown summary table from pack list
Args:
packs_input_list (list): list of Packs
include_pack_status (bool): whether pack includes status
Returns:
Markdown table: table with upload result of packs.
"""
table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Status"] if include_pack_status \
else ["Index", "Pack ID", "Pack Display Name", "Latest Version"]
table = ['|', '|']
for key in table_fields:
table[0] = f'{table[0]} {key} |'
table[1] = f'{table[1]} :- |'
for index, pack in enumerate(packs_input_list):
pack_status_message = PackStatus[pack.status].value if include_pack_status else ''
row = [index, pack.name, pack.display_name, pack.latest_version, pack_status_message] if include_pack_status \
else [index, pack.name, pack.display_name, pack.latest_version]
row_hr = '|'
for _value in row:
row_hr = f'{row_hr} {_value}|'
table.append(row_hr)
return '\n'.join(table)
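# Illustrative sketch (not part of the original script): for two hypothetical packs the
# function above produces a markdown table shaped like:
#
#   | Index | Pack ID | Pack Display Name | Latest Version |
#   | :- | :- | :- | :- |
#   | 0| Base| Base| 1.2.3|
#   | 1| CommonScripts| Common Scripts| 4.5.6|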
def add_private_content_to_index(private_index_path: str, extract_destination_path: str, index_folder_path: str,
pack_names: set) -> Tuple[list, list]:
""" Adds a list of priced packs data-structures to the public index.json file.
This step should not be skipped even if there are no new or updated private packs.
Args:
private_index_path: path to where the private index is located.
extract_destination_path (str): full path to extract directory.
index_folder_path (str): downloaded index folder directory path.
pack_names (set): collection of pack names.
Returns:
list: priced packs from private bucket.
list: IDs of private packs that were updated.
"""
private_packs = []
updated_private_packs = []
try:
private_packs = get_private_packs(private_index_path, pack_names,
extract_destination_path)
updated_private_packs = get_updated_private_packs(private_packs, index_folder_path)
add_private_packs_to_index(index_folder_path, private_index_path)
except Exception as e:
logging.exception(f"Could not add private packs to the index. Additional Info: {str(e)}")
finally:
logging.info("Finished updating index with priced packs")
shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
return private_packs, updated_private_packs
def get_updated_private_packs(private_packs, index_folder_path):
""" Checks for updated private packs by compering contentCommitHash between public index json and private pack
metadata files.
Args:
private_packs (list): List of dicts containing pack metadata information.
index_folder_path (str): The public index folder path.
Returns:
updated_private_packs (list) : a list of all private packs id's that were updated.
"""
updated_private_packs = []
public_index_file_path = os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")
public_index_json = load_json(public_index_file_path)
private_packs_from_public_index = public_index_json.get("packs", {})
for pack in private_packs:
private_pack_id = pack.get('id')
private_commit_hash_from_metadata = pack.get('contentCommitHash', "")
private_commit_hash_from_content_repo = ""
for public_pack in private_packs_from_public_index:
if public_pack.get('id') == private_pack_id:
private_commit_hash_from_content_repo = public_pack.get('contentCommitHash', "")
private_pack_was_updated = private_commit_hash_from_metadata != private_commit_hash_from_content_repo
if private_pack_was_updated:
updated_private_packs.append(private_pack_id)
logging.debug(f"Updated private packs are: {updated_private_packs}")
return updated_private_packs
def get_private_packs(private_index_path: str, pack_names: set = set(),
extract_destination_path: str = '') -> list:
"""
Gets a list of private packs.
:param private_index_path: Path to where the private index is located.
:param pack_names: Collection of pack names.
:param extract_destination_path: Path to where the files should be extracted to.
:return: List of dicts containing pack metadata information.
"""
logging.info(f'getting all private packs. private_index_path: {private_index_path}')
try:
metadata_files = glob.glob(f"{private_index_path}/**/metadata.json")
except Exception:
logging.exception(f'Could not find metadata files in {private_index_path}.')
return []
if not metadata_files:
logging.warning(f'No metadata files found in [{private_index_path}]')
private_packs = []
logging.info(f'all metadata files found: {metadata_files}')
for metadata_file_path in metadata_files:
try:
with open(metadata_file_path, "r") as metadata_file:
metadata = json.load(metadata_file)
pack_id = metadata.get('id')
is_changed_private_pack = pack_id in pack_names
if is_changed_private_pack: # Should take metadata from artifacts.
with open(os.path.join(extract_destination_path, pack_id, "pack_metadata.json"),
"r") as metadata_file:
metadata = json.load(metadata_file)
logging.info(f'metadata of changed private pack: {metadata}')
if metadata:
private_packs.append({
'id': metadata.get('id') if not is_changed_private_pack else metadata.get('name'),
'price': metadata.get('price'),
'vendorId': metadata.get('vendorId', ""),
'partnerId': metadata.get('partnerId', ""),
'partnerName': metadata.get('partnerName', ""),
'contentCommitHash': metadata.get('contentCommitHash', "")
})
except ValueError:
logging.exception(f'Invalid JSON in the metadata file [{metadata_file_path}].')
return private_packs
def add_private_packs_to_index(index_folder_path: str, private_index_path: str):
""" Add the private packs to the index folder.
Args:
index_folder_path: The index folder path.
private_index_path: The path for the index of the private packs.
"""
for d in os.scandir(private_index_path):
if os.path.isdir(d.path):
update_index_folder(index_folder_path, d.name, d.path)
def is_private_packs_updated(public_index_json, private_index_path):
""" Checks whether there were changes in private packs from the last upload.
The check compares the `content commit hash` field in the public index with the value stored in the private index.
If there is at least one private pack that has been updated/released, the upload should be performed and not
skipped.
Args:
public_index_json (dict) : The public index.json file.
private_index_path (str): Path to where the private index.zip is located.
Returns:
is_private_packs_updated (bool): True if there is at least one private pack that was updated/released,
False otherwise (i.e. there are no private packs that have been updated/released).
"""
logging.debug("Checking if there are updated private packs")
private_index_file_path = os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")
private_index_json = load_json(private_index_file_path)
private_packs_from_private_index = private_index_json.get("packs")
private_packs_from_public_index = public_index_json.get("packs")
if len(private_packs_from_private_index) != len(private_packs_from_public_index):
# private pack was added or deleted
logging.debug("There is at least one private pack that was added/deleted, upload should not be skipped.")
return True
id_to_commit_hash_from_public_index = {private_pack.get("id"): private_pack.get("contentCommitHash", "") for
private_pack in private_packs_from_public_index}
for private_pack in private_packs_from_private_index:
pack_id = private_pack.get("id")
content_commit_hash = private_pack.get("contentCommitHash", "")
if id_to_commit_hash_from_public_index.get(pack_id) != content_commit_hash:
logging.debug("There is at least one private pack that was updated, upload should not be skipped.")
return True
logging.debug("No private packs were changed")
return False
def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str,
previous_commit_hash: str, storage_bucket: Any,
is_private_content_updated: bool = False):
""" Checks stored at index.json commit hash and compares it to current commit hash. In case no packs folders were
added/modified/deleted, all other steps are not performed.
Args:
index_folder_path (str): index folder full path.
content_repo (git.repo.base.Repo): content repo object.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with
storage_bucket: public storage bucket.
is_private_content_updated (bool): True if private content updated, False otherwise.
"""
skipping_build_task_message = "Skipping Upload Packs To Marketplace Storage Step."
try:
if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET):
logging.info("Skipping index update check in non production/build bucket")
return
if is_private_content_updated:
logging.debug("Skipping index update as Private Content has updated.")
return
if not os.path.exists(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")):
# will happen only in init bucket run
logging.warning(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
return
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")) as index_file:
index_json = json.load(index_file)
index_commit_hash = index_json.get('commit', previous_commit_hash)
try:
index_commit = content_repo.commit(index_commit_hash)
except Exception:
# an outdated build will receive this exception because it is missing the more recent commit
logging.exception(f"Index is already updated. {skipping_build_task_message}")
sys.exit()
current_commit = content_repo.commit(current_commit_hash)
if current_commit.committed_datetime <= index_commit.committed_datetime:
logging.warning(
f"Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}")
logging.warning(f"Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}")
logging.warning("Index is already updated.")
logging.warning(skipping_build_task_message)
sys.exit()
for changed_file in current_commit.diff(index_commit):
if changed_file.a_path.startswith(PACKS_FOLDER):
logging.info(
f"Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}")
break
else:
logging.warning(f"No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}")
logging.warning(skipping_build_task_message)
sys.exit()
except Exception:
logging.exception("Failed in checking status of index")
sys.exit(1)
def print_packs_summary(successful_packs: list, skipped_packs: list, failed_packs: list,
fail_build: bool = True):
"""Prints summary of packs uploaded to gcs.
Args:
successful_packs (list): list of packs that were successfully uploaded.
skipped_packs (list): list of packs that were skipped during upload.
failed_packs (list): list of packs that were failed during upload.
fail_build (bool): indicates whether to fail the build upon failing pack to upload or not
"""
logging.info(
f"""\n
------------------------------------------ Packs Upload Summary ------------------------------------------
Total number of packs: {len(successful_packs + skipped_packs + failed_packs)}
----------------------------------------------------------------------------------------------------------""")
if successful_packs:
successful_packs_table = _build_summary_table(successful_packs)
logging.success(f"Number of successful uploaded packs: {len(successful_packs)}")
logging.success(f"Uploaded packs:\n{successful_packs_table}")
with open('pack_list.txt', 'w') as f:
f.write(successful_packs_table.get_string())
if skipped_packs:
skipped_packs_table = _build_summary_table(skipped_packs, include_pack_status=True)
logging.warning(f"Number of skipped packs: {len(skipped_packs)}")
logging.warning(f"Skipped packs:\n{skipped_packs_table}")
if failed_packs:
failed_packs_table = _build_summary_table(failed_packs, include_pack_status=True)
logging.critical(f"Number of failed packs: {len(failed_packs)}")
logging.critical(f"Failed packs:\n{failed_packs_table}")
if fail_build:
# We don't want the bucket upload flow to fail in Prepare Content step if a pack has failed to upload.
sys.exit(1)
# for external pull requests - when there is no failed packs, add the build summary to the pull request
branch_name = os.environ.get('CI_COMMIT_BRANCH')
if branch_name and branch_name.startswith('pull/'):
successful_packs_table = build_summary_table_md(successful_packs)
build_num = os.environ['CI_BUILD_ID']
bucket_path = f'https://console.cloud.google.com/storage/browser/' \
f'marketplace-ci-build/content/builds/{branch_name}/{build_num}'
pr_comment = f'Number of successful uploaded packs: {len(successful_packs)}\n' \
f'Uploaded packs:\n{successful_packs_table}\n\n' \
f'Browse to the build bucket with this address:\n{bucket_path}'
add_pr_comment(pr_comment)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True)
parser.add_argument('-b', '--bucket_name', help="Storage bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-d', '--pack_dependencies', help="Full path to pack dependencies json file.", required=False)
parser.add_argument('-p', '--pack_names',
help=("Target packs to upload to gcs. Optional values are: `All`, "
"`Modified` or csv list of packs "
"Default is set to `All`"),
required=False, default="All")
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=False)
parser.add_argument('-o', '--override_all_packs', help="Override all existing packs in cloud storage",
type=str2bool, default=False, required=True)
parser.add_argument('-k', '--key_string', help="Base64 encoded signature key used for signing packs.",
required=False)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-rt', '--remove_test_playbooks', type=str2bool,
help='Should remove test playbooks from content packs or not.', default=True)
parser.add_argument('-bu', '--bucket_upload', help='is bucket upload build?', type=str2bool, required=True)
parser.add_argument('-pb', '--private_bucket_name', help="Private storage bucket name", required=False)
parser.add_argument('-c', '--ci_branch', help="CI branch of current build", required=True)
parser.add_argument('-f', '--force_upload', help="is force upload build?", type=str2bool, required=True)
# disable-secrets-detection-end
return parser.parse_args()
def add_pr_comment(comment: str):
"""Add comment to the pull request.
Args:
comment (string): The comment text.
"""
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CI_COMMIT_BRANCH']
sha1 = os.environ['CI_COMMIT_SHA']
query = f'?q={sha1}+repo:demisto/content+is:pr+is:open+head:{branch_name}+is:open'
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
logging.warning(
f'Add pull request comment failed: There is more than one open pull request for branch {branch_name}.')
except Exception:
logging.exception('Add pull request comment failed.')
def handle_github_response(response: requests.Response) -> dict:
"""
Handles the response from the GitHub server after making a request.
:param response: Response from the server.
:return: The returned response.
"""
res_dict = response.json()
if not res_dict.get('ok'):
logging.warning(f'Add pull request comment failed: {res_dict.get("message")}')
return res_dict
def get_packs_summary(packs_list):
""" Returns the packs list divided into 3 lists by their status
Args:
packs_list (list): The full packs list
Returns: 3 lists of packs - successful_packs, skipped_packs & failed_packs
"""
successful_packs = [pack for pack in packs_list if pack.status == PackStatus.SUCCESS.name]
skipped_packs = [pack for pack in packs_list if
pack.status == PackStatus.PACK_ALREADY_EXISTS.name
or pack.status == PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name]
failed_packs = [pack for pack in packs_list if pack not in successful_packs and pack not in skipped_packs]
return successful_packs, skipped_packs, failed_packs
def handle_private_content(public_index_folder_path, private_bucket_name, extract_destination_path, storage_client,
public_pack_names) -> Tuple[bool, list, list]:
"""
1. Add private packs to public index.json.
2. Checks if there are private packs that were added/deleted/updated.
Args:
public_index_folder_path: extracted public index folder full path.
private_bucket_name: Private storage bucket name
extract_destination_path: full path to extract directory.
storage_client : initialized google cloud storage client.
public_pack_names : unique collection of public packs names to upload.
Returns:
is_private_content_updated (bool): True if there is at least one private pack that was updated/released.
False otherwise (i.e. there are no private packs that have been updated/released).
private_packs (list) : priced packs from private bucket.
updated_private_packs_ids (list): all private packs id's that were updated.
"""
if private_bucket_name:
private_storage_bucket = storage_client.bucket(private_bucket_name)
private_index_path, _, _ = download_and_extract_index(
private_storage_bucket, os.path.join(extract_destination_path, "private")
)
public_index_json_file_path = os.path.join(public_index_folder_path, f"{GCPConfig.INDEX_NAME}.json")
public_index_json = load_json(public_index_json_file_path)
if public_index_json:
are_private_packs_updated = is_private_packs_updated(public_index_json, private_index_path)
private_packs, updated_private_packs_ids = add_private_content_to_index(
private_index_path, extract_destination_path, public_index_folder_path, public_pack_names
)
return are_private_packs_updated, private_packs, updated_private_packs_ids
else:
logging.error(f"Public {GCPConfig.INDEX_NAME}.json was found empty.")
sys.exit(1)
else:
return False, [], []
def get_images_data(packs_list: list):
""" Returns a data structure of all packs that an integration/author image of them was uploaded
Args:
packs_list (list): The list of all packs
Returns:
The images data structure
"""
images_data = {}
for pack in packs_list:
pack_image_data = {pack.name: {}}
if pack.uploaded_author_image:
pack_image_data[pack.name][BucketUploadFlow.AUTHOR] = True
if pack.uploaded_integration_images:
pack_image_data[pack.name][BucketUploadFlow.INTEGRATIONS] = pack.uploaded_integration_images
if pack_image_data[pack.name]:
images_data.update(pack_image_data)
return images_data
def main():
install_logging('Prepare_Content_Packs_For_Testing.log')
option = option_handler()
packs_artifacts_path = option.artifacts_path
extract_destination_path = option.extract_path
storage_bucket_name = option.bucket_name
service_account = option.service_account
target_packs = option.pack_names if option.pack_names else ""
build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
override_all_packs = option.override_all_packs
signature_key = option.key_string
packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
storage_base_path = option.storage_base_path
remove_test_playbooks = option.remove_test_playbooks
is_bucket_upload_flow = option.bucket_upload
private_bucket_name = option.private_bucket_name
ci_branch = option.ci_branch
force_upload = option.force_upload
# google cloud storage client initialized
storage_client = init_storage_client(service_account)
storage_bucket = storage_client.bucket(storage_bucket_name)
if storage_base_path:
GCPConfig.STORAGE_BASE_PATH = storage_base_path
# Relevant when triggering test upload flow
if storage_bucket_name:
GCPConfig.PRODUCTION_BUCKET = storage_bucket_name
# download and extract index from public bucket
index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
extract_destination_path)
# content repo client initialized
content_repo = get_content_git_client(CONTENT_ROOT_PATH)
current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
is_bucket_upload_flow, ci_branch)
# detect packs to upload
pack_names = get_packs_names(target_packs, previous_commit_hash)
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
if os.path.exists(os.path.join(extract_destination_path, pack_name))]
diff_files_list = content_repo.commit(current_commit_hash).diff(content_repo.commit(previous_commit_hash))
# taking care of private packs
is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
)
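# unless all packs are being overridden, verify that the index is up to date before it is modified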
if not option.override_all_packs:
check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
storage_bucket, is_private_content_updated)
# initiate the statistics handler for marketplace packs
statistics_handler = StatisticsHandler(service_account, index_folder_path)
# clean index and gcs from non existing or invalid packs
clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)
# Packages that depend on new packs that are not in the previous index.json
packs_missing_dependencies = []
# starting iteration over packs
for pack in packs_list:
task_status = pack.load_user_metadata()
if not task_status:
pack.status = PackStatus.FAILED_LOADING_USER_METADATA.value
pack.cleanup()
continue
task_status = pack.collect_content_items()
if not task_status:
pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
pack.cleanup()
continue
task_status = pack.upload_integration_images(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
pack.cleanup()
continue
task_status = pack.upload_author_image(storage_bucket, diff_files_list, True)
if not task_status:
pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
pack.cleanup()
continue
task_status, modified_pack_files_paths, pack_was_modified = pack.detect_modified(
content_repo, index_folder_path, current_commit_hash, previous_commit_hash)
if not task_status:
pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
pack.cleanup()
continue
task_status, is_missing_dependencies = pack.format_metadata(index_folder_path,
packs_dependencies_mapping, build_number,
current_commit_hash, pack_was_modified,
statistics_handler, pack_names)
if is_missing_dependencies:
# If the pack depends on a new pack
# (which is not yet in index.zip because it might not have been iterated over yet),
# note that it is missing dependencies.
# After all packs have been added to index.zip (so the new pack exists there),
# we go over this pack again to fill in what was missing.
# See issue #37290
packs_missing_dependencies.append(pack)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_PARSING.name
pack.cleanup()
continue
task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified,
modified_pack_files_paths)
if not task_status:
pack.status = PackStatus.FAILED_RELEASE_NOTES.name
pack.cleanup()
continue
if not_updated_build:
pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
pack.cleanup()
continue
task_status = pack.remove_unwanted_files(remove_test_playbooks)
if not task_status:
pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
pack.cleanup()
continue
task_status = pack.sign_pack(signature_key)
if not task_status:
pack.status = PackStatus.FAILED_SIGNING_PACKS.name
pack.cleanup()
continue
task_status, zip_pack_path = pack.zip_pack()
if not task_status:
pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
pack.cleanup()
continue
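# upload the pack zip; the upload may be skipped (e.g. the version already exists) unless override is set or the pack was modified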
task_status, skipped_upload, _ = pack.upload_to_storage(zip_pack_path, pack.latest_version, storage_bucket,
override_all_packs or pack_was_modified)
if not task_status:
pack.status = PackStatus.FAILED_UPLOADING_PACK.name
pack.cleanup()
continue
task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
if not task_status:
pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
pack.cleanup()
continue
task_status = pack.prepare_for_index_upload()
if not task_status:
pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
# if the pack already exists in the cloud storage path and in the index, don't mark it as changed
if skipped_upload and exists_in_index and pack not in packs_missing_dependencies:
pack.status = PackStatus.PACK_ALREADY_EXISTS.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
logging.info(f"packs_missing_dependencies: {packs_missing_dependencies}")
# Going over all packs that were marked as missing dependencies,
# updating them with the new data for the new packs that were added to the index.zip
for pack in packs_missing_dependencies:
task_status, _ = pack.format_metadata(index_folder_path, packs_dependencies_mapping,
build_number, current_commit_hash, False, statistics_handler,
pack_names, format_dependencies_only=True)
if not task_status:
pack.status = PackStatus.FAILED_METADATA_REFORMATING.name
pack.cleanup()
continue
task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
pack_version=pack.latest_version, hidden_pack=pack.hidden)
if not task_status:
pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
pack.cleanup()
continue
pack.status = PackStatus.SUCCESS.name
# upload core packs json to bucket
create_corepacks_config(storage_bucket, build_number, index_folder_path,
artifacts_dir=os.path.dirname(packs_artifacts_path))
# finished iteration over content packs
upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
index_blob=index_blob, build_number=build_number, private_packs=private_packs,
current_commit_hash=current_commit_hash, index_generation=index_generation,
force_upload=force_upload, previous_commit_hash=previous_commit_hash,
landing_page_sections=statistics_handler.landing_page_sections,
artifacts_dir=os.path.dirname(packs_artifacts_path),
storage_bucket=storage_bucket,
)
# get the lists of packs divided by their status
successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)
# Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
store_successful_and_failed_packs_in_ci_artifacts(
packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
updated_private_packs_ids, images_data=get_images_data(packs_list)
)
# summary of packs status
print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CI_BUILD_ID",
"CI_COMMIT_BRANCH",
"CI_COMMIT_SHA",
"CONTENT_GITHUB_TOKEN"
] |
[]
|
["CI_BUILD_ID", "CI_COMMIT_BRANCH", "CI_COMMIT_SHA", "CONTENT_GITHUB_TOKEN"]
|
python
| 4 | 0 | |
internal/docs/modular_docs.go
|
package docs
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"sort"
"strings"
"text/template"
"github.com/pkg/errors"
)
// CreateModularDocs creates modular documentation from the CLI-generated docs
func CreateModularDocs() error {
srcDir := os.Getenv("SRC_DIR")
if srcDir == "" {
return errors.New("SRC_DIR must be set")
}
files, err := filepath.Glob(fmt.Sprintf("%s/*.adoc", srcDir))
if err != nil {
return errors.WithStack(err)
}
outDir := os.Getenv("DEST_DIR")
if outDir == "" {
outDir = "dist"
}
modulesDir := path.Join(outDir, "modules")
err = os.RemoveAll(modulesDir)
if err != nil {
return errors.WithStack(err)
}
err = os.MkdirAll(modulesDir, 0o755)
if err != nil {
return errors.WithStack(err)
}
moduleFiles, err := CreateModules(modulesDir, files)
if err != nil {
return errors.WithStack(err)
}
assembliesDir := path.Join(outDir, "assemblies")
err = os.RemoveAll(assembliesDir)
if err != nil {
return errors.WithStack(err)
}
err = os.MkdirAll(assembliesDir, 0o755)
if err != nil {
return errors.WithStack(err)
}
err = CreateAssembly(assembliesDir, moduleFiles)
if err != nil {
return errors.WithStack(err)
}
return nil
}
func CreateModules(modulesDir string, commandAdocFiles []string) ([]string, error) {
answer := make([]string, 0)
for _, f := range commandAdocFiles {
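// Rename the generated file: underscores become hyphens, the leading "rhoas" is dropped, and a "ref-cli" prefix is added
// (e.g. a hypothetical "rhoas_kafka_create.adoc" becomes "ref-cli-kafka-create.adoc").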
destName := fmt.Sprintf("ref-cli%s", strings.Replace(strings.ReplaceAll(filepath.Base(f), "_", "-"), "rhoas", "", 1))
destPath := path.Join(modulesDir, destName)
_, err := copyFile(f, destPath)
if err != nil {
return nil, errors.WithStack(err)
}
answer = append(answer, destPath)
}
return answer, nil
}
func CreateAssembly(assembliesDir string, files []string) error {
sort.Slice(files, func(i, j int) bool {
return files[i] < files[j]
})
commandFileNames := make([]string, 0)
for _, f := range files {
relPath, err := filepath.Rel(assembliesDir, f)
if err != nil {
return errors.WithStack(err)
}
commandFileNames = append(commandFileNames, relPath)
}
contentTemplate := `:context: rhoas-cli-command-reference
[id="cli-command-reference_{context}"]
= CLI command reference (rhoas)
[role="_abstract"]
You use the ` + "`rhoas`" + ` CLI to manage your application services from the command line.
{{ range .Commands}}
include::{{.}}[leveloffset=+1]
{{ end }}
`
type Vars struct {
Commands []string
}
vars := Vars{
Commands: commandFileNames,
}
filename := "assembly-cli-command-reference.adoc"
output, err := os.Create(path.Join(assembliesDir, filename))
if err != nil {
return errors.WithStack(err)
}
err = template.Must(template.New("content").Parse(contentTemplate)).Execute(output, vars)
if err != nil {
return errors.WithStack(err)
}
err = output.Sync()
if err != nil {
return errors.WithStack(err)
}
return nil
}
func copyFile(src, dst string) (int64, error) {
sourceFileStat, err := os.Stat(src)
if err != nil {
return 0, err
}
if !sourceFileStat.Mode().IsRegular() {
return 0, fmt.Errorf("%s is not a regular file", src)
}
source, err := os.Open(src)
if err != nil {
return 0, err
}
defer source.Close()
destination, err := os.Create(dst)
if err != nil {
return 0, err
}
defer destination.Close()
nBytes, err := io.Copy(destination, source)
return nBytes, err
}
|
[
"\"SRC_DIR\"",
"\"DEST_DIR\""
] |
[] |
[
"DEST_DIR",
"SRC_DIR"
] |
[]
|
["DEST_DIR", "SRC_DIR"]
|
go
| 2 | 0 | |
share/qt/extract_strings_qt.py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/tpfcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *tpfcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("tpfcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[] |
[] |
[
"XGETTEXT"
] |
[]
|
["XGETTEXT"]
|
python
| 1 | 0 | |
examples/request_example.py
|
import os
from requre.storage import PersistentObjectStorage
from requre.helpers.requests_response import RequestResponseHandling
import requests
requests.Session.send = RequestResponseHandling.decorator_plain(requests.Session.send)
PersistentObjectStorage().storage_file = "github.yaml"
import github
g = github.Github(os.getenv("TOKEN", "EMPTY"))
print("Count of your repos: ", len(list(g.get_user().get_repos())))
PersistentObjectStorage().dump()
|
[] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
python
| 1 | 0 | |
datalabeling/create_instruction.py
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from google.api_core.client_options import ClientOptions
# [START datalabeling_create_instruction_beta]
def create_instruction(project_id, data_type, instruction_gcs_uri):
""" Creates a data labeling PDF instruction for the given Google Cloud
project. The PDF file should be uploaded to the project in
Google Cloud Storage.
"""
from google.cloud import datalabeling_v1beta1 as datalabeling
client = datalabeling.DataLabelingServiceClient()
# [END datalabeling_create_instruction_beta]
# If provided, use a provided test endpoint - this will prevent tests on
# this snippet from triggering any action by a real human
if 'DATALABELING_ENDPOINT' in os.environ:
opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
client = datalabeling.DataLabelingServiceClient(client_options=opts)
# [START datalabeling_create_instruction_beta]
project_path = client.project_path(project_id)
pdf_instruction = datalabeling.types.PdfInstruction(
gcs_file_uri=instruction_gcs_uri)
instruction = datalabeling.types.Instruction(
display_name='YOUR_INSTRUCTION_DISPLAY_NAME',
description='YOUR_DESCRIPTION',
data_type=data_type,
pdf_instruction=pdf_instruction
)
operation = client.create_instruction(project_path, instruction)
result = operation.result()
# The format of the resource name:
# project_id/{project_id}/instruction/{instruction_id}
print('The instruction resource name: {}\n'.format(result.name))
print('Display name: {}'.format(result.display_name))
print('Description: {}'.format(result.description))
print('Create time:')
print('\tseconds: {}'.format(result.create_time.seconds))
print('\tnanos: {}'.format(result.create_time.nanos))
print('Data type: {}'.format(
datalabeling.enums.DataType(result.data_type).name))
print('Pdf instruction:')
print('\tGcs file uri: {}'.format(
result.pdf_instruction.gcs_file_uri))
return result
# [END datalabeling_create_instruction_beta]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--project-id',
help='Project ID. Required.',
required=True
)
parser.add_argument(
'--data-type',
help='Data type. Only support IMAGE, VIDEO, TEXT and AUDIO. Required.',
required=True
)
parser.add_argument(
'--instruction-gcs-uri',
help='The URI of Google Cloud Storage of the instruction. Required.',
required=True
)
args = parser.parse_args()
create_instruction(
args.project_id,
args.data_type,
args.instruction_gcs_uri
)
|
[] |
[] |
[
"DATALABELING_ENDPOINT"
] |
[]
|
["DATALABELING_ENDPOINT"]
|
python
| 1 | 0 | |
cmd/main.go
|
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"time"
"github.com/porjo/ingaugo"
)
type arrayFlags []string
func (i *arrayFlags) String() string {
return fmt.Sprintf("%v", *i)
}
func (i *arrayFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
var bank ingaugo.Bank
func main() {
accounts := make(arrayFlags, 0)
wsURL := flag.String("ws-url", "", "WebSsocket URL e.g. ws://localhost:9222")
clientNumber := flag.String("clientNumber", "", "Client number")
accessPin := flag.String("accessPin", "", "Access pin")
flag.Var(&accounts, "accountNumber", "Account number")
days := flag.Int("days", 30, "Number of days of transactions")
outputDir := flag.String("outputDir", "", "Directory to write CSV files. Defaults to current directory")
flag.Parse()
if *clientNumber == "" {
fmt.Printf("-clientNumber is required\n\n")
fmt.Println("Flags:")
flag.PrintDefaults()
os.Exit(1)
}
if *accessPin == "" {
// check the environment variable
*accessPin = os.Getenv("ACCESS_PIN")
if *accessPin == "" {
fmt.Printf("-accessPin parameter or ACCESS_PIN environment variable is required\n\n")
fmt.Println("Flags:")
flag.PrintDefaults()
os.Exit(1)
}
}
if *outputDir != "" {
info, err := os.Stat(*outputDir)
if os.IsNotExist(err) {
log.Fatalf("Directory %s does not exist", *outputDir)
}
if !info.IsDir() {
log.Fatalf("%s is not a directory", *outputDir)
}
}
// create a timeout as a safety net to prevent any infinite wait loops
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
if *wsURL != "" {
bank = ingaugo.NewBankWithWS(*wsURL)
} else {
bank = ingaugo.NewBank()
}
fmt.Printf("Fetching auth token...\n")
token, err := bank.Login(ctx, *clientNumber, *accessPin)
if err != nil {
log.Fatal(err)
}
for _, acct := range accounts {
err := GetTransactions(*days, acct, token, *outputDir)
if err != nil {
log.Fatal(err)
}
}
}
func GetTransactions(days int, accountNumber, token, outputDir string) error {
log.Printf("Fetching transactions for account %s\n", accountNumber)
trans, err := bank.GetTransactionsDays(days, accountNumber, token)
if err != nil {
return err
}
file := accountNumber + ".csv"
if outputDir != "" {
file = outputDir + "/" + file
}
log.Printf("Writing CSV file %s\n", file)
if err := os.WriteFile(file, trans, 0666); err != nil {
return err
}
return nil
}
|
[
"\"ACCESS_PIN\""
] |
[] |
[
"ACCESS_PIN"
] |
[]
|
["ACCESS_PIN"]
|
go
| 1 | 0 | |
hub_module/modules/image/classification/fix_resnext101_32x48d_wsl_imagenet/module.py
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from paddlehub.common.paddle_helper import add_vars_prefix
from fix_resnext101_32x48d_wsl_imagenet.processor import postprocess, base64_to_cv2
from fix_resnext101_32x48d_wsl_imagenet.data_feed import reader
from fix_resnext101_32x48d_wsl_imagenet.resnext101_wsl import Fix_ResNeXt101_32x48d_wsl
@moduleinfo(
name="fix_resnext101_32x48d_wsl_imagenet",
type="CV/image_classification",
author="paddlepaddle",
author_email="[email protected]",
summary=
"fix_resnext101_32x48d_wsl is a image classfication model, this module is trained with imagenet datasets.",
version="1.0.0")
class FixResnext10132x48dwslImagenet(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(
self.directory, "model")
label_file = os.path.join(self.directory, "label_list.txt")
with open(label_file, 'r', encoding='utf-8') as file:
self.label_list = file.read().split("\n")[:-1]
self.predictor_set = False
def get_expected_image_width(self):
return 224
def get_expected_image_height(self):
return 224
def get_pretrained_images_mean(self):
im_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3)
return im_mean
def get_pretrained_images_std(self):
im_std = np.array([0.229, 0.224, 0.225]).reshape(1, 3)
return im_std
def _set_config(self):
"""
predictor config setting
"""
cpu_config = AnalysisConfig(self.default_pretrained_model_path)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_paddle_predictor(cpu_config)
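# Use the GPU predictor only when CUDA_VISIBLE_DEVICES is set and starts with a numeric device id; otherwise fall back to CPU.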
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
int(_places[0])
use_gpu = True
except:
use_gpu = False
if use_gpu:
gpu_config = AnalysisConfig(self.default_pretrained_model_path)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(
memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_paddle_predictor(gpu_config)
def context(self, trainable=True, pretrained=True):
"""context for transfer learning.
Args:
trainable (bool): Set parameters in program to be trainable.
pretrained (bool) : Whether to load pretrained model.
Returns:
inputs (dict): key is 'image', corresponding value is the image tensor.
outputs (dict): key is :
'classification', corresponding value is the result of classification.
'feature_map', corresponding value is the result of the layer before the fully connected layer.
context_prog (fluid.Program): program for transfer learning.
"""
context_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(context_prog, startup_prog):
with fluid.unique_name.guard():
image = fluid.layers.data(
name="image", shape=[3, 224, 224], dtype="float32")
resnet_vd = Fix_ResNeXt101_32x48d_wsl()
output, feature_map = resnet_vd.net(
input=image, class_dim=len(self.label_list))
name_prefix = '@HUB_{}@'.format(self.name)
inputs = {'image': name_prefix + image.name}
outputs = {
'classification': name_prefix + output.name,
'feature_map': name_prefix + feature_map.name
}
add_vars_prefix(context_prog, name_prefix)
add_vars_prefix(startup_prog, name_prefix)
global_vars = context_prog.global_block().vars
inputs = {
key: global_vars[value]
for key, value in inputs.items()
}
outputs = {
key: global_vars[value]
for key, value in outputs.items()
}
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# pretrained
if pretrained:
def _if_exist(var):
b = os.path.exists(
os.path.join(self.default_pretrained_model_path,
var.name))
return b
fluid.io.load_vars(
exe,
self.default_pretrained_model_path,
context_prog,
predicate=_if_exist)
else:
exe.run(startup_prog)
# trainable
for param in context_prog.global_block().iter_parameters():
param.trainable = trainable
return inputs, outputs, context_prog
def classification(self,
images=None,
paths=None,
batch_size=1,
use_gpu=False,
top_k=1):
"""
API for image classification.
Args:
images (list[numpy.ndarray]): data of images, shape of each is [H, W, C], color space must be BGR.
paths (list[str]): The paths of images.
batch_size (int): batch size.
use_gpu (bool): Whether to use gpu.
top_k (int): Return top k results.
Returns:
res (list[dict]): The classification results.
"""
if use_gpu:
try:
_places = os.environ["CUDA_VISIBLE_DEVICES"]
int(_places[0])
except:
raise RuntimeError(
"Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."
)
if not self.predictor_set:
self._set_config()
self.predictor_set = True
all_data = list()
for yield_data in reader(images, paths):
all_data.append(yield_data)
total_num = len(all_data)
loop_num = int(np.ceil(total_num / batch_size))
res = list()
for iter_id in range(loop_num):
batch_data = list()
handle_id = iter_id * batch_size
for image_id in range(batch_size):
try:
batch_data.append(all_data[handle_id + image_id])
except:
pass
# feed batch image
batch_image = np.array([data['image'] for data in batch_data])
batch_image = PaddleTensor(batch_image.copy())
predictor_output = self.gpu_predictor.run([
batch_image
]) if use_gpu else self.cpu_predictor.run([batch_image])
out = postprocess(
data_out=predictor_output[0].as_ndarray(),
label_list=self.label_list,
top_k=top_k)
res += out
return res
def save_inference_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
@serving
def serving_method(self, images, **kwargs):
"""
Run as a service.
"""
images_decode = [base64_to_cv2(image) for image in images]
results = self.classification(images=images_decode, **kwargs)
return results
@runnable
def run_cmd(self, argvs):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(
title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options",
description=
"Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
results = self.classification(
paths=[args.input_path],
batch_size=args.batch_size,
use_gpu=args.use_gpu)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--use_gpu',
type=ast.literal_eval,
default=False,
help="whether use GPU or not.")
self.arg_config_group.add_argument(
'--batch_size',
type=ast.literal_eval,
default=1,
help="batch size.")
self.arg_config_group.add_argument(
'--top_k',
type=ast.literal_eval,
default=1,
help="Return top k results.")
def add_module_input_arg(self):
"""
Add the command input options.
"""
self.arg_input_group.add_argument(
'--input_path', type=str, help="path to image.")
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
pkg/config/git_remote.go
|
package config
type GitRemote struct {
*GitRemoteExport
Name string
Url string
raw *rawGit
}
func (c *GitRemote) GetRaw() interface{} {
return c.raw
}
func (c *GitRemote) validate() error {
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
main.go
|
package main
import (
"./api"
"./webserver"
"fmt"
"os"
"os/signal"
"syscall"
"time"
)
func main() {
fmt.Println("=============================================")
fmt.Println("=============================================")
fmt.Println("=== Docker Swarm Service Lister UI 0.2.1 ====")
fmt.Println("=============================================")
apiProtocol := "http"
if len(os.Getenv("API_PROTOCOL")) > 0 {
apiProtocol = os.Getenv("API_PROTOCOL")
}
apiHost := "127.0.0.1"
if len(os.Getenv("API_HOST")) > 0 {
apiHost = os.Getenv("API_HOST")
}
apiPort := "7777"
if len(os.Getenv("API_PORT")) > 0 {
apiPort = os.Getenv("API_PORT")
}
apiUrl := fmt.Sprintf("%s://%s:%s/stacks", apiProtocol, apiHost, apiPort)
serverPort := "8087"
if len(os.Getenv("SERVER_PORT")) > 0 {
serverPort = os.Getenv("SERVER_PORT")
}
fmt.Printf("=== POLLING API @%s\n", apiUrl)
fmt.Printf("=== STARTING WEB SERVER @%s\n", serverPort)
fmt.Println("=============================================")
stacks := api.GetStacks(apiUrl)
webserverData := &webserver.WebserverData{Stacks: stacks, Title: "Service Listing"}
c := make(chan bool)
go webserver.StartServer(serverPort, webserverData, c)
fmt.Println("> Started the web server, now polling swarm")
stop := make(chan os.Signal, 1)
signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)
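// poll the API every 30 seconds until an interrupt or terminate signal arrives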
for i := 1; ; i++ { // this is still infinite
t := time.NewTicker(time.Second * 30)
select {
case <-stop:
fmt.Println("> Shutting down polling")
break
case <-t.C:
fmt.Println(" > Updating Stacks")
webserverData.UpdateStacks(api.GetStacks(apiUrl))
continue
}
break // only reached if the stop case happens
}
fmt.Println("> Shutting down webserver")
c <- true
if b := <-c; b {
fmt.Println("> Webserver shut down")
}
fmt.Println("> Shut down app")
}
|
[
"\"API_PROTOCOL\"",
"\"API_PROTOCOL\"",
"\"API_HOST\"",
"\"API_HOST\"",
"\"API_PORT\"",
"\"API_PORT\"",
"\"SERVER_PORT\"",
"\"SERVER_PORT\""
] |
[] |
[
"SERVER_PORT",
"API_PORT",
"API_PROTOCOL",
"API_HOST"
] |
[]
|
["SERVER_PORT", "API_PORT", "API_PROTOCOL", "API_HOST"]
|
go
| 4 | 0 | |
salt/client/ssh/ssh_py_shim.py
|
# -*- coding: utf-8 -*-
'''
This is a shim that handles checking and updating salt thin and
then invoking thin.
This is not intended to be instantiated as a module, rather it is a
helper script used by salt.client.ssh.Single. It is here, in a
separate file, for convenience of development.
'''
from __future__ import absolute_import
import hashlib
import tarfile
import shutil
import sys
import os
import stat
THIN_ARCHIVE = 'salt-thin.tgz'
EXT_ARCHIVE = 'salt-ext_mods.tgz'
# FIXME - it would be ideal if these could be obtained directly from
# salt.exitcodes rather than duplicated.
EX_THIN_DEPLOY = 11
EX_THIN_CHECKSUM = 12
EX_MOD_DEPLOY = 13
class OBJ(object):
pass
OPTIONS = None
ARGS = None
#%%OPTS
def need_deployment():
if os.path.exists(OPTIONS.saltdir):
shutil.rmtree(OPTIONS.saltdir)
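# recreate the salt dir with owner-only permissions (umask 0o077)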
old_umask = os.umask(0o077)
os.makedirs(OPTIONS.saltdir)
os.umask(old_umask)
# Verify perms on saltdir
euid = os.geteuid()
dstat = os.stat(OPTIONS.saltdir)
if dstat.st_uid != euid:
# Attack detected, try again
need_deployment()
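# 16832 == 0o40700, i.e. a directory whose permissions are restricted to the owner (rwx------)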
if dstat.st_mode != 16832:
# Attack detected
need_deployment()
# If SUDOing then also give the super user group write permissions
sudo_gid = os.environ.get('SUDO_GID')
if sudo_gid:
os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
st = os.stat(OPTIONS.saltdir)
os.chmod(OPTIONS.saltdir, st.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
# Delimiter emitted on stdout *only* to indicate shim message to master.
sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.exit(EX_THIN_DEPLOY)
# Adapted from salt.utils.get_hash()
def get_hash(path, form='sha1', chunk_size=4096):
try:
hash_type = getattr(hashlib, form)
except AttributeError:
raise ValueError('Invalid hash type: {0}'.format(form))
with open(path, 'rb') as ifile:
hash_obj = hash_type()
# read the file in chunks, not the entire file
for chunk in iter(lambda: ifile.read(chunk_size), b''):
hash_obj.update(chunk)
return hash_obj.hexdigest()
def unpack_thin(thin_path):
tfile = tarfile.TarFile.gzopen(thin_path)
tfile.extractall(path=OPTIONS.saltdir)
tfile.close()
os.unlink(thin_path)
def need_ext():
sys.stdout.write("{0}\next_mods\n".format(OPTIONS.delimiter))
sys.exit(EX_MOD_DEPLOY)
def unpack_ext(ext_path):
modcache = os.path.join(
OPTIONS.saltdir,
'running_data',
'var',
'cache',
'salt',
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
tfile.extractall(path=modcache)
tfile.close()
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
shutil.move(ver_path, ver_dst)
def main(argv): # pylint: disable=W0613
thin_path = os.path.join(OPTIONS.saltdir, THIN_ARCHIVE)
if os.path.isfile(thin_path):
if OPTIONS.checksum != get_hash(thin_path, OPTIONS.hashfunc):
sys.stderr.write('{0}\n'.format(OPTIONS.checksum))
sys.stderr.write('{0}\n'.format(get_hash(thin_path, OPTIONS.hashfunc)))
os.unlink(thin_path)
sys.stderr.write('WARNING: checksum mismatch for "{0}"\n'.format(thin_path))
sys.exit(EX_THIN_CHECKSUM)
unpack_thin(thin_path)
# Salt thin now is available to use
else:
if not os.path.exists(OPTIONS.saltdir):
need_deployment()
if not os.path.isdir(OPTIONS.saltdir):
sys.stderr.write('ERROR: salt path "{0}" exists but is not a directory\n'.format(OPTIONS.saltdir))
sys.exit(os.EX_CANTCREAT)
version_path = os.path.join(OPTIONS.saltdir, 'version')
if not os.path.exists(version_path) or not os.path.isfile(version_path):
sys.stderr.write('WARNING: Unable to locate current thin version.\n')
need_deployment()
with open(version_path, 'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.version:
sys.stderr.write('WARNING: current thin version is not up-to-date.\n')
need_deployment()
# Salt thin exists and is up-to-date - fall through and use it
salt_call_path = os.path.join(OPTIONS.saltdir, 'salt-call')
if not os.path.isfile(salt_call_path):
sys.stderr.write('ERROR: thin is missing "{0}"\n'.format(salt_call_path))
need_deployment()
with open(os.path.join(OPTIONS.saltdir, 'minion'), 'w') as config:
config.write(OPTIONS.config + '\n')
if OPTIONS.ext_mods:
ext_path = os.path.join(OPTIONS.saltdir, EXT_ARCHIVE)
if os.path.exists(ext_path):
unpack_ext(ext_path)
else:
version_path = os.path.join(OPTIONS.saltdir, 'ext_version')
if not os.path.exists(version_path) or not os.path.isfile(version_path):
need_ext()
with open(version_path, 'r') as vpo:
cur_version = vpo.readline().strip()
if cur_version != OPTIONS.ext_mods:
need_ext()
#Fix parameter passing issue
if len(ARGS) == 1:
argv_prepared = ARGS[0].split()
else:
argv_prepared = ARGS
salt_argv = [
sys.executable,
salt_call_path,
'--local',
'--metadata',
'--out', 'json',
'-l', 'quiet',
'-c', OPTIONS.saltdir,
'--',
] + argv_prepared
sys.stderr.write('SALT_ARGV: {0}\n'.format(salt_argv))
# Only emit the delimiter on *both* stdout and stderr when completely successful.
# Yes, the flush() is necessary.
sys.stdout.write(OPTIONS.delimiter + '\n')
sys.stdout.flush()
sys.stderr.write(OPTIONS.delimiter + '\n')
sys.stderr.flush()
if OPTIONS.wipe:
import subprocess
subprocess.call(salt_argv)
shutil.rmtree(OPTIONS.saltdir)
else:
os.execv(sys.executable, salt_argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[] |
[] |
[
"SUDO_GID"
] |
[]
|
["SUDO_GID"]
|
python
| 1 | 0 | |
watch_commits.py
|
#!/usr/bin/env python3
# Copyright 2022 tison <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import os
import dotenv
import requests
if __name__ == "__main__":
dotenv.load_dotenv()
headers = {}
headers['Accept'] = 'application/vnd.github.v3+json'
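# GITHUB_TOKEN is used verbatim as the Authorization header, so it is expected to include the scheme (e.g. "token <PAT>")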
headers['Authorization'] = os.getenv('GITHUB_TOKEN')
Watcher = namedtuple('Watcher', ['repo', 'commit'])
watchers = [
Watcher(repo='stretchr/testify', commit='3c33e07c4c233bf61e1414f5acb3cda06ffef1d1')
]
exit_code = 0
for watcher in watchers:
result = requests.get(url=f'https://api.github.com/repos/{watcher.repo}/commits?per_page=1', headers=headers)
commit = result.json()[0]['sha']
if commit != watcher.commit:
print(f'{watcher.repo} has been pushed new commit: {commit}, last commit: {watcher.commit}')
exit_code = 1
exit(exit_code)
|
[] |
[] |
[
"GITHUB_TOKEN"
] |
[]
|
["GITHUB_TOKEN"]
|
python
| 1 | 0 | |
src/graphql.go
|
package src
import (
"context"
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/graphql-services/graphql-files/model"
"github.com/machinebox/graphql"
)
const (
graphqlSaveFile = `mutation createFile($input: FileCreateInput!) {
result: createFile(input:$input) {
id
name
size
contentType
status
}
}`
graphqlFetchFile = `query file($id: ID!) {
result: file(id: $id) {
id
name
size
contentType
status
}
}`
)
// SaveFile ...
func SaveFile(ctx context.Context, f model.UploadResponse, auth string, data map[string]interface{}) (model.UploadResponse, error) {
var res struct {
Result model.UploadResponse
}
data["id"] = f.ID
data["name"] = f.Name
data["size"] = f.Size
data["contentType"] = f.ContentType
data["status"] = f.Status
req := graphql.NewRequest(graphqlSaveFile)
req.Var("input", data)
if auth != "" {
req.Header.Set("authorization", auth)
}
err := sendRequest(ctx, req, &res)
return res.Result, err
}
// FetchFile ...
func FetchFile(ctx context.Context, id, auth string) (*model.UploadResponse, error) {
var res struct {
Result *model.UploadResponse
}
req := graphql.NewRequest(graphqlFetchFile)
req.Var("id", id)
if auth != "" {
req.Header.Set("authorization", auth)
}
err := sendRequest(ctx, req, &res)
return res.Result, err
}
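// sendRequest executes the GraphQL request against the endpoint configured via the GRAPHQL_URL environment variable.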
func sendRequest(ctx context.Context, req *graphql.Request, data interface{}) error {
URL := os.Getenv("GRAPHQL_URL")
if URL == "" {
return fmt.Errorf("Missing required environment variable GRAPHQL_URL")
}
client := graphql.NewClient(URL)
client.Log = func(s string) {
log.Info(s)
}
return client.Run(ctx, req, data)
}
|
[
"\"GRAPHQL_URL\""
] |
[] |
[
"GRAPHQL_URL"
] |
[]
|
["GRAPHQL_URL"]
|
go
| 1 | 0 | |
gen/samples/proposaldeleteoperation.go
|
//This file is generated by btsgen. DO NOT EDIT.
//operation sample data for OperationTypeProposalDelete
package samples
import (
"github.com/gkany/cocos-go/gen/data"
"github.com/gkany/cocos-go/types"
)
var (
sampleDataProposalDeleteOperation = make(map[int]string)
)
func init() {
data.OpSampleMap[types.OperationTypeProposalDelete] =
sampleDataProposalDeleteOperation
}
//end of file
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/api-service/__app__/onefuzzlib/azure/vmss.py
|
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import os
from typing import Any, Dict, List, Optional, Union, cast
from uuid import UUID
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from azure.mgmt.compute.models import (
ResourceSku,
ResourceSkuRestrictionsType,
VirtualMachineScaleSetVMInstanceIDs,
VirtualMachineScaleSetVMInstanceRequiredIDs,
)
from memoization import cached
from msrestazure.azure_exceptions import CloudError
from onefuzztypes.enums import OS, ErrorCode
from onefuzztypes.models import Error
from onefuzztypes.primitives import Region
from .compute import get_compute_client
from .creds import get_base_resource_group, get_scaleset_identity_resource_path
from .image import get_os
def list_vmss(name: UUID) -> Optional[List[str]]:
resource_group = get_base_resource_group()
client = get_compute_client()
try:
instances = [
x.instance_id
for x in client.virtual_machine_scale_set_vms.list(
resource_group, str(name)
)
]
return instances
except (ResourceNotFoundError, CloudError) as err:
logging.error("cloud error listing vmss: %s (%s)", name, err)
return None
def delete_vmss(name: UUID) -> bool:
resource_group = get_base_resource_group()
compute_client = get_compute_client()
response = compute_client.virtual_machine_scale_sets.begin_delete(
resource_group, str(name)
)
# https://docs.microsoft.com/en-us/python/api/azure-core/
# azure.core.polling.lropoller?view=azure-python#status--
#
# status returns a str, however mypy thinks this is an Any.
#
# Checked by hand that the result is Succeeded in practice
return bool(response.status() == "Succeeded")
def get_vmss(name: UUID) -> Optional[Any]:
resource_group = get_base_resource_group()
logging.debug("getting vm: %s", name)
compute_client = get_compute_client()
try:
return compute_client.virtual_machine_scale_sets.get(resource_group, str(name))
except ResourceNotFoundError as err:
logging.debug("vm does not exist %s", err)
return None
def resize_vmss(name: UUID, capacity: int) -> None:
check_can_update(name)
resource_group = get_base_resource_group()
logging.info("updating VM count - name: %s vm_count: %d", name, capacity)
compute_client = get_compute_client()
compute_client.virtual_machine_scale_sets.begin_update(
resource_group, str(name), {"sku": {"capacity": capacity}}
)
def get_vmss_size(name: UUID) -> Optional[int]:
vmss = get_vmss(name)
if vmss is None:
return None
return cast(int, vmss.sku.capacity)
def list_instance_ids(name: UUID) -> Dict[UUID, str]:
logging.debug("get instance IDs for scaleset: %s", name)
resource_group = get_base_resource_group()
compute_client = get_compute_client()
results = {}
try:
for instance in compute_client.virtual_machine_scale_set_vms.list(
resource_group, str(name)
):
results[UUID(instance.vm_id)] = cast(str, instance.instance_id)
except (ResourceNotFoundError, CloudError):
logging.debug("vm does not exist %s", name)
return results
@cached(ttl=60)
def get_instance_id(name: UUID, vm_id: UUID) -> Union[str, Error]:
resource_group = get_base_resource_group()
logging.info("get instance ID for scaleset node: %s:%s", name, vm_id)
compute_client = get_compute_client()
vm_id_str = str(vm_id)
for instance in compute_client.virtual_machine_scale_set_vms.list(
resource_group, str(name)
):
if instance.vm_id == vm_id_str:
return cast(str, instance.instance_id)
return Error(
code=ErrorCode.UNABLE_TO_FIND,
errors=["unable to find scaleset machine: %s:%s" % (name, vm_id)],
)
class UnableToUpdate(Exception):
pass
def check_can_update(name: UUID) -> Any:
vmss = get_vmss(name)
if vmss is None:
raise UnableToUpdate
if vmss.provisioning_state == "Updating":
raise UnableToUpdate
return vmss
def reimage_vmss_nodes(name: UUID, vm_ids: List[UUID]) -> Optional[Error]:
check_can_update(name)
resource_group = get_base_resource_group()
logging.info("reimaging scaleset VM - name: %s vm_ids:%s", name, vm_ids)
compute_client = get_compute_client()
instance_ids = []
machine_to_id = list_instance_ids(name)
for vm_id in vm_ids:
if vm_id in machine_to_id:
instance_ids.append(machine_to_id[vm_id])
else:
logging.info("unable to find vm_id for %s:%s", name, vm_id)
if instance_ids:
compute_client.virtual_machine_scale_sets.begin_reimage_all(
resource_group,
str(name),
VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids),
)
return None
def delete_vmss_nodes(name: UUID, vm_ids: List[UUID]) -> Optional[Error]:
check_can_update(name)
resource_group = get_base_resource_group()
logging.info("deleting scaleset VM - name: %s vm_ids:%s", name, vm_ids)
compute_client = get_compute_client()
instance_ids = []
machine_to_id = list_instance_ids(name)
for vm_id in vm_ids:
if vm_id in machine_to_id:
instance_ids.append(machine_to_id[vm_id])
else:
logging.info("unable to find vm_id for %s:%s", name, vm_id)
if instance_ids:
compute_client.virtual_machine_scale_sets.begin_delete_instances(
resource_group,
str(name),
VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids),
)
return None
def update_extensions(name: UUID, extensions: List[Any]) -> None:
check_can_update(name)
resource_group = get_base_resource_group()
logging.info("updating VM extensions: %s", name)
compute_client = get_compute_client()
compute_client.virtual_machine_scale_sets.begin_update(
resource_group,
str(name),
{"virtual_machine_profile": {"extension_profile": {"extensions": extensions}}},
)
def create_vmss(
location: Region,
name: UUID,
vm_sku: str,
vm_count: int,
image: str,
network_id: str,
spot_instances: bool,
extensions: List[Any],
password: str,
ssh_public_key: str,
tags: Dict[str, str],
) -> Optional[Error]:
vmss = get_vmss(name)
if vmss is not None:
return None
logging.info(
"creating VM "
"name: %s vm_sku: %s vm_count: %d "
"image: %s subnet: %s spot_instances: %s",
name,
vm_sku,
vm_count,
image,
network_id,
spot_instances,
)
resource_group = get_base_resource_group()
compute_client = get_compute_client()
if image.startswith("/"):
image_ref = {"id": image}
else:
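# non-resource-id images are expected as a colon-separated "publisher:offer:sku:version" string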
image_val = image.split(":", 4)
image_ref = {
"publisher": image_val[0],
"offer": image_val[1],
"sku": image_val[2],
"version": image_val[3],
}
sku = {"name": vm_sku, "tier": "Standard", "capacity": vm_count}
params: Dict[str, Any] = {
"location": location,
"do_not_run_extensions_on_overprovisioned_vms": True,
"upgrade_policy": {"mode": "Manual"},
"sku": sku,
"overprovision": False,
"identity": {
"type": "userAssigned",
"userAssignedIdentities": {get_scaleset_identity_resource_path(): {}},
},
"virtual_machine_profile": {
"priority": "Regular",
"storage_profile": {"image_reference": image_ref},
"os_profile": {
"computer_name_prefix": "node",
"admin_username": "onefuzz",
"admin_password": password,
},
"network_profile": {
"network_interface_configurations": [
{
"name": "onefuzz-nic",
"primary": True,
"ip_configurations": [
{"name": "onefuzz-ip-config", "subnet": {"id": network_id}}
],
}
]
},
"extension_profile": {"extensions": extensions},
},
"single_placement_group": False,
}
image_os = get_os(location, image)
if isinstance(image_os, Error):
return image_os
if image_os == OS.linux:
params["virtual_machine_profile"]["os_profile"]["linux_configuration"] = {
"disable_password_authentication": True,
"ssh": {
"public_keys": [
{
"path": "/home/onefuzz/.ssh/authorized_keys",
"key_data": ssh_public_key,
}
]
},
}
if spot_instances:
# Setting max price to -1 means it won't be evicted because of
# price.
#
# https://docs.microsoft.com/en-us/azure/
# virtual-machine-scale-sets/use-spot#resource-manager-templates
params["virtual_machine_profile"].update(
{
"eviction_policy": "Delete",
"priority": "Spot",
"billing_profile": {"max_price": -1},
}
)
params["tags"] = tags.copy()
owner = os.environ.get("ONEFUZZ_OWNER")
if owner:
params["tags"]["OWNER"] = owner
try:
compute_client.virtual_machine_scale_sets.begin_create_or_update(
resource_group, name, params
)
except ResourceExistsError as err:
err_str = str(err)
if "SkuNotAvailable" in err_str or "OperationNotAllowed" in err_str:
return Error(
code=ErrorCode.VM_CREATE_FAILED, errors=[f"creating vmss: {err_str}"]
)
raise err
except (ResourceNotFoundError, CloudError) as err:
if "The request failed due to conflict with a concurrent request" in repr(err):
logging.debug(
"create VM had conflicts with concurrent request, ignoring %s", err
)
return None
return Error(
code=ErrorCode.VM_CREATE_FAILED,
errors=["creating vmss: %s" % err],
)
return None
@cached(ttl=60)
def list_available_skus(location: str) -> List[str]:
compute_client = get_compute_client()
skus: List[ResourceSku] = list(
compute_client.resource_skus.list(filter="location eq '%s'" % location)
)
sku_names: List[str] = []
for sku in skus:
available = True
if sku.restrictions is not None:
for restriction in sku.restrictions:
if restriction.type == ResourceSkuRestrictionsType.location and (
location.upper() in [v.upper() for v in restriction.values]
):
available = False
break
if available:
sku_names.append(sku.name)
return sku_names
|
[] |
[] |
[
"ONEFUZZ_OWNER"
] |
[]
|
["ONEFUZZ_OWNER"]
|
python
| 1 | 0 | |
orderer/common/server/main_test.go
|
// Copyright IBM Corp. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package server
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/hyperledger/fabric/bccsp/factory"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto/tlsgen"
deliver_mocks "github.com/hyperledger/fabric/common/deliver/mock"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/flogging/floggingtest"
"github.com/hyperledger/fabric/common/ledger/blockledger"
ledger_mocks "github.com/hyperledger/fabric/common/ledger/blockledger/mocks"
ramledger "github.com/hyperledger/fabric/common/ledger/blockledger/ram"
"github.com/hyperledger/fabric/common/localmsp"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/metrics/prometheus"
"github.com/hyperledger/fabric/common/mocks/crypto"
"github.com/hyperledger/fabric/common/tools/configtxgen/configtxgentest"
"github.com/hyperledger/fabric/common/tools/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/config/configtest"
"github.com/hyperledger/fabric/orderer/common/cluster"
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/common/server/mocks"
server_mocks "github.com/hyperledger/fabric/orderer/common/server/mocks"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protos/utils"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func TestInitializeLogging(t *testing.T) {
origEnvValue := os.Getenv("FABRIC_LOGGING_SPEC")
os.Setenv("FABRIC_LOGGING_SPEC", "foo=debug")
initializeLogging()
assert.Equal(t, "debug", flogging.Global.Level("foo").String())
os.Setenv("FABRIC_LOGGING_SPEC", origEnvValue)
}
func TestInitializeProfilingService(t *testing.T) {
origEnvValue := os.Getenv("FABRIC_LOGGING_SPEC")
defer os.Setenv("FABRIC_LOGGING_SPEC", origEnvValue)
os.Setenv("FABRIC_LOGGING_SPEC", "debug")
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
initializeProfilingService(
&localconfig.TopLevel{
General: localconfig.General{
Profile: localconfig.Profile{
Enabled: true,
Address: listenAddr,
}},
Kafka: localconfig.Kafka{Verbose: true},
},
)
time.Sleep(500 * time.Millisecond)
if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil {
t.Logf("Expected pprof to be up (will retry again in 3 seconds): %s", err)
time.Sleep(3 * time.Second)
if _, err := http.Get("http://" + listenAddr + "/" + "/debug/"); err != nil {
t.Fatalf("Expected pprof to be up: %s", err)
}
}
}
func TestInitializeServerConfig(t *testing.T) {
conf := &localconfig.TopLevel{
General: localconfig.General{
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
Certificate: "main.go",
PrivateKey: "main.go",
RootCAs: []string{"main.go"},
ClientRootCAs: []string{"main.go"},
},
},
}
sc := initializeServerConfig(conf, nil)
defaultOpts := comm.DefaultKeepaliveOptions
assert.Equal(t, defaultOpts.ServerMinInterval, sc.KaOpts.ServerMinInterval)
assert.Equal(t, time.Duration(0), sc.KaOpts.ServerInterval)
assert.Equal(t, time.Duration(0), sc.KaOpts.ServerTimeout)
testDuration := 10 * time.Second
conf.General.Keepalive = localconfig.Keepalive{
ServerMinInterval: testDuration,
ServerInterval: testDuration,
ServerTimeout: testDuration,
}
sc = initializeServerConfig(conf, nil)
assert.Equal(t, testDuration, sc.KaOpts.ServerMinInterval)
assert.Equal(t, testDuration, sc.KaOpts.ServerInterval)
assert.Equal(t, testDuration, sc.KaOpts.ServerTimeout)
sc = initializeServerConfig(conf, nil)
assert.NotNil(t, sc.Logger)
assert.Equal(t, &disabled.Provider{}, sc.MetricsProvider)
assert.Len(t, sc.UnaryInterceptors, 2)
assert.Len(t, sc.StreamInterceptors, 2)
sc = initializeServerConfig(conf, &prometheus.Provider{})
assert.Equal(t, &prometheus.Provider{}, sc.MetricsProvider)
goodFile := "main.go"
badFile := "does_not_exist"
oldLogger := logger
defer func() { logger = oldLogger }()
logger, _ = floggingtest.NewTestLogger(t)
testCases := []struct {
name string
certificate string
privateKey string
rootCA string
clientRootCert string
clusterCert string
clusterKey string
clusterCA string
}{
{"BadCertificate", badFile, goodFile, goodFile, goodFile, "", "", ""},
{"BadPrivateKey", goodFile, badFile, goodFile, goodFile, "", "", ""},
{"BadRootCA", goodFile, goodFile, badFile, goodFile, "", "", ""},
{"BadClientRootCertificate", goodFile, goodFile, goodFile, badFile, "", "", ""},
{"ClusterBadCertificate", goodFile, goodFile, goodFile, goodFile, badFile, goodFile, goodFile},
{"ClusterBadPrivateKey", goodFile, goodFile, goodFile, goodFile, goodFile, badFile, goodFile},
{"ClusterBadRootCA", goodFile, goodFile, goodFile, goodFile, goodFile, goodFile, badFile},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
conf := &localconfig.TopLevel{
General: localconfig.General{
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
Certificate: tc.certificate,
PrivateKey: tc.privateKey,
RootCAs: []string{tc.rootCA},
ClientRootCAs: []string{tc.clientRootCert},
},
Cluster: localconfig.Cluster{
ClientCertificate: tc.clusterCert,
ClientPrivateKey: tc.clusterKey,
RootCAs: []string{tc.clusterCA},
},
},
}
assert.Panics(t, func() {
if tc.clusterCert == "" {
initializeServerConfig(conf, nil)
} else {
initializeClusterClientConfig(conf, false, nil)
}
},
)
})
}
}
func TestInitializeBootstrapChannel(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
testCases := []struct {
genesisMethod string
ledgerType string
panics bool
}{
{"provisional", "ram", false},
{"provisional", "file", false},
{"provisional", "json", false},
{"invalid", "ram", true},
{"file", "ram", true},
}
for _, tc := range testCases {
t.Run(tc.genesisMethod+"/"+tc.ledgerType, func(t *testing.T) {
fileLedgerLocation, _ := ioutil.TempDir("", "test-ledger")
ledgerFactory, _ := createLedgerFactory(
&localconfig.TopLevel{
General: localconfig.General{LedgerType: tc.ledgerType},
FileLedger: localconfig.FileLedger{
Location: fileLedgerLocation,
},
},
&disabled.Provider{},
)
bootstrapConfig := &localconfig.TopLevel{
General: localconfig.General{
GenesisMethod: tc.genesisMethod,
GenesisProfile: "SampleSingleMSPSolo",
GenesisFile: "genesisblock",
SystemChannel: genesisconfig.TestChainID,
},
}
if tc.panics {
assert.Panics(t, func() {
genesisBlock := extractBootstrapBlock(bootstrapConfig)
initializeBootstrapChannel(genesisBlock, ledgerFactory)
})
} else {
assert.NotPanics(t, func() {
genesisBlock := extractBootstrapBlock(bootstrapConfig)
initializeBootstrapChannel(genesisBlock, ledgerFactory)
})
}
})
}
}
func TestExtractSysChanLastConfig(t *testing.T) {
rlf := ramledger.New(10)
conf := configtxgentest.Load(genesisconfig.SampleInsecureSoloProfile)
genesisBlock := encoder.New(conf).GenesisBlock()
lastConf := extractSysChanLastConfig(rlf, genesisBlock)
assert.Nil(t, lastConf)
rl, err := rlf.GetOrCreate(genesisconfig.TestChainID)
require.NoError(t, err)
err = rl.Append(genesisBlock)
require.NoError(t, err)
lastConf = extractSysChanLastConfig(rlf, genesisBlock)
assert.NotNil(t, lastConf)
assert.Equal(t, uint64(0), lastConf.Header.Number)
assert.Panics(t, func() {
_ = extractSysChanLastConfig(rlf, nil)
})
configTx, err := utils.CreateSignedEnvelope(common.HeaderType_CONFIG, genesisconfig.TestChainID, nil, &common.ConfigEnvelope{}, 0, 0)
require.NoError(t, err)
nextBlock := blockledger.CreateNextBlock(rl, []*common.Envelope{configTx})
nextBlock.Metadata.Metadata[common.BlockMetadataIndex_LAST_CONFIG] = utils.MarshalOrPanic(&common.Metadata{
Value: utils.MarshalOrPanic(&common.LastConfig{Index: rl.Height()}),
})
err = rl.Append(nextBlock)
require.NoError(t, err)
lastConf = extractSysChanLastConfig(rlf, genesisBlock)
assert.NotNil(t, lastConf)
assert.Equal(t, uint64(1), lastConf.Header.Number)
}
func TestSelectClusterBootBlock(t *testing.T) {
bootstrapBlock := &common.Block{Header: &common.BlockHeader{Number: 100}}
lastConfBlock := &common.Block{Header: &common.BlockHeader{Number: 100}}
clusterBoot := selectClusterBootBlock(bootstrapBlock, nil)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(100), clusterBoot.Header.Number)
assert.True(t, bootstrapBlock == clusterBoot)
clusterBoot = selectClusterBootBlock(bootstrapBlock, lastConfBlock)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(100), clusterBoot.Header.Number)
assert.True(t, bootstrapBlock == clusterBoot)
lastConfBlock.Header.Number = 200
clusterBoot = selectClusterBootBlock(bootstrapBlock, lastConfBlock)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(200), clusterBoot.Header.Number)
assert.True(t, lastConfBlock == clusterBoot)
bootstrapBlock.Header.Number = 300
clusterBoot = selectClusterBootBlock(bootstrapBlock, lastConfBlock)
assert.NotNil(t, clusterBoot)
assert.Equal(t, uint64(300), clusterBoot.Header.Number)
assert.True(t, bootstrapBlock == clusterBoot)
}
func TestLoadLocalMSP(t *testing.T) {
t.Run("Happy", func(t *testing.T) {
assert.NotPanics(t, func() {
localMSPDir, _ := configtest.GetDevMspDir()
initializeLocalMsp(
&localconfig.TopLevel{
General: localconfig.General{
LocalMSPDir: localMSPDir,
LocalMSPID: "SampleOrg",
BCCSP: &factory.FactoryOpts{
ProviderName: "SW",
SwOpts: &factory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: true,
},
},
},
})
})
})
t.Run("Error", func(t *testing.T) {
oldLogger := logger
defer func() { logger = oldLogger }()
logger, _ = floggingtest.NewTestLogger(t)
assert.Panics(t, func() {
initializeLocalMsp(
&localconfig.TopLevel{
General: localconfig.General{
LocalMSPDir: "",
LocalMSPID: "",
},
})
})
})
}
func TestInitializeMultiChainManager(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
conf := genesisConfig(t)
assert.NotPanics(t, func() {
initializeLocalMsp(conf)
lf, _ := createLedgerFactory(conf, &disabled.Provider{})
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
initializeMultichannelRegistrar(bootBlock, &replicationInitiator{}, &cluster.PredicateDialer{}, comm.ServerConfig{}, nil, conf, localmsp.NewSigner(), &disabled.Provider{}, &mocks.HealthChecker{}, lf)
})
}
func TestInitializeGrpcServer(t *testing.T) {
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
host := strings.Split(listenAddr, ":")[0]
port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16)
conf := &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: host,
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: false,
ClientAuthRequired: false,
},
},
}
assert.NotPanics(t, func() {
grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil))
grpcServer.Listener().Close()
})
}
func TestUpdateTrustedRoots(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
initializeLocalMsp(genesisConfig(t))
// get a free random port
listenAddr := func() string {
l, _ := net.Listen("tcp", "localhost:0")
l.Close()
return l.Addr().String()
}()
port, _ := strconv.ParseUint(strings.Split(listenAddr, ":")[1], 10, 16)
conf := &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: "localhost",
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: false,
ClientAuthRequired: false,
},
},
}
grpcServer := initializeGrpcServer(conf, initializeServerConfig(conf, nil))
caSupport := &comm.CredentialSupport{
AppRootCAsByChain: make(map[string]comm.CertificateBundle),
OrdererRootCAsByChainAndOrg: make(comm.OrgRootCAs),
}
callback := func(bundle *channelconfig.Bundle) {
if grpcServer.MutualTLSRequired() {
t.Log("callback called")
updateTrustedRoots(caSupport, bundle, grpcServer)
}
}
lf, _ := createLedgerFactory(conf, &disabled.Provider{})
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
initializeMultichannelRegistrar(bootBlock, &replicationInitiator{}, &cluster.PredicateDialer{}, comm.ServerConfig{}, nil, genesisConfig(t), localmsp.NewSigner(), &disabled.Provider{}, &mocks.HealthChecker{}, lf, callback)
t.Logf("# app CAs: %d", len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID]))
t.Logf("# orderer CAs: %d", len(caSupport.OrdererRootCAsByChainAndOrg[genesisconfig.TestChainID]["SampleOrg"]))
// mutual TLS not required so no updates should have occurred
assert.Equal(t, 0, len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID]))
assert.Equal(t, 0, len(caSupport.OrdererRootCAsByChainAndOrg[genesisconfig.TestChainID]["SampleOrg"]))
grpcServer.Listener().Close()
conf = &localconfig.TopLevel{
General: localconfig.General{
ListenAddress: "localhost",
ListenPort: uint16(port),
TLS: localconfig.TLS{
Enabled: true,
ClientAuthRequired: true,
PrivateKey: filepath.Join(".", "testdata", "tls", "server.key"),
Certificate: filepath.Join(".", "testdata", "tls", "server.crt"),
},
},
}
grpcServer = initializeGrpcServer(conf, initializeServerConfig(conf, nil))
caSupport = &comm.CredentialSupport{
AppRootCAsByChain: make(map[string]comm.CertificateBundle),
OrdererRootCAsByChainAndOrg: make(comm.OrgRootCAs),
}
clusterConf := initializeClusterClientConfig(conf, true, nil)
predDialer := &cluster.PredicateDialer{
ClientConfig: clusterConf,
}
callback = func(bundle *channelconfig.Bundle) {
if grpcServer.MutualTLSRequired() {
t.Log("callback called")
updateTrustedRoots(caSupport, bundle, grpcServer)
updateClusterDialer(caSupport, predDialer, clusterConf.SecOpts.ServerRootCAs)
}
}
initializeMultichannelRegistrar(
bootBlock,
&replicationInitiator{},
predDialer,
comm.ServerConfig{},
nil,
genesisConfig(t),
localmsp.NewSigner(),
&disabled.Provider{},
&server_mocks.HealthChecker{},
lf,
callback,
)
t.Logf("# app CAs: %d", len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID]))
t.Logf("# orderer CAs: %d", len(caSupport.OrdererRootCAsByChainAndOrg[genesisconfig.TestChainID]["SampleOrg"]))
// mutual TLS is required so updates should have occurred
// we expect an intermediate and root CA for apps and orderers
assert.Equal(t, 2, len(caSupport.AppRootCAsByChain[genesisconfig.TestChainID]))
assert.Equal(t, 2, len(caSupport.OrdererRootCAsByChainAndOrg[genesisconfig.TestChainID]["SampleOrg"]))
assert.Len(t, predDialer.ClientConfig.SecOpts.ServerRootCAs, 2)
grpcServer.Listener().Close()
}
func TestConfigureClusterListener(t *testing.T) {
logEntries := make(chan string, 100)
allocatePort := func() uint16 {
l, err := net.Listen("tcp", "127.0.0.1:0")
assert.NoError(t, err)
_, portStr, err := net.SplitHostPort(l.Addr().String())
assert.NoError(t, err)
port, err := strconv.ParseInt(portStr, 10, 64)
assert.NoError(t, err)
assert.NoError(t, l.Close())
t.Log("picked unused port", port)
return uint16(port)
}
unUsedPort := allocatePort()
backupLogger := logger
logger = logger.With(zap.Hooks(func(entry zapcore.Entry) error {
logEntries <- entry.Message
return nil
}))
defer func() {
logger = backupLogger
}()
ca, err := tlsgen.NewCA()
assert.NoError(t, err)
serverKeyPair, err := ca.NewServerCertKeyPair("127.0.0.1")
assert.NoError(t, err)
loadPEM := func(fileName string) ([]byte, error) {
switch fileName {
case "cert":
return serverKeyPair.Cert, nil
case "key":
return serverKeyPair.Key, nil
case "ca":
return ca.CertBytes(), nil
default:
return nil, errors.New("I/O error")
}
}
for _, testCase := range []struct {
name string
conf *localconfig.TopLevel
generalConf comm.ServerConfig
generalSrv *comm.GRPCServer
shouldBeEqual bool
expectedPanic string
expectedLogEntries []string
}{
{
name: "no separate listener",
shouldBeEqual: true,
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{},
generalSrv: &comm.GRPCServer{},
},
{
name: "partial configuration",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenPort: 5000,
},
},
},
expectedPanic: "Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, " +
"General.Cluster.ServerCertificate, General.Cluster.ServerPrivateKey, should be defined altogether.",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, " +
"General.Cluster.ServerCertificate," +
" General.Cluster.ServerPrivateKey, should be defined altogether."},
},
{
name: "invalid certificate",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "bad",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: "Failed to load cluster server certificate from 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load cluster server certificate from 'bad' (I/O error)"},
},
{
name: "invalid key",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "bad",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: "Failed to load cluster server key from 'bad' (I/O error)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load cluster server certificate from 'bad' (I/O error)"},
},
{
name: "invalid ca cert",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"bad"},
},
},
},
expectedPanic: "Failed to load CA cert file 'I/O error' (bad)",
generalSrv: &comm.GRPCServer{},
expectedLogEntries: []string{"Failed to load CA cert file 'I/O error' (bad)"},
},
{
name: "bad listen address",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "99.99.99.99",
ListenPort: unUsedPort,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
expectedPanic: fmt.Sprintf("Failed creating gRPC server on 99.99.99.99:%d due "+
"to listen tcp 99.99.99.99:%d:", unUsedPort, unUsedPort),
generalSrv: &comm.GRPCServer{},
},
{
name: "green path",
generalConf: comm.ServerConfig{},
conf: &localconfig.TopLevel{
General: localconfig.General{
Cluster: localconfig.Cluster{
ListenAddress: "127.0.0.1",
ListenPort: 5000,
ServerPrivateKey: "key",
ServerCertificate: "cert",
RootCAs: []string{"ca"},
},
},
},
generalSrv: &comm.GRPCServer{},
},
} {
t.Run(testCase.name, func(t *testing.T) {
if testCase.shouldBeEqual {
conf, srv := configureClusterListener(testCase.conf, testCase.generalConf, testCase.generalSrv, loadPEM)
assert.Equal(t, conf, testCase.generalConf)
assert.Equal(t, srv, testCase.generalSrv)
}
if testCase.expectedPanic != "" {
f := func() {
configureClusterListener(testCase.conf, testCase.generalConf, testCase.generalSrv, loadPEM)
}
assert.Contains(t, panicMsg(f), testCase.expectedPanic)
} else {
configureClusterListener(testCase.conf, testCase.generalConf, testCase.generalSrv, loadPEM)
}
// Ensure logged messages that are expected were all logged
var loggedMessages []string
for len(logEntries) > 0 {
logEntry := <-logEntries
loggedMessages = append(loggedMessages, logEntry)
}
assert.Subset(t, testCase.expectedLogEntries, loggedMessages)
})
}
}
func TestInitializeEtcdraftConsenter(t *testing.T) {
consenters := make(map[string]consensus.Consenter)
rlf := ramledger.New(10)
conf := configtxgentest.Load(genesisconfig.SampleInsecureSoloProfile)
genesisBlock := encoder.New(conf).GenesisBlock()
ca, _ := tlsgen.NewCA()
crt, _ := ca.NewServerCertKeyPair("127.0.0.1")
srv, err := comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{})
assert.NoError(t, err)
initializeEtcdraftConsenter(consenters,
&localconfig.TopLevel{},
rlf,
&cluster.PredicateDialer{},
genesisBlock, &replicationInitiator{},
comm.ServerConfig{
SecOpts: &comm.SecureOptions{
Certificate: crt.Cert,
Key: crt.Key,
UseTLS: true,
},
}, srv, &multichannel.Registrar{}, &disabled.Provider{})
assert.NotNil(t, consenters["etcdraft"])
}
func genesisConfig(t *testing.T) *localconfig.TopLevel {
t.Helper()
localMSPDir, _ := configtest.GetDevMspDir()
return &localconfig.TopLevel{
General: localconfig.General{
LedgerType: "ram",
GenesisMethod: "provisional",
GenesisProfile: "SampleDevModeSolo",
SystemChannel: genesisconfig.TestChainID,
LocalMSPDir: localMSPDir,
LocalMSPID: "SampleOrg",
BCCSP: &factory.FactoryOpts{
ProviderName: "SW",
SwOpts: &factory.SwOpts{
HashFamily: "SHA2",
SecLevel: 256,
Ephemeral: true,
},
},
},
}
}
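// panicMsg runs f, recovers the panic it raises, and returns the recovered
// panic value asserted to a string; callers expect f to panic with a string message.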
func panicMsg(f func()) string {
var message interface{}
func() {
defer func() {
message = recover()
}()
f()
}()
return message.(string)
}
func TestCreateReplicator(t *testing.T) {
cleanup := configtest.SetDevFabricConfigPath(t)
defer cleanup()
bootBlock := encoder.New(genesisconfig.Load(genesisconfig.SampleDevModeSoloProfile)).GenesisBlockForChannel("system")
iterator := &deliver_mocks.BlockIterator{}
iterator.NextReturnsOnCall(0, bootBlock, common.Status_SUCCESS)
iterator.NextReturnsOnCall(1, bootBlock, common.Status_SUCCESS)
ledger := &ledger_mocks.ReadWriter{}
ledger.On("Height").Return(uint64(1))
ledger.On("Iterator", mock.Anything).Return(iterator, uint64(1))
ledgerFactory := &server_mocks.Factory{}
ledgerFactory.On("GetOrCreate", "mychannel").Return(ledger, nil)
ledgerFactory.On("ChainIDs").Return([]string{"mychannel"})
signer := &crypto.LocalSigner{}
r := createReplicator(ledgerFactory, bootBlock, &localconfig.TopLevel{}, &comm.SecureOptions{}, signer)
err := r.verifierRetriever.RetrieveVerifier("mychannel").VerifyBlockSignature(nil, nil)
assert.EqualError(t, err, "implicit policy evaluation failed - 0 sub-policies were satisfied, but this policy requires 1 of the 'Writers' sub-policies to be satisfied")
err = r.verifierRetriever.RetrieveVerifier("system").VerifyBlockSignature(nil, nil)
assert.NoError(t, err)
}
|
[
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_SPEC\""
] |
[] |
[
"FABRIC_LOGGING_SPEC"
] |
[]
|
["FABRIC_LOGGING_SPEC"]
|
go
| 1 | 0 | |
ip-messaging/rest/roles/update-role/update-role.7.x.java
|
// Install the Java helper library from twilio.com/docs/java/install
import java.util.*;
import com.twilio.Twilio;
import com.twilio.rest.chat.v2.service.Role;
public class Example {
// Get your Account SID and Auth Token from https://twilio.com/console
// To set up environment variables, see http://twil.io/secure
public static final String ACCOUNT_SID = System.getenv("TWILIO_ACCOUNT_SID");
public static final String AUTH_TOKEN = System.getenv("TWILIO_AUTH_TOKEN");
public static final String SERVICE_SID = System.getenv("TWILIO_SERVICE_SID");
public static final String ROLE_SID = "RLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
public static void main(String[] args) {
// Initialize the client
Twilio.init(ACCOUNT_SID, AUTH_TOKEN);
Role role = Role.fetcher(SERVICE_SID, ROLE_SID).fetch();
List<String> newPermissions = new ArrayList<>(Arrays.asList("sendMediaMessage"));
newPermissions.addAll(role.getPermissions());
// Update the role
role = Role.updater(SERVICE_SID, ROLE_SID, newPermissions).update();
}
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\"",
"\"TWILIO_SERVICE_SID\""
] |
[] |
[
"TWILIO_SERVICE_SID",
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_SERVICE_SID", "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
java
| 3 | 0 | |
python/roboscheduler/tests/conftest.py
|
# encoding: utf-8
#
# conftest.py
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
"""
Here you can add fixtures that will be used for all the tests in this
directory. You can also add conftest.py files in underlying subdirectories.
Those conftest.py files will only be applied to the tests in that subdirectory and
underlying directories. See https://docs.pytest.org/en/2.7.3/plugins.html for
more information.
"""
rs_dir = os.path.abspath(__file__).split("/python/roboscheduler/")[0]
os.environ["ROBOSCHEDULER_DIR"] = rs_dir
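# Illustrative sketch only (not part of the original conftest): a fixture that
# tests in this directory could request by name. The fixture name "rs_dir_path"
# is a hypothetical example, not existing roboscheduler API.
import pytest


@pytest.fixture(scope="session")
def rs_dir_path():
    """Return the repository root resolved above, once per test session."""
    return rs_dir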
|
[] |
[] |
[
"ROBOSCHEDULER_DIR"
] |
[]
|
["ROBOSCHEDULER_DIR"]
|
python
| 1 | 0 | |
app/funct.py
|
# -*- coding: utf-8 -*-
import cgi
import os, sys
form = cgi.FieldStorage()
serv = form.getvalue('serv')
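# Return sys.path[0] when its last path component is "app", otherwise the
# parent directory of sys.path[0].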
def get_app_dir():
d = sys.path[0]
d = d.split('/')[-1]
return sys.path[0] if d == "app" else os.path.dirname(sys.path[0])
def get_config_var(sec, var):
from configparser import ConfigParser, ExtendedInterpolation
try:
path_config = get_app_dir()+"/haproxy-wi.cfg"
config = ConfigParser(interpolation=ExtendedInterpolation())
config.read(path_config)
except:
print('Content-type: text/html\n')
print('<center><div class="alert alert-danger">Check that the config file exists and that the path is correct. Must be: haproxy-wi.cfg in the app directory</div>')
try:
return config.get(sec, var)
except:
print('Content-type: text/html\n')
print('<center><div class="alert alert-danger">Check the config file: section %s and parameter %s must be present</div>' % (sec, var))
def get_data(type):
from datetime import datetime
from pytz import timezone
import sql
now_utc = datetime.now(timezone(sql.get_setting('time_zone')))
if type == 'config':
fmt = "%Y-%m-%d.%H:%M:%S"
if type == 'logs':
fmt = '%Y%m%d'
if type == "date_in_log":
fmt = "%b %d %H:%M:%S"
return now_utc.strftime(fmt)
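# Append an entry to the appropriate log file under log_path: the checker-,
# metrics- or keep_alive- log when the matching kwarg is set, otherwise the
# config_edit log, including the client IP and the user name resolved from the
# session cookie.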
def logging(serv, action, **kwargs):
import sql
import http.cookies
log_path = get_config_var('main', 'log_path')
login = ''
if not os.path.exists(log_path):
os.makedirs(log_path)
try:
IP = cgi.escape(os.environ["REMOTE_ADDR"])
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
user_uuid = cookie.get('uuid')
login = sql.get_user_name_by_uuid(user_uuid.value)
except:
pass
if kwargs.get('alerting') == 1:
mess = get_data('date_in_log') + action + "\n"
log = open(log_path + "/checker-"+get_data('logs')+".log", "a")
elif kwargs.get('metrics') == 1:
mess = get_data('date_in_log') + action + "\n"
log = open(log_path + "/metrics-"+get_data('logs')+".log", "a")
elif kwargs.get('keep_alive') == 1:
mess = get_data('date_in_log') + action + "\n"
log = open(log_path + "/keep_alive-"+get_data('logs')+".log", "a")
else:
mess = get_data('date_in_log') + " from " + IP + " user: " + login + " " + action + " for: " + serv + "\n"
log = open(log_path + "/config_edit-"+get_data('logs')+".log", "a")
try:
log.write(mess)
log.close()
except IOError as e:
print('<center><div class="alert alert-danger">Can\'t write log. Please check log_path in config %e</div></center>' % e)
pass
def telegram_send_mess(mess, **kwargs):
import telebot
from telebot import apihelper
import sql
telegrams = sql.get_telegram_by_ip(kwargs.get('ip'))
proxy = sql.get_setting('proxy')
for telegram in telegrams:
token_bot = telegram[1]
channel_name = telegram[2]
if proxy is not None:
apihelper.proxy = {'https': proxy}
try:
bot = telebot.TeleBot(token=token_bot)
bot.send_message(chat_id=channel_name, text=mess)
except:
print("Fatal: Can't send message. Add Telegram chanel before use alerting at this servers group")
sys.exit()
def check_login(**kwargs):
import sql
import http.cookies
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
user_uuid = cookie.get('uuid')
ref = os.environ.get("SCRIPT_NAME")
sql.delete_old_uuid()
if user_uuid is not None:
sql.update_last_act_user(user_uuid.value)
if sql.get_user_name_by_uuid(user_uuid.value) is None:
print('<meta http-equiv="refresh" content="0; url=login.py?ref=%s">' % ref)
else:
print('<meta http-equiv="refresh" content="0; url=login.py?ref=%s">' % ref)
def is_admin(**kwargs):
import sql
import http.cookies
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
user_id = cookie.get('uuid')
try:
role = sql.get_user_role_by_uuid(user_id.value)
except:
role = 3
pass
level = kwargs.get("level")
if level is None:
level = 1
try:
return True if role <= level else False
except:
return False
pass
def page_for_admin(**kwargs):
give_level = 1
give_level = kwargs.get("level")
if not is_admin(level = give_level):
print('<center><h3 style="color: red">How did you get here?! O_o You do not have the required permissions</h3>')
print('<meta http-equiv="refresh" content="5; url=/">')
import sys
sys.exit()
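# Open an SSH connection to serv using the credentials stored in the database:
# key-based auth when SSH keys are enabled for the server, password auth
# otherwise. Returns a connected SSHClient, or an error string on failure.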
def ssh_connect(serv, **kwargs):
import paramiko
from paramiko import SSHClient
import sql
fullpath = get_config_var('main', 'fullpath')
ssh_enable = ''
ssh_port = ''
ssh_user_name = ''
ssh_user_password = ''
for sshs in sql.select_ssh(serv=serv):
ssh_enable = sshs[3]
ssh_user_name = sshs[4]
ssh_user_password = sshs[5]
ssh_key_name = fullpath+'/keys/%s.pem' % sshs[2]
servers = sql.select_servers(server=serv)
for server in servers:
ssh_port = server[10]
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
if ssh_enable == 1:
k = paramiko.RSAKey.from_private_key_file(ssh_key_name)
ssh.connect(hostname = serv, port = ssh_port, username = ssh_user_name, pkey = k)
else:
ssh.connect(hostname = serv, port = ssh_port, username = ssh_user_name, password = ssh_user_password)
return ssh
except paramiko.AuthenticationException:
return 'Authentication failed, please verify your credentials'
pass
except paramiko.SSHException as sshException:
return 'Unable to establish SSH connection: %s ' % sshException
pass
except paramiko.BadHostKeyException as badHostKeyException:
return 'Unable to verify server\'s host key: %s ' % badHostKeyException
pass
except Exception as e:
if e == "No such file or directory":
return '%s. Check ssh key' % e
pass
elif e == "Invalid argument":
error = 'Check the IP of the server'
pass
else:
error = e
pass
return str(error)
def get_config(serv, cfg, **kwargs):
import sql
config_path = "/etc/keepalived/keepalived.conf" if kwargs.get("keepalived") else sql.get_setting('haproxy_config_path')
ssh = ssh_connect(serv)
try:
sftp = ssh.open_sftp()
sftp.get(config_path, cfg)
sftp.close()
ssh.close()
except Exception as e:
ssh = str(e)
return ssh
def diff_config(oldcfg, cfg):
log_path = get_config_var('main', 'log_path')
diff = ""
date = get_data('date_in_log')
cmd="/bin/diff -ub %s %s" % (oldcfg, cfg)
output, stderr = subprocess_execute(cmd)
for line in output:
diff += date + " " + line + "\n"
try:
log = open(log_path + "/config_edit-"+get_data('logs')+".log", "a")
log.write(diff)
log.close()
except IOError:
print('<center><div class="alert alert-danger">Can\'t write the change to the log. %s</div></center>' % stderr)
pass
def install_haproxy(serv, **kwargs):
import sql
script = "install_haproxy.sh"
tmp_config_path = sql.get_setting('tmp_config_path')
haproxy_sock_port = sql.get_setting('haproxy_sock_port')
stats_port = sql.get_setting('stats_port')
server_state_file = sql.get_setting('server_state_file')
stats_user = sql.get_setting('stats_user')
stats_password = sql.get_setting('stats_password')
proxy = sql.get_setting('proxy')
os.system("cp scripts/%s ." % script)
proxy_serv = proxy if proxy is not None else ""
commands = [ "sudo chmod +x "+tmp_config_path+script+" && " +tmp_config_path+"/"+script +" PROXY=" + proxy_serv+
" SOCK_PORT="+haproxy_sock_port+" STAT_PORT="+stats_port+" STAT_FILE="+server_state_file+
" STATS_USER="+stats_user+" STATS_PASS="+stats_password ]
error = str(upload(serv, tmp_config_path, script))
if error:
print('error: '+error)
os.system("rm -f %s" % script)
ssh_command(serv, commands, print_out="1")
if kwargs.get('syn_flood') == "1":
syn_flood_protect(serv)
def syn_flood_protect(serv, **kwargs):
import sql
script = "syn_flood_protect.sh"
tmp_config_path = sql.get_setting('tmp_config_path')
enable = "disable" if kwargs.get('enable') == "0" else "enable"
os.system("cp scripts/%s ." % script)
commands = [ "sudo chmod +x "+tmp_config_path+script, tmp_config_path+script+ " "+enable ]
error = str(upload(serv, tmp_config_path, script))
if error:
print('error: '+error)
os.system("rm -f %s" % script)
ssh_command(serv, commands, print_out="1")
def waf_install(serv, **kwargs):
import sql
script = "waf.sh"
tmp_config_path = sql.get_setting('tmp_config_path')
proxy = sql.get_setting('proxy')
haproxy_dir = sql.get_setting('haproxy_dir')
ver = check_haproxy_version(serv)
os.system("cp scripts/%s ." % script)
commands = [ "sudo chmod +x "+tmp_config_path+script+" && " +tmp_config_path+script +" PROXY=" + proxy+
" HAPROXY_PATH="+haproxy_dir +" VERSION="+ver ]
error = str(upload(serv, tmp_config_path, script))
if error:
print('error: '+error)
os.system("rm -f %s" % script)
stderr = ssh_command(serv, commands, print_out="1")
if stderr is None:
sql.insert_waf_metrics_enable(serv, "0")
def check_haproxy_version(serv):
import sql
haproxy_sock_port = sql.get_setting('haproxy_sock_port')
ver = ""
cmd="echo 'show info' |nc %s %s |grep Version |awk '{print $2}'" % (serv, haproxy_sock_port)
output, stderr = subprocess_execute(cmd)
for line in output:
ver = line
return ver
def upload(serv, path, file, **kwargs):
error = ""
full_path = path + file
if kwargs.get('dir') == "fullpath":
full_path = path
try:
ssh = ssh_connect(serv)
except Exception as e:
error = e
pass
try:
sftp = ssh.open_sftp()
file = sftp.put(file, full_path)
sftp.close()
ssh.close()
except Exception as e:
error = e
pass
return error
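# Upload cfg to a temporary path on serv and, depending on just_save, only
# check it, check and save it, or check, save and restart the service
# (haproxy by default, keepalived when keepalived=1).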
def upload_and_restart(serv, cfg, **kwargs):
import sql
tmp_file = sql.get_setting('tmp_config_path') + "/" + get_data('config') + ".cfg"
error = ""
try:
os.system("dos2unix "+cfg)
except OSError:
return 'Please install dos2unix'
pass
if kwargs.get("keepalived") == 1:
if kwargs.get("just_save") == "save":
commands = [ "sudo mv -f " + tmp_file + " /etc/keepalived/keepalived.conf" ]
else:
commands = [ "sudo mv -f " + tmp_file + " /etc/keepalived/keepalived.conf && sudo systemctl restart keepalived" ]
else:
if kwargs.get("just_save") == "test":
commands = [ "sudo haproxy -q -c -f " + tmp_file + "&& sudo rm -f " + tmp_file ]
elif kwargs.get("just_save") == "save":
commands = [ "sudo haproxy -q -c -f " + tmp_file + "&& sudo mv -f " + tmp_file + " " + sql.get_setting('haproxy_config_path') ]
else:
commands = [ "sudo haproxy -q -c -f " + tmp_file + "&& sudo mv -f " + tmp_file + " " + sql.get_setting('haproxy_config_path') + " && sudo " + sql.get_setting('restart_command') ]
if sql.get_setting('firewall_enable') == "1":
commands.extend(open_port_firewalld(cfg))
error += str(upload(serv, tmp_file, cfg, dir='fullpath'))
try:
error += ssh_command(serv, commands)
except Exception as e:
error += e
if error:
return error
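# Parse the "bind" lines of cfg and return the firewall-cmd commands needed to
# permanently open each bound port, followed by a firewall reload.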
def open_port_firewalld(cfg):
try:
conf = open(cfg, "r")
except IOError:
print('<div class="alert alert-danger">Can\'t read export config file</div>')
firewalld_commands = []
for line in conf:
if "bind" in line:
bind = line.split(":")
bind[1] = bind[1].strip(' ')
bind = bind[1].split("ssl")
bind = bind[0].strip(' \t\n\r')
firewalld_commands.append('sudo firewall-cmd --zone=public --add-port=%s/tcp --permanent' % bind)
firewalld_commands.append('sudo firewall-cmd --reload')
return firewalld_commands
def check_haproxy_config(serv):
import sql
commands = [ "haproxy -q -c -f %s" % sql.get_setting('haproxy_config_path') ]
ssh = ssh_connect(serv)
for command in commands:
stdin , stdout, stderr = ssh.exec_command(command, get_pty=True)
if not stderr.read():
return True
else:
return False
ssh.close()
def show_log(stdout):
i = 0
for line in stdout:
i = i + 1
line_class = "line3" if i % 2 == 0 else "line"
print('<div class="'+line_class+'">' + escape_html(line) + '</div>')
def show_ip(stdout):
for line in stdout:
print(line)
def server_status(stdout):
proc_count = ""
for line in stdout:
if "Ncat: " not in line:
for k in line:
proc_count = k.split(":")[1]
else:
proc_count = 0
return proc_count
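# Run each command on serv over SSH. Output handling depends on kwargs: ip,
# show_log and server_status pass stdout to the matching helper, print_out
# prints and returns it, retunr_err returns stderr, otherwise stdout is
# returned as text.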
def ssh_command(serv, commands, **kwargs):
ssh = ssh_connect(serv)
for command in commands:
try:
stdin, stdout, stderr = ssh.exec_command(command, get_pty=True)
except:
continue
if kwargs.get("ip") == "1":
show_ip(stdout)
elif kwargs.get("show_log") == "1":
show_log(stdout)
elif kwargs.get("server_status") == "1":
server_status(stdout)
elif kwargs.get('print_out'):
out = stdout.read().decode(encoding='UTF-8')
print(out)
return out
elif kwargs.get('retunr_err') == 1:
return stderr.read().decode(encoding='UTF-8')
else:
return stdout.read().decode(encoding='UTF-8')
for line in stderr.read().decode(encoding='UTF-8'):
if line:
print("<div class='alert alert-warning'>"+line+"</div>")
try:
ssh.close()
except:
print("<div class='alert alert-danger' style='margin: 0;'>"+str(ssh)+"<a title='Close' id='errorMess'><b>X</b></a></div>")
pass
def escape_html(text):
return cgi.escape(text, quote=True)
def subprocess_execute(cmd):
import subprocess
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True)
stdout, stderr = p.communicate()
output = stdout.splitlines()
return output, stderr
def show_backends(serv, **kwargs):
import json
import sql
haproxy_sock_port = sql.get_setting('haproxy_sock_port')
cmd='echo "show backend" |nc %s %s' % (serv, haproxy_sock_port)
output, stderr = subprocess_execute(cmd)
ret = ""
for line in output:
if "#" in line or "stats" in line:
continue
if line != "":
back = json.dumps(line).split("\"")
if kwargs.get('ret'):
ret += back[1]
ret += "<br />"
else:
print(back[1], end="<br>")
if kwargs.get('ret'):
return ret
def get_files(dir = get_config_var('configs', 'haproxy_save_configs_dir'), format = 'cfg', **kwargs):
import glob
file = set()
return_files = set()
for files in glob.glob(os.path.join(dir,'*.'+format)):
file.add(files.split('/')[-1])
files = sorted(file, reverse=True)
if format == 'cfg':
for file in files:
ip = file.split("-")
if serv == ip[0]:
return_files.add(file)
return sorted(return_files, reverse=True)
else:
return files
def get_key(item):
return item[0]
|
[] |
[] |
[
"HTTP_COOKIE",
"SCRIPT_NAME",
"REMOTE_ADDR"
] |
[]
|
["HTTP_COOKIE", "SCRIPT_NAME", "REMOTE_ADDR"]
|
python
| 3 | 0 | |
app/config.example.py
|
"""
Global Flask Application Setting
See `.flaskenv` for default settings.
"""
import os
from app import app
class Config(object):
# If not set fall back to production for safety
FLASK_ENV = os.getenv('FLASK_ENV', 'production')
print("FLASK_ENV:", FLASK_ENV)
# Set FLASK_SECRET on your production Environment
SECRET_KEY = os.getenv('FLASK_SECRET', 'Secret')
# Mysql config
HOST = os.getenv('MYSQL_HOST', '')
PORT = os.getenv('MYSQL_PORT', '3306')
DATABASE = os.getenv('MYSQL_DATABASE', 'grad')
USERNAME = os.getenv('MYSQL_USERNAME', 'root')
PASSWORD = os.getenv('MYSQL_PASSWORD', '')
APP_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.dirname(APP_DIR)
DIST_DIR = os.path.join(ROOT_DIR, 'dist')
UPLOAD_DIR = os.path.join(ROOT_DIR, 'upload')
DB_URI = "mysql+pymysql://{username}:{password}@{host}:{port}/{db}?charset=utf8".format(username=USERNAME,
password=PASSWORD,
host=HOST, port=PORT,
db=DATABASE)
SQLALCHEMY_DATABASE_URI = DB_URI
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
# Redis
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
REDIS_PWD = os.getenv('REDIS_PWD', "")
# auth key setting
AUTH_KEY = "authorized"
# admin pwd
ADMIN_PWD = ""
PRIVATE_KEY = ""
# JWT KEY
JWT_KEY = ""
SERVER_IP = "192.168.1.184"
SERVER_PORT = "9876"
if not os.path.exists(DIST_DIR):
raise Exception(
'DIST_DIR not found: {}'.format(DIST_DIR))
app.config.from_object('app.config.Config')
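# Usage sketch (illustrative, not part of the original example): after this
# module is imported, settings are read through the Flask config mapping,
# e.g. app.config["SQLALCHEMY_DATABASE_URI"] or app.config["UPLOAD_DIR"].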
|
[] |
[] |
[
"REDIS_PORT",
"FLASK_ENV",
"MYSQL_PASSWORD",
"MYSQL_PORT",
"REDIS_HOST",
"MYSQL_USERNAME",
"FLASK_SECRET",
"MYSQL_DATABASE",
"REDIS_PWD",
"MYSQL_HOST"
] |
[]
|
["REDIS_PORT", "FLASK_ENV", "MYSQL_PASSWORD", "MYSQL_PORT", "REDIS_HOST", "MYSQL_USERNAME", "FLASK_SECRET", "MYSQL_DATABASE", "REDIS_PWD", "MYSQL_HOST"]
|
python
| 10 | 0 | |
node/defaults.go
|
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package node
import (
"os"
"os/user"
"path/filepath"
"runtime"
"github.com/kooksee/uspnet/p2p"
"github.com/kooksee/uspnet/p2p/nat"
)
const (
DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server
DefaultHTTPPort = 8545 // Default TCP port for the HTTP RPC server
DefaultWSHost = "localhost" // Default host interface for the websocket RPC server
DefaultWSPort = 8546 // Default TCP port for the websocket RPC server
)
// DefaultConfig contains reasonable default settings.
var DefaultConfig = Config{
DataDir: DefaultDataDir(),
HTTPPort: DefaultHTTPPort,
HTTPModules: []string{"net", "web3"},
WSPort: DefaultWSPort,
WSModules: []string{"net", "web3"},
P2P: p2p.Config{
ListenAddr: ":30303",
DiscoveryV5Addr: ":30304",
MaxPeers: 25,
NAT: nat.Any(),
},
}
// DefaultDataDir is the default data directory to use for the databases and other
// persistence requirements.
func DefaultDataDir() string {
// Try to place the data folder in the user's home dir
home := homeDir()
if home != "" {
if runtime.GOOS == "darwin" {
return filepath.Join(home, "Library", "Ethereum")
} else if runtime.GOOS == "windows" {
return filepath.Join(home, "AppData", "Roaming", "Ethereum")
} else {
return filepath.Join(home, ".ethereum")
}
}
// As we cannot guess a stable location, return empty and handle later
return ""
}
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
}
if usr, err := user.Current(); err == nil {
return usr.HomeDir
}
return ""
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
pkg/fsutil/fsutil_test.go
|
package fsutil
import (
"crypto/rand"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCleanFilename(t *testing.T) {
m := map[string]string{
`"§$%&aÜÄ*&b%§"'Ä"c%$"'"`: "a____b______c",
}
for k, v := range m {
out := CleanFilename(k)
t.Logf("%s -> %s / %s", k, v, out)
if out != v {
t.Errorf("'%s' != '%s'", out, v)
}
}
}
func TestCleanPath(t *testing.T) {
tempdir, err := ioutil.TempDir("", "gopass-")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempdir)
}()
m := map[string]string{
".": "",
"/home/user/../bob/.password-store": "/home/bob/.password-store",
"/home/user//.password-store": "/home/user/.password-store",
tempdir + "/foo.gpg": tempdir + "/foo.gpg",
}
usr, err := user.Current()
if err == nil {
hd := usr.HomeDir
if gph := os.Getenv("GOPASS_HOMEDIR"); gph != "" {
hd = gph
}
m["~/.password-store"] = hd + "/.password-store"
}
for in, out := range m {
got := CleanPath(in)
// filepath.Abs turns /home/bob into C:\home\bob on Windows
absOut, err := filepath.Abs(out)
assert.NoError(t, err)
assert.Equal(t, absOut, got)
}
}
func TestIsDir(t *testing.T) {
tempdir, err := ioutil.TempDir("", "gopass-")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempdir)
}()
fn := filepath.Join(tempdir, "foo")
assert.NoError(t, ioutil.WriteFile(fn, []byte("bar"), 0644))
assert.Equal(t, true, IsDir(tempdir))
assert.Equal(t, false, IsDir(fn))
assert.Equal(t, false, IsDir(filepath.Join(tempdir, "non-existing")))
}
func TestIsFile(t *testing.T) {
tempdir, err := ioutil.TempDir("", "gopass-")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempdir)
}()
fn := filepath.Join(tempdir, "foo")
assert.NoError(t, ioutil.WriteFile(fn, []byte("bar"), 0644))
assert.Equal(t, false, IsFile(tempdir))
assert.Equal(t, true, IsFile(fn))
}
func TestShred(t *testing.T) {
tempdir, err := ioutil.TempDir("", "gopass-")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempdir)
}()
fn := filepath.Join(tempdir, "file")
// test successful shred
fh, err := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE, 0644)
assert.NoError(t, err)
buf := make([]byte, 1024)
for i := 0; i < 10*1024; i++ {
_, _ = rand.Read(buf)
_, _ = fh.Write(buf)
}
_ = fh.Close()
assert.NoError(t, Shred(fn, 8))
assert.Equal(t, false, IsFile(fn))
// test failed
fh, err = os.OpenFile(fn, os.O_WRONLY|os.O_CREATE, 0400)
assert.NoError(t, err)
buf = make([]byte, 1024)
for i := 0; i < 10*1024; i++ {
_, _ = rand.Read(buf)
_, _ = fh.Write(buf)
}
_ = fh.Close()
assert.Error(t, Shred(fn, 8))
assert.Equal(t, true, IsFile(fn))
}
func TestIsEmptyDir(t *testing.T) {
tempdir, err := ioutil.TempDir("", "gopass-")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempdir)
}()
fn := filepath.Join(tempdir, "foo", "bar", "baz", "zab")
require.NoError(t, os.MkdirAll(fn, 0755))
isEmpty, err := IsEmptyDir(tempdir)
assert.NoError(t, err)
assert.Equal(t, true, isEmpty)
fn = filepath.Join(fn, ".config.yml")
require.NoError(t, ioutil.WriteFile(fn, []byte("foo"), 0644))
isEmpty, err = IsEmptyDir(tempdir)
require.NoError(t, err)
assert.Equal(t, false, isEmpty)
}
|
[
"\"GOPASS_HOMEDIR\""
] |
[] |
[
"GOPASS_HOMEDIR"
] |
[]
|
["GOPASS_HOMEDIR"]
|
go
| 1 | 0 | |
vendor/code.cloudfoundry.org/buildpackapplifecycle/credhub/credhub.go
|
package credhub
import (
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"code.cloudfoundry.org/buildpackapplifecycle/containerpath"
"code.cloudfoundry.org/goshims/osshim"
api "github.com/cloudfoundry-incubator/credhub-cli/credhub"
)
type Credhub struct {
os osshim.Os
pathFor func(path ...string) string
}
func New(os osshim.Os) *Credhub {
return &Credhub{
os: os,
pathFor: containerpath.New(os.Getenv("USERPROFILE")).For,
}
}
func (c *Credhub) InterpolateServiceRefs(credhubURI string) error {
if !strings.Contains(c.os.Getenv("VCAP_SERVICES"), `"credhub-ref"`) {
return nil
}
ch, err := c.credhubClient(credhubURI)
if err != nil {
return fmt.Errorf("Unable to set up credhub client: %v", err)
}
interpolatedServices, err := ch.InterpolateString(c.os.Getenv("VCAP_SERVICES"))
if err != nil {
return fmt.Errorf("Unable to interpolate credhub references: %v", err)
}
c.os.Setenv("VCAP_SERVICES", interpolatedServices)
return nil
}
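// credhubClient builds a CredHub API client authenticated with the container's
// instance identity certificate and key, trusting every .crt file found under
// CF_SYSTEM_CERT_PATH.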
func (c *Credhub) credhubClient(credhubURI string) (*api.CredHub, error) {
if c.os.Getenv("CF_INSTANCE_CERT") == "" || c.os.Getenv("CF_INSTANCE_KEY") == "" {
return nil, fmt.Errorf("Missing CF_INSTANCE_CERT and/or CF_INSTANCE_KEY")
}
if c.os.Getenv("CF_SYSTEM_CERT_PATH") == "" {
return nil, fmt.Errorf("Missing CF_SYSTEM_CERT_PATH")
}
systemCertsPath := c.pathFor(c.os.Getenv("CF_SYSTEM_CERT_PATH"))
caCerts := []string{}
files, err := ioutil.ReadDir(systemCertsPath)
if err != nil {
return nil, fmt.Errorf("Can't read contents of system cert path: %v", err)
}
for _, file := range files {
if strings.HasSuffix(file.Name(), ".crt") {
contents, err := ioutil.ReadFile(filepath.Join(systemCertsPath, file.Name()))
if err != nil {
return nil, fmt.Errorf("Can't read contents of cert in system cert path: %v", err)
}
caCerts = append(caCerts, string(contents))
}
}
return api.New(
credhubURI,
api.ClientCert(c.pathFor(c.os.Getenv("CF_INSTANCE_CERT")), c.pathFor(c.os.Getenv("CF_INSTANCE_KEY"))),
api.CaCerts(caCerts...),
)
}
|
[
"\"USERPROFILE\"",
"\"VCAP_SERVICES\"",
"\"VCAP_SERVICES\"",
"\"CF_INSTANCE_CERT\"",
"\"CF_INSTANCE_KEY\"",
"\"CF_SYSTEM_CERT_PATH\"",
"\"CF_SYSTEM_CERT_PATH\"",
"\"CF_INSTANCE_CERT\"",
"\"CF_INSTANCE_KEY\""
] |
[] |
[
"CF_INSTANCE_KEY",
"VCAP_SERVICES",
"CF_SYSTEM_CERT_PATH",
"CF_INSTANCE_CERT",
"USERPROFILE"
] |
[]
|
["CF_INSTANCE_KEY", "VCAP_SERVICES", "CF_SYSTEM_CERT_PATH", "CF_INSTANCE_CERT", "USERPROFILE"]
|
go
| 5 | 0 | |
src/main/java/com/seleniumtests/browserfactory/ChromeCapabilitiesFactory.java
|
/*
* Copyright 2018 www.muvi.com
*/
package com.seleniumtests.browserfactory;
import java.io.File;
import java.io.IOException;
import org.openqa.selenium.Proxy;
import org.openqa.selenium.chrome.ChromeOptions;
import org.openqa.selenium.remote.CapabilityType;
import org.openqa.selenium.remote.DesiredCapabilities;
import com.seleniumtests.driver.DriverConfig;
import com.seleniumtests.driver.DriverMode;
import com.seleniumtests.helper.FileUtility;
import com.seleniumtests.helper.OSUtility;
import com.seleniumtests.resources.WebDriverExternalResources;
public class ChromeCapabilitiesFactory implements ICapabilitiesFactory {
public DesiredCapabilities createCapabilities(final DriverConfig webDriverConfig) {
DesiredCapabilities capability = null;
capability = DesiredCapabilities.chrome();
capability.setBrowserName(DesiredCapabilities.chrome().getBrowserName());
ChromeOptions options = new ChromeOptions();
options.addArguments("start-maximized");
//options.addArguments("--start-fullscreen");
if (webDriverConfig.getUserAgentOverride() != null) {
options.addArguments("--user-agent=" + webDriverConfig.getUserAgentOverride());
}
capability.setCapability(ChromeOptions.CAPABILITY, options);
if (webDriverConfig.isEnableJavascript()) {
capability.setJavascriptEnabled(true);
} else {
capability.setJavascriptEnabled(false);
}
capability.setCapability(CapabilityType.TAKES_SCREENSHOT, true);
capability.setCapability(CapabilityType.ACCEPT_SSL_CERTS, true);
if (webDriverConfig.getBrowserVersion() != null) {
capability.setVersion(webDriverConfig.getBrowserVersion());
}
if (webDriverConfig.getWebPlatform() != null) {
capability.setPlatform(webDriverConfig.getWebPlatform());
}
if (webDriverConfig.getProxyHost() != null) {
Proxy proxy = webDriverConfig.getProxy();
capability.setCapability(CapabilityType.PROXY, proxy);
}
if (webDriverConfig.getChromeBinPath() != null) {
capability.setCapability("chrome.binary", webDriverConfig.getChromeBinPath());
}
// Set ChromeDriver for local mode
if (webDriverConfig.getMode() == DriverMode.LOCAL) {
String chromeDriverPath = webDriverConfig.getChromeDriverPath();
if (chromeDriverPath == null) {
try {
if (System.getenv("webdriver.chrome.driver") != null) {
System.out.println("get Chrome driver from property:"
+ System.getenv("webdriver.chrome.driver"));
System.setProperty("webdriver.chrome.driver", System.getenv("webdriver.chrome.driver"));
} else {
handleExtractResources();
}
} catch (IOException ex) {
ex.printStackTrace();
}
} else {
System.setProperty("webdriver.chrome.driver", chromeDriverPath);
}
}
return capability;
}
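// Extracts the bundled chromedriver from the jar resources into the classpath
// directory when it is not already present, and points webdriver.chrome.driver
// at the extracted binary (marking it executable on non-Windows systems).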
public void handleExtractResources() throws IOException {
String dir = this.getClass().getResource("/").getPath();
dir = FileUtility.decodePath(dir);
if (!new File(dir).exists()) {
System.out.println("extracting chrome resources in " + dir);
FileUtility.extractJar(dir, WebDriverExternalResources.class);
}
if (!new File(dir + OSUtility.getSlash() + "chromedriver.exe").exists()) {
FileUtility.extractJar(dir, WebDriverExternalResources.class);
}
if (OSUtility.isWindows()) {
System.setProperty("webdriver.chrome.driver", dir + "\\chromedriver.exe");
} else {
System.setProperty("webdriver.chrome.driver", dir + "/chromedriver");
new File(dir + "/chromedriver").setExecutable(true);
}
}
}
|
[
"\"webdriver.chrome.driver\"",
"\"webdriver.chrome.driver\"",
"\"webdriver.chrome.driver\""
] |
[] |
[
"webdriver.chrome.driver"
] |
[]
|
["webdriver.chrome.driver"]
|
java
| 1 | 0 | |
controllers/storagecluster/reconcile.go
|
package storagecluster
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/blang/semver"
"github.com/go-logr/logr"
openshiftv1 "github.com/openshift/api/template/v1"
conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
ocsv1 "github.com/openshift/ocs-operator/api/v1"
statusutil "github.com/openshift/ocs-operator/controllers/util"
"github.com/openshift/ocs-operator/version"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// ReconcileStrategy is a string representing how we want to reconcile
// (or not) a particular resource
type ReconcileStrategy string
// StorageClassProvisionerType is a string representing StorageClass Provisioner. E.g: aws-ebs
type StorageClassProvisionerType string
type resourceManager interface {
ensureCreated(*StorageClusterReconciler, *ocsv1.StorageCluster) error
ensureDeleted(*StorageClusterReconciler, *ocsv1.StorageCluster) error
}
type ocsCephConfig struct{}
type ocsJobTemplates struct{}
const (
rookConfigMapName = "rook-config-override"
rookConfigData = `
[global]
mon_osd_full_ratio = .85
mon_osd_backfillfull_ratio = .8
mon_osd_nearfull_ratio = .75
mon_max_pg_per_osd = 600
[osd]
osd_memory_target_cgroup_limit_ratio = 0.5
`
monCountOverrideEnvVar = "MON_COUNT_OVERRIDE"
// Name of MetadataPVCTemplate
metadataPVCName = "metadata"
// Name of WalPVCTemplate
walPVCName = "wal"
// ReconcileStrategyUnknown is the same as default
ReconcileStrategyUnknown ReconcileStrategy = ""
// ReconcileStrategyInit means reconcile once and ignore if it exists
ReconcileStrategyInit ReconcileStrategy = "init"
// ReconcileStrategyIgnore means never reconcile
ReconcileStrategyIgnore ReconcileStrategy = "ignore"
// ReconcileStrategyManage means always reconcile
ReconcileStrategyManage ReconcileStrategy = "manage"
// ReconcileStrategyStandalone also means never reconcile (NooBaa)
ReconcileStrategyStandalone ReconcileStrategy = "standalone"
// DeviceTypeSSD represents the DeviceType SSD
DeviceTypeSSD = "ssd"
// DeviceTypeHDD represents the DeviceType HDD
DeviceTypeHDD = "hdd"
// DeviceTypeNVMe represents the DeviceType NVMe
DeviceTypeNVMe = "nvme"
// AzureDisk represents Azure Premium Managed Disks provisioner for StorageClass
AzureDisk StorageClassProvisionerType = "kubernetes.io/azure-disk"
// EBS represents AWS EBS provisioner for StorageClass
EBS StorageClassProvisionerType = "kubernetes.io/aws-ebs"
)
var storageClusterFinalizer = "storagecluster.ocs.openshift.io"
const labelZoneRegionWithoutBeta = "failure-domain.kubernetes.io/region"
const labelZoneFailureDomainWithoutBeta = "failure-domain.kubernetes.io/zone"
const labelRookPrefix = "topology.rook.io"
var validTopologyLabelKeys = []string{
// This is the most preferred key as kubernetes recommends zone and region
// labels under this key.
corev1.LabelZoneRegionStable,
// These two are retained only to have backward compatibility; they are
// deprecated by kubernetes. If topology.kubernetes.io key has same label we
// will skip the next two from the topologyMap.
corev1.LabelZoneRegion,
labelZoneRegionWithoutBeta,
// This is the most preferred key as kubernetes recommends zone and region
// labels under this key.
corev1.LabelZoneFailureDomainStable,
// These two are retained only to have backward compatibility; they are
// deprecated by kubernetes. If topology.kubernetes.io key has same label we
// will skip the next two from the topologyMap.
corev1.LabelZoneFailureDomain,
labelZoneFailureDomainWithoutBeta,
// This is the kubernetes recommended label to select nodes.
corev1.LabelHostname,
// This label is used to assign rack based topology.
labelRookPrefix,
}
// +kubebuilder:rbac:groups=ocs.openshift.io,resources=*,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=ceph.rook.io,resources=cephclusters;cephblockpools;cephfilesystems;cephobjectstores;cephobjectstoreusers,verbs=*
// +kubebuilder:rbac:groups=noobaa.io,resources=noobaas,verbs=*
// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=*
// +kubebuilder:rbac:groups=core,resources=pods;services;endpoints;persistentvolumeclaims;events;configmaps;secrets;nodes,verbs=*
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get
// +kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=*
// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;prometheusrules,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshots;volumesnapshotclasses,verbs=*
// +kubebuilder:rbac:groups=template.openshift.io,resources=templates,verbs=*
// +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures,verbs=get;list;watch
// +kubebuilder:rbac:groups=console.openshift.io,resources=consolequickstarts,verbs=*
// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=*
// Reconcile reads that state of the cluster for a StorageCluster object and makes changes based on the state read
// and what is in the StorageCluster.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *StorageClusterReconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
prevLogger := r.Log
defer func() { r.Log = prevLogger }()
r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
// Fetch the StorageCluster instance
sc := &ocsv1.StorageCluster{}
if err := r.Client.Get(context.TODO(), request.NamespacedName, sc); err != nil {
if errors.IsNotFound(err) {
r.Log.Info("No StorageCluster resource")
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
if err := r.validateStorageClusterSpec(sc, request); err != nil {
return reconcile.Result{}, err
}
// Reconcile changes to the cluster
result, reconcileError := r.reconcilePhases(sc, request)
// Apply status changes to the storagecluster
statusError := r.Client.Status().Update(context.TODO(), sc)
if statusError != nil {
r.Log.Info("Status Update Error", "StatusUpdateErr", "Could not update storagecluster status")
}
// Reconcile errors have higher priority than status update errors
if reconcileError != nil {
return result, reconcileError
} else if statusError != nil {
return result, statusError
} else {
return result, nil
}
}
func (r *StorageClusterReconciler) initializeImagesStatus(sc *ocsv1.StorageCluster) {
images := &sc.Status.Images
if images.Ceph == nil {
images.Ceph = &ocsv1.ComponentImageStatus{}
}
images.Ceph.DesiredImage = r.images.Ceph
if images.NooBaaCore == nil {
images.NooBaaCore = &ocsv1.ComponentImageStatus{}
}
images.NooBaaCore.DesiredImage = r.images.NooBaaCore
if images.NooBaaDB == nil {
images.NooBaaDB = &ocsv1.ComponentImageStatus{}
}
images.NooBaaDB.DesiredImage = r.images.NooBaaDB
}
// validateStorageClusterSpec must be called before reconciling. Any syntactic and semantic errors in the CR must be caught here.
func (r *StorageClusterReconciler) validateStorageClusterSpec(instance *ocsv1.StorageCluster, request reconcile.Request) error {
if err := versionCheck(instance, r.Log); err != nil {
r.Log.Error(err, "Failed to validate version")
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonValidationFailed, err.Error())
return err
}
if !instance.Spec.ExternalStorage.Enable {
if err := r.validateStorageDeviceSets(instance); err != nil {
r.Log.Error(err, "Failed to validate StorageDeviceSets")
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonValidationFailed, err.Error())
return err
}
}
if err := validateArbiterSpec(instance, r.Log); err != nil {
r.Log.Error(err, "Failed to validate ArbiterSpec")
r.recorder.Event(instance, statusutil.EventTypeWarning, statusutil.EventReasonValidationFailed, err.Error())
return err
}
return nil
}
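// reconcilePhases runs the main reconciliation steps: it initializes the image
// status, checks that this is the active StorageCluster, manages the finalizer
// and uninstall flow, runs the per-resource ensureCreated functions, and rolls
// the collected conditions up into the StorageCluster status.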
func (r *StorageClusterReconciler) reconcilePhases(
instance *ocsv1.StorageCluster,
request reconcile.Request) (reconcile.Result, error) {
if instance.Spec.ExternalStorage.Enable {
r.Log.Info("Reconciling external StorageCluster")
} else {
r.Log.Info("Reconciling StorageCluster")
}
// Initialize the Status.Images section of the StorageCluster CR
r.initializeImagesStatus(instance)
// Check for active StorageCluster only if Create request is made
// and ignore it if there's another active StorageCluster
// If Update request is made and StorageCluster is PhaseIgnored, no need to
// proceed further
if instance.Status.Phase == "" {
isActive, err := r.isActiveStorageCluster(instance)
if err != nil {
r.Log.Error(err, "StorageCluster could not be reconciled. Retrying")
return reconcile.Result{}, err
}
if !isActive {
instance.Status.Phase = statusutil.PhaseIgnored
return reconcile.Result{}, nil
}
} else if instance.Status.Phase == statusutil.PhaseIgnored {
return reconcile.Result{}, nil
}
if instance.Status.Phase != statusutil.PhaseReady &&
instance.Status.Phase != statusutil.PhaseClusterExpanding &&
instance.Status.Phase != statusutil.PhaseDeleting &&
instance.Status.Phase != statusutil.PhaseConnecting {
instance.Status.Phase = statusutil.PhaseProgressing
}
// Add conditions if there are none
if instance.Status.Conditions == nil {
reason := ocsv1.ReconcileInit
message := "Initializing StorageCluster"
statusutil.SetProgressingCondition(&instance.Status.Conditions, reason, message)
}
// Check GetDeletionTimestamp to determine if the object is under deletion
if instance.GetDeletionTimestamp().IsZero() {
if !contains(instance.GetFinalizers(), storageClusterFinalizer) {
r.Log.Info("Finalizer not found for storagecluster. Adding finalizer")
instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, storageClusterFinalizer)
if err := r.Client.Update(context.TODO(), instance); err != nil {
r.Log.Info("Update Error", "MetaUpdateErr", "Failed to update storagecluster with finalizer")
return reconcile.Result{}, err
}
}
if err := r.reconcileUninstallAnnotations(instance); err != nil {
return reconcile.Result{}, err
}
} else {
// The object is marked for deletion
instance.Status.Phase = statusutil.PhaseDeleting
if contains(instance.GetFinalizers(), storageClusterFinalizer) {
if err := r.deleteResources(instance); err != nil {
r.Log.Info("Uninstall in progress", "Status", err)
return reconcile.Result{RequeueAfter: time.Second * time.Duration(1)}, nil
}
r.Log.Info("Removing finalizer")
// Once all finalizers have been removed, the object will be deleted
instance.ObjectMeta.Finalizers = remove(instance.ObjectMeta.Finalizers, storageClusterFinalizer)
if err := r.Client.Update(context.TODO(), instance); err != nil {
r.Log.Info("Update Error", "MetaUpdateErr", "Failed to remove finalizer from storagecluster")
return reconcile.Result{}, err
}
}
r.Log.Info("Object is terminated, skipping reconciliation")
return reconcile.Result{}, nil
}
if !instance.Spec.ExternalStorage.Enable {
// Get storage node topology labels
if err := r.reconcileNodeTopologyMap(instance); err != nil {
r.Log.Error(err, "Failed to set node topology map")
return reconcile.Result{}, err
}
}
// The in-memory conditions should start off empty. They will only ever hold
// negative conditions (!Available, Degraded, Progressing)
r.conditions = nil
// Start with empty r.phase
r.phase = ""
var objs []resourceManager
if !instance.Spec.ExternalStorage.Enable {
// list of default ensure functions
objs = []resourceManager{
&ocsStorageClass{},
&ocsSnapshotClass{},
&ocsCephObjectStores{},
&ocsCephObjectStoreUsers{},
&ocsCephRGWRoutes{},
&ocsCephBlockPools{},
&ocsCephFilesystems{},
&ocsCephConfig{},
&ocsCephCluster{},
&ocsNoobaaSystem{},
&ocsJobTemplates{},
&ocsQuickStarts{},
}
} else {
// for external cluster, we have a different set of ensure functions
objs = []resourceManager{
&ocsExternalResources{},
&ocsCephCluster{},
&ocsSnapshotClass{},
&ocsNoobaaSystem{},
&ocsQuickStarts{},
}
}
for _, obj := range objs {
err := obj.ensureCreated(r, instance)
if r.phase == statusutil.PhaseClusterExpanding {
instance.Status.Phase = statusutil.PhaseClusterExpanding
} else if instance.Status.Phase != statusutil.PhaseReady &&
instance.Status.Phase != statusutil.PhaseConnecting {
instance.Status.Phase = statusutil.PhaseProgressing
}
if err != nil {
reason := ocsv1.ReconcileFailed
message := fmt.Sprintf("Error while reconciling: %v", err)
statusutil.SetErrorCondition(&instance.Status.Conditions, reason, message)
instance.Status.Phase = statusutil.PhaseError
// don't want to overwrite the actual reconcile failure
return reconcile.Result{}, err
}
}
// All component operators are in a happy state.
if r.conditions == nil {
r.Log.Info("No component operator reported negatively")
reason := ocsv1.ReconcileCompleted
message := ocsv1.ReconcileCompletedMessage
statusutil.SetCompleteCondition(&instance.Status.Conditions, reason, message)
// If no operator whose conditions we are watching reports an error, then it is safe
// to set readiness.
ReadinessSet()
if instance.Status.Phase != statusutil.PhaseClusterExpanding &&
!instance.Spec.ExternalStorage.Enable {
instance.Status.Phase = statusutil.PhaseReady
}
} else {
// If any component operator reports negatively, we want to write that to
// the instance while preserving its lastTransitionTime.
// For example, consider a resource that has the Available condition
// type with status "False". When reconciling the resource we would
// add it to the in-memory representation of OCS's conditions (r.conditions)
// and here we are simply writing it back to the server.
// One shortcoming is that only one failure of a particular condition can be
// captured at one time (i.e. if resource1 and resource2 are both reporting !Available,
// you will only see resource2 as it updates last).
for _, condition := range r.conditions {
conditionsv1.SetStatusCondition(&instance.Status.Conditions, condition)
}
reason := ocsv1.ReconcileCompleted
message := ocsv1.ReconcileCompletedMessage
conditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{
Type: ocsv1.ConditionReconcileComplete,
Status: corev1.ConditionTrue,
Reason: reason,
Message: message,
})
// If for any reason we marked ourselves !upgradeable...then unset readiness
if conditionsv1.IsStatusConditionFalse(instance.Status.Conditions, conditionsv1.ConditionUpgradeable) {
ReadinessUnset()
}
if instance.Status.Phase != statusutil.PhaseClusterExpanding &&
!instance.Spec.ExternalStorage.Enable {
if conditionsv1.IsStatusConditionTrue(instance.Status.Conditions, conditionsv1.ConditionProgressing) {
instance.Status.Phase = statusutil.PhaseProgressing
} else if conditionsv1.IsStatusConditionFalse(instance.Status.Conditions, conditionsv1.ConditionUpgradeable) {
instance.Status.Phase = statusutil.PhaseNotReady
} else {
instance.Status.Phase = statusutil.PhaseError
}
}
}
// enable metrics exporter at the end of reconcile
// this allows storagecluster to be instantiated before
// scraping metrics
if err := r.enableMetricsExporter(instance); err != nil {
r.Log.Error(err, "failed to reconcile metrics exporter")
return reconcile.Result{}, err
}
if err := r.enablePrometheusRules(instance.Spec.ExternalStorage.Enable); err != nil {
r.Log.Error(err, "failed to reconcile prometheus rules")
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// versionCheck populates the `.Spec.Version` field with the operator version and
// rejects a StorageCluster whose version is newer than the operator's.
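// A minimal sketch of the semver comparison it relies on (the version strings
// here are illustrative, not taken from this file):
//
//	a, _ := semver.Make("4.9.0")
//	b, _ := semver.Make("4.8.0")
//	a.GT(b) // true, so a StorageCluster at 4.9.0 is rejected by a 4.8.0 operator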
func versionCheck(sc *ocsv1.StorageCluster, reqLogger logr.Logger) error {
if sc.Spec.Version == "" {
sc.Spec.Version = version.Version
} else if sc.Spec.Version != version.Version { // check anything else only if the versions mismatch
storClustSemV1, err := semver.Make(sc.Spec.Version)
if err != nil {
reqLogger.Error(err, "Error while parsing Storage Cluster version")
return err
}
ocsSemV1, err := semver.Make(version.Version)
if err != nil {
reqLogger.Error(err, "Error while parsing OCS Operator version")
return err
}
// if the storage cluster version is higher than the invoking OCS Operator's version,
// return error
if storClustSemV1.GT(ocsSemV1) {
err = fmt.Errorf("Storage cluster version (%s) is higher than the OCS Operator version (%s)",
sc.Spec.Version, version.Version)
reqLogger.Error(err, "Incompatible Storage cluster version")
return err
}
// if the storage cluster version is less than the OCS Operator version,
// just update.
sc.Spec.Version = version.Version
}
return nil
}
// validateStorageDeviceSets checks the StorageDeviceSets of the given
// StorageCluster for completeness and correctness
func (r *StorageClusterReconciler) validateStorageDeviceSets(sc *ocsv1.StorageCluster) error {
for i, ds := range sc.Spec.StorageDeviceSets {
if ds.DataPVCTemplate.Spec.StorageClassName == nil || *ds.DataPVCTemplate.Spec.StorageClassName == "" {
return fmt.Errorf("failed to validate StorageDeviceSet %d: no StorageClass specified", i)
}
if ds.MetadataPVCTemplate != nil {
if ds.MetadataPVCTemplate.Spec.StorageClassName == nil || *ds.MetadataPVCTemplate.Spec.StorageClassName == "" {
return fmt.Errorf("failed to validate StorageDeviceSet %d: no StorageClass specified for metadataPVCTemplate", i)
}
}
if ds.WalPVCTemplate != nil {
if ds.WalPVCTemplate.Spec.StorageClassName == nil || *ds.WalPVCTemplate.Spec.StorageClassName == "" {
return fmt.Errorf("failed to validate StorageDeviceSet %d: no StorageClass specified for walPVCTemplate", i)
}
}
if ds.DeviceType != "" {
if (DeviceTypeSSD != strings.ToLower(ds.DeviceType)) && (DeviceTypeHDD != strings.ToLower(ds.DeviceType)) && (DeviceTypeNVMe != strings.ToLower(ds.DeviceType)) {
return fmt.Errorf("failed to validate DeviceType %q: no Device of this type", ds.DeviceType)
}
}
}
return nil
}
// ensureCreated ensures that the Ceph ConfigMap resource exists with its data in
// the desired state.
func (obj *ocsCephConfig) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) error {
ownerRef := metav1.OwnerReference{
UID: sc.UID,
APIVersion: sc.APIVersion,
Kind: sc.Kind,
Name: sc.Name,
}
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: rookConfigMapName,
Namespace: sc.Namespace,
OwnerReferences: []metav1.OwnerReference{ownerRef},
},
Data: map[string]string{
"config": rookConfigData,
},
}
found := &corev1.ConfigMap{}
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: rookConfigMapName, Namespace: sc.Namespace}, found)
if err != nil {
if errors.IsNotFound(err) {
r.Log.Info("Creating Ceph ConfigMap")
err = r.Client.Create(context.TODO(), cm)
if err != nil {
return err
}
}
return err
}
ownerRefFound := false
for _, ownerRef := range found.OwnerReferences {
if ownerRef.UID == sc.UID {
ownerRefFound = true
}
}
val, ok := found.Data["config"]
if !ok || val != rookConfigData || !ownerRefFound {
r.Log.Info("Updating Ceph ConfigMap")
return r.Client.Update(context.TODO(), cm)
}
return nil
}
// ensureDeleted is a no-op for the ocsCephConfig
func (obj *ocsCephConfig) ensureDeleted(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) error {
return nil
}
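// isActiveStorageCluster reports whether this instance should be reconciled.
// A StorageCluster marked for deletion is never active; when several
// StorageClusters exist in the namespace, ties between clusters still in the
// creation phase are broken by CreationTimestamp and then by name.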
func (r *StorageClusterReconciler) isActiveStorageCluster(instance *ocsv1.StorageCluster) (bool, error) {
storageClusterList := ocsv1.StorageClusterList{}
// instance is already marked for deletion
// do not mark it as active
if !instance.GetDeletionTimestamp().IsZero() {
return false, nil
}
err := r.Client.List(context.TODO(), &storageClusterList, client.InNamespace(instance.Namespace))
if err != nil {
return false, fmt.Errorf("Error fetching StorageClusterList. %+v", err)
}
// There is only one StorageCluster i.e. instance
if len(storageClusterList.Items) == 1 {
return true, nil
}
// There are many StorageClusters. Check if this is Active
for n, storageCluster := range storageClusterList.Items {
if storageCluster.Status.Phase != statusutil.PhaseIgnored &&
storageCluster.ObjectMeta.Name != instance.ObjectMeta.Name {
// Both StorageClusters are in creation phase
// Tiebreak using CreationTimestamp and Alphanumeric ordering
if storageCluster.Status.Phase == "" {
if storageCluster.CreationTimestamp.Before(&instance.CreationTimestamp) {
return false, nil
} else if storageCluster.CreationTimestamp.Equal(&instance.CreationTimestamp) && storageCluster.Name < instance.Name {
return false, nil
}
if n == len(storageClusterList.Items)-1 {
return true, nil
}
continue
}
return false, nil
}
}
return true, nil
}
// contains checks whether a string is contained within a slice
func contains(slice []string, s string) bool {
for _, item := range slice {
if item == s {
return true
}
}
return false
}
// remove removes a given string from a slice and returns the new slice
func remove(slice []string, s string) (result []string) {
for _, item := range slice {
if item == s {
continue
}
result = append(result, item)
}
return
}
// ensureCreated ensures that the OSD removal job template exists
func (obj *ocsJobTemplates) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) error {
osdCleanUpTemplate := &openshiftv1.Template{
ObjectMeta: metav1.ObjectMeta{
Name: "ocs-osd-removal",
Namespace: sc.Namespace,
},
}
_, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, osdCleanUpTemplate, func() error {
osdCleanUpTemplate.Objects = []runtime.RawExtension{
{
Object: newCleanupJob(sc),
},
}
osdCleanUpTemplate.Parameters = []openshiftv1.Parameter{
{
Name: "FAILED_OSD_IDS",
DisplayName: "OSD IDs",
Required: true,
Description: `
The FAILED_OSD_IDS parameter takes a comma-separated list of numerical OSD IDs
so that a single job can remove multiple OSDs.
If the expected comma-separated format is not used,
or an ID cannot be converted to an int,
or an OSD ID is not found, errors are logged and no OSDs are removed.`,
},
}
return controllerutil.SetControllerReference(sc, osdCleanUpTemplate, r.Scheme)
})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create Template: %v", err.Error())
}
return nil
}
// ensureDeleted is a no-op for the ocsJobTemplates
func (obj *ocsJobTemplates) ensureDeleted(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) error {
return nil
}
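// newCleanupJob builds the batch Job embedded in the ocs-osd-removal template;
// the template's FAILED_OSD_IDS parameter is substituted into the container args.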
func newCleanupJob(sc *ocsv1.StorageCluster) *batchv1.Job {
labels := map[string]string{
"app": "ceph-toolbox-job",
}
// Annotation template.alpha.openshift.io/wait-for-ready ensures template readiness
annotations := map[string]string{
"template.alpha.openshift.io/wait-for-ready": "true",
}
job := &batchv1.Job{
TypeMeta: metav1.TypeMeta{
Kind: "Job",
APIVersion: "batch/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "ocs-osd-removal-job",
Namespace: sc.Namespace,
Labels: labels,
Annotations: annotations,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
ServiceAccountName: "rook-ceph-system",
Volumes: []corev1.Volume{
{
Name: "ceph-conf-emptydir",
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
},
{
Name: "rook-config",
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
},
},
Containers: []corev1.Container{
{
Name: "operator",
Image: os.Getenv("ROOK_CEPH_IMAGE"),
Args: []string{
"ceph",
"osd",
"remove",
"--osd-ids=${FAILED_OSD_IDS}",
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "ceph-conf-emptydir",
MountPath: "/etc/ceph",
},
{
Name: "rook-config",
MountPath: "/var/lib/rook",
},
},
Env: []corev1.EnvVar{
{
Name: "ROOK_MON_ENDPOINTS",
ValueFrom: &corev1.EnvVarSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
Key: "data",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon-endpoints"},
},
},
},
{
Name: "POD_NAMESPACE",
Value: sc.Namespace,
},
{
Name: "ROOK_CEPH_USERNAME",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: "ceph-username",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon"},
},
},
},
{
Name: "ROOK_CEPH_SECRET",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: "ceph-secret",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon"},
},
},
},
{
Name: "ROOK_FSID",
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
Key: "fsid",
LocalObjectReference: corev1.LocalObjectReference{Name: "rook-ceph-mon"},
},
},
},
{
Name: "ROOK_CONFIG_DIR",
Value: "/var/lib/rook",
},
{
Name: "ROOK_CEPH_CONFIG_OVERRIDE",
Value: "/etc/rook/config/override.conf",
},
{
Name: "ROOK_LOG_LEVEL",
Value: "DEBUG",
},
},
},
},
},
},
},
}
return job
}
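// validateArbiterSpec rejects a spec that enables arbiter together with
// flexible scaling, or that enables arbiter without an arbiter location.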
func validateArbiterSpec(sc *ocsv1.StorageCluster, reqLogger logr.Logger) error {
if sc.Spec.Arbiter.Enable && sc.Spec.FlexibleScaling {
return fmt.Errorf("arbiter and flexibleScaling cannot both be enabled")
}
if sc.Spec.Arbiter.Enable && sc.Spec.NodeTopologies.ArbiterLocation == "" {
return fmt.Errorf("arbiter is enabled but no arbiterLocation has been provided in Spec.NodeTopologies.ArbiterLocation")
}
return nil
}
|
[
"\"ROOK_CEPH_IMAGE\""
] |
[] |
[
"ROOK_CEPH_IMAGE"
] |
[]
|
["ROOK_CEPH_IMAGE"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"context"
"flag"
"fmt"
"os"
"github.com/peterbourgon/ff/v3/ffcli"
)
var (
rootFlagSet = flag.NewFlagSet("sqs-pub", flag.ExitOnError)
replayer = NewSQSMessageReplayer()
)
func init() {
rootFlagSet.StringVar(&replayer.cfg.from, "from", "queue-name-source", "sqs queue from which messages will be sourced")
rootFlagSet.StringVar(&replayer.cfg.to, "to", "queue-name-destination", "sqs queue to which messages will be pushed")
rootFlagSet.StringVar(&replayer.cfg.filters, "filters", "10104211111292", "comma-separated text that can be used as a message body filter")
rootFlagSet.BoolVar(&replayer.cfg.deleteFromSource, "delete", true, "delete messages from the source after they are successfully pushed to the destination queue")
rootFlagSet.BoolVar(&replayer.cfg.dryrun, "dryrun", false, "a flag to run the replay in dry run mode.")
}
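// main wires the flags into the root ffcli command and runs the replay.
// Example invocation (queue names and filter values are illustrative):
//
//	sqs_pub -from orders-dlq -to orders -filters 1234,5678 -dryrun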
func main() {
root := &ffcli.Command{
Name: "replay",
ShortUsage: "sqs_pub [-from queue1 -to queue2 -filters text1,text2,...]",
ShortHelp:  "Source messages from the given queue, then push them to the destination queue",
FlagSet: rootFlagSet,
Exec: replayer.replay,
}
if err := root.ParseAndRun(context.Background(), os.Args[1:]); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
resources_portal/__init__.py
|
import os
import resources_portal.models # noqa
from flask import Flask
from flask_migrate import Migrate
from flask_restful import Api
from resources_portal.db import db
from resources_portal.views import user
migrate = Migrate()
def initialize_routes(api: Api):
api.add_resource(user.UsersApi, "/users")
api.add_resource(user.UserApi, "/users/<user_id>")
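# set_database_URI builds the SQLAlchemy connection URI from the app config
# values and the DB_HOST environment variable.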
def set_database_URI(app: Flask):
database_URI_template = "postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
app.config["SQLALCHEMY_DATABASE_URI"] = database_URI_template.format(
DB_USER=app.config["DB_USER"],
DB_PASSWORD=app.config["DB_PASSWORD"],
DB_HOST=os.environ["DB_HOST"],
DB_PORT=app.config["DB_PORT"],
DB_NAME=app.config["DB_NAME"],
)
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
app.config.from_envvar("RESOURCES_PORTAL_CONFIG_FILE")
set_database_URI(app)
api = Api(app)
initialize_routes(api)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
migrate.init_app(app, db)
from resources_portal.schemas import ma
ma.init_app(app)
return app
|
[] |
[] |
[
"DB_HOST"
] |
[]
|
["DB_HOST"]
|
python
| 1 | 0 | |
examples/markethours/getMarketHours.go
|
package main
import (
"context"
"fmt"
"github.com/zricethezav/go-tdameritrade/tdameritrade"
"golang.org/x/oauth2"
"log"
"os"
"time"
)
func main() {
// pass an http client with auth
token := os.Getenv("TDAMERITRADE_CLIENT_ID")
if token == "" {
log.Fatal("Unauthorized: No token present")
}
refreshToken := os.Getenv("TDAMERITRADE_REFRESH_TOKEN")
if refreshToken == "" {
log.Fatal("Unauthorized: No refresh token present")
}
conf := oauth2.Config{
ClientID: token,
Endpoint: oauth2.Endpoint{
TokenURL: "https://api.tdameritrade.com/v1/oauth2/token",
},
RedirectURL: "https://localhost",
}
tkn := &oauth2.Token{
RefreshToken: refreshToken,
}
ctx := context.Background()
tc := conf.Client(ctx, tkn)
c, err := tdameritrade.NewClient(tc)
if err != nil {
log.Fatal(err)
}
hours, _, err := c.MarketHours.GetMarketHours(ctx, "Equity", time.Now())
if err != nil {
log.Fatal(err)
}
fmt.Printf("%+v\n", (*hours)["equity"]["EQ"])
hours, _, err = c.MarketHours.GetMarketHoursMulti(ctx, "EQUITY,OPTION", time.Now())
if err != nil {
log.Fatal(err)
}
fmt.Printf("%+v\n", (*hours)["option"]["EQO"])
}
|
[
"\"TDAMERITRADE_CLIENT_ID\"",
"\"TDAMERITRADE_REFRESH_TOKEN\""
] |
[] |
[
"TDAMERITRADE_REFRESH_TOKEN",
"TDAMERITRADE_CLIENT_ID"
] |
[]
|
["TDAMERITRADE_REFRESH_TOKEN", "TDAMERITRADE_CLIENT_ID"]
|
go
| 2 | 0 | |
app/app/settings.py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w_yr*dlq=12l6xi=#lfaz)x$_sqp8!*sput55byzn%egl(s53_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
|
[] |
[] |
[
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
cmd/ddev/cmd/share_test.go
|
package cmd
import (
"bufio"
"encoding/json"
"fmt"
asrt "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"testing"
)
// TestShareCmd tests `ddev share`
func TestShareCmd(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping because unreliable on Windows due to DNS lookup failure")
}
if os.Getenv("GITHUB_ACTIONS") == "true" {
t.Skip("Skipping on GitHub actions because no auth can be provided")
}
assert := asrt.New(t)
urlRead := false
site := TestSites[0]
defer site.Chdir()()
// Configure ddev/ngrok to use json output to stdout
cmd := exec.Command(DdevBin, "config", "--ngrok-args", "-log stdout -log-format=json")
err := cmd.Start()
require.NoError(t, err)
err = cmd.Wait()
require.NoError(t, err)
cmd = exec.Command(DdevBin, "share", "--use-http")
cmdReader, err := cmd.StdoutPipe()
require.NoError(t, err)
scanner := bufio.NewScanner(cmdReader)
// Make absolutely sure the ngrok process gets killed off, because otherwise
// the testbot (windows) can remain occupied forever.
// nolint: errcheck
defer pKill(cmd)
// Read through the ngrok json output until we get the url it has opened
go func() {
for scanner.Scan() {
logLine := scanner.Text()
logData := make(map[string]string)
err := json.Unmarshal([]byte(logLine), &logData)
if err != nil {
switch err.(type) {
case *json.SyntaxError:
continue
default:
t.Errorf("failed unmarshalling %v: %v", logLine, err)
break
}
}
if logErr, ok := logData["err"]; ok && logErr != "<nil>" {
assert.Equal("<nil>", logErr)
err = pKill(cmd)
assert.NoError(err)
return
}
// If URL is provided, try to hit it and look for expected response
if url, ok := logData["url"]; ok {
resp, err := http.Get(url + site.Safe200URIWithExpectation.URI)
if err != nil {
t.Logf("http.Get on url=%s failed, err=%v", url+site.Safe200URIWithExpectation.URI, err)
err = pKill(cmd)
assert.NoError(err)
return
}
//nolint: errcheck
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
assert.NoError(err)
assert.Contains(string(body), site.Safe200URIWithExpectation.Expect)
urlRead = true
err = pKill(cmd)
assert.NoError(err)
return
}
}
}()
err = cmd.Start()
require.NoError(t, err)
err = cmd.Wait()
t.Logf("cmd.Wait() err: %v", err)
assert.True(urlRead)
_ = cmdReader.Close()
}
// pKill kills a started cmd; on Windows, it shells out to the
// taskkill command.
func pKill(cmd *exec.Cmd) error {
var err error
if cmd == nil {
return fmt.Errorf("pKill: cmd is nil")
}
if runtime.GOOS == "windows" {
// Windows has a completely different process model, no SIGCHLD,
// no killing of subprocesses. I wasn't successful in finding a way
// to properly kill a process set using golang; rfay 20190622
kill := exec.Command("TASKKILL", "/T", "/F", "/PID", strconv.Itoa(cmd.Process.Pid))
kill.Stderr = os.Stderr
kill.Stdout = os.Stdout
err = kill.Run()
} else {
err = cmd.Process.Kill()
}
return err
}
|
[
"\"GITHUB_ACTIONS\""
] |
[] |
[
"GITHUB_ACTIONS"
] |
[]
|
["GITHUB_ACTIONS"]
|
go
| 1 | 0 | |
example_test.go
|
package airtable_test
import (
"fmt"
"os"
airtable "github.com/makeitraina/airtable-go"
)
func ExampleNew() {
airtableAPIKey := os.Getenv("AIRTABLE_API_KEY")
baseID := "apphllLCpWnySSF7q"
client, err := airtable.New(airtableAPIKey, baseID)
if err != nil {
panic(err)
}
fmt.Println(client)
}
func ExampleClient_CreateRecord() {
client, _ := airtable.New("AIRTABLE_API_KEY", "BASE_ID")
type task struct {
AirtableID string
Fields struct {
Name string
Notes string
}
}
aTask := task{}
aTask.Fields.Name = "Contact suppliers"
aTask.Fields.Notes = "Get pricing on both the blue and green variants"
client.CreateRecord("TABLE_NAME", &aTask)
// aTask.AirtableID is now set to the newly created Airtable recordID
}
func ExampleClient_DestroyRecord() {
client, _ := airtable.New("AIRTABLE_API_KEY", "BASE_ID")
if err := client.DestroyRecord("TABLE_NAME", "RECORD_ID"); err != nil {
panic(err)
}
}
func ExampleClient_ListRecords() {
client, _ := airtable.New("AIRTABLE_API_KEY", "BASE_ID")
type task struct {
AirtableID string
Fields struct {
Name string
Notes string
}
}
tasks := []task{}
if err := client.ListRecords("TABLE_NAME", &tasks); err != nil {
panic(err)
}
fmt.Println(tasks)
}
func ExampleClient_RetrieveRecord() {
client, _ := airtable.New("AIRTABLE_API_KEY", "BASE_ID")
type task struct {
AirtableID string
Fields struct {
Name string
Notes string
}
}
retrievedTask := task{}
if err := client.RetrieveRecord("TABLE_NAME", "RECORD_ID", &retrievedTask); err != nil {
panic(err)
}
fmt.Println(retrievedTask)
}
func ExampleClient_UpdateRecord() {
client, _ := airtable.New("AIRTABLE_API_KEY", "BASE_ID")
type task struct {
AirtableID string
Fields struct {
Name string
Notes string
}
}
aTask := task{}
aTask.Fields.Name = "Clean kitchen"
aTask.Fields.Notes = "Make sure to clean all the counter tops"
UpdatedFields := map[string]interface{}{
"Name": "Clean entire kitchen",
}
if err := client.UpdateRecord("TABLE_NAME", "RECORD_ID", UpdatedFields, &aTask); err != nil {
panic(err)
}
fmt.Println(aTask)
}
func ExampleListParameters() {
client, _ := airtable.New("AIRTABLE_API_KEY", "BASE_ID")
type task struct {
AirtableID string
Fields struct {
Name string
Notes string
Priority int
}
}
listParams := airtable.ListParameters{
Fields: []string{"Name", "Notes", "Priority"},
FilterByFormula: "{Priority} < 2",
MaxRecords: 50,
Sort: []airtable.SortParameter{
airtable.SortParameter{
Field: "Priority",
ShouldSortDesc: true,
},
},
View: "Main View",
}
tasks := []task{}
if err := client.ListRecords("TABLE_NAME", &tasks, listParams); err != nil {
panic(err)
}
fmt.Println(tasks)
}
|
[
"\"AIRTABLE_API_KEY\""
] |
[] |
[
"AIRTABLE_API_KEY"
] |
[]
|
["AIRTABLE_API_KEY"]
|
go
| 1 | 0 | |
tests/regres/main.go
|
// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Regres is a tool that detects test regressions with SwiftShader changes.
//
// Regres monitors changes that have been put up for review with Gerrit.
// Once a new patchset has been found, regres will checkout, build and test the
// change against the parent changelist. Any differences in results are reported
// as a review comment on the change.
//
// Once a day regres will also test another, larger set of tests, and post the
// full test results as a Gerrit changelist. The CI test lists can be based on
// this daily test list, so testing can be limited to tests that were known to
// pass.
package main
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"flag"
"fmt"
"log"
"math"
"math/rand"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
"time"
"./cause"
"./consts"
"./git"
"./shell"
"./testlist"
gerrit "github.com/andygrunwald/go-gerrit"
)
const (
gitURL = "https://swiftshader.googlesource.com/SwiftShader"
gerritURL = "https://swiftshader-review.googlesource.com/"
reportHeader = "Regres report:"
dataVersion = 1
changeUpdateFrequency = time.Minute * 5
changeQueryFrequency = time.Minute * 5
testTimeout = time.Minute * 2 // timeout for a single test
buildTimeout = time.Minute * 10 // timeout for a build
dailyUpdateTestListHour = 5 // 5am
fullTestListRelPath = "tests/regres/full-tests.json"
ciTestListRelPath = "tests/regres/ci-tests.json"
deqpConfigRelPath = "tests/regres/deqp.json"
)
var (
numParallelTests = runtime.NumCPU()
cacheDir = flag.String("cache", "cache", "path to the output cache directory")
gerritEmail = flag.String("email", "$SS_REGRES_EMAIL", "gerrit email address for posting regres results")
gerritUser = flag.String("user", "$SS_REGRES_USER", "gerrit username for posting regres results")
gerritPass = flag.String("pass", "$SS_REGRES_PASS", "gerrit password for posting regres results")
keepCheckouts = flag.Bool("keep", false, "don't delete checkout directories after use")
dryRun = flag.Bool("dry", false, "don't post regres reports to gerrit")
maxProcMemory = flag.Uint64("max-proc-mem", shell.MaxProcMemory, "maximum virtual memory per child process")
dailyNow = flag.Bool("dailynow", false, "Start by running the daily pass")
priority = flag.String("priority", "", "Prioritize a single change with the given id")
)
func main() {
if runtime.GOOS != "linux" {
log.Fatal("regres only currently runs on linux")
}
flag.ErrHelp = errors.New("regres is a tool to detect regressions between versions of SwiftShader")
flag.Parse()
shell.MaxProcMemory = *maxProcMemory
r := regres{
cacheRoot: *cacheDir,
gerritEmail: os.ExpandEnv(*gerritEmail),
gerritUser: os.ExpandEnv(*gerritUser),
gerritPass: os.ExpandEnv(*gerritPass),
keepCheckouts: *keepCheckouts,
dryRun: *dryRun,
dailyNow: *dailyNow,
priority: *priority,
}
if err := r.run(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(-1)
}
}
type regres struct {
cmake string // path to cmake executable
make string // path to make executable
python string // path to python executable
cacheRoot string // path to the regres cache directory
gerritEmail string // gerrit email address used for posting results
gerritUser string // gerrit username used for posting results
gerritPass string // gerrit password used for posting results
keepCheckouts bool // don't delete source & build checkouts after testing
dryRun bool // don't post any reviews
maxProcMemory uint64 // max virtual memory for child processes
dailyNow bool // start with a daily run
priority string // Prioritize a single change with the given id
}
// resolveDirs ensures that the necessary directories used can be found, and
// expands them to absolute paths.
func (r *regres) resolveDirs() error {
allDirs := []*string{
&r.cacheRoot,
}
for _, path := range allDirs {
abs, err := filepath.Abs(*path)
if err != nil {
return cause.Wrap(err, "Couldn't find path '%v'", *path)
}
*path = abs
}
if err := os.MkdirAll(r.cacheRoot, 0777); err != nil {
return cause.Wrap(err, "Couldn't create cache root directory")
}
for _, path := range allDirs {
if _, err := os.Stat(*path); err != nil {
return cause.Wrap(err, "Couldn't find path '%v'", *path)
}
}
return nil
}
// resolveExes resolves all external executables used by regres.
func (r *regres) resolveExes() error {
type exe struct {
name string
path *string
}
for _, e := range []exe{
{"cmake", &r.cmake},
{"make", &r.make},
{"python", &r.python},
} {
path, err := exec.LookPath(e.name)
if err != nil {
return cause.Wrap(err, "Couldn't find path to %s", e.name)
}
*e.path = path
}
return nil
}
// run performs the main processing loop for the regres tool. It:
// * Scans for open and recently updated changes in gerrit using queryChanges()
// and changeInfo.update().
// * Builds the most recent patchset and the commit's parent CL using
// r.newTest(<hash>).lazyRun().
// * Compares the results of the tests using compare().
// * Posts the results of the compare to gerrit as a review.
// * Repeats the above steps until the process is interrupted.
func (r *regres) run() error {
if err := r.resolveExes(); err != nil {
return cause.Wrap(err, "Couldn't resolve all exes")
}
if err := r.resolveDirs(); err != nil {
return cause.Wrap(err, "Couldn't resolve all directories")
}
client, err := gerrit.NewClient(gerritURL, nil)
if err != nil {
return cause.Wrap(err, "Couldn't create gerrit client")
}
if r.gerritUser != "" {
client.Authentication.SetBasicAuth(r.gerritUser, r.gerritPass)
}
changes := map[string]*changeInfo{} // Change ID -> changeInfo
lastUpdatedTestLists := toDate(time.Now())
lastQueriedChanges := time.Time{}
if r.dailyNow {
lastUpdatedTestLists = date{}
}
for {
if now := time.Now(); toDate(now) != lastUpdatedTestLists && now.Hour() >= dailyUpdateTestListHour {
lastUpdatedTestLists = toDate(now)
if err := r.updateTestLists(client); err != nil {
log.Println(err.Error())
}
}
// Update list of tracked changes.
if time.Since(lastQueriedChanges) > changeQueryFrequency {
lastQueriedChanges = time.Now()
if err := queryChanges(client, changes); err != nil {
log.Println(err.Error())
}
}
// Update change info.
for _, change := range changes {
if time.Since(change.lastUpdated) > changeUpdateFrequency {
change.lastUpdated = time.Now()
err := change.update(client)
if err != nil {
log.Println(cause.Wrap(err, "Couldn't update info for change '%s'", change.id))
}
}
}
for _, c := range changes {
if c.pending && r.priority == c.id {
log.Printf("Prioritizing change '%s'\n", c.id)
c.priority = 1e6
}
}
// Find the change with the highest priority.
var change *changeInfo
numPending := 0
for _, c := range changes {
if c.pending {
numPending++
if change == nil || c.priority > change.priority {
change = c
}
}
}
if change == nil {
// Everything up to date. Take a break.
log.Println("Nothing to do. Sleeping")
time.Sleep(time.Minute)
continue
}
log.Printf("%d changes queued for testing\n", numPending)
log.Printf("Testing change '%s'\n", change.id)
// Test the latest patchset in the change, diff against parent change.
msg, err := r.test(change)
if err != nil {
log.Println(cause.Wrap(err, "Failed to test changelist '%s'", change.latest))
time.Sleep(time.Minute)
change.pending = false
continue
}
// Always include the reportHeader in the message.
// changeInfo.update() uses this header to detect whether a patchset has
// already got a test result.
msg = reportHeader + "\n\n" + msg
if r.dryRun {
log.Printf("DRY RUN: add review to change '%v':\n%v\n", change.id, msg)
} else {
log.Printf("Posting review to '%s'\n", change.id)
_, _, err = client.Changes.SetReview(change.id, change.latest.String(), &gerrit.ReviewInput{
Message: msg,
Tag: "autogenerated:regress",
})
if err != nil {
return cause.Wrap(err, "Failed to post comments on change '%s'", change.id)
}
}
change.pending = false
}
}
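// test builds and runs the test lists for the latest patchset of the change and
// for its parent commit, and returns the comparison report message.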
func (r *regres) test(change *changeInfo) (string, error) {
latest := r.newTest(change.latest)
defer latest.cleanup()
deqp, err := r.getOrBuildDEQP(latest)
if err != nil {
return "", cause.Wrap(err, "Failed to build dEQP '%v' for change", change.id)
}
if err := latest.checkout(); err != nil {
return "", cause.Wrap(err, "Failed to checkout '%s'", change.latest)
}
log.Printf("Testing latest patchset for change '%s'\n", change.id)
latestResults, testlists, err := r.testLatest(change, latest, deqp)
if err != nil {
return "", cause.Wrap(err, "Failed to test latest change of '%v'", change.id)
}
log.Printf("Testing parent of change '%s'\n", change.id)
parentResults, err := r.testParent(change, testlists, deqp)
if err != nil {
return "", cause.Wrap(err, "Failed to test parent change of '%v'", change.id)
}
log.Println("Comparing latest patchset's results with parent")
msg := compare(parentResults, latestResults)
return msg, nil
}
type deqp struct {
path string // path to deqp directory
hash string // hash of the deqp config
}
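// getOrBuildDEQP returns a dEQP build cached under the hash of the dEQP config
// (remote, SHA and patches), checking it out and building it on a cache miss.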
func (r *regres) getOrBuildDEQP(test *test) (deqp, error) {
srcDir := test.srcDir
if !isFile(path.Join(srcDir, deqpConfigRelPath)) {
srcDir, _ = os.Getwd()
log.Println("Couldn't open dEQP config file from change, falling back to internal version")
}
file, err := os.Open(path.Join(srcDir, deqpConfigRelPath))
if err != nil {
return deqp{}, cause.Wrap(err, "Couldn't open dEQP config file")
}
defer file.Close()
cfg := struct {
Remote string `json:"remote"`
SHA string `json:"sha"`
Patches []string `json:"patches"`
}{}
if err := json.NewDecoder(file).Decode(&cfg); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't parse %s", deqpConfigRelPath)
}
hasher := sha1.New()
if err := json.NewEncoder(hasher).Encode(&cfg); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't re-encode %s", deqpConfigRelPath)
}
hash := hex.EncodeToString(hasher.Sum(nil))
cacheDir := path.Join(r.cacheRoot, "deqp", hash)
buildDir := path.Join(cacheDir, "build")
if !isDir(cacheDir) {
if err := os.MkdirAll(cacheDir, 0777); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't make deqp cache directory '%s'", cacheDir)
}
success := false
defer func() {
if !success {
os.RemoveAll(cacheDir)
}
}()
log.Printf("Checking out deqp %s @ %s into %s\n", cfg.Remote, cfg.SHA, cacheDir)
if err := git.Checkout(cacheDir, cfg.Remote, git.ParseHash(cfg.SHA)); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't build deqp %s @ %s", cfg.Remote, cfg.SHA)
}
log.Println("Fetching deqp dependencies")
if err := shell.Shell(buildTimeout, r.python, cacheDir, "external/fetch_sources.py"); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't fetch deqp sources %s @ %s", cfg.Remote, cfg.SHA)
}
log.Println("Applying deqp patches")
for _, patch := range cfg.Patches {
fullPath := path.Join(srcDir, patch)
if err := git.Apply(cacheDir, fullPath); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't apply deqp patch %s for %s @ %s", patch, cfg.Remote, cfg.SHA)
}
}
log.Printf("Building deqp into %s\n", buildDir)
if err := os.MkdirAll(buildDir, 0777); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't make deqp cache directory '%s'", cacheDir)
}
if err := shell.Shell(buildTimeout, r.cmake, buildDir,
"-DDEQP_TARGET=x11_egl",
"-DCMAKE_BUILD_TYPE=Release",
".."); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't generate build rules for deqp %s @ %s", cfg.Remote, cfg.SHA)
}
if err := shell.Shell(buildTimeout, r.make, buildDir, fmt.Sprintf("-j%d", runtime.NumCPU())); err != nil {
return deqp{}, cause.Wrap(err, "Couldn't build deqp %s @ %s", cfg.Remote, cfg.SHA)
}
success = true
}
return deqp{
path: cacheDir,
hash: hash,
}, nil
}
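// additionalTestsRE matches "Test:" / "Tests:" lines in a change's commit
// message, for example a line such as "Tests: dEQP-GLES2.functional.*"
// (the pattern shown is illustrative), so that extra tests can be requested
// for a single change.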
var additionalTestsRE = regexp.MustCompile(`\n\s*Test[s]?:\s*([^\s]+)[^\n]*`)
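// testLatest runs the CI test lists, plus any extra tests named in the commit
// message, against the latest patchset, reusing cached results when available.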
func (r *regres) testLatest(change *changeInfo, test *test, d deqp) (*CommitTestResults, testlist.Lists, error) {
// Get the test results for the latest patchset in the change.
testlists, err := test.loadTestLists(ciTestListRelPath)
if err != nil {
return nil, nil, cause.Wrap(err, "Failed to load '%s'", change.latest)
}
if matches := additionalTestsRE.FindAllStringSubmatch(change.commitMessage, -1); len(matches) > 0 {
log.Println("Change description contains additional test patterns")
// Change specifies additional tests to try. Load the full test list.
fullTestLists, err := test.loadTestLists(fullTestListRelPath)
if err != nil {
return nil, nil, cause.Wrap(err, "Failed to load '%s'", change.latest)
}
// Add any tests in the full list that match the pattern to the list to test.
for _, match := range matches {
if len(match) > 1 {
pattern := match[1]
log.Printf("Adding custom tests with pattern '%s'\n", pattern)
filtered := fullTestLists.Filter(func(name string) bool {
ok, _ := filepath.Match(pattern, name)
return ok
})
testlists = append(testlists, filtered...)
}
}
}
cachePath := test.resultsCachePath(testlists, d)
if results, err := loadCommitTestResults(cachePath); err == nil {
return results, testlists, nil // Use cached results
}
// Build the change and test it.
results := test.buildAndRun(testlists, d)
// Cache the results for future tests
if err := results.save(cachePath); err != nil {
log.Printf("Warning: Couldn't save results of test to '%v'\n", cachePath)
}
return results, testlists, nil
}
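// testParent returns the test results for the change's parent commit, building
// and running it only when no cached results exist.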
func (r *regres) testParent(change *changeInfo, testlists testlist.Lists, d deqp) (*CommitTestResults, error) {
// Get the test results for the changes's parent changelist.
test := r.newTest(change.parent)
defer test.cleanup()
cachePath := test.resultsCachePath(testlists, d)
if results, err := loadCommitTestResults(cachePath); err == nil {
return results, nil // Use cached results
}
// Couldn't load cached results. Have to build them.
if err := test.checkout(); err != nil {
return nil, cause.Wrap(err, "Failed to checkout '%s'", change.parent)
}
// Build the parent change and test it.
results := test.buildAndRun(testlists, d)
// Store the results of the parent change to the cache.
if err := results.save(cachePath); err != nil {
log.Printf("Warning: Couldn't save results of test to '%v'\n", cachePath)
}
return results, nil
}
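// updateTestLists runs the full test lists against HEAD of master, regenerates
// the per-status test list files, pushes them to Gerrit for review, and then
// posts the most common failures as a review comment.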
func (r *regres) updateTestLists(client *gerrit.Client) error {
log.Println("Updating test lists")
headHash, err := git.FetchRefHash("HEAD", gitURL)
if err != nil {
return cause.Wrap(err, "Could not get hash of master HEAD")
}
// Get the full test results for latest master.
test := r.newTest(headHash)
defer test.cleanup()
// Always need to checkout the change.
if err := test.checkout(); err != nil {
return cause.Wrap(err, "Failed to checkout '%s'", headHash)
}
d, err := r.getOrBuildDEQP(test)
if err != nil {
return cause.Wrap(err, "Failed to build deqp for '%s'", headHash)
}
// Load the test lists.
testLists, err := test.loadTestLists(fullTestListRelPath)
if err != nil {
return cause.Wrap(err, "Failed to load full test lists for '%s'", headHash)
}
// Build the change.
if err := test.build(); err != nil {
return cause.Wrap(err, "Failed to build '%s'", headHash)
}
// Run the tests on the change.
results, err := test.run(testLists, d)
if err != nil {
return cause.Wrap(err, "Failed to test '%s'", headHash)
}
// Write out the test list status files.
filePaths, err := test.writeTestListsByStatus(testLists, results)
if err != nil {
return cause.Wrap(err, "Failed to write test lists by status")
}
// Stage all the updated test files.
for _, path := range filePaths {
log.Println("Staging", path)
git.Add(test.srcDir, path)
}
log.Println("Checking for existing test list")
existingChange, err := r.findTestListChange(client)
if err != nil {
return err
}
commitMsg := strings.Builder{}
commitMsg.WriteString(consts.TestListUpdateCommitSubjectPrefix + headHash.String()[:8])
if existingChange != nil {
// Reuse gerrit change ID if there's already a change up for review.
commitMsg.WriteString("\n\n")
commitMsg.WriteString("Change-Id: " + existingChange.ChangeID + "\n")
}
if err := git.Commit(test.srcDir, commitMsg.String(), git.CommitFlags{
Name: "SwiftShader Regression Bot",
Email: r.gerritEmail,
}); err != nil {
return cause.Wrap(err, "Failed to commit test results")
}
if r.dryRun {
log.Printf("DRY RUN: post results for review")
} else {
log.Println("Pushing test results for review")
if err := git.Push(test.srcDir, gitURL, "HEAD", "refs/for/master", git.PushFlags{
Username: r.gerritUser,
Password: r.gerritPass,
}); err != nil {
return cause.Wrap(err, "Failed to push test results for review")
}
log.Println("Test results posted for review")
}
change, err := r.findTestListChange(client)
if err != nil {
return err
}
if err := r.postMostCommonFailures(client, change, results); err != nil {
return err
}
return nil
}
// postMostCommonFailures posts the most common failure cases as a review
// comment on the given change.
func (r *regres) postMostCommonFailures(client *gerrit.Client, change *gerrit.ChangeInfo, results *CommitTestResults) error {
const limit = 25
failures := results.commonFailures()
if len(failures) > limit {
failures = failures[:limit]
}
sb := strings.Builder{}
sb.WriteString(fmt.Sprintf("Top %v most common failures:\n", len(failures)))
for _, f := range failures {
lines := strings.Split(f.error, "\n")
if len(lines) == 1 {
line := lines[0]
if line != "" {
sb.WriteString(fmt.Sprintf(" • %d occurrences: %v: %v\n", f.count, f.status, line))
} else {
sb.WriteString(fmt.Sprintf(" • %d occurrences: %v\n", f.count, f.status))
}
} else {
sb.WriteString(fmt.Sprintf(" • %d occurrences: %v:\n", f.count, f.status))
for _, l := range lines {
sb.WriteString(" > ")
sb.WriteString(l)
sb.WriteString("\n")
}
}
sb.WriteString(fmt.Sprintf(" Example test: %v\n", f.exampleTest))
}
msg := sb.String()
if r.dryRun {
log.Printf("DRY RUN: add most common failures to '%v':\n%v\n", change.ChangeID, msg)
} else {
log.Printf("Posting most common failures to '%s'\n", change.ChangeID)
_, _, err := client.Changes.SetReview(change.ChangeID, change.CurrentRevision, &gerrit.ReviewInput{
Message: msg,
Tag: "autogenerated:regress",
})
if err != nil {
return cause.Wrap(err, "Failed to post comments on change '%s'", change.ChangeID)
}
}
return nil
}
func (r *regres) findTestListChange(client *gerrit.Client) (*gerrit.ChangeInfo, error) {
log.Println("Checking for existing test list change")
changes, _, err := client.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
Query: []string{fmt.Sprintf(`status:open+owner:"%v"`, r.gerritEmail)},
Limit: 1,
},
ChangeOptions: gerrit.ChangeOptions{
AdditionalFields: []string{"CURRENT_REVISION"},
},
})
if err != nil {
return nil, cause.Wrap(err, "Failed to check for existing test list")
}
if len(*changes) > 0 {
// TODO: This currently assumes that the only open changes from
// gerritEmail are test list updates. This may not always be true.
return &(*changes)[0], nil
}
return nil, nil
}
// changeInfo holds the important information about a single, open change in
// gerrit.
type changeInfo struct {
id string // Gerrit change ID.
pending bool // Is this change waiting a test for the latest patchset?
priority int // Calculated priority based on Gerrit labels.
latest git.Hash // Git hash of the latest patchset in the change.
parent git.Hash // Git hash of the changelist this change is based on.
lastUpdated time.Time // Time the change was last fetched.
commitMessage string
}
// queryChanges updates the changes map by querying gerrit for the latest open
// changes.
func queryChanges(client *gerrit.Client, changes map[string]*changeInfo) error {
log.Println("Checking for latest changes")
results, _, err := client.Changes.QueryChanges(&gerrit.QueryChangeOptions{
QueryOptions: gerrit.QueryOptions{
Query: []string{"status:open+-age:3d"},
Limit: 100,
},
})
if err != nil {
return cause.Wrap(err, "Failed to get list of changes")
}
ids := map[string]bool{}
for _, r := range *results {
ids[r.ChangeID] = true
}
// Add new changes
for id := range ids {
if _, found := changes[id]; !found {
log.Printf("Tracking new change '%v'\n", id)
changes[id] = &changeInfo{id: id}
}
}
// Remove old changes
for id := range changes {
if found := ids[id]; !found {
log.Printf("Untracking change '%v'\n", id)
delete(changes, id)
}
}
return nil
}
// update queries gerrit for information about the given change.
func (c *changeInfo) update(client *gerrit.Client) error {
change, _, err := client.Changes.GetChange(c.id, &gerrit.ChangeOptions{
AdditionalFields: []string{"CURRENT_REVISION", "CURRENT_COMMIT", "MESSAGES", "LABELS"},
})
if err != nil {
return cause.Wrap(err, "Getting info for change '%s'", c.id)
}
current, ok := change.Revisions[change.CurrentRevision]
if !ok {
return fmt.Errorf("Couldn't find current revision for change '%s'", c.id)
}
if len(current.Commit.Parents) == 0 {
return fmt.Errorf("Current commit for change '%s' has no parents", c.id)
}
kokoroPresubmit := change.Labels["Kokoro-Presubmit"].Approved.AccountID != 0
codeReviewScore := change.Labels["Code-Review"].Value
codeReviewApproved := change.Labels["Code-Review"].Approved.AccountID != 0
presubmitReady := change.Labels["Presubmit-Ready"].Approved.AccountID != 0
c.priority = 0
if presubmitReady {
c.priority += 10
}
c.priority += codeReviewScore
if codeReviewApproved {
c.priority += 2
}
if kokoroPresubmit {
c.priority++
}
// Is the change from a Googler?
canTest := strings.HasSuffix(current.Commit.Committer.Email, "@google.com")
// Has the latest patchset already been tested?
if canTest {
for _, msg := range change.Messages {
if msg.RevisionNumber == current.Number &&
strings.Contains(msg.Message, reportHeader) {
canTest = false
break
}
}
}
c.pending = canTest
c.latest = git.ParseHash(change.CurrentRevision)
c.parent = git.ParseHash(current.Commit.Parents[0].Commit)
c.commitMessage = current.Commit.Message
return nil
}
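// newTest creates a test for the given commit, with per-commit source, result
// and build directories under the cache root.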
func (r *regres) newTest(commit git.Hash) *test {
srcDir := filepath.Join(r.cacheRoot, "src", commit.String())
resDir := filepath.Join(r.cacheRoot, "res", commit.String())
return &test{
r: r,
commit: commit,
srcDir: srcDir,
resDir: resDir,
buildDir: filepath.Join(srcDir, "build"),
}
}
type test struct {
r *regres
commit git.Hash // hash of the commit to test
srcDir string // directory for the SwiftShader checkout
resDir string // directory for the test results
buildDir string // directory for SwiftShader build
keepCheckouts bool // don't delete source & build checkouts after testing
}
// cleanup removes any temporary files used by the test.
func (t *test) cleanup() {
if t.srcDir != "" && !t.keepCheckouts {
os.RemoveAll(t.srcDir)
}
}
// checkout clones the test's source commit into t.src.
func (t *test) checkout() error {
if isDir(t.srcDir) && t.keepCheckouts {
log.Printf("Reusing source cache for commit '%s'\n", t.commit)
return nil
}
log.Printf("Checking out '%s'\n", t.commit)
os.RemoveAll(t.srcDir)
if err := git.Checkout(t.srcDir, gitURL, t.commit); err != nil {
return cause.Wrap(err, "Checking out commit '%s'", t.commit)
}
log.Printf("Checked out commit '%s'\n", t.commit)
return nil
}
// buildAndRun calls t.build() followed by t.run(). Errors are logged and
// reported in the returned CommitTestResults.Error field.
func (t *test) buildAndRun(testLists testlist.Lists, d deqp) *CommitTestResults {
// Build the change.
if err := t.build(); err != nil {
msg := fmt.Sprintf("Failed to build '%s'", t.commit)
log.Println(cause.Wrap(err, msg))
return &CommitTestResults{Error: msg}
}
// Run the tests on the change.
results, err := t.run(testLists, d)
if err != nil {
msg := fmt.Sprintf("Failed to test change '%s'", t.commit)
log.Println(cause.Wrap(err, msg))
return &CommitTestResults{Error: msg}
}
return results
}
// build builds the SwiftShader source into t.buildDir.
func (t *test) build() error {
log.Printf("Building '%s'\n", t.commit)
if err := os.MkdirAll(t.buildDir, 0777); err != nil {
return cause.Wrap(err, "Failed to create build directory")
}
if err := shell.Shell(buildTimeout, t.r.cmake, t.buildDir,
"-DCMAKE_BUILD_TYPE=Release",
"-DDCHECK_ALWAYS_ON=1",
"-DREACTOR_VERIFY_LLVM_IR=1",
"-DWARNINGS_AS_ERRORS=0",
".."); err != nil {
return err
}
if err := shell.Shell(buildTimeout, t.r.make, t.buildDir, fmt.Sprintf("-j%d", runtime.NumCPU())); err != nil {
return err
}
return nil
}
// run runs all the tests.
func (t *test) run(testLists testlist.Lists, d deqp) (*CommitTestResults, error) {
log.Printf("Running tests for '%s'\n", t.commit)
outDir := filepath.Join(t.srcDir, "out")
if !isDir(outDir) { // https://swiftshader-review.googlesource.com/c/SwiftShader/+/27188
outDir = t.buildDir
}
if !isDir(outDir) {
return nil, fmt.Errorf("Couldn't find output directory")
}
log.Println("outDir:", outDir)
start := time.Now()
// Wait group that completes once all the tests have finished.
wg := sync.WaitGroup{}
results := make(chan TestResult, 256)
numTests := 0
// For each API that we are testing
for _, list := range testLists {
// Resolve the test runner
var exe string
switch list.API {
case testlist.EGL:
exe = filepath.Join(d.path, "build", "modules", "egl", "deqp-egl")
case testlist.GLES2:
exe = filepath.Join(d.path, "build", "modules", "gles2", "deqp-gles2")
case testlist.GLES3:
exe = filepath.Join(d.path, "build", "modules", "gles3", "deqp-gles3")
case testlist.Vulkan:
exe = filepath.Join(d.path, "build", "external", "vulkancts", "modules", "vulkan", "deqp-vk")
default:
return nil, fmt.Errorf("Unknown API '%v'", list.API)
}
if !isFile(exe) {
return nil, fmt.Errorf("Couldn't find dEQP executable at '%s'", exe)
}
// Build a chan for the test names to be run.
tests := make(chan string, len(list.Tests))
// Start a number of go routines to run the tests.
wg.Add(numParallelTests)
for i := 0; i < numParallelTests; i++ {
go func() {
t.deqpTestRoutine(exe, outDir, tests, results)
wg.Done()
}()
}
// Shuffle the test list.
// This attempts to mix heavy-load tests with lighter ones.
shuffled := make([]string, len(list.Tests))
for i, j := range rand.New(rand.NewSource(42)).Perm(len(list.Tests)) {
shuffled[i] = list.Tests[j]
}
// Hand the tests to the deqpTestRoutines.
for _, t := range shuffled {
tests <- t
}
// Close the tests chan to indicate that there are no more tests to run.
// The deqpTestRoutine functions will return once all tests have been
// run.
close(tests)
numTests += len(list.Tests)
}
out := CommitTestResults{
Version: dataVersion,
Tests: map[string]TestResult{},
}
// Collect the results.
finished := make(chan struct{})
lastUpdate := time.Now()
go func() {
start, i := time.Now(), 0
for r := range results {
i++
out.Tests[r.Test] = r
if time.Since(lastUpdate) > time.Minute {
lastUpdate = time.Now()
remaining := numTests - i
log.Printf("Ran %d/%d tests (%v%%). Estimated completion in %v.\n",
i, numTests, percent(i, numTests),
(time.Since(start)/time.Duration(i))*time.Duration(remaining))
}
}
close(finished)
}()
wg.Wait() // Block until all the deqpTestRoutines have finished.
close(results) // Signal no more results.
<-finished // And wait for the result collecting go-routine to finish.
out.Duration = time.Since(start)
return &out, nil
}
func (t *test) writeTestListsByStatus(testLists testlist.Lists, results *CommitTestResults) ([]string, error) {
out := []string{}
for _, list := range testLists {
files := map[testlist.Status]*os.File{}
for _, status := range testlist.Statuses {
path := testlist.FilePathWithStatus(filepath.Join(t.srcDir, list.File), status)
dir := filepath.Dir(path)
os.MkdirAll(dir, 0777)
f, err := os.Create(path)
if err != nil {
return nil, cause.Wrap(err, "Couldn't create file '%v'", path)
}
defer f.Close()
files[status] = f
out = append(out, path)
}
for _, testName := range list.Tests {
if r, found := results.Tests[testName]; found {
fmt.Fprintln(files[r.Status], testName)
}
}
}
return out, nil
}
// resultsCachePath returns the path to the cached results file for the given
// test, test lists and dEQP build.
func (t *test) resultsCachePath(testLists testlist.Lists, d deqp) string {
return filepath.Join(t.resDir, testLists.Hash(), d.hash)
}
// CommitTestResults holds the results of the tests across all APIs for a given
// commit. The CommitTestResults structure may be serialized to cache the
// results.
type CommitTestResults struct {
Version int
Error string
Tests map[string]TestResult
Duration time.Duration
}
func loadCommitTestResults(path string) (*CommitTestResults, error) {
f, err := os.Open(path)
if err != nil {
return nil, cause.Wrap(err, "Couldn't open '%s' for loading test results", path)
}
defer f.Close()
var out CommitTestResults
if err := json.NewDecoder(f).Decode(&out); err != nil {
return nil, err
}
if out.Version != dataVersion {
return nil, errors.New("Data is from an old version")
}
return &out, nil
}
func (r *CommitTestResults) save(path string) error {
os.MkdirAll(filepath.Dir(path), 0777)
f, err := os.Create(path)
if err != nil {
return cause.Wrap(err, "Couldn't open '%s' for saving test results", path)
}
defer f.Close()
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
if err := enc.Encode(r); err != nil {
return cause.Wrap(err, "Couldn't encode test results")
}
return nil
}
type testStatusAndError struct {
status testlist.Status
error string
}
type commonFailure struct {
count int
testStatusAndError
exampleTest string
}
func (r *CommitTestResults) commonFailures() []commonFailure {
failures := map[testStatusAndError]int{}
examples := map[testStatusAndError]string{}
for name, test := range r.Tests {
if !test.Status.Failing() {
continue
}
key := testStatusAndError{test.Status, test.Err}
if count, ok := failures[key]; ok {
failures[key] = count + 1
} else {
failures[key] = 1
examples[key] = name
}
}
out := make([]commonFailure, 0, len(failures))
for failure, count := range failures {
out = append(out, commonFailure{count, failure, examples[failure]})
}
sort.Slice(out, func(i, j int) bool { return out[i].count > out[j].count })
return out
}
// compare returns a string describing all differences between two
// CommitTestResults. This string is used as the report message posted to the
// gerrit code review.
func compare(old, new *CommitTestResults) string {
if old.Error != "" {
return old.Error
}
if new.Error != "" {
return new.Error
}
oldStatusCounts, newStatusCounts := map[testlist.Status]int{}, map[testlist.Status]int{}
totalTests := 0
broken, fixed, failing, removed, changed := []string{}, []string{}, []string{}, []string{}, []string{}
for test, new := range new.Tests {
old, found := old.Tests[test]
if !found {
log.Printf("Test result for '%s' not found on old change\n", test)
continue
}
switch {
case !old.Status.Failing() && new.Status.Failing():
broken = append(broken, test)
case !old.Status.Passing() && new.Status.Passing():
fixed = append(fixed, test)
case old.Status != new.Status:
changed = append(changed, test)
case old.Status.Failing() && new.Status.Failing():
failing = append(failing, test) // Still broken
}
totalTests++
if found {
oldStatusCounts[old.Status] = oldStatusCounts[old.Status] + 1
}
newStatusCounts[new.Status] = newStatusCounts[new.Status] + 1
}
for test := range old.Tests {
if _, found := new.Tests[test]; !found {
removed = append(removed, test)
}
}
sb := strings.Builder{}
// list prints the list l to sb, truncating after a limit.
list := func(l []string) {
const max = 10
for i, s := range l {
sb.WriteString(" ")
if i == max {
sb.WriteString(fmt.Sprintf("> %d more\n", len(l)-i))
break
}
sb.WriteString(fmt.Sprintf("> %s", s))
if n, ok := new.Tests[s]; ok {
if o, ok := old.Tests[s]; ok && n != o {
sb.WriteString(fmt.Sprintf(" - [%s -> %s]", o.Status, n.Status))
} else {
sb.WriteString(fmt.Sprintf(" - [%s]", n.Status))
}
sb.WriteString("\n")
for _, line := range strings.Split(n.Err, "\n") {
if line != "" {
sb.WriteString(fmt.Sprintf(" %v\n", line))
}
}
} else {
sb.WriteString("\n")
}
}
}
sb.WriteString(fmt.Sprintf(" Total tests: %d\n", totalTests))
for _, s := range []struct {
label string
status testlist.Status
}{
{" Pass", testlist.Pass},
{" Fail", testlist.Fail},
{" Timeout", testlist.Timeout},
{" UNIMPLEMENTED()", testlist.Unimplemented},
{" UNSUPPORTED()", testlist.Unsupported},
{" UNREACHABLE()", testlist.Unreachable},
{" ASSERT()", testlist.Assert},
{" ABORT()", testlist.Abort},
{" Crash", testlist.Crash},
{" Not Supported", testlist.NotSupported},
{"Compatibility Warning", testlist.CompatibilityWarning},
{" Quality Warning", testlist.QualityWarning},
} {
old, new := oldStatusCounts[s.status], newStatusCounts[s.status]
if old == 0 && new == 0 {
continue
}
change := percent64(int64(new-old), int64(old))
switch {
case old == new:
sb.WriteString(fmt.Sprintf("%s: %v\n", s.label, new))
case change == 0:
sb.WriteString(fmt.Sprintf("%s: %v -> %v (%+d)\n", s.label, old, new, new-old))
default:
sb.WriteString(fmt.Sprintf("%s: %v -> %v (%+d %+d%%)\n", s.label, old, new, new-old, change))
}
}
if old, new := old.Duration, new.Duration; old != 0 && new != 0 {
label := " Time taken"
change := percent64(int64(new-old), int64(old))
switch {
case old == new:
sb.WriteString(fmt.Sprintf("%s: %v\n", label, new))
case change == 0:
sb.WriteString(fmt.Sprintf("%s: %v -> %v\n", label, old, new))
default:
sb.WriteString(fmt.Sprintf("%s: %v -> %v (%+d%%)\n", label, old, new, change))
}
}
if n := len(broken); n > 0 {
sort.Strings(broken)
sb.WriteString(fmt.Sprintf("\n--- This change breaks %d tests: ---\n", n))
list(broken)
}
if n := len(fixed); n > 0 {
sort.Strings(fixed)
sb.WriteString(fmt.Sprintf("\n--- This change fixes %d tests: ---\n", n))
list(fixed)
}
if n := len(removed); n > 0 {
sort.Strings(removed)
sb.WriteString(fmt.Sprintf("\n--- This change removes %d tests: ---\n", n))
list(removed)
}
if n := len(changed); n > 0 {
sort.Strings(changed)
sb.WriteString(fmt.Sprintf("\n--- This change alters %d tests: ---\n", n))
list(changed)
}
if len(broken) == 0 && len(fixed) == 0 && len(removed) == 0 && len(changed) == 0 {
sb.WriteString(fmt.Sprintf("\n--- No change in test results ---\n"))
}
type timingDiff struct {
old time.Duration
new time.Duration
relDelta float64
name string
}
timingDiffs := []timingDiff{}
for name, new := range new.Tests {
if old, ok := old.Tests[name]; ok {
old, new := old.TimeTaken, new.TimeTaken
delta := new.Seconds() - old.Seconds()
absDelta := math.Abs(delta)
relDelta := delta / old.Seconds()
if absDelta > 2.0 && math.Abs(relDelta) > 0.05 { // If change > ±2s and > than ±5% old time...
timingDiffs = append(timingDiffs, timingDiff{
old: old,
new: new,
name: name,
relDelta: relDelta,
})
}
}
}
if len(timingDiffs) > 0 {
sb.WriteString(fmt.Sprintf("\n--- Test duration changes ---\n"))
const limit = 10
if len(timingDiffs) > limit {
sort.Slice(timingDiffs, func(i, j int) bool { return math.Abs(timingDiffs[i].relDelta) > math.Abs(timingDiffs[j].relDelta) })
timingDiffs = timingDiffs[:limit]
}
sort.Slice(timingDiffs, func(i, j int) bool { return timingDiffs[i].relDelta < timingDiffs[j].relDelta })
for _, d := range timingDiffs {
percent := percent64(int64(d.new-d.old), int64(d.old))
sb.WriteString(fmt.Sprintf(" > %v: %v -> %v (%+d%%)\n", d.name, d.old, d.new, percent))
}
}
return sb.String()
}
// TestResult holds the results of a single API test.
type TestResult struct {
Test string
Status testlist.Status
TimeTaken time.Duration
Err string `json:",omitempty"`
}
func (r TestResult) String() string {
if r.Err != "" {
return fmt.Sprintf("%s: %s (%s)", r.Test, r.Status, r.Err)
}
return fmt.Sprintf("%s: %s", r.Test, r.Status)
}
var (
// Regular expression to parse the output of a dEQP test.
deqpRE = regexp.MustCompile(`(Fail|Pass|NotSupported|CompatibilityWarning|QualityWarning) \(([^\)]*)\)`)
// Regular expression to parse a test that failed due to UNIMPLEMENTED()
unimplementedRE = regexp.MustCompile(`[^\n]*UNIMPLEMENTED:[^\n]*`)
// Regular expression to parse a test that failed due to UNSUPPORTED()
unsupportedRE = regexp.MustCompile(`[^\n]*UNSUPPORTED:[^\n]*`)
// Regular expression to parse a test that failed due to UNREACHABLE()
unreachableRE = regexp.MustCompile(`[^\n]*UNREACHABLE:[^\n]*`)
// Regular expression to parse a test that failed due to ASSERT()
assertRE = regexp.MustCompile(`[^\n]*ASSERT\([^\)]*\)[^\n]*`)
// Regular expression to parse a test that failed due to ABORT()
abortRE = regexp.MustCompile(`[^\n]*ABORT:[^\n]*`)
)
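// Illustrative examples of the kind of output the expressions above are expected to
// match (not taken from real dEQP logs):
//   deqpRE:          "Pass (Pass)"           -> submatches "Pass", "Pass"
//   deqpRE:          "Fail (Image mismatch)" -> submatches "Fail", "Image mismatch"
//   unimplementedRE: any single line containing "UNIMPLEMENTED:"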
// deqpTestRoutine repeatedly runs the dEQP test executable exe with the tests
// taken from tests. The output of the dEQP test is parsed, and the test result
// is written to results.
// deqpTestRoutine only returns once the tests chan has been closed.
// deqpTestRoutine does not close the results chan.
func (t *test) deqpTestRoutine(exe, outDir string, tests <-chan string, results chan<- TestResult) {
nextTest:
for name := range tests {
// log.Printf("Running test '%s'\n", name)
env := []string{
"LD_LIBRARY_PATH=" + t.buildDir + ":" + os.Getenv("LD_LIBRARY_PATH"),
"VK_ICD_FILENAMES=" + filepath.Join(outDir, "Linux", "vk_swiftshader_icd.json"),
"DISPLAY=" + os.Getenv("DISPLAY"),
"LIBC_FATAL_STDERR_=1", // Put libc explosions into logs.
}
start := time.Now()
outRaw, err := shell.Exec(testTimeout, exe, filepath.Dir(exe), env,
"--deqp-surface-type=pbuffer",
"--deqp-shadercache=disable",
"--deqp-log-images=disable",
"--deqp-log-shader-sources=disable",
"--deqp-log-flush=disable",
"-n="+name)
duration := time.Since(start)
out := string(outRaw)
out = strings.ReplaceAll(out, t.srcDir, "<SwiftShader>")
out = strings.ReplaceAll(out, exe, "<dEQP>")
switch err.(type) {
default:
for _, test := range []struct {
re *regexp.Regexp
s testlist.Status
}{
{unimplementedRE, testlist.Unimplemented},
{unsupportedRE, testlist.Unsupported},
{unreachableRE, testlist.Unreachable},
{assertRE, testlist.Assert},
{abortRE, testlist.Abort},
} {
if s := test.re.FindString(out); s != "" {
results <- TestResult{
Test: name,
Status: test.s,
TimeTaken: duration,
Err: s,
}
continue nextTest
}
}
results <- TestResult{
Test: name,
Status: testlist.Crash,
TimeTaken: duration,
Err: out,
}
case shell.ErrTimeout:
log.Printf("Timeout for test '%v'\n", name)
results <- TestResult{
Test: name,
Status: testlist.Timeout,
TimeTaken: duration,
}
case nil:
toks := deqpRE.FindStringSubmatch(out)
if len(toks) < 3 {
err := fmt.Sprintf("Couldn't parse test '%v' output:\n%s", name, out)
log.Println("Warning: ", err)
results <- TestResult{Test: name, Status: testlist.Fail, Err: err}
continue
}
switch toks[1] {
case "Pass":
results <- TestResult{Test: name, Status: testlist.Pass, TimeTaken: duration}
case "NotSupported":
results <- TestResult{Test: name, Status: testlist.NotSupported, TimeTaken: duration}
case "CompatibilityWarning":
results <- TestResult{Test: name, Status: testlist.CompatibilityWarning, TimeTaken: duration}
case "QualityWarning":
results <- TestResult{Test: name, Status: testlist.QualityWarning, TimeTaken: duration}
case "Fail":
var err string
if toks[2] != "Fail" {
err = toks[2]
}
results <- TestResult{Test: name, Status: testlist.Fail, Err: err, TimeTaken: duration}
default:
err := fmt.Sprintf("Couldn't parse test output:\n%s", out)
log.Println("Warning: ", err)
results <- TestResult{Test: name, Status: testlist.Fail, Err: err, TimeTaken: duration}
}
}
}
}
// loadTestLists loads the full test lists from the json file.
// The file is first searched at {t.srcDir}/{relPath}
// If this cannot be found, then the file is searched at the fallback path
// {CWD}/{relPath}
// This allows CLs to alter the list of tests to be run, as well as providing
// a default set.
func (t *test) loadTestLists(relPath string) (testlist.Lists, error) {
// Search for the test.json file in the checked-out source directory.
if path := filepath.Join(t.srcDir, relPath); isFile(path) {
log.Printf("Loading test list '%v' from commit\n", relPath)
return testlist.Load(t.srcDir, path)
}
// Not found there. Search locally.
wd, err := os.Getwd()
if err != nil {
return testlist.Lists{}, cause.Wrap(err, "Couldn't get current working directory")
}
if path := filepath.Join(wd, relPath); isFile(path) {
log.Printf("Loading test list '%v' from regres\n", relPath)
return testlist.Load(wd, relPath)
}
return nil, errors.New("Couldn't find a test list file")
}
// isFile returns true if path is a file.
func isFile(path string) bool {
s, err := os.Stat(path)
if err != nil {
return false
}
return !s.IsDir()
}
// isDir returns true if path is a directory.
func isDir(path string) bool {
s, err := os.Stat(path)
if err != nil {
return false
}
return s.IsDir()
}
// percent returns the percentage completion of i items out of n.
func percent(i, n int) int {
return int(percent64(int64(i), int64(n)))
}
// percent64 returns the percentage completion of i items out of n.
func percent64(i, n int64) int64 {
if n == 0 {
return 0
}
return (100 * i) / n
}
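// Illustrative (not in the original): percent64(3, 12) == 25, and percent64(1, 3) == 33
// because the integer division truncates.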
type date struct {
year int
month time.Month
day int
}
func toDate(t time.Time) date {
d := date{}
d.year, d.month, d.day = t.Date()
return d
}
|
[
"\"LD_LIBRARY_PATH\"",
"\"DISPLAY\""
] |
[] |
[
"LD_LIBRARY_PATH",
"DISPLAY"
] |
[]
|
["LD_LIBRARY_PATH", "DISPLAY"]
|
go
| 2 | 0 | |
util.py
|
import numpy as np
from itertools import groupby, accumulate
from collections import Counter
########## Incense ##########
from incense import ExperimentLoader
# Try to locate config file for Mongo DB
import importlib
spec = importlib.util.find_spec('mongodburi')
if spec is not None:
from mongodburi import mongo_uri, db_name
else:
mongo_uri, db_name = None, None
def get_loader(uri=mongo_uri, db=db_name):
loader = ExperimentLoader(
mongo_uri=uri,
db_name=db
)
return loader
########## Util ##########
def group(l):
'''Given a sorted list, group by value and return an iterator of (beg, len) pairs'''
lens = [len(list(g)) for _, g in groupby(l)]
begs = [0] + list(accumulate(lens))[:-1]
return zip(begs, lens)
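# Illustrative example (not part of the original):
#   list(group([1, 1, 2, 3, 3, 3])) -> [(0, 2), (2, 1), (3, 3)]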
def weight(idx, w):
return sum([w[i] for i in idx])
def find_biggest_branch(Y, branches):
'''Return the index of the branch with the largest cardinality (most objects)'''
h = np.argmax([len(i) for v,i in branches])
return h
def split_by_test(X, idx, test):
'''
:return: a list of (val of test, idx of branch)
'''
t = test
x = X[idx][:,t]
ix = np.argsort(x)
x, idx = x[ix], idx[ix]
gidx = [(x[b], idx[b:b+l]) for b,l in group(x)]
return gidx
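# Illustrative example (not part of the original): with
#   X = np.array([[0], [1], [0]]), idx = np.array([0, 1, 2]), test = 0
# the call returns [(0, array([0, 2])), (1, array([1]))].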
def make_nonexc(Y, sorted=False, aggregate=False):
# return dict of (label, #objects in Y not with the label)
# if aggregate=True, return the total count of excluded objects summed over all classes
if not sorted:
Y = np.sort(Y)
if aggregate:
return sum([(len(Y)-l) * l for b, l in group(Y)])
else:
return dict([(Y[b], len(Y)-l) for b, l in group(Y)])
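# Illustrative example (not part of the original):
#   make_nonexc(np.array([0, 0, 1]))                 -> {0: 1, 1: 2}
#   make_nonexc(np.array([0, 0, 1]), aggregate=True) -> 4  (each cross-class pair counted twice)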
def make_pairs(Y, idx=None, pairs=False):
'''
:param pairs: if True, return the actual list of pairs; otherwise return only the count, to save time
'''
if idx is None:
idx = np.arange(Y.shape[0])
if len(idx) == 0:
return []
Y = Y[idx]
iY = np.argsort(Y)
Y, idx = Y[iY], idx[iY]
if pairs:
pairs = []
for b,l in group(Y):
for i1 in idx[b:b+l]:
for i2 in idx[b+l:]:
pairs.append((i1,i2))
return pairs
else:
sum = 0
for b, l in group(Y):
sum = sum + l * (len(idx)-b-l)
return sum
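# Illustrative example (not part of the original): with Y = np.array([0, 0, 1])
#   make_pairs(Y)             -> 2                 (count of cross-class pairs)
#   make_pairs(Y, pairs=True) -> [(0, 2), (1, 2)]  (index pairs of objects with different labels)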
########## Data related ##########
def gen_costs(ntest, scale=1, rn=None):
if rn is not None:
r = np.random.RandomState(rn)
c = r.rand(ntest)
else:
c = np.random.rand(ntest)
return c * scale
most_common = lambda l: np.bincount(l).argmax() # labels are non-neg ints
def majority_label(X, Y):
'''use majority class for each object'''
lbl = dict()
for x,y in zip(X,Y):
key = tuple(x)
if key not in lbl:
lbl[key] = [y]
else:
lbl[key].append(y)
# Assign the majority label to each y
for k in lbl.keys():
lbl[k] = most_common(lbl[k])
Y = np.array([lbl[tuple(x)] for x in X])
cnt = Counter([tuple(x) for x in X])
newX, newY = zip(*[(list(k), lbl[k]) for k in cnt.keys()]) # list(k) gives back x
newX, newY = np.array(newX), np.array(newY)
idx = np.arange(newX.shape[0])
tot = sum([v for v in cnt.values()])
w = dict([(i, cnt[tuple(newX[i])] / tot) for i in idx])
X, Y = newX, newY
return X, Y, w
########## TO BE DELETED ##########
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
FrontEndTool/wsgi.py
|
"""
WSGI config for FrontEndTool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FrontEndTool.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
test/private.py
|
import os
import unittest
from stex_client.private import Private
class PrivateTestCase(unittest.TestCase):
def setUp(self):
self.option = {
'tokenObject': {
'access_token': os.environ.get('ENV_TOKEN'),
},
'accessTokenUrl': 'https://api3.stex.com/oauth/token',
'scope': 'profile trade withdrawal reports push settings',
's2s': True
}
def test_profile_info(self):
res = Private(self.option).profile_info()
self.assertTrue(res['success'])
def test_profile_wallets(self):
res = Private(self.option).profile_wallets()
self.assertTrue(res['success'])
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"ENV_TOKEN"
] |
[]
|
["ENV_TOKEN"]
|
python
| 1 | 0 | |
test/1.2.0/doi-identifiers/D_/test_1_2_0_doi_identifiers_D__bibtex_object.py
|
import os
import pytest
from test.contracts.bibtex_object import Contract
from cffconvert.behavior_1_2_x.bibtex_object import BibtexObject
from cffconvert import Citation
@pytest.fixture(scope="module")
def bibtex_object():
fixture = os.path.join(os.path.dirname(__file__), "CITATION.cff")
with open(fixture, "rt", encoding="utf-8") as f:
cffstr = f.read()
citation = Citation(cffstr)
return BibtexObject(citation.cffobj, initialize_empty=True)
class TestBibtexObject(Contract):
def test_as_string(self, bibtex_object):
actual_bibtex = bibtex_object.add_all().as_string()
fixture = os.path.join(os.path.dirname(__file__), "bibtex.bib")
with open(fixture, "rt", encoding="utf-8") as f:
expected_bibtex = f.read()
assert actual_bibtex == expected_bibtex
def test_author(self, bibtex_object):
assert bibtex_object.add_author().author == 'author = {Test author}'
def test_check_cffobj(self, bibtex_object):
bibtex_object.check_cffobj()
# doesn't need an assert
def test_doi(self, bibtex_object):
assert bibtex_object.add_doi().doi == 'doi = {10.0000/from-doi}'
def test_month(self, bibtex_object):
assert bibtex_object.add_month().month is None
def test_title(self, bibtex_object):
assert bibtex_object.add_title().title == 'title = {Test title}'
def test_url(self, bibtex_object):
assert bibtex_object.add_url().url is None
def test_year(self, bibtex_object):
assert bibtex_object.add_year().year is None
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
todolist/wsgi.py
|
"""
WSGI config for todolist project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
broker/segmentio/segmentio_test.go
|
package segmentio_test
import (
"os"
"strings"
"sync/atomic"
"testing"
"github.com/btccom/go-micro/v2/broker"
segmentio "github.com/btccom/go-micro-plugins/broker/segmentio/v2"
segjson "github.com/btccom/go-micro-plugins/codec/segmentio/v2"
)
func BenchmarkSegmentioCodecJsonPublish(b *testing.B) {
// b.Skip()
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
b.Skip()
}
var addrs []string
if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
addrs = []string{"127.0.0.1:9092"}
} else {
addrs = strings.Split(addr, ",")
}
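// Illustrative note (not part of the original): BROKER_ADDRS takes a comma-separated
// list of broker addresses, e.g. BROKER_ADDRS="127.0.0.1:9092,127.0.0.1:9093".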
brk := segmentio.NewBroker(broker.Addrs(addrs...))
if err := brk.Connect(); err != nil {
b.Fatal(err)
}
defer func() {
if err := brk.Disconnect(); err != nil {
b.Fatal(err)
}
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := brk.Publish("test_topic", bm); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkSegmentioCodecSegmentioPublish(b *testing.B) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
b.Skip()
}
var addrs []string
if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
addrs = []string{"127.0.0.1:9092"}
} else {
addrs = strings.Split(addr, ",")
}
brk := segmentio.NewBroker(broker.Codec(segjson.Marshaler{}), broker.Addrs(addrs...))
if err := brk.Connect(); err != nil {
b.Fatal(err)
}
defer func() {
if err := brk.Disconnect(); err != nil {
b.Fatal(err)
}
}()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := brk.Publish("test_topic", bm); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkSegmentioCodecJsonSubscribe(b *testing.B) {
b.Skip()
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
b.Skip()
}
var addrs []string
if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
addrs = []string{"127.0.0.1:9092"}
} else {
addrs = strings.Split(addr, ",")
}
brk := segmentio.NewBroker(broker.Addrs(addrs...))
if err := brk.Connect(); err != nil {
b.Fatal(err)
}
defer func() {
if err := brk.Disconnect(); err != nil {
b.Fatal(err)
}
}()
cnt := 0
var done atomic.Value
done.Store(false)
exit := make(chan struct{})
fn := func(msg broker.Event) error {
if cnt == 0 {
b.ResetTimer()
}
cnt++
if cnt == b.N {
if v := done.Load().(bool); !v {
done.Store(true)
close(exit)
}
}
return msg.Ack()
}
go func() {
for i := 0; i < b.N; i++ {
if v, ok := done.Load().(bool); ok && v {
return
}
if err := brk.Publish("test_topic", bm); err != nil {
b.Fatal(err)
}
}
}()
sub, err := brk.Subscribe("test_topic", fn, broker.Queue("test"))
if err != nil {
b.Fatal(err)
}
defer func() {
if err := sub.Unsubscribe(); err != nil {
b.Fatal(err)
}
}()
<-exit
}
func BenchmarkSegmentioCodecSegmentioSubscribe(b *testing.B) {
if tr := os.Getenv("TRAVIS"); len(tr) > 0 {
b.Skip()
}
var addrs []string
if addr := os.Getenv("BROKER_ADDRS"); len(addr) == 0 {
addrs = []string{"127.0.0.1:9092"}
} else {
addrs = strings.Split(addr, ",")
}
brk := segmentio.NewBroker(broker.Codec(segjson.Marshaler{}), broker.Addrs(addrs...))
if err := brk.Connect(); err != nil {
b.Fatal(err)
}
defer func() {
if err := brk.Disconnect(); err != nil {
b.Fatal(err)
}
}()
cnt := 0
var done atomic.Value
done.Store(false)
exit := make(chan struct{})
fn := func(msg broker.Event) error {
if cnt == 0 {
b.ResetTimer()
}
cnt++
if cnt == b.N {
if v, ok := done.Load().(bool); ok && !v {
done.Store(true)
close(exit)
}
}
return msg.Ack()
}
go func() {
for i := 0; i < b.N; i++ {
if v := done.Load().(bool); v {
return
}
if err := brk.Publish("test_topic", bm); err != nil {
b.Fatal(err)
}
}
}()
sub, err := brk.Subscribe("test_topic", fn, broker.Queue("test"))
if err != nil {
b.Fatal(err)
}
defer func() {
if err := sub.Unsubscribe(); err != nil {
b.Fatal(err)
}
}()
<-exit
}
|
[
"\"TRAVIS\"",
"\"BROKER_ADDRS\"",
"\"TRAVIS\"",
"\"BROKER_ADDRS\"",
"\"TRAVIS\"",
"\"BROKER_ADDRS\"",
"\"TRAVIS\"",
"\"BROKER_ADDRS\""
] |
[] |
[
"BROKER_ADDRS",
"TRAVIS"
] |
[]
|
["BROKER_ADDRS", "TRAVIS"]
|
go
| 2 | 0 | |
src/itop_top.py
|
import cv2
import torch
import torch.nn as nn
import numpy as np
import scipy.io as scio
import os
from PIL import Image
from torch.autograd import Variable
import torch.utils.data
import sys
import model as model
import anchor as anchor
from tqdm import tqdm
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Data hyperparameters
keypointsNumber = 15
cropWidth = 288
cropHeight = 288
batch_size = 12
depthFactor = 50
save_dir = './result'
try:
os.makedirs(save_dir)
except OSError:
pass
testingImageDir = '/data/zhangboshen/CODE/Anchor_Pose_fpn/data/top_test/depthImages/'
keypointsfileTest = '../data/itop_top/itop_top_keypoints3D_test.mat'
bndbox_test = scio.loadmat('../data/itop_top/itop_top_bndbox_test.mat' )['FRbndbox_test']
center_test = scio.loadmat('../data/itop_top/itop_top_center_test.mat')['centre_pixel']
Img_mean = np.load('../data/itop_top/itop_top_mean.npy')[3]
Img_std = np.load('../data/itop_top/itop_top_std.npy')[3]
model_dir = '../model/ITOP_top.pth'
def pixel2world(x,y,z):
worldX = (x - 160.0)*z*0.0035
worldY = (120.0 - y)*z*0.0035
return worldX,worldY
def world2pixel(x,y,z):
pixelX = 160.0 + x / (0.0035 * z)
pixelY = 120.0 - y / (0.0035 * z)
return pixelX,pixelY
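# Illustrative check (not part of the original): the two mappings are inverses, e.g.
#   world2pixel(0.0, 0.0, 2.0)     -> (160.0, 120.0)
#   pixel2world(160.0, 120.0, 2.0) -> (0.0, 0.0)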
keypointsWorldtest = scio.loadmat(keypointsfileTest)['keypoints3D']
keypointsPixeltest = np.ones((len(keypointsWorldtest),15,2),dtype='float32')
keypointsPixeltest_tuple = world2pixel(keypointsWorldtest[:,:,0],keypointsWorldtest[:,:,1],keypointsWorldtest[:,:,2])
keypointsPixeltest[:,:,0] = keypointsPixeltest_tuple[0]
keypointsPixeltest[:,:,1] = keypointsPixeltest_tuple[1]
joint_id_to_name = {
0: 'Head',
1: 'Neck',
2: 'RShoulder',
3: 'LShoulder',
4: 'RElbow',
5: 'LElbow',
6: 'RHand',
7: 'LHand',
8: 'Torso',
9: 'RHip',
10: 'LHip',
11: 'RKnee',
12: 'LKnee',
13: 'RFoot',
14: 'LFoot',
}
def dataPreprocess(index, img, keypointsPixel, keypointsWorld, bndbox, center):
imageOutputs = np.ones((cropHeight, cropWidth, 1), dtype='float32')
labelOutputs = np.ones((keypointsNumber, 3), dtype = 'float32')
new_Xmin = max(bndbox[index][0], 0)
new_Ymin = max(bndbox[index][1], 0)
new_Xmax = min(bndbox[index][2], img.shape[1] - 1)
new_Ymax = min(bndbox[index][3], img.shape[0] - 1)
imCrop = img.copy()[int(new_Ymin):int(new_Ymax), int(new_Xmin):int(new_Xmax)]
imgResize = cv2.resize(imCrop, (cropWidth, cropHeight), interpolation=cv2.INTER_NEAREST)
imgResize = np.asarray(imgResize,dtype = 'float32') # H*W*C
imgResize = (imgResize - Img_mean) / Img_std
## label
label_xy = np.ones((keypointsNumber, 2), dtype = 'float32')
label_xy[:,0] = (keypointsPixel[index,:,0] - new_Xmin)*cropWidth/(new_Xmax - new_Xmin)
label_xy[:,1] = (keypointsPixel[index,:,1] - new_Ymin)*cropHeight/(new_Ymax - new_Ymin) # y
imageOutputs[:,:,0] = imgResize
labelOutputs[:,1] = label_xy[:,0]
labelOutputs[:,0] = label_xy[:,1]
labelOutputs[:,2] = (keypointsWorld.copy()[index,:,2])*depthFactor
imageOutputs = np.asarray(imageOutputs)
imageNCHWOut = imageOutputs.transpose(2, 0, 1) # [H, W, C] --->>> [C, H, W]
imageNCHWOut = np.asarray(imageNCHWOut)
labelOutputs = np.asarray(labelOutputs)
data, label = torch.from_numpy(imageNCHWOut), torch.from_numpy(labelOutputs)
return data, label
###################### Pytorch dataloader #################
class my_dataloader(torch.utils.data.Dataset):
def __init__(self, trainingImageDir, bndbox, keypointsPixel, keypointsWorld, center):
self.trainingImageDir = trainingImageDir
self.mean = Img_mean
self.std = Img_std
self.bndbox = bndbox
self.keypointsPixel = keypointsPixel
self.keypointsWorld = keypointsWorld
self.center = center
def __getitem__(self, index):
data4DTemp = scio.loadmat(self.trainingImageDir + str(index+1) + '.mat')['DepthNormal']
depthTemp = data4DTemp[:,:,3]
data, label = dataPreprocess(index, depthTemp, self.keypointsPixel, self.keypointsWorld, self.bndbox, self.center)
return data, label
def __len__(self):
return len(self.bndbox)
test_image_datasets = my_dataloader(testingImageDir, bndbox_test, keypointsPixeltest, keypointsWorldtest, center_test)
test_dataloaders = torch.utils.data.DataLoader(test_image_datasets, batch_size = batch_size,
shuffle = False, num_workers = 8)
def main():
net = model.A2J_model(num_classes = keypointsNumber)
net.load_state_dict(torch.load(model_dir))
net = net.cuda()
net.eval()
post_precess = anchor.post_process(shape=[cropHeight//16,cropWidth//16],stride=16,P_h=None, P_w=None)
output = torch.FloatTensor()
for i, (img, label) in tqdm(enumerate(test_dataloaders)):
with torch.no_grad():
img, label = img.cuda(), label.cuda()
heads = net(img)
pred_keypoints = post_precess(heads,voting=False)
output = torch.cat([output,pred_keypoints.data.cpu()], 0)
result = output.cpu().data.numpy()
print('Accuracy:', evaluation10CMRule(result,keypointsWorldtest,bndbox_test, center_test))
evaluation10CMRule_perJoint(result,keypointsWorldtest,bndbox_test, center_test)
def evaluation10CMRule(source, target, Bndbox, center):
assert np.shape(source)==np.shape(target), "source has different shape with target"
Test1_ = np.zeros(source.shape)
Test1_[:, :, 0] = source[:,:,1]
Test1_[:, :, 1] = source[:,:,0]
Test1_[:, :, 2] = source[:,:,2]
Test1 = Test1_ # [x, y, z]
for i in range(len(Test1_)):
Test1[i,:,0] = Test1_[i,:,0]*(Bndbox[i,2]-Bndbox[i,0])/cropWidth + Bndbox[i,0] # x
Test1[i,:,1] = Test1_[i,:,1]*(Bndbox[i,3]-Bndbox[i,1])/cropHeight + Bndbox[i,1] # y
Test1[i,:,2] = Test1_[i,:,2]/depthFactor #+ center[i][0][2]
TestWorld = np.ones((len(Test1),keypointsNumber,3))
TestWorld_tuple = pixel2world(Test1[:,:,0],Test1[:,:,1],Test1[:,:,2])
TestWorld[:,:,0] = TestWorld_tuple[0]
TestWorld[:,:,1] = TestWorld_tuple[1]
TestWorld[:,:,2] = Test1[:,:,2]
count = 0
for i in range(len(source)):
for j in range(keypointsNumber):
if np.square(TestWorld[i,j,0] - target[i,j,0]) + np.square(TestWorld[i,j,1] - target[i,j,1]) + np.square(TestWorld[i,j,2] - target[i,j,2])<np.square(0.1): #10cm
count = count + 1
accuracy = count/(len(source)*keypointsNumber)
return accuracy
def evaluation10CMRule_perJoint(source, target, Bndbox, center):
assert np.shape(source)==np.shape(target), "source has different shape with target"
Test1_ = np.zeros(source.shape)
Test1_[:, :, 0] = source[:,:,1]
Test1_[:, :, 1] = source[:,:,0]
Test1_[:, :, 2] = source[:,:,2]
Test1 = Test1_ # [x, y, z]
for i in range(len(Test1_)):
Test1[i,:,0] = Test1_[i,:,0]*(Bndbox[i,2]-Bndbox[i,0])/cropWidth + Bndbox[i,0] # x
Test1[i,:,1] = Test1_[i,:,1]*(Bndbox[i,3]-Bndbox[i,1])/cropHeight + Bndbox[i,1] # y
Test1[i,:,2] = Test1_[i,:,2]/depthFactor #+ center[i][0][2]
TestWorld = np.ones((len(Test1),keypointsNumber,3))
TestWorld_tuple = pixel2world(Test1[:,:,0],Test1[:,:,1],Test1[:,:,2])
TestWorld[:,:,0] = TestWorld_tuple[0]
TestWorld[:,:,1] = TestWorld_tuple[1]
TestWorld[:,:,2] = Test1[:,:,2]
count = 0
accuracy = 0
for j in range(keypointsNumber):
for i in range(len(source)):
if np.square(TestWorld[i,j,0] - target[i,j,0]) + np.square(TestWorld[i,j,1] - target[i,j,1]) + np.square(TestWorld[i,j,2] - target[i,j,2])<np.square(0.1): #10cm
count = count + 1
accuracy = count/(len(source))
print('joint_', j,joint_id_to_name[j], ', accuracy: ', accuracy)
accuracy = 0
count = 0
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
train.py
|
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from model import Model
from test import validation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling, opt.Prediction)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
""" setup loss """
if 'CTC' in opt.Prediction:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
# loss averager
loss_avg = Averager()
# keep only the parameters that require gradient descent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, model.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
if opt.adam:
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
else:
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps)
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
start_time = time.time()
best_accuracy = -1
best_norm_ED = -1
iteration = start_iter
while(True):
# train part
image_tensors, labels = train_dataset.get_batch()
image = image_tensors.to(device)
text, length = converter.encode(labels, batch_max_length=opt.batch_max_length)
batch_size = image.size(0)
if 'CTC' in opt.Prediction:
preds = model(image, text)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
preds = preds.log_softmax(2).permute(1, 0, 2)
cost = criterion(preds, text, preds_size, length)
else:
preds = model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
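# Shape note (added for clarity, not in the original): torch.nn.CTCLoss expects
# log-probabilities of shape (T, N, C); the log_softmax(2).permute(1, 0, 2) in the
# CTC branch above converts the model's batch-first (N, T, C) output into that layout.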
model.zero_grad()
cost.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) # gradient clipping with 5 (Default)
optimizer.step()
loss_avg.add(cost)
# validation part
if (iteration + 1) % opt.valInterval == 0 or iteration == 0: # To see training progress, we also conduct validation when 'iteration == 0'
elapsed_time = time.time() - start_time
# for log
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
model.eval()
with torch.no_grad():
valid_loss, current_accuracy, current_norm_ED, preds, confidence_score, labels, infer_time, length_of_data = validation(
model, criterion, valid_loader, converter, opt)
model.train()
# training loss and validation loss
loss_log = f'[{iteration+1}/{opt.num_iter}] Train loss: {loss_avg.val():0.5f}, Valid loss: {valid_loss:0.5f}, Elapsed_time: {elapsed_time:0.5f}'
loss_avg.reset()
current_model_log = f'{"Current_accuracy":17s}: {current_accuracy:0.3f}, {"Current_norm_ED":17s}: {current_norm_ED:0.2f}'
# keep best accuracy model (on valid dataset)
if current_accuracy > best_accuracy:
best_accuracy = current_accuracy
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_accuracy.pth')
if current_norm_ED > best_norm_ED:
best_norm_ED = current_norm_ED
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/best_norm_ED.pth')
best_model_log = f'{"Best_accuracy":17s}: {best_accuracy:0.3f}, {"Best_norm_ED":17s}: {best_norm_ED:0.2f}'
loss_model_log = f'{loss_log}\n{current_model_log}\n{best_model_log}'
print(loss_model_log)
log.write(loss_model_log + '\n')
# show some predicted results
dashed_line = '-' * 80
head = f'{"Ground Truth":25s} | {"Prediction":25s} | Confidence Score & T/F'
predicted_result_log = f'{dashed_line}\n{head}\n{dashed_line}\n'
for gt, pred, confidence in zip(labels[:5], preds[:5], confidence_score[:5]):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
predicted_result_log += f'{gt:25s} | {pred:25s} | {confidence:0.4f}\t{str(pred == gt)}\n'
predicted_result_log += f'{dashed_line}'
print(predicted_result_log)
log.write(predicted_result_log + '\n')
# save model per 1e+5 iter.
if (iteration + 1) % 1e+5 == 0:
torch.save(
model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) == opt.num_iter:
print('end the training')
sys.exit()
iteration += 1
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = "2,3"
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', default="/path/to/your/lmdb/train", help='path to training dataset')
parser.add_argument('--valid_data', default="/path/to/your/lmdb/val", help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')
parser.add_argument('--batch_size', default=64, type=int, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=500, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is Adadelta)')
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
""" Data processing """
parser.add_argument('--select_data', type=str, default='/',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='1',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, default="TPS", help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, default="ResNet", help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, default="BiLSTM", help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--Prediction', type=str, default="Attn", help='Prediction stage. CTC|Attn')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512, help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
print('To equalize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')
If you don't care about it, just comment out these lines.)
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
cmd/register_push.go
|
package cmd
import (
"cloud.google.com/go/pubsub"
"context"
"github.com/k-yomo/pubsub_cli/pkg"
"github.com/mitchellh/colorstring"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"io"
"os"
"strconv"
"time"
)
// newRegisterPushCmd returns the command to register an endpoint for subscribing
func newRegisterPushCmd(out io.Writer) *cobra.Command {
command := &cobra.Command{
Use: "register_push TOPIC_ID ENDPOINT",
Short: "register Pub/Sub push endpoint",
Long: "register new endpoint for push http request from Pub/Sub",
Example: "pubsub_cli register_push test_topic http://localhost:1323/createSubscription --host=localhost:8085 --project=test_project",
Aliases: []string{"r"},
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
topicID := args[0]
endpoint := args[1]
ackDeadline, _ := cmd.Flags().GetInt(ackDeadlineFlagName)
ackDeadlineSecond := time.Duration(ackDeadline) * time.Second
projectID, err := cmd.Flags().GetString(projectFlagName)
if err != nil {
return err
}
emulatorHost, err := cmd.Flags().GetString(hostFlagName)
if err != nil {
return err
}
gcpCredentialFilePath, err := cmd.Flags().GetString(credFileFlagName)
if err != nil {
return err
}
pubsubClient, err := pkg.NewPubSubClient(cmd.Context(), projectID, emulatorHost, gcpCredentialFilePath)
if err != nil {
return errors.Wrap(err, "initialize pubsub client")
}
return registerPush(cmd.Context(), out, pubsubClient, topicID, endpoint, ackDeadlineSecond)
},
}
command.SetOut(out)
ackDeadlineDefault, _ := strconv.Atoi(os.Getenv("PUBSUB_ACK_DEADLINE"))
command.PersistentFlags().IntVarP(&ackDeadlineDefault, ackDeadlineFlagName, "a", ackDeadlineDefault, "pubsub ack deadline(unit seconds)")
return command
}
// registerPush registers a new push endpoint
func registerPush(ctx context.Context, out io.Writer, pubsubClient *pkg.PubSubClient, topicID, endpoint string, ackDeadline time.Duration) error {
topic, _, err := pubsubClient.FindOrCreateTopic(ctx, topicID)
if err != nil {
return errors.Wrapf(err, "[error]find or create topic %s", topicID)
}
_, _ = colorstring.Fprintf(out, "[start] registering push endpoint for %s...\n", topic.String())
subscriptionConfig := pubsub.SubscriptionConfig{
Topic: topic,
AckDeadline: ackDeadline,
ExpirationPolicy: 24 * time.Hour,
PushConfig: pubsub.PushConfig{
Endpoint: endpoint,
Attributes: nil,
AuthenticationMethod: nil,
},
}
if _, err := pubsubClient.CreateSubscription(context.Background(), pkg.UUID(), subscriptionConfig); err != nil {
return errors.Wrapf(err, "register push endpoint for = %s", topic.String())
}
_, _ = colorstring.Fprintf(out, "[green][success] registered %s as an endpoint for %s\n", endpoint, topic.String())
return nil
}
|
[
"\"PUBSUB_ACK_DEADLINE\""
] |
[] |
[
"PUBSUB_ACK_DEADLINE"
] |
[]
|
["PUBSUB_ACK_DEADLINE"]
|
go
| 1 | 0 | |
incendio/base/test/conftest.py
|
"""Test fixtures."""
import ast
import json
import os
NAPALM_TEST_MOCK = ast.literal_eval(os.getenv("NAPALM_TEST_MOCK", default="1"))
NAPALM_HOSTNAME = os.getenv("NAPALM_HOSTNAME", default="127.0.0.1")
NAPALM_USERNAME = os.getenv("NAPALM_USERNAME", default="vagrant")
NAPALM_PASSWORD = os.getenv("NAPALM_PASSWORD", default="vagrant")
NAPALM_OPTIONAL_ARGS = json.loads(
os.getenv("NAPALM_OPTIONAL_ARGS", default='{"port": 12443}')
)
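# Illustrative override (not part of the original): NAPALM_OPTIONAL_ARGS must hold a JSON
# object string, e.g. export NAPALM_OPTIONAL_ARGS='{"port": 2222}'.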
def set_device_parameters(request):
"""Set up the class."""
if NAPALM_TEST_MOCK:
driver = request.cls.patched_driver
else:
driver = request.cls.driver
request.cls.device = driver(
NAPALM_HOSTNAME,
NAPALM_USERNAME,
NAPALM_PASSWORD,
timeout=60,
optional_args=NAPALM_OPTIONAL_ARGS,
)
request.cls.device.open()
def pytest_generate_tests(metafunc, basefile):
"""Generate test cases dynamically."""
if metafunc.function.__dict__.get("build_test_cases", False):
path = os.path.join(
os.path.dirname(basefile), "mocked_data", metafunc.function.__name__
)
if os.path.exists(path):
sub_folders = os.listdir(path)
else:
sub_folders = []
test_cases = []
for test_case in sub_folders:
if os.path.isdir(os.path.join(path, test_case)):
test_cases.append(test_case)
if not test_cases:
test_cases.append("no_test_case_found")
metafunc.parametrize("test_case", test_cases)
|
[] |
[] |
[
"NAPALM_TEST_MOCK",
"NAPALM_HOSTNAME",
"NAPALM_OPTIONAL_ARGS",
"NAPALM_USERNAME",
"NAPALM_PASSWORD"
] |
[]
|
["NAPALM_TEST_MOCK", "NAPALM_HOSTNAME", "NAPALM_OPTIONAL_ARGS", "NAPALM_USERNAME", "NAPALM_PASSWORD"]
|
python
| 5 | 0 | |
internal/config/config.go
|
package config
import (
"github.com/kelseyhightower/envconfig"
"os"
)
const envFileName = ".env"
const devEnv = "dev"
func init() {
if os.Getenv("SERVICE_ENV") == devEnv {
MustLoadEnv()
}
}
// NewConfig returns the settings from the environment.
func NewConfig() *Config {
cfg := &Config{}
err := envconfig.Process("", cfg)
if err != nil {
panic(err)
}
return cfg
}
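// Illustrative sketch only (the Config struct is defined elsewhere in this package):
// envconfig fills exported, tagged struct fields from the environment, e.g. a field
//   Port int `envconfig:"PORT"`
// would be populated from $PORT by envconfig.Process("", cfg).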
func (c Config) IsDev() bool {
return c.Service.Env == devEnv
}
|
[
"\"SERVICE_ENV\""
] |
[] |
[
"SERVICE_ENV"
] |
[]
|
["SERVICE_ENV"]
|
go
| 1 | 0 | |
cmd/util.go
|
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
multierror "github.com/hashicorp/go-multierror"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
survey "gopkg.in/AlecAivazis/survey.v1"
surveycore "gopkg.in/AlecAivazis/survey.v1/core"
git "gopkg.in/src-d/go-git.v4"
"github.com/pulumi/pulumi/pkg/backend"
"github.com/pulumi/pulumi/pkg/backend/display"
"github.com/pulumi/pulumi/pkg/backend/filestate"
"github.com/pulumi/pulumi/pkg/backend/httpstate"
"github.com/pulumi/pulumi/pkg/backend/state"
"github.com/pulumi/pulumi/pkg/diag/colors"
"github.com/pulumi/pulumi/pkg/engine"
"github.com/pulumi/pulumi/pkg/secrets/passphrase"
"github.com/pulumi/pulumi/pkg/util/cancel"
"github.com/pulumi/pulumi/pkg/util/ciutil"
"github.com/pulumi/pulumi/pkg/util/cmdutil"
"github.com/pulumi/pulumi/pkg/util/contract"
"github.com/pulumi/pulumi/pkg/util/gitutil"
"github.com/pulumi/pulumi/pkg/util/logging"
"github.com/pulumi/pulumi/pkg/util/tracing"
"github.com/pulumi/pulumi/pkg/workspace"
)
func hasDebugCommands() bool {
return cmdutil.IsTruthy(os.Getenv("PULUMI_DEBUG_COMMANDS"))
}
func useLegacyDiff() bool {
return cmdutil.IsTruthy(os.Getenv("PULUMI_ENABLE_LEGACY_DIFF"))
}
// backendInstance is used to inject a backend mock from tests.
var backendInstance backend.Backend
func currentBackend(opts display.Options) (backend.Backend, error) {
if backendInstance != nil {
return backendInstance, nil
}
url, err := workspace.GetCurrentCloudURL()
if err != nil {
return nil, errors.Wrapf(err, "could not get cloud url")
}
if filestate.IsFileStateBackendURL(url) {
return filestate.New(cmdutil.Diag(), url)
}
return httpstate.Login(commandContext(), cmdutil.Diag(), url, opts)
}
// This is used to control the contents of the tracing header.
var tracingHeader = os.Getenv("PULUMI_TRACING_HEADER")
func commandContext() context.Context {
ctx := context.Background()
if cmdutil.IsTracingEnabled() {
if cmdutil.TracingRootSpan != nil {
ctx = opentracing.ContextWithSpan(ctx, cmdutil.TracingRootSpan)
}
tracingOptions := tracing.Options{
PropagateSpans: true,
TracingHeader: tracingHeader,
}
ctx = tracing.ContextWithOptions(ctx, tracingOptions)
}
return ctx
}
// createStack creates a stack with the given name, and optionally selects it as the current.
func createStack(
b backend.Backend, stackRef backend.StackReference, opts interface{}, setCurrent bool,
secretsProvider string) (backend.Stack, error) {
// As part of creating the stack, we also need to configure the secrets provider for the stack.
// We need to do this configuration step for cases where we will be using the passphrase
// secrets provider or one of the cloud-backed secrets providers. We do not need to do this
// for the Pulumi service backend secrets provider.
isDefaultSecretsProvider := secretsProvider == "" || secretsProvider == "default"
if _, ok := b.(filestate.Backend); ok && isDefaultSecretsProvider {
// The default when using the filestate backend is the passphrase secrets provider
secretsProvider = passphrase.Type
}
if secretsProvider == passphrase.Type {
if _, pharseErr := newPassphraseSecretsManager(stackRef.Name(), stackConfigFile); pharseErr != nil {
return nil, pharseErr
}
} else if !isDefaultSecretsProvider {
// All other non-default secrets providers are handled by the cloud secrets provider which
// uses a URL schema to identify the provider
if _, secretsErr := newCloudSecretsManager(stackRef.Name(), stackConfigFile, secretsProvider); secretsErr != nil {
return nil, secretsErr
}
}
stack, err := b.CreateStack(commandContext(), stackRef, opts)
if err != nil {
// If it's a StackAlreadyExistsError, don't wrap it.
if _, ok := err.(*backend.StackAlreadyExistsError); ok {
return nil, err
}
return nil, errors.Wrapf(err, "could not create stack")
}
if setCurrent {
if err = state.SetCurrentStack(stack.Ref().String()); err != nil {
return nil, err
}
}
return stack, nil
}
// requireStack will require that a stack exists. If stackName is blank, the currently selected stack from
// the workspace is returned. If no stack with either the given name, or a currently selected stack, exists,
// and we are in an interactive terminal, the user will be prompted to create a new stack.
func requireStack(
stackName string, offerNew bool, opts display.Options, setCurrent bool) (backend.Stack, error) {
if stackName == "" {
return requireCurrentStack(offerNew, opts, setCurrent)
}
b, err := currentBackend(opts)
if err != nil {
return nil, err
}
stackRef, err := b.ParseStackReference(stackName)
if err != nil {
return nil, err
}
stack, err := b.GetStack(commandContext(), stackRef)
if err != nil {
return nil, err
}
if stack != nil {
return stack, err
}
// No stack was found. If we're in a terminal, prompt to create one.
if offerNew && cmdutil.Interactive() {
fmt.Printf("The stack '%s' does not exist.\n", stackName)
fmt.Printf("\n")
_, err = cmdutil.ReadConsole("If you would like to create this stack now, please press <ENTER>, otherwise " +
"press ^C")
if err != nil {
return nil, err
}
return createStack(b, stackRef, nil, setCurrent, "")
}
return nil, errors.Errorf("no stack named '%s' found", stackName)
}
func requireCurrentStack(offerNew bool, opts display.Options, setCurrent bool) (backend.Stack, error) {
// Search for the current stack.
b, err := currentBackend(opts)
if err != nil {
return nil, err
}
stack, err := state.CurrentStack(commandContext(), b)
if err != nil {
return nil, err
} else if stack != nil {
return stack, nil
}
// If no current stack exists, and we are interactive, prompt to select or create one.
return chooseStack(b, offerNew, opts, setCurrent)
}
// chooseStack will prompt the user to choose amongst the full set of stacks in the given backend. If offerNew is
// true, then the option to create an entirely new stack is provided and will create one as desired.
func chooseStack(
b backend.Backend, offerNew bool, opts display.Options, setCurrent bool) (backend.Stack, error) {
// Prepare our error in case we need to issue it. Bail early if we're not interactive.
var chooseStackErr string
if offerNew {
chooseStackErr = "no stack selected; please use `pulumi stack select` or `pulumi stack init` to choose one"
} else {
chooseStackErr = "no stack selected; please use `pulumi stack select` to choose one"
}
if !cmdutil.Interactive() {
return nil, errors.New(chooseStackErr)
}
proj, err := workspace.DetectProject()
if err != nil {
return nil, err
}
// List stacks as available options.
project := string(proj.Name)
summaries, err := b.ListStacks(commandContext(), backend.ListStacksFilter{Project: &project})
if err != nil {
return nil, errors.Wrapf(err, "could not query backend for stacks")
}
var options []string
for _, summary := range summaries {
name := summary.Name().String()
options = append(options, name)
}
sort.Strings(options)
// If we are offering to create a new stack, add that to the end of the list.
const newOption = "<create a new stack>"
if offerNew {
options = append(options, newOption)
} else if len(options) == 0 {
// If no options are available, we can't offer a choice!
return nil, errors.New("this command requires a stack, but there are none")
}
// If a stack is already selected, make that the default.
var current string
currStack, currErr := state.CurrentStack(commandContext(), b)
contract.IgnoreError(currErr)
if currStack != nil {
current = currStack.Ref().String()
}
// Customize the prompt a little bit (and disable color since it doesn't match our scheme).
surveycore.DisableColor = true
surveycore.QuestionIcon = ""
surveycore.SelectFocusIcon = opts.Color.Colorize(colors.BrightGreen + ">" + colors.Reset)
message := "\rPlease choose a stack"
if offerNew {
message += ", or create a new one:"
} else {
message += ":"
}
message = opts.Color.Colorize(colors.SpecPrompt + message + colors.Reset)
var option string
if err = survey.AskOne(&survey.Select{
Message: message,
Options: options,
Default: current,
}, &option, nil); err != nil {
return nil, errors.New(chooseStackErr)
}
if option == newOption {
hint := "Please enter your desired stack name"
if b.SupportsOrganizations() {
hint += ".\nTo create a stack in an organization, " +
"use the format <org-name>/<stack-name> (e.g. `acmecorp/dev`)"
}
stackName, readErr := cmdutil.ReadConsole(hint)
if readErr != nil {
return nil, readErr
}
stackRef, parseErr := b.ParseStackReference(stackName)
if parseErr != nil {
return nil, parseErr
}
return createStack(b, stackRef, nil, setCurrent, "")
}
// With the stack name selected, look it up from the backend.
stackRef, err := b.ParseStackReference(option)
if err != nil {
return nil, errors.Wrap(err, "parsing selected stack")
}
stack, err := b.GetStack(commandContext(), stackRef)
if err != nil {
return nil, errors.Wrap(err, "getting selected stack")
}
// If setCurrent is true, we'll persist this choice so it'll be used for future CLI operations.
if setCurrent {
if err = state.SetCurrentStack(stackRef.String()); err != nil {
return nil, err
}
}
return stack, nil
}
// projType represents the various types of Pulumi project. All Pulumi projects are denoted by a
// Pulumi.yaml in the root of the workspace.
type projType string
const (
// pulumiAppProj is a Pulumi application project.
pulumiAppProj projType = "pulumi-app"
// pulumiPolicyProj is a Pulumi resource policy project.
pulumiPolicyProj projType = "pulumi-policy"
)
// parseAndSaveConfigArray parses the config array and saves it as a config for
// the provided stack.
func parseAndSaveConfigArray(s backend.Stack, configArray []string) error {
if len(configArray) == 0 {
return nil
}
commandLineConfig, err := parseConfig(configArray)
if err != nil {
return err
}
if err = saveConfig(s, commandLineConfig); err != nil {
return errors.Wrap(err, "saving config")
}
return nil
}
// readProject attempts to detect and read a project of type `projType` for the current workspace.
// If the project is successfully detected and read, it is returned along with the path to its
// containing directory, which will be used as the root of the project's Pulumi program.
func readProject(projType projType) (*workspace.Project, string, error) {
pwd, err := os.Getwd()
if err != nil {
return nil, "", err
}
// We have a working directory; try to detect the project path from it and load it.
path, err := workspace.DetectProjectPathFrom(pwd)
if err != nil {
return nil, "", errors.Wrapf(err, "failed to find current Pulumi project because of "+
"an error when searching for the Pulumi.yaml file (searching upwards from %s)", pwd)
} else if path == "" {
return nil, "", errReadProjNoPulumiYAML(projType, pwd)
}
proj, err := workspace.LoadProject(path)
if err != nil {
return nil, "", errors.Wrapf(err, "failed to load Pulumi project located at %q", path)
}
return proj, filepath.Dir(path), nil
}
func errReadProjNoPulumiYAML(projType projType, pwd string) error {
switch projType {
case pulumiPolicyProj:
return fmt.Errorf("no Pulumi.yaml project file found (searching upwards from %s)", pwd)
default:
return fmt.Errorf(
"no Pulumi.yaml project file found (searching upwards from %s). If you have not "+
"created a project yet, use `pulumi new` to do so", pwd)
}
}
// anyWriter is an io.Writer that will set itself to `true` iff any call to `anyWriter.Write` is made with a
// non-zero-length slice. This can be used to determine whether or not any data was ever written to the writer.
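// A minimal sketch of the intended use (illustrative only): point a command's
// stdout at an anyWriter and convert it to bool afterwards.
//
//   var wrote anyWriter
//   cmd := exec.Command("git", "status", "--porcelain")
//   cmd.Stdout = &wrote
//   _ = cmd.Run()
//   producedOutput := bool(wrote)
//
// isGitWorkTreeDirty below uses this exact pattern.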
type anyWriter bool
func (w *anyWriter) Write(d []byte) (int, error) {
if len(d) > 0 {
*w = true
}
return len(d), nil
}
// isGitWorkTreeDirty returns true if the work tree for the current directory's repository is dirty.
func isGitWorkTreeDirty(repoRoot string) (bool, error) {
gitBin, err := exec.LookPath("git")
if err != nil {
return false, err
}
gitStatusCmd := exec.Command(gitBin, "status", "--porcelain", "-z")
var anyOutput anyWriter
var stderr bytes.Buffer
gitStatusCmd.Dir = repoRoot
gitStatusCmd.Stdout = &anyOutput
gitStatusCmd.Stderr = &stderr
if err = gitStatusCmd.Run(); err != nil {
if ee, ok := err.(*exec.ExitError); ok {
ee.Stderr = stderr.Bytes()
}
return false, errors.Wrapf(err, "'git status' failed")
}
return bool(anyOutput), nil
}
// getUpdateMetadata returns an UpdateMetadata object, with optional data about the environment
// performing the update.
func getUpdateMetadata(msg, root string) (*backend.UpdateMetadata, error) {
m := &backend.UpdateMetadata{
Message: msg,
Environment: make(map[string]string),
}
if err := addGitMetadata(root, m); err != nil {
logging.V(3).Infof("errors detecting git metadata: %s", err)
}
addCIMetadataToEnvironment(m.Environment)
return m, nil
}
// addGitMetadata populates the environment metadata bag with Git-related values.
func addGitMetadata(repoRoot string, m *backend.UpdateMetadata) error {
var allErrors *multierror.Error
// Gather git-related data as appropriate. (Returns nil, nil if no repo found.)
repo, err := gitutil.GetGitRepository(repoRoot)
if err != nil {
return errors.Wrapf(err, "detecting Git repository")
}
if repo == nil {
return nil
}
if err := AddGitRemoteMetadataToMap(repo, m.Environment); err != nil {
allErrors = multierror.Append(allErrors, err)
}
if err := addGitCommitMetadata(repo, repoRoot, m); err != nil {
allErrors = multierror.Append(allErrors, err)
}
return allErrors.ErrorOrNil()
}
// AddGitRemoteMetadataToMap reads the given git repo and adds its metadata to the given map bag.
func AddGitRemoteMetadataToMap(repo *git.Repository, env map[string]string) error {
var allErrors *multierror.Error
// Get the remote URL for this repo.
remoteURL, err := gitutil.GetGitRemoteURL(repo, "origin")
if err != nil {
return errors.Wrap(err, "detecting Git remote URL")
}
if remoteURL == "" {
return nil
}
// Check if the remote URL is a GitHub or a GitLab URL.
if err := addVCSMetadataToEnvironment(remoteURL, env); err != nil {
allErrors = multierror.Append(allErrors, err)
}
return allErrors.ErrorOrNil()
}
func addVCSMetadataToEnvironment(remoteURL string, env map[string]string) error {
// GitLab, Bitbucket, Azure DevOps etc. repo slug if applicable.
// We don't require a cloud-hosted VCS, so swallow errors.
vcsInfo, err := gitutil.TryGetVCSInfo(remoteURL)
if err != nil {
return errors.Wrap(err, "detecting VCS project information")
}
env[backend.VCSRepoOwner] = vcsInfo.Owner
env[backend.VCSRepoName] = vcsInfo.Repo
env[backend.VCSRepoKind] = vcsInfo.Kind
return nil
}
func addGitCommitMetadata(repo *git.Repository, repoRoot string, m *backend.UpdateMetadata) error {
// When running in a CI/CD environment, the current git repo may be running from a
// detached HEAD and may not have the latest commit message. We fall back to
// CI-system specific environment variables when possible.
ciVars := ciutil.DetectVars()
// Commit at HEAD
head, err := repo.Head()
if err != nil {
return errors.Wrap(err, "getting repository HEAD")
}
hash := head.Hash()
m.Environment[backend.GitHead] = hash.String()
commit, commitErr := repo.CommitObject(hash)
if commitErr != nil {
return errors.Wrap(commitErr, "getting HEAD commit info")
}
// If in a detached HEAD state, the name will be "HEAD"; fall back to the value from the CI/CD system if possible.
// Otherwise, the value will be like "refs/heads/master".
headName := head.Name().String()
if headName == "HEAD" && ciVars.BranchName != "" {
headName = ciVars.BranchName
}
if headName != "HEAD" {
m.Environment[backend.GitHeadName] = headName
}
// If there is no message set manually, default to the Git commit's title.
msg := strings.TrimSpace(commit.Message)
if msg == "" && ciVars.CommitMessage != "" {
msg = ciVars.CommitMessage
}
if m.Message == "" {
m.Message = gitCommitTitle(msg)
}
// Store committer and author information.
m.Environment[backend.GitCommitter] = commit.Committer.Name
m.Environment[backend.GitCommitterEmail] = commit.Committer.Email
m.Environment[backend.GitAuthor] = commit.Author.Name
m.Environment[backend.GitAuthorEmail] = commit.Author.Email
// If the worktree is dirty, set a bit, as this could be a mistake.
isDirty, err := isGitWorkTreeDirty(repoRoot)
if err != nil {
return errors.Wrapf(err, "checking git worktree dirty state")
}
m.Environment[backend.GitDirty] = strconv.FormatBool(isDirty)
return nil
}
// gitCommitTitle turns a commit message into its title, simply by taking the first line.
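// For example, a message of "Fix flaky test\n\nLonger description..." yields
// the title "Fix flaky test", while a single-line message is returned unchanged.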
func gitCommitTitle(s string) string {
if ixCR := strings.Index(s, "\r"); ixCR != -1 {
s = s[:ixCR]
}
if ixLF := strings.Index(s, "\n"); ixLF != -1 {
s = s[:ixLF]
}
return s
}
// addCIMetadataToEnvironment populates the environment metadata bag with CI/CD-related values.
func addCIMetadataToEnvironment(env map[string]string) {
// Add the key/value pair to env, if there actually is a value.
addIfSet := func(key, val string) {
if val != "" {
env[key] = val
}
}
// Use our built-in CI/CD detection logic.
vars := ciutil.DetectVars()
if vars.Name == "" {
return
}
env[backend.CISystem] = string(vars.Name)
addIfSet(backend.CIBuildID, vars.BuildID)
addIfSet(backend.CIBuildType, vars.BuildType)
addIfSet(backend.CIBuildURL, vars.BuildURL)
addIfSet(backend.CIPRHeadSHA, vars.SHA)
addIfSet(backend.CIPRNumber, vars.PRNumber)
}
type cancellationScope struct {
context *cancel.Context
sigint chan os.Signal
done chan bool
}
func (s *cancellationScope) Context() *cancel.Context {
return s.context
}
func (s *cancellationScope) Close() {
signal.Stop(s.sigint)
close(s.sigint)
<-s.done
}
type cancellationScopeSource int
var cancellationScopes = backend.CancellationScopeSource(cancellationScopeSource(0))
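// NewScope wires SIGINT handling into a cancellation scope. A rough usage
// sketch (the event channel and preview flag come from the calling update
// command):
//
//   scope := cancellationScopes.NewScope(events, false /*isPreview*/)
//   defer scope.Close()
//   // The first ^C cancels gracefully via scope.Context(); a second ^C
//   // requests immediate termination.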
func (cancellationScopeSource) NewScope(events chan<- engine.Event, isPreview bool) backend.CancellationScope {
cancelContext, cancelSource := cancel.NewContext(context.Background())
c := &cancellationScope{
context: cancelContext,
sigint: make(chan os.Signal),
done: make(chan bool),
}
go func() {
for range c.sigint {
// If we haven't yet received a SIGINT, call the cancellation func. Otherwise call the termination
// func.
if cancelContext.CancelErr() == nil {
message := "^C received; cancelling. If you would like to terminate immediately, press ^C again.\n"
if !isPreview {
message += colors.BrightRed + "Note that terminating immediately may lead to orphaned resources " +
"and other inconsistencies.\n" + colors.Reset
}
events <- engine.Event{
Type: engine.StdoutColorEvent,
Payload: engine.StdoutEventPayload{
Message: message,
Color: colors.Always,
},
}
cancelSource.Cancel()
} else {
message := colors.BrightRed + "^C received; terminating" + colors.Reset
events <- engine.Event{
Type: engine.StdoutColorEvent,
Payload: engine.StdoutEventPayload{
Message: message,
Color: colors.Always,
},
}
cancelSource.Terminate()
}
}
close(c.done)
}()
signal.Notify(c.sigint, os.Interrupt)
return c
}
// printJSON simply prints out some object, formatted as JSON, using standard indentation.
func printJSON(v interface{}) error {
out, err := json.MarshalIndent(v, "", " ")
if err != nil {
return err
}
fmt.Println(string(out))
return nil
}
// updateFlagsToOptions ensures that the given update flags represent a valid combination. If so, an UpdateOptions
// is returned with a nil-error; otherwise, the non-nil error contains information about why the combination is invalid.
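// For example, updateFlagsToOptions(false, true, false) fails because --yes is
// required in non-interactive mode, while updateFlagsToOptions(false, true, true)
// yields UpdateOptions{AutoApprove: true, SkipPreview: true}.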
func updateFlagsToOptions(interactive, skipPreview, yes bool) (backend.UpdateOptions, error) {
if !interactive && !yes {
return backend.UpdateOptions{},
errors.New("--yes must be passed in non-interactive mode")
}
return backend.UpdateOptions{
AutoApprove: yes,
SkipPreview: skipPreview,
}, nil
}
|
[
"\"PULUMI_DEBUG_COMMANDS\"",
"\"PULUMI_ENABLE_LEGACY_DIFF\"",
"\"PULUMI_TRACING_HEADER\""
] |
[] |
[
"PULUMI_DEBUG_COMMANDS",
"PULUMI_TRACING_HEADER",
"PULUMI_ENABLE_LEGACY_DIFF"
] |
[]
|
["PULUMI_DEBUG_COMMANDS", "PULUMI_TRACING_HEADER", "PULUMI_ENABLE_LEGACY_DIFF"]
|
go
| 3 | 0 | |
JUnit.go
|
package main
import (
//"encoding/json"
"encoding/xml"
"errors"
//"fmt"
"log"
"os"
//"strconv"
"time"
"github.com/educlos/testrail"
//str2duration "github.com/xhit/go-str2duration/v2"
"github.com/davecgh/go-spew/spew"
"golang.org/x/net/html/charset"
)
// Testsuites struct below is autogenerated using the awesome https://www.onlinetool.io/xmltogo/ tool!
// Testsuites mirrors the structure of JUnit XML output
/*type Testsuites struct {
XMLName xml.Name `xml:"testsuites"`
Text string `xml:",chardata"`
Testsuite []struct {
Text string `xml:",chardata"`
Name string `xml:"name,attr"`
Errors string `xml:"errors,attr"`
Tests string `xml:"tests,attr"`
Failures string `xml:"failures,attr"`
Time string `xml:"time,attr"`
Timestamp string `xml:"timestamp,attr"`
Skipped string `xml:"skipped,attr"`
Properties struct {
Text string `xml:",chardata"`
Property []struct {
Text string `xml:",chardata"`
Name string `xml:"name,attr"`
Value string `xml:"value,attr"`
} `xml:"property"`
} `xml:"properties"`
Testcase []struct {
Text string `xml:",chardata"`
Classname string `xml:"classname,attr"`
Name string `xml:"name,attr"`
Time string `xml:"time,attr"`
Failure struct {
Text string `xml:",chardata"`
Message string `xml:"message,attr"`
} `xml:"failure"`
Skipped *string `xml:"skipped,omitempty"` // Note that this is a *string, as an empty value will give a "" while non-existent will give nil. This lets us distinguish a <skipped /> from a non-existent tag
} `xml:"testcase"`
} `xml:"testsuite"`
}*/
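// An illustrative JUnit fragment of the shape the struct below decodes (names
// and attribute values are examples only):
//
//   <testsuites>
//     <testsuite name="pkg" tests="2" failures="1" skipped="0" time="0.42">
//       <testcase classname="pkg" name="TestOK" time="0.01"/>
//       <testcase classname="pkg" name="TestBad" time="0.40">
//         <failure message="assertion failed">details...</failure>
//       </testcase>
//     </testsuite>
//   </testsuites>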
type Testsuites struct {
XMLName xml.Name `xml:"testsuites"`
Text string `xml:",chardata"`
Testsuite []struct {
Text string `xml:",chardata"`
Errors string `xml:"errors,attr"`
Failures string `xml:"failures,attr"`
Hostname string `xml:"hostname,attr"`
Name string `xml:"name,attr"`
Skipped string `xml:"skipped,attr"`
Tests string `xml:"tests,attr"`
Time string `xml:"time,attr"`
Timestamp string `xml:"timestamp,attr"`
Testcase []struct {
Text string `xml:",chardata"`
Classname string `xml:"classname,attr"`
File string `xml:"file,attr"`
Line string `xml:"line,attr"`
Name string `xml:"name,attr"`
Time string `xml:"time,attr"`
Skipped *struct {
Text string `xml:",chardata"`
Message string `xml:"message,attr"`
Type string `xml:"type,attr"`
} `xml:"skipped,omitempty"`
Properties struct {
Text string `xml:",chardata"`
Property struct {
Text string `xml:",chardata"`
Name string `xml:"name,attr"`
Value string `xml:"value,attr"`
} `xml:"property"`
} `xml:"properties"`
Failure struct {
Text string `xml:",chardata"`
Message string `xml:"message,attr"`
} `xml:"failure"`
} `xml:"testcase"`
} `xml:"testsuite"`
}
func readEnvVars() (string, string, string, string, string) {
testRailServer := os.Getenv("TESTRAIL_SERVER")
if testRailServer == "" {
log.Fatalln("Environment variable TESTRAIL_SERVER not specified")
}
username := os.Getenv("USERNAME")
if username == "" {
log.Fatalln("Environment variable USERNAME not specified")
}
password := os.Getenv("PASSWORD")
if password == "" {
log.Fatalln("Environment variable PASSWORD not specified")
}
projectName := os.Getenv("PROJECT_NAME")
if projectName == "" {
log.Fatalln("Environment variable PROJECT_NAME not specified")
}
suiteName := os.Getenv("SUITE_NAME")
if suiteName == "" {
log.Fatalln("Environment variable SUITE_NAME not specified")
}
return testRailServer, username, password, projectName, suiteName
}
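// readJunitXML decodes a JUnit XML report from the given file. A minimal
// sketch of feeding it a file on disk instead of stdin (the path is
// hypothetical):
//
//   f, err := os.Open("report.xml")
//   if err != nil {
//       log.Fatalln(err)
//   }
//   defer f.Close()
//   doc := readJunitXML(f)
//   logJunitDetail(doc)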
func readJunitXML(file *os.File) Testsuites {
dec := xml.NewDecoder(file)
dec.CharsetReader = charset.NewReaderLabel
dec.Strict = false
var doc Testsuites
if err := dec.Decode(&doc); err != nil {
log.Panicf("failed to decode JUnit XML: %v", err)
}
return doc
}
func logJunitDetail(tss Testsuites) {
log.Printf("%+v\n", tss.Testsuite)
spew.Dump(tss.Testsuite)
for _, ts := range tss.Testsuite {
log.Printf("Number of tests: %v\n", ts.Tests)
log.Printf("Number of failed tests: %v\n", ts.Failures)
for i, tc := range ts.Testcase {
log.Printf("%v\n", ts)
log.Printf("%v\n", tc)
log.Printf("Testcase %d: %v\n", i, tc)
log.Printf("Testcase %d name: %v\n", i, tc.Name)
log.Printf("Testcase %d failure message: %v\n", i, tc.Failure.Text)
log.Println("-------------------------------------------")
}
}
}
func processResultsToTestRail(j Testsuites, client *testrail.Client, projectID int, suiteID int) {
logJunitDetail(j)
now := time.Now().Format("2006-01-02 15:04:05")
for _, ts := range j.Testsuite {
for i, tc := range ts.Testcase {
//duration,err := str2duration.ParseDuration(fmt.Sprintf("%ss",tc.Time))
//if err != nil {
// log.Fatalf("Error converting %v to duration\n",tc.Time)
//}
tcName := tc.Name
testcaseID, err := getTestCaseID(client, projectID, suiteID, tcName)
if err != nil {
log.Panicf("Couldn't find test case '%s'\n", tc.Name)
}
var tcStatus int
if tc.Failure.Text != "" {
tcStatus = testrail.StatusFailed
} else if tc.Skipped != nil {
//tcStatus = testrail.StatusUntested //StatusUntested results in a failure when posting - library bug??
tcStatus = testrail.StatusRetest
} else {
tcStatus = testrail.StatusPassed
}
tsr := testrail.SendableResult{
//Elapsed: *testrail.TimespanFromDuration(duration),
StatusID: tcStatus,
Comment: tc.Failure.Text,
Version: now,
Defects: "",
//AssignedToID: 1,
}
log.Printf("tsr: %v\n", tsr)
result, err := client.AddResultForCase(projectID, testcaseID, tsr)
if err != nil {
log.Panicf("Error adding results for test case %d: %v\n", i, err)
}
log.Printf("Success adding results for test case %d: %v\n", i, result)
}
}
}
func getProjectID(client *testrail.Client, projectName string) (int, error) {
projects, err := client.GetProjects()
if err != nil {
log.Panicf("Error reading projects: %v\n", err)
}
for _, p := range projects {
//log.Println(p.ID)
//log.Printf("project: %v\n", p)
//log.Printf("project name: %v\n", p.Name)
if p.Name == projectName {
log.Printf("Found project '%s' is id %d\n", projectName, p.ID)
return p.ID, nil
}
}
return 0, errors.New("Couldn't find project")
}
func getSuiteID(client *testrail.Client, projectID int, suiteName string) (int, error) {
suites, err := client.GetSuites(projectID)
if err != nil {
log.Panicf("Error reading suites: %v\n", err)
}
for _, s := range suites {
//log.Println(s.ID)
//log.Printf("suite: %v\n", s)
//log.Printf("suite name: %v\n", s.Name)
if s.Name == suiteName {
log.Printf("Found suite '%s' for project '%d' is id %d\n", suiteName, projectID, s.ID)
return s.ID, nil
}
}
return 0, errors.New("Couldn't find suite")
}
func getTestCaseID(client *testrail.Client, projectID int, suiteID int, testcaseName string) (int, error) {
testcases, err := client.GetCases(projectID, suiteID)
if err != nil {
log.Panicf("Error reading testcases: %v\n", err)
}
for _, tc := range testcases {
//log.Println(s.ID)
//log.Printf("suite: %v\n", s)
//log.Printf("suite name: %v\n", s.Name)
if tc.Title == testcaseName {
log.Printf("Found testcase '%s' for suite '%d', project '%d' is id %d\n", testcaseName, suiteID, projectID, tc.ID)
return tc.ID, nil
}
}
//return 0, errors.New("Couldn't find test case")
newTestcase, err := addTestCase(client, suiteID, testcaseName)
return newTestcase.ID, err
}
func addTestCase(client *testrail.Client, suiteID int, testcaseName string) (testrail.Case, error) {
now := time.Now().Format("2006-01-02 15:04:05")
newTestCase := testrail.SendableCase{
Title: testcaseName,
Date: now,
}
tc, err := client.AddCase(suiteID, newTestCase)
return tc, err
}
func main() {
testrailServer, username, password, projectName, suiteName := readEnvVars()
junitDoc := readJunitXML(os.Stdin)
client := testrail.NewClient(testrailServer, username, password)
projectID, err := getProjectID(client, projectName)
if err != nil {
log.Panicf("Couldn't find project named '%s'\n", projectName)
}
log.Printf("Project ID for '%s' is %d\n", projectName, projectID)
suiteID, err := getSuiteID(client, projectID, suiteName)
if err != nil {
log.Panicf("Couldn't find suite named '%s' for project '%s'\n", suiteName, projectName)
}
log.Printf("Suitename '%s' has id %d\n", suiteName, suiteID)
processResultsToTestRail(junitDoc, client, projectID, suiteID)
//log.Printf("Results: %v\n", success)
}
|
[
"\"TESTRAIL_SERVER\"",
"\"USERNAME\"",
"\"PASSWORD\"",
"\"PROJECT_NAME\"",
"\"SUITE_NAME\""
] |
[] |
[
"TESTRAIL_SERVER",
"USERNAME",
"PASSWORD",
"SUITE_NAME",
"PROJECT_NAME"
] |
[]
|
["TESTRAIL_SERVER", "USERNAME", "PASSWORD", "SUITE_NAME", "PROJECT_NAME"]
|
go
| 5 | 0 | |
lambda/main.go
|
package main
import (
"fmt"
"os"
"github.com/aws/aws-lambda-go/lambda"
)
func HandleRequest() {
fmt.Printf("Hello, Serverless! Running version `%s`", os.Getenv("VERSION"))
}
func main() {
lambda.Start(HandleRequest)
}
|
[
"\"VERSION\""
] |
[] |
[
"VERSION"
] |
[]
|
["VERSION"]
|
go
| 1 | 0 | |
transport/kafka.go
|
package transport
import (
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"os"
"reflect"
"strings"
sarama "github.com/Shopify/sarama"
flowmessage "github.com/cloudflare/goflow/v3/pb"
"github.com/cloudflare/goflow/v3/utils"
proto "github.com/golang/protobuf/proto"
)
var (
KafkaTLS *bool
KafkaSASL *bool
KafkaTopic *string
KafkaSrv *string
KafkaBrk *string
KafkaLogErrors *bool
KafkaHashing *bool
KafkaKeying *string
KafkaVersion *string
kafkaConfigVersion sarama.KafkaVersion = sarama.V0_11_0_0
)
type KafkaState struct {
producer sarama.AsyncProducer
topic string
hashing bool
keying []string
}
// SetKafkaVersion sets the KafkaVersion that is used to set the log message format version
func SetKafkaVersion(version sarama.KafkaVersion) {
kafkaConfigVersion = version
}
// ParseKafkaVersion is a pass-through to sarama.ParseKafkaVersion; it returns a KafkaVersion struct for a version string that can then be passed to SetKafkaVersion.
// This function is here so that calling code need not import sarama to set KafkaVersion
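// A minimal sketch from calling code (the version string is an example):
//
//   v, err := transport.ParseKafkaVersion("2.6.0")
//   if err != nil {
//       // handle the parse error
//   }
//   transport.SetKafkaVersion(v)
//
// StartKafkaProducerFromArgs does the same using the -kafka.version flag value.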
func ParseKafkaVersion(versionString string) (sarama.KafkaVersion, error) {
return sarama.ParseKafkaVersion(versionString)
}
func RegisterFlags() {
KafkaTLS = flag.Bool("kafka.tls", false, "Use TLS to connect to Kafka")
KafkaSASL = flag.Bool("kafka.sasl", false, "Use SASL/PLAIN data to connect to Kafka (TLS is recommended and the environment variables KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set)")
KafkaTopic = flag.String("kafka.topic", "flow-messages", "Kafka topic to produce to")
KafkaSrv = flag.String("kafka.srv", "", "SRV record containing a list of Kafka brokers (or use kafka.out.brokers)")
KafkaBrk = flag.String("kafka.brokers", "127.0.0.1:9092,[::1]:9092", "Kafka brokers list separated by commas")
KafkaLogErrors = flag.Bool("kafka.log.err", false, "Log Kafka errors")
KafkaHashing = flag.Bool("kafka.hashing", false, "Enable partitioning by hash instead of random")
KafkaKeying = flag.String("kafka.key", "SamplerAddress,DstAS", "Kafka list of fields to do hashing on (partition) separated by commas")
KafkaVersion = flag.String("kafka.version", "0.11.0.0", "Log message version (must be a version that parses per sarama.ParseKafkaVersion)")
}
func StartKafkaProducerFromArgs(log utils.Logger) (*KafkaState, error) {
kVersion, err := ParseKafkaVersion(*KafkaVersion)
if err != nil {
return nil, err
}
SetKafkaVersion(kVersion)
addrs := make([]string, 0)
if *KafkaSrv != "" {
addrs, _ = utils.GetServiceAddresses(*KafkaSrv)
} else {
addrs = strings.Split(*KafkaBrk, ",")
}
return StartKafkaProducer(addrs, *KafkaTopic, *KafkaHashing, *KafkaKeying, *KafkaTLS, *KafkaSASL, *KafkaLogErrors, log)
}
func StartKafkaProducer(addrs []string, topic string, hashing bool, keying string, useTls bool, useSasl bool, logErrors bool, log utils.Logger) (*KafkaState, error) {
kafkaConfig := sarama.NewConfig()
kafkaConfig.Version = kafkaConfigVersion
kafkaConfig.Producer.Return.Successes = false
kafkaConfig.Producer.Return.Errors = logErrors
if useTls {
rootCAs, err := x509.SystemCertPool()
if err != nil {
return nil, fmt.Errorf("Error initializing TLS: %v", err)
}
kafkaConfig.Net.TLS.Enable = true
kafkaConfig.Net.TLS.Config = &tls.Config{RootCAs: rootCAs}
}
var keyingSplit []string
if hashing {
kafkaConfig.Producer.Partitioner = sarama.NewHashPartitioner
keyingSplit = strings.Split(keying, ",")
}
if useSasl {
if !useTls && log != nil {
log.Warn("Using SASL without TLS will transmit the authentication in plaintext!")
}
kafkaConfig.Net.SASL.Enable = true
kafkaConfig.Net.SASL.User = os.Getenv("KAFKA_SASL_USER")
kafkaConfig.Net.SASL.Password = os.Getenv("KAFKA_SASL_PASS")
if kafkaConfig.Net.SASL.User == "" && kafkaConfig.Net.SASL.Password == "" {
return nil, errors.New("Kafka SASL config from environment was unsuccessful. KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set.")
} else if log != nil {
log.Infof("Authenticating as user '%s'...", kafkaConfig.Net.SASL.User)
}
}
kafkaProducer, err := sarama.NewAsyncProducer(addrs, kafkaConfig)
if err != nil {
return nil, err
}
state := KafkaState{
producer: kafkaProducer,
topic: topic,
hashing: hashing,
keying: keyingSplit,
}
if logErrors {
go func() {
for msg := range kafkaProducer.Errors() {
if log != nil {
log.Error(msg)
}
}
}()
}
return &state, nil
}
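// HashProto builds a partitioning key by concatenating the %v rendering of
// each named FlowMessage field followed by "-". For example, with the default
// keying "SamplerAddress,DstAS", the key is derived from those two fields and
// is then consumed by the hash partitioner configured in StartKafkaProducer.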
func HashProto(fields []string, flowMessage *flowmessage.FlowMessage) string {
var keyStr string
if flowMessage != nil {
vfm := reflect.ValueOf(flowMessage)
vfm = reflect.Indirect(vfm)
for _, kf := range fields {
fieldValue := vfm.FieldByName(kf)
if fieldValue.IsValid() {
keyStr += fmt.Sprintf("%v-", fieldValue)
}
}
}
return keyStr
}
func (s KafkaState) SendKafkaFlowMessage(flowMessage *flowmessage.FlowMessage) {
var key sarama.Encoder
if s.hashing {
keyStr := HashProto(s.keying, flowMessage)
key = sarama.StringEncoder(keyStr)
}
b, _ := proto.Marshal(flowMessage)
s.producer.Input() <- &sarama.ProducerMessage{
Topic: s.topic,
Key: key,
Value: sarama.ByteEncoder(b),
}
}
func (s KafkaState) Publish(msgs []*flowmessage.FlowMessage) {
for _, msg := range msgs {
s.SendKafkaFlowMessage(msg)
}
}
|
[
"\"KAFKA_SASL_USER\"",
"\"KAFKA_SASL_PASS\""
] |
[] |
[
"KAFKA_SASL_PASS",
"KAFKA_SASL_USER"
] |
[]
|
["KAFKA_SASL_PASS", "KAFKA_SASL_USER"]
|
go
| 2 | 0 | |
docs/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Peony documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 30 16:36:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
import inspect
import os
import pathlib
import re
import sys
conf_py = pathlib.Path(inspect.getfile(inspect.currentframe())).absolute()
docs = conf_py.parent
maindir = docs.parent
sys.path.insert(0, str(maindir))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon'
]
rtd = "https://%s.readthedocs.io/en/stable"
python_docs = "https://docs.python.org/3"
intersphinx_mapping = {'python': (python_docs, None),
'aiohttp': (rtd % "aiohttp", None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Peony'
copyright = '2016-2017, Florian Badie'
author = 'odrling'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
init = maindir / "peony" / "__init__.py"
with init.open() as stream:
ex = r'__version__\s*=\s*?[\"\']([^\"\']*)'
match = re.search(ex, stream.read())
version = match.group(1)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# on_rtd is whether we are on readthedocs.org, this line of code
# grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need
# to specify it
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Peony v0.2.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a
# favicon of the docs. This file should be a Windows icon file (.ico)
# being 16x16 or 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Peonydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Peony.tex', 'Peony Documentation',
'odrling', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'peony', 'Peony Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Peony', 'Peony Documentation',
author, 'Peony', 'An asynchronous Twitter API client.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
plugins/youtube_dl_echo.py
|
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
import os
import re
import json
import math
import time
import shutil
import random
import ffmpeg
import asyncio
import requests
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from translation import Translation
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.errors import UserNotParticipant, UserBannedInChannel
from database.database import *
from helper_funcs.display_progress import humanbytes
from helper_funcs.help_uploadbot import DownLoadFile
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from datetime import datetime
from PIL import Image
@Client.on_message(filters.private & filters.regex(pattern=".*http.*"))
async def echo(bot, update):
if Config.LOG_CHANNEL:
try:
log_message = await update.forward(Config.LOG_CHANNEL)
log_info = "Message Sender Information\n"
log_info += "\nFirst Name: " + update.from_user.first_name
log_info += "\nUser ID: " + update.from_user.id
if update.from_user.username:
log_info += "\nUsername: " + update.from_user.username
log_info += "\nUser Link: " + update.from_user.mention
await log_message.reply_text(
text=log_info,
disable_web_page_preview=True,
quote=True
)
except Exception as error:
print(error)
logger.info(update.from_user.id)
fmsg = await update.reply_text(text=Translation.CHECKING_LINK, quote=True)
url = update.text
if Config.UPDATE_CHANNEL:
try:
user = await bot.get_chat_member(Config.UPDATE_CHANNEL, update.from_user.id)
if user.status == "kicked":
await bot.edit_message_text(chat_id=update.chat.id, text=Translation.BANNED_USER_TEXT, message_id=fmsg.message_id)
return
except UserNotParticipant:
await bot.edit_message_text(chat_id=update.chat.id, text=Translation.FORCE_SUBSCRIBE_TEXT, message_id=fmsg.message_id, reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text="😎 Join Channel 😎", url=f"https://telegram.me/TMC_BOTX")]]))
return
except Exception:
await bot.edit_message_text(chat_id=update.chat.id, text=Translation.SOMETHING_WRONG, message_id=fmsg.message_id)
return
if update.from_user.id not in Config.AUTH_USERS:
# restrict free users from sending more links
if str(update.from_user.id) in Config.ADL_BOT_RQ:
current_time = time.time()
previous_time = Config.ADL_BOT_RQ[str(update.from_user.id)]
process_max_timeout = round(Config.PROCESS_MAX_TIMEOUT/60)
present_time = round(Config.PROCESS_MAX_TIMEOUT-(current_time - previous_time))
Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time()
if round(current_time - previous_time) < Config.PROCESS_MAX_TIMEOUT:
await bot.edit_message_text(chat_id=update.chat.id, text=Translation.FREE_USER_LIMIT_Q_SZE.format(process_max_timeout, present_time), disable_web_page_preview=True, parse_mode="html", message_id=fmsg.message_id)
return
else:
Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time()
youtube_dl_username = None
youtube_dl_password = None
file_name = None
if "|" in url:
url_parts = url.split("|")
if len(url_parts) == 2:
url = url_parts[0]
file_name = url_parts[1]
elif len(url_parts) == 4:
url = url_parts[0]
file_name = url_parts[1]
youtube_dl_username = url_parts[2]
youtube_dl_password = url_parts[3]
else:
for entity in update.entities:
if entity.type == "text_link":
url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
url = url[o:o + l]
if url is not None:
url = url.strip()
if file_name is not None:
file_name = file_name.strip()
if youtube_dl_username is not None:
youtube_dl_username = youtube_dl_username.strip()
if youtube_dl_password is not None:
youtube_dl_password = youtube_dl_password.strip()
logger.info(url)
logger.info(file_name)
else:
for entity in update.entities:
if entity.type == "text_link":
url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
url = url[o:o + l]
if Config.HTTP_PROXY != "":
command_to_exec = [
"youtube-dl",
"--no-warnings",
"--youtube-skip-dash-manifest",
"-j",
url,
"--proxy", Config.HTTP_PROXY
]
else:
command_to_exec = [
"youtube-dl",
"--no-warnings",
"--youtube-skip-dash-manifest",
"-j",
url
]
if "hotstar" in url:
command_to_exec.append("--geo-bypass-country")
command_to_exec.append("IN")
if youtube_dl_username is not None:
command_to_exec.append("--username")
command_to_exec.append(youtube_dl_username)
if youtube_dl_password is not None:
command_to_exec.append("--password")
command_to_exec.append(youtube_dl_password)
process = await asyncio.create_subprocess_exec(
*command_to_exec,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
# logger.info(e_response)
t_response = stdout.decode().strip()
# logger.info(t_response)
if e_response and "nonnumeric port" not in e_response:
# logger.warn("Status : FAIL", exc.returncode, exc.output)
error_message = e_response.replace("please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.", "")
if "This video is only available for registered users." in error_message:
error_message += Translation.SET_CUSTOM_USERNAME_PASSWORD
await bot.send_message(
chat_id=update.chat.id,
text=Translation.NO_VOID_FORMAT_FOUND.format(str(error_message)),
reply_to_message_id=update.message_id,
parse_mode="html",
disable_web_page_preview=True
)
return False
if t_response:
x_response = t_response
if "\n" in x_response:
x_response, _ = x_response.split("\n")
response_json = json.loads(x_response)
save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".json"
with open(save_ytdl_json_path, "w", encoding="utf8") as outfile:
json.dump(response_json, outfile, ensure_ascii=False)
# logger.info(response_json)
inline_keyboard = []
duration = None
if "duration" in response_json:
duration = response_json["duration"]
if "formats" in response_json:
for formats in response_json["formats"]:
format_id = formats.get("format_id")
format_string = formats.get("format_note")
if format_string is None:
format_string = formats.get("format")
format_ext = formats.get("ext")
approx_file_size = ""
if "filesize" in formats:
approx_file_size = humanbytes(formats["filesize"])
cb_string_video = "{}|{}|{}".format(
"video", format_id, format_ext)
cb_string_file = "{}|{}|{}".format(
"file", format_id, format_ext)
if format_string is not None and "audio only" not in format_string:
ikeyboard = [
InlineKeyboardButton(
"S " + format_string + " video " + approx_file_size + " ",
callback_data=(cb_string_video).encode("UTF-8")
),
InlineKeyboardButton(
"D " + format_ext + " " + approx_file_size + " ",
callback_data=(cb_string_file).encode("UTF-8")
)
]
"""if duration is not None:
cb_string_video_message = "{}|{}|{}".format(
"vm", format_id, format_ext)
ikeyboard.append(
InlineKeyboardButton(
"VM",
callback_data=(
cb_string_video_message).encode("UTF-8")
)
)"""
else:
ikeyboard = [
InlineKeyboardButton(
"SVideo [" +
"] ( " +
approx_file_size + " )",
callback_data=(cb_string_video).encode("UTF-8")
),
InlineKeyboardButton(
"DFile [" +
"] ( " +
approx_file_size + " )",
callback_data=(cb_string_file).encode("UTF-8")
)
]
inline_keyboard.append(ikeyboard)
if duration is not None:
cb_string_64 = "{}|{}|{}".format("audio", "64k", "mp3")
cb_string_128 = "{}|{}|{}".format("audio", "128k", "mp3")
cb_string = "{}|{}|{}".format("audio", "320k", "mp3")
inline_keyboard.append([
InlineKeyboardButton(
"🎶MP3🎶" + "(" + "64 kbps" + ")", callback_data=cb_string_64.encode("UTF-8")),
InlineKeyboardButton(
"🎶MP3🎶 " + "(" + "128 kbps" + ")", callback_data=cb_string_128.encode("UTF-8"))
])
inline_keyboard.append([
InlineKeyboardButton(
"🎶MP3🎶 " + "(" + "320 kbps" + ")", callback_data=cb_string.encode("UTF-8"))
])
else:
format_id = response_json["format_id"]
format_ext = response_json["ext"]
cb_string_file = "{}|{}|{}".format(
"file", format_id, format_ext)
cb_string_video = "{}|{}|{}".format(
"video", format_id, format_ext)
inline_keyboard.append([
InlineKeyboardButton(
"🎞️SVideo🎞️",
callback_data=(cb_string_video).encode("UTF-8")
),
InlineKeyboardButton(
"🗂️DFile🗂️",
callback_data=(cb_string_file).encode("UTF-8")
)
])
cb_string_file = "{}={}={}".format(
"file", format_id, format_ext)
cb_string_video = "{}={}={}".format(
"video", format_id, format_ext)
inline_keyboard.append([
InlineKeyboardButton(
"video",
callback_data=(cb_string_video).encode("UTF-8")
),
InlineKeyboardButton(
"file",
callback_data=(cb_string_file).encode("UTF-8")
)
])
reply_markup = InlineKeyboardMarkup(inline_keyboard)
thumbnail = Config.DEF_THUMB_NAIL_VID_S
thumbnail_image = Config.DEF_THUMB_NAIL_VID_S
thumb_image_path = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + ".jpg"
if not os.path.exists(thumb_image_path):
mes = await thumb(update.from_user.id)
if mes is not None:
m = await bot.get_messages(update.chat.id, mes.msg_id)
await m.download(file_name=thumb_image_path)
thumb_image_path = thumb_image_path
else:
if "thumbnail" in response_json:
if response_json["thumbnail"] is not None:
thumbnail = response_json["thumbnail"]
thumbnail_image = response_json["thumbnail"]
thumb_image_path = DownLoadFile(
thumbnail_image,
Config.DOWNLOAD_LOCATION + "/" +
str(update.from_user.id) + ".jpg",
Config.CHUNK_SIZE,
None, # bot,
Translation.DOWNLOAD_START,
update.message_id,
update.chat.id
)
await fmsg.delete()
await bot.send_message(
chat_id=update.chat.id,
text=Translation.FORMAT_SELECTION.format(thumbnail) + "\n\n" + Translation.SET_CUSTOM_USERNAME_PASSWORD,
reply_markup=reply_markup,
parse_mode="html",
reply_to_message_id=update.message_id
)
else:
inline_keyboard = []
cb_string_file = "{}={}={}".format(
"file", "LFO", "NONE")
cb_string_video = "{}={}={}".format(
"video", "OFL", "ENON")
inline_keyboard.append([
InlineKeyboardButton(
"SVideo",
callback_data=(cb_string_video).encode("UTF-8")
),
InlineKeyboardButton(
"DFile",
callback_data=(cb_string_file).encode("UTF-8")
)
])
reply_markup = InlineKeyboardMarkup(inline_keyboard)
await fmsg.delete()
await bot.send_message(
chat_id=update.chat.id,
text=Translation.FORMAT_SELECTION.format(""),
reply_markup=reply_markup,
parse_mode="html",
reply_to_message_id=update.message_id
)
|
[] |
[] |
[
"WEBHOOK"
] |
[]
|
["WEBHOOK"]
|
python
| 1 | 0 | |
main.py
|
import time
import random
import os
import logging
import sys
from datetime import datetime
from datetime import timedelta
from urllib.parse import urlparse
from urllib.parse import urljoin
import click
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from pyvirtualdisplay import Display
from models import db
from models import Following
from models import Comment
from models import Like
username = os.environ['instagram_username']
password = os.environ['instagram_password']
dir_path = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(
stream=sys.stdout,
level='INFO',
format='%(asctime)s %(levelname)s:%(name)s:%(message)s'
)
log = logging.getLogger('app')
def sleep(duration):
log.info('Sleeping for {} seconds'.format(duration))
time.sleep(duration)
def have_like(p):
return random.randint(1, 100) < p
def get_url(driver):
url = urlparse(driver.current_url)
return urljoin('{}://{}'.format(url.scheme, url.netloc), url.path)
def get_driver(gui=True):
options = webdriver.ChromeOptions()
if not gui:
options.add_argument('headless')
options.add_argument('--no-sandbox')
options.add_argument('window-size=1200x600')
driver = webdriver.Chrome(
executable_path='/usr/local/bin/chromedriver',
chrome_options=options
)
driver.implicitly_wait(15)
return driver
def login(driver, username, password):
login_btn = driver.find_element_by_xpath("//p[@class='izU2O']/a[text()='Log in']")
login_btn.click()
sleep(5)
login_input = driver.find_element_by_xpath("//INPUT[@name='username']")
login_input.send_keys(username)
password_input = driver.find_element_by_xpath("//INPUT[@type='password']")
password_input.send_keys(password)
password_input.send_keys(Keys.RETURN)
sleep(10)
def search(driver, tag):
driver.get('https://www.instagram.com/explore/tags/{tag}/'.format(tag=tag))
sleep(4)
first_image = driver.find_element_by_xpath(
"//article/div[2]/div[1]/div[1]/div[1]"
)
first_image.click()
sleep(2)
def go_to_next_photo(driver):
try:
nex_btn = driver.find_element_by_xpath(
"//a[contains(@class, coreSpriteRightPaginationArrow)][text()='Next']"
)
except Exception:
driver.save_screenshot('screenshot.png')
else:
nex_btn.click()
time.sleep(1)
def is_already_liked(driver):
try:
driver.find_element_by_xpath("//span[@aria-label='Like']")
except NoSuchElementException:
log.info('Picture has already been liked {}'.format(driver.current_url))
return True
else:
log.info('Picture has NOT been liked yet {}'.format(driver.current_url))
return False
def like_post(driver):
url = get_url(driver)
try:
Like.select().where(Like.url == url).get()
except Like.DoesNotExist:
pass
else:
log.info('Post has already been liked {url}'.format(url=url))
return False
try:
like_btn = driver.find_element_by_xpath("//span[@aria-label='Like']")
except NoSuchElementException:
log.info('Could not find like button {}'.format(driver.current_url))
time.sleep(1)
return False
else:
log.info('Found like button. Trying to like {}'.format(driver.current_url))
like_btn.click()
Like.create(url=url)
log.info('Liked picture {url}'.format(url=url))
return True
def comment_post(driver, text):
url = get_url(driver)
try:
Comment.select().where(Comment.url == url).get()
except Comment.DoesNotExist:
pass
else:
log.info('Post has already been commented {url}'.format(url=url))
return False
try:
comment_input = driver.find_element_by_xpath('//TEXTAREA[@placeholder="Add a comment…"]')
except NoSuchElementException as e:
log.info(e)
return False
else:
# comment_input.click()
# comment_input.clear()
# time.sleep(1)
# comment_input = driver.find_element_by_xpath('//TEXTAREA[@placeholder="Add a comment…"]')
# --------------------
driver.execute_script(
"arguments[0].value = '{} ';".format(text), comment_input
)
# An extra space is added here and then deleted.
# This forces the input box to update its ReactJS state
comment_input.send_keys("\b")
comment_input = driver.find_element_by_xpath('//TEXTAREA[@placeholder="Add a comment…"]')
comment_input.submit()
# --------------------
# comment_input.send_keys(text)
# comment_input.send_keys(Keys.RETURN)
# comment_input.clear()
Comment.create(url=url, comment=text)
log.info('Commented picture {url} with "{text}"'.format(url=url, text=text))
time.sleep(1)
return True
def subscribe(driver):
name_label = driver.find_element_by_xpath("//article/header//div[@class='e1e1d']/a[text()]")
name = name_label.text
follow_btn = driver.find_element_by_xpath("//article/header/div//button[text()]")
try:
following = Following.select().where(Following.name == name).get()
except Following.DoesNotExist:
pass
else:
log.info(
'Already subscribed on user: @{user} ({following})'.format(
user=name,
following=following
)
)
return False
btn_text = follow_btn.text
if btn_text == 'Follow':
log.info('Going to subscribe on user: @{user}'.format(user=name))
try:
follow_btn.click()
time.sleep(1)
except Exception as e:
log.info(e)
else:
Following.create(name=name)
return True
else:
log.info('Already subscribed on user: @{user}'.format(user=name))
return False
def get_random_comment():
comments = [
'Nice',
'Nice photo',
'Nice picture',
'Nice capture',
'Nice image',
'Nice shot',
'Great photo',
'Great job',
'Awesome picture',
'awesome shot',
'Like it',
'Like this picture',
'Like this photo',
'Like this image',
'Beautiful',
'Beautiful photo',
'Beautiful picture',
'Lovely picture',
'Lovely photo',
'Amazing',
'Amazing shot',
'Amazing capture',
'Amazing photo',
'Wonderful shot',
'Wonderful picture',
'Wonderful photo',
]
return random.choice(comments)
@click.group()
def cli():
pass
@cli.command()
@click.option('--tag', default='landscape', help='Instagram tag')
@click.option('--count', default=100, help='Number of user to follow')
@click.option('--gui/--no-gui', default=True, help='GUI')
def run_follower(tag, count, gui):
driver = get_driver(gui)
driver.get("https://www.instagram.com/")
login(driver, username=username, password=password)
search(driver, tag=tag)
liked = 0
commented = 0
subscribed = 0
while liked < count:
go_to_next_photo(driver)
was_liked = like_post(driver)
if was_liked:
liked += 1
# if have_like(15) and comment_post(driver, text=get_random_comment()):
# if comment_post(driver, text=get_random_comment()):
# commented += 1
if have_like(33) and subscribe(driver):
subscribed += 1
log.info('Liked: {}, Commented: {} Subscribed {}'.format(liked, commented, subscribed))
if was_liked:
duration = random.randint(20, 60)
sleep(duration)
else:
duration = random.randint(1, 8)
sleep(duration)
driver.close()
@cli.command()
@click.option('--count', default=100, help='Number of user to follow')
@click.option('--gui/--no-gui', default=True, help='GUI')
def run_unfollower(count, gui):
initial_count = count
driver = get_driver(gui)
driver.implicitly_wait(3)
driver.get("https://www.instagram.com/")
login(driver, username=username, password=password)
following_users = (
Following.select()
.where(
Following.is_following == True,
Following.date_created < datetime.now() - timedelta(days=14)
)
.order_by(Following.date_created)
)
for following in following_users:
if count <= 0:
return
log.info(
'Going to unfollow `@{user}` ({date})'.format(
user=following.name, date=following.date_created
)
)
driver.get("https://www.instagram.com/{name}".format(name=following.name))
time.sleep(1)
try:
unfollow_btn = driver.find_element_by_xpath("//button[text()='Following']")
except NoSuchElementException:
still_following = False
log.info('Already not following user `@{user}`'.format(user=following.name))
following.is_following = False
following.save()
else:
log.info('Still following user `@{user}`'.format(user=following.name))
still_following = True
unfollow_btn.click()
duration = random.randint(5, 10)
sleep(duration)
try:
unfollow_btn = driver.find_element_by_xpath(
"//div[@class='piCib']//button[text()='Unfollow']"
)
except NoSuchElementException:
pass
else:
still_following = True
unfollow_btn.click()
sleep(2)
tries = 0
while still_following:
driver.refresh()
try:
driver.find_element_by_xpath("//button[text()='Follow']")
except NoSuchElementException:
pass
else:
still_following = False
count -= 1
try:
driver.find_element_by_xpath("//button[text()='Follow Back']")
except NoSuchElementException:
pass
else:
still_following = False
count -= 1
if still_following:
try:
unfollow_btn = driver.find_element_by_xpath("//button[text()='Following']")
except NoSuchElementException:
pass
else:
log.info(
'Still following user `@{user}` (tries {tries})'.format(
user=following.name,
tries=tries
)
)
still_following = True
unfollow_btn.click()
if tries == 0:
break
tries += 1
log.info('-- {count} of {initial_count} users are unfollowed --'.format(
count=initial_count - count, initial_count=initial_count
))
driver.close()
@cli.command()
def init_db():
db.connect()
db.create_tables([Following, Comment, Like])
if __name__ == "__main__":
cli()
|
[] |
[] |
[
"instagram_password",
"instagram_username"
] |
[]
|
["instagram_password", "instagram_username"]
|
python
| 2 | 0 | |
exporter/awsemfexporter/conn.go
|
// Copyright 2020, OpenTelemetry Authors
// Portions of this file Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awsemfexporter
import (
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"go.uber.org/zap"
)
type connAttr interface {
newAWSSession(logger *zap.Logger, roleArn string, region string) (*session.Session, error)
getEC2Region(s *session.Session) (string, error)
}
// Conn implements connAttr interface.
type Conn struct{}
func (c *Conn) getEC2Region(s *session.Session) (string, error) {
return ec2metadata.New(s).Region()
}
// AWS STS endpoint constants
const (
STSEndpointPrefix = "https://sts."
STSEndpointSuffix = ".amazonaws.com"
STSAwsCnPartitionIDSuffix = ".amazonaws.com.cn" // AWS China partition.
)
// GetAWSConfigSession returns AWS config and session instances.
func GetAWSConfigSession(logger *zap.Logger, cn connAttr, cfg *Config) (*aws.Config, *session.Session, error) {
var s *session.Session
var err error
var awsRegion string
regionEnv := os.Getenv("AWS_REGION")
if cfg.Region == "" && regionEnv != "" {
awsRegion = regionEnv
logger.Debug("Fetch region from environment variables", zap.String("region", awsRegion))
} else if cfg.Region != "" {
awsRegion = cfg.Region
logger.Debug("Fetch region from commandline/config file", zap.String("region", awsRegion))
} else if !cfg.NoVerifySSL {
var es *session.Session
es, err = getDefaultSession(logger)
if err != nil {
logger.Error("Unable to retrieve default session", zap.Error(err))
} else {
awsRegion, err = cn.getEC2Region(es)
if err != nil {
logger.Error("Unable to retrieve the region from the EC2 instance", zap.Error(err))
} else {
logger.Debug("Fetch region from ec2 metadata", zap.String("region", awsRegion))
}
}
}
if awsRegion == "" {
msg := "Cannot fetch region variable from config file, environment variables and ec2 metadata."
logger.Error(msg)
return nil, nil, awserr.New("NoAwsRegion", msg, nil)
}
s, err = cn.newAWSSession(logger, cfg.RoleARN, awsRegion)
if err != nil {
return nil, nil, err
}
config := &aws.Config{
Region: aws.String(awsRegion),
DisableParamValidation: aws.Bool(true),
MaxRetries: aws.Int(cfg.MaxRetries),
Endpoint: aws.String(cfg.Endpoint),
}
return config, s, nil
}
func (c *Conn) newAWSSession(logger *zap.Logger, roleArn string, region string) (*session.Session, error) {
var s *session.Session
var err error
if roleArn == "" {
s, err = getDefaultSession(logger)
if err != nil {
return s, err
}
} else {
stsCreds, _ := getSTSCreds(logger, region, roleArn)
s, err = session.NewSession(&aws.Config{
Credentials: stsCreds,
})
if err != nil {
logger.Error("Error in creating session object : ", zap.Error(err))
return s, err
}
}
return s, nil
}
// getSTSCreds gets STS credentials from regional endpoint. ErrCodeRegionDisabledException is received if the
// STS regional endpoint is disabled. In this case STS credentials are fetched from STS primary regional endpoint
// in the respective AWS partition.
func getSTSCreds(logger *zap.Logger, region string, roleArn string) (*credentials.Credentials, error) {
t, err := getDefaultSession(logger)
if err != nil {
return nil, err
}
stsCred := getSTSCredsFromRegionEndpoint(logger, t, region, roleArn)
// Make explicit call to fetch credentials.
_, err = stsCred.Get()
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
err = nil
switch aerr.Code() {
case sts.ErrCodeRegionDisabledException:
logger.Error("Region ", zap.String("region", region), zap.String("error", aerr.Error()))
stsCred = getSTSCredsFromPrimaryRegionEndpoint(logger, t, roleArn, region)
}
}
}
return stsCred, err
}
// getSTSCredsFromRegionEndpoint fetches STS credentials for provided roleARN from regional endpoint.
// AWS STS recommends that you provide both the Region and endpoint when you make calls to a Regional endpoint.
// Reference: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code
func getSTSCredsFromRegionEndpoint(logger *zap.Logger, sess *session.Session, region string,
roleArn string) *credentials.Credentials {
regionalEndpoint := getSTSRegionalEndpoint(region)
// If regionalEndpoint is "", the SDK falls back to the global STS endpoint for classic regions;
// for opt-in regions (e.g. ap-east-1, HKG) the region value itself yields an STS regional endpoint.
// This only happens when the provided region is not listed in aws_regions.go.
c := &aws.Config{Region: aws.String(region), Endpoint: &regionalEndpoint}
st := sts.New(sess, c)
logger.Info("STS Endpoint ", zap.String("endpoint", st.Endpoint))
return stscreds.NewCredentialsWithClient(st, roleArn)
}
// getSTSCredsFromPrimaryRegionEndpoint fetches STS credentials for provided roleARN from primary region endpoint in
// the respective partition.
func getSTSCredsFromPrimaryRegionEndpoint(logger *zap.Logger, t *session.Session, roleArn string,
region string) *credentials.Credentials {
logger.Info("Credentials for provided RoleARN being fetched from STS primary region endpoint.")
partitionID := getPartition(region)
if partitionID == endpoints.AwsPartitionID {
return getSTSCredsFromRegionEndpoint(logger, t, endpoints.UsEast1RegionID, roleArn)
} else if partitionID == endpoints.AwsCnPartitionID {
return getSTSCredsFromRegionEndpoint(logger, t, endpoints.CnNorth1RegionID, roleArn)
} else if partitionID == endpoints.AwsUsGovPartitionID {
return getSTSCredsFromRegionEndpoint(logger, t, endpoints.UsGovWest1RegionID, roleArn)
}
return nil
}
func getSTSRegionalEndpoint(r string) string {
p := getPartition(r)
var e string
if p == endpoints.AwsPartitionID || p == endpoints.AwsUsGovPartitionID {
e = STSEndpointPrefix + r + STSEndpointSuffix
} else if p == endpoints.AwsCnPartitionID {
e = STSEndpointPrefix + r + STSAwsCnPartitionIDSuffix
}
return e
}
func getDefaultSession(logger *zap.Logger) (*session.Session, error) {
result, serr := session.NewSession()
if serr != nil {
logger.Error("Error in creating session object ", zap.Error(serr))
return result, serr
}
return result, nil
}
// getPartition return AWS Partition for the provided region.
func getPartition(region string) string {
p, _ := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), region)
return p.ID()
}
|
[
"\"AWS_REGION\""
] |
[] |
[
"AWS_REGION"
] |
[]
|
["AWS_REGION"]
|
go
| 1 | 0 | |
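getSTSRegionalEndpoint and getPartition above encode a simple rule: standard and GovCloud regions get sts.<region>.amazonaws.com, China regions get sts.<region>.amazonaws.com.cn. A rough sketch of that rule (the partition detection here is a naive prefix check, not the SDK's endpoints.PartitionForRegion):

STS_PREFIX = "https://sts."

def sts_regional_endpoint(region):
    # Naive partition check: cn-* regions belong to the China partition.
    if region.startswith("cn-"):
        return STS_PREFIX + region + ".amazonaws.com.cn"
    return STS_PREFIX + region + ".amazonaws.com"

print(sts_regional_endpoint("us-east-1"))   # https://sts.us-east-1.amazonaws.com
print(sts_regional_endpoint("cn-north-1"))  # https://sts.cn-north-1.amazonaws.com.cn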
scripts/notify.py
|
#!/usr/bin/env python
import os
import sys
import slackweb
username = 'CircleCI'
icon_url = 'https://i.imgur.com/FLjAA35.png'
def main():
message = sys.argv[1]
if message == 'started':
_notify('Deploy started', '#66d3e4')
elif message == 'successful':
_notify('Deploy successful', '#41aa58')
elif message == 'failed':
_notify('Deploy failed', '#d10c20')
else:
raise RuntimeError('Invalid message')
def _notify(text, color):
if 'CIRCLE_BUILD_URL' in os.environ:
text += ' <{}|#{}>'.format(
os.environ['CIRCLE_BUILD_URL'],
os.environ['CIRCLE_BUILD_NUM']
)
slack = slackweb.Slack(url=os.environ['SLACK_INCOMING_WEBHOOK'])
attachment = {
'color': color,
'text': text,
'author_name': os.environ['CIRCLE_PROJECT_REPONAME'],
}
slack.notify(
username=username,
icon_url=icon_url,
attachments=[attachment]
)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CIRCLE_BUILD_NUM",
"SLACK_INCOMING_WEBHOOK",
"CIRCLE_PROJECT_REPONAME",
"CIRCLE_BUILD_URL"
] |
[]
|
["CIRCLE_BUILD_NUM", "SLACK_INCOMING_WEBHOOK", "CIRCLE_PROJECT_REPONAME", "CIRCLE_BUILD_URL"]
|
python
| 4 | 0 | |
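notify.py reads everything from the environment and takes a single positional argument. A hypothetical local dry run (the webhook URL and CircleCI values below are placeholders, and the working directory is assumed to be the repo root):

import os
import subprocess

os.environ.setdefault("SLACK_INCOMING_WEBHOOK", "https://hooks.slack.com/services/T000/B000/XXXX")  # placeholder
os.environ.setdefault("CIRCLE_PROJECT_REPONAME", "example-repo")
os.environ.setdefault("CIRCLE_BUILD_URL", "https://circleci.com/gh/example/example-repo/1")
os.environ.setdefault("CIRCLE_BUILD_NUM", "1")

# Equivalent to the CI step: python scripts/notify.py successful
subprocess.run(["python", "scripts/notify.py", "successful"], check=False)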
src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapred.JvmManager.JvmEnv;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
/**
* A {@link TaskController} that runs the task JVMs as the user
* who submits the job.
*
* This class executes a setuid executable to implement methods
* of the {@link TaskController}, including launching the task
* JVM and killing it when needed, and also initializing and
* finalizing the task environment.
* <p> The setuid executable is launched using the command line:</p>
* <p>task-controller user-name command command-args, where</p>
* <p>user-name is the name of the owner who submits the job</p>
* <p>command is one of the cardinal value of the
* {@link LinuxTaskController.TaskCommands} enumeration</p>
* <p>command-args depends on the command being launched.</p>
*
* In addition to running and killing tasks, the class also
* sets up appropriate access for the directories and files
* that will be used by the tasks.
*/
class LinuxTaskController extends TaskController {
private static final Log LOG =
LogFactory.getLog(LinuxTaskController.class);
// Name of the executable script that will contain the child
// JVM command line. See writeCommand for details.
private static final String COMMAND_FILE = "taskjvm.sh";
// Path to the setuid executable.
private static String taskControllerExe;
static {
// the task-controller is expected to be under the $HADOOP_HOME/bin
// directory.
File hadoopBin = new File(System.getenv("HADOOP_HOME"), "bin");
taskControllerExe =
new File(hadoopBin, "task-controller").getAbsolutePath();
}
// The list of directory paths specified in the
// variable mapred.local.dir. This is used to determine
// which among the list of directories is picked up
// for storing data for a particular task.
private String[] mapredLocalDirs;
// permissions to set on files and directories created.
// When localized files are handled securely, this string
// will change to something more restrictive. Until then,
// it opens up the permissions for all, so that the tasktracker
// and job owners can access files together.
private static final String FILE_PERMISSIONS = "ugo+rwx";
// permissions to set on components of the path leading to
// localized files and directories. Read and execute permissions
// are required for different users to be able to access the
// files.
private static final String PATH_PERMISSIONS = "go+rx";
public LinuxTaskController() {
super();
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
mapredLocalDirs = conf.getStrings("mapred.local.dir");
//Setting of the permissions of the local directory is done in
//setup()
}
/**
* List of commands that the setuid script will execute.
*/
enum TaskCommands {
LAUNCH_TASK_JVM,
KILL_TASK_JVM
}
/**
* Launch a task JVM that will run as the owner of the job.
*
* This method launches a task JVM by executing a setuid
* executable that will switch to the user and run the
* task.
*/
@Override
void launchTaskJVM(TaskController.TaskControllerContext context)
throws IOException {
JvmEnv env = context.env;
// get the JVM command line.
String cmdLine =
TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr,
env.logSize, env.pidFile);
// write the command to a file in the
// task specific cache directory
writeCommand(cmdLine, getTaskCacheDirectory(context));
// Call the taskcontroller with the right parameters.
List<String> launchTaskJVMArgs = buildTaskCommandArgs(context);
ShellCommandExecutor shExec = buildTaskControllerExecutor(
TaskCommands.LAUNCH_TASK_JVM,
context.task.getUser(),
launchTaskJVMArgs, env);
context.shExec = shExec;
shExec.execute();
LOG.debug("output after executing task jvm = " + shExec.getOutput());
}
// convenience API for building command arguments for specific commands
private List<String> buildTaskCommandArgs(TaskControllerContext context) {
List<String> commandArgs = new ArrayList<String>(3);
String taskId = context.task.getTaskID().toString();
String jobId = getJobId(context);
commandArgs.add(jobId);
if(!context.task.isTaskCleanupTask()) {
commandArgs.add(taskId);
}else {
commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX);
}
LOG.debug("getting the task directory as: "
+ getTaskCacheDirectory(context));
commandArgs.add(getDirectoryChosenForTask(
new File(getTaskCacheDirectory(context)),
context));
return commandArgs;
}
// get the Job ID from the information in the TaskControllerContext
private String getJobId(TaskControllerContext context) {
String taskId = context.task.getTaskID().toString();
TaskAttemptID tId = TaskAttemptID.forName(taskId);
String jobId = tId.getJobID().toString();
return jobId;
}
// Get the directory from the list of directories configured
// in mapred.local.dir chosen for storing data pertaining to
// this task.
private String getDirectoryChosenForTask(File directory,
TaskControllerContext context) {
String jobId = getJobId(context);
String taskId = context.task.getTaskID().toString();
for (String dir : mapredLocalDirs) {
File mapredDir = new File(dir);
File taskDir = new File(mapredDir, TaskTracker.getLocalTaskDir(
jobId, taskId, context.task.isTaskCleanupTask()));
if (directory.equals(taskDir)) {
return dir;
}
}
LOG.error("Couldn't parse task cache directory correctly");
throw new IllegalArgumentException("invalid task cache directory "
+ directory.getAbsolutePath());
}
/**
* Kill a launched task JVM running as the user of the job.
*
* This method will launch the task controller setuid executable
* that in turn will kill the task JVM by sending a kill signal.
*/
void killTaskJVM(TaskControllerContext context) {
if(context.task == null) {
LOG.info("Context task null not killing the JVM");
return;
}
JvmEnv env = context.env;
List<String> killTaskJVMArgs = buildTaskCommandArgs(context);
try {
ShellCommandExecutor shExec = buildTaskControllerExecutor(
TaskCommands.KILL_TASK_JVM,
context.task.getUser(),
killTaskJVMArgs,
context.env);
shExec.execute();
LOG.debug("Command output :" +shExec.getOutput());
} catch (IOException ioe) {
LOG.warn("IOException in killing task: " + ioe.getMessage());
}
}
/**
* Setup appropriate permissions for directories and files that
* are used by the task.
*
* As the LinuxTaskController launches tasks as a user, different
* from the daemon, all directories and files that are potentially
* used by the tasks are setup with appropriate permissions that
* will allow access.
*
* Until secure data handling is implemented (see HADOOP-4491 and
* HADOOP-4493, for e.g.), the permissions are set up to allow
* read, write and execute access for everyone. This will be
* changed to restricted access as data is handled securely.
*/
void initializeTask(TaskControllerContext context) {
// Setup permissions for the job and task cache directories.
setupTaskCacheFileAccess(context);
// setup permissions for task log directory
setupTaskLogFileAccess(context);
}
// Allows access for the task to create log files under
// the task log directory
private void setupTaskLogFileAccess(TaskControllerContext context) {
TaskAttemptID taskId = context.task.getTaskID();
File f = TaskLog.getTaskLogFile(taskId, TaskLog.LogName.SYSLOG);
String taskAttemptLogDir = f.getParentFile().getAbsolutePath();
changeDirectoryPermissions(taskAttemptLogDir, FILE_PERMISSIONS, false);
}
// Allows access for the task to read, write and execute
// the files under the job and task cache directories
private void setupTaskCacheFileAccess(TaskControllerContext context) {
String taskId = context.task.getTaskID().toString();
JobID jobId = JobID.forName(getJobId(context));
//Change permission for the task across all the disks
for(String localDir : mapredLocalDirs) {
File f = new File(localDir);
File taskCacheDir = new File(f,TaskTracker.getLocalTaskDir(
jobId.toString(), taskId, context.task.isTaskCleanupTask()));
if(taskCacheDir.exists()) {
changeDirectoryPermissions(taskCacheDir.getPath(),
FILE_PERMISSIONS, true);
}
}//end of local directory Iteration
}
// convenience method to execute chmod.
private void changeDirectoryPermissions(String dir, String mode,
boolean isRecursive) {
int ret = 0;
try {
ret = FileUtil.chmod(dir, mode, isRecursive);
} catch (Exception e) {
LOG.warn("Exception in changing permissions for directory " + dir +
". Exception: " + e.getMessage());
}
if (ret != 0) {
LOG.warn("Could not change permissions for directory " + dir);
}
}
// convenience API to create the executor for launching the
// setuid script.
private ShellCommandExecutor buildTaskControllerExecutor(TaskCommands command,
String userName,
List<String> cmdArgs, JvmEnv env)
throws IOException {
String[] taskControllerCmd = new String[3 + cmdArgs.size()];
taskControllerCmd[0] = taskControllerExe;
taskControllerCmd[1] = userName;
taskControllerCmd[2] = String.valueOf(command.ordinal());
int i = 3;
for (String cmdArg : cmdArgs) {
taskControllerCmd[i++] = cmdArg;
}
if (LOG.isDebugEnabled()) {
for (String cmd : taskControllerCmd) {
LOG.debug("taskctrl command = " + cmd);
}
}
ShellCommandExecutor shExec = null;
if(env.workDir != null && env.workDir.exists()) {
shExec = new ShellCommandExecutor(taskControllerCmd,
env.workDir, env.env);
} else {
shExec = new ShellCommandExecutor(taskControllerCmd);
}
return shExec;
}
// Return the task specific directory under the cache.
private String getTaskCacheDirectory(TaskControllerContext context) {
// In the case of JVM reuse, the task specific directory
// is different from what is set with respect with
// env.workDir. Hence building this from the taskId every time.
String taskId = context.task.getTaskID().toString();
File cacheDirForJob = context.env.workDir.getParentFile().getParentFile();
if(context.task.isTaskCleanupTask()) {
taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX;
}
return new File(cacheDirForJob, taskId).getAbsolutePath();
}
// Write the JVM command line to a file under the specified directory
// Note that the JVM will be launched using a setuid executable, and
// could potentially contain strings defined by a user. Hence, to
// prevent special character attacks, we write the command line to
// a file and execute it.
private void writeCommand(String cmdLine,
String directory) throws IOException {
PrintWriter pw = null;
String commandFile = directory + File.separator + COMMAND_FILE;
LOG.info("Writing commands to " + commandFile);
try {
FileWriter fw = new FileWriter(commandFile);
BufferedWriter bw = new BufferedWriter(fw);
pw = new PrintWriter(bw);
pw.write(cmdLine);
} catch (IOException ioe) {
LOG.error("Caught IOException while writing JVM command line to file. "
+ ioe.getMessage());
} finally {
if (pw != null) {
pw.close();
}
// set execute permissions for all on the file.
File f = new File(commandFile);
if (f.exists()) {
f.setReadable(true, false);
f.setExecutable(true, false);
}
}
}
/**
* Sets up the permissions of the following directories:
*
* Job cache directory
* Archive directory
* Hadoop log directories
*
*/
@Override
void setup() {
//set up job cache directory and associated permissions
String localDirs[] = this.mapredLocalDirs;
for(String localDir : localDirs) {
//Cache root
File cacheDirectory = new File(localDir,TaskTracker.getCacheSubdir());
File jobCacheDirectory = new File(localDir,TaskTracker.getJobCacheSubdir());
if(!cacheDirectory.exists()) {
if(!cacheDirectory.mkdirs()) {
LOG.warn("Unable to create cache directory : " +
cacheDirectory.getPath());
}
}
if(!jobCacheDirectory.exists()) {
if(!jobCacheDirectory.mkdirs()) {
LOG.warn("Unable to create job cache directory : " +
jobCacheDirectory.getPath());
}
}
//Give world writable permission for every directory under
//mapred-local-dir.
//Child tries to write files under it when executing.
changeDirectoryPermissions(localDir, FILE_PERMISSIONS, true);
}//end of local directory manipulations
//setting up perms for user logs
File taskLog = TaskLog.getUserLogDir();
changeDirectoryPermissions(taskLog.getPath(), FILE_PERMISSIONS,false);
}
/*
* Create Job directories across disks and set their permissions to 777
* This way when tasks are run we just need to setup permissions for
* task folder.
*/
@Override
void initializeJob(JobID jobid) {
for(String localDir : this.mapredLocalDirs) {
File jobDirectory = new File(localDir,
TaskTracker.getLocalJobDir(jobid.toString()));
if(!jobDirectory.exists()) {
if(!jobDirectory.mkdir()) {
LOG.warn("Unable to create job cache directory : "
+ jobDirectory.getPath());
continue;
}
}
//Should be recursive because the jar and work folders might be
//present under the job cache directory
changeDirectoryPermissions(
jobDirectory.getPath(), FILE_PERMISSIONS, true);
}
}
}
|
[
"\"HADOOP_HOME\""
] |
[] |
[
"HADOOP_HOME"
] |
[]
|
["HADOOP_HOME"]
|
java
| 1 | 0 | |
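buildTaskControllerExecutor above always assembles argv as task-controller <user-name> <command-ordinal> <command-args...>, with the ordinal taken from the TaskCommands enum and the args built by buildTaskCommandArgs. A small sketch of that argv shape (the path and IDs are illustrative):

LAUNCH_TASK_JVM, KILL_TASK_JVM = 0, 1  # ordinals of the TaskCommands enum

def build_task_controller_cmd(exe, user, command, args):
    # Mirrors taskControllerCmd: executable, job owner, command ordinal, then command args.
    return [exe, user, str(command)] + list(args)

print(build_task_controller_cmd(
    "/opt/hadoop/bin/task-controller",   # resolved from $HADOOP_HOME/bin in the Java code
    "alice",
    LAUNCH_TASK_JVM,
    ["job_202301010000_0001", "attempt_202301010000_0001_m_000000_0", "/data/mapred/local"],
))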
examples/http/main.go
|
package main
import (
"context"
"fmt"
"net/http"
"os"
"github.com/bloom42/rz-go/v2"
"github.com/bloom42/rz-go/v2/log"
"github.com/bloom42/rz-go/v2/rzhttp"
"github.com/go-chi/chi"
"github.com/google/uuid"
)
func main() {
env := os.Getenv("GO_ENV")
port := os.Getenv("PORT")
if port == "" {
port = "9090"
}
log.SetLogger(log.With(
rz.Fields(
rz.Caller(true),
rz.String("service", "api"), rz.String("host", "abcd.local"), rz.String("environment", env),
),
))
router := chi.NewRouter()
// replace size field name by latency and disable userAgent logging
loggingMiddleware := rzhttp.Handler(log.Logger(), rzhttp.Duration("latency"), rzhttp.UserAgent(""))
// here the order matters, otherwise loggingMiddleware won't see the request ID
router.Use(requestIDMiddleware)
router.Use(loggingMiddleware)
router.Use(injectLoggerMiddleware(log.Logger()))
router.Get("/", helloWorld)
err := http.ListenAndServe(":"+port, router)
if err != nil {
log.Fatal("listening", rz.Err(err))
}
}
func requestIDMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
uuidv4, _ := uuid.NewRandom()
requestID := uuidv4.String()
w.Header().Set("X-Bloom-Request-ID", requestID)
ctx := context.WithValue(r.Context(), rzhttp.RequestIDCtxKey, requestID)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func injectLoggerMiddleware(logger rz.Logger) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if rid, ok := r.Context().Value(rzhttp.RequestIDCtxKey).(string); ok {
reqLogger := logger.With(rz.Fields(rz.String("request_id", rid)))
ctx := reqLogger.ToCtx(r.Context())
r = r.WithContext(ctx)
}
next.ServeHTTP(w, r)
})
}
}
func helloWorld(w http.ResponseWriter, r *http.Request) {
logger := rz.FromCtx(r.Context())
logger.Info("hello from GET /")
fmt.Fprintf(w, "Hello, you've requested: %s\n", r.URL.Path)
}
|
[
"\"GO_ENV\"",
"\"PORT\""
] |
[] |
[
"PORT",
"GO_ENV"
] |
[]
|
["PORT", "GO_ENV"]
|
go
| 2 | 0 | |
function/src/task/task.py
|
import json
from jsonschema import validate, ValidationError
import boto3
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
import os
import logging
from decimal import Decimal
from datetime import datetime
import ulid
logging.basicConfig(level=logging.INFO)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])
class TaskNotFoundError(Exception):
pass
class NotTaskOwnerError(Exception):
pass
class Task:
def __init__(
self,
title: str,
owner: str,
id: str = None,
meta: str = 'latest',
priority: str = 'medium',
is_done: bool = False,
content: str = None,
created_at: int = None,
updated_at: int = None,
needs_validation: bool = True
):
self.id = id
self.meta = meta
self.title = title
self.owner = owner
self.content = content
self.is_done = is_done
self.priority = priority
self.created_at = created_at
self.updated_at = updated_at
try:
if needs_validation:
with open(os.path.dirname(__file__) + '/task.json') as f:
schema = json.load(f)
validate(vars(self), schema)
except ValidationError as e:
logging.error(e)
raise e
except Exception as e:
logging.error(e)
raise e
def save(self, user_id: str = None):
if self.id is None:
self.id = str(ulid.new())
self.created_at = int(datetime.now().timestamp())
self.updated_at = int(datetime.now().timestamp())
item = self.to_savable_object()
item['id'] = 'Task:{}'.format(self.id)
try:
table.put_item(
Item=item,
ConditionExpression='attribute_not_exists(id)'
)
return self.to_returnable_object()
except ClientError as e:
logging.error(e)
raise e
except Exception as e:
logging.error(e)
raise e
else:
if user_id is None or user_id != self.owner:
raise NotTaskOwnerError
self.updated_at = int(datetime.now().timestamp())
item = self.to_savable_object()
item['id'] = 'Task:{}'.format(self.id)
try:
table.put_item(
Item=item,
ConditionExpression='attribute_exists(id)'
)
return self.to_returnable_object()
except ClientError as e:
logging.error(e)
raise e
except Exception as e:
logging.error(e)
raise e
@classmethod
def get(cls, user_id, task_id):
try:
item = table.get_item(
Key={
'id': "Task:{}".format(task_id),
'meta': "latest"
}
)
if 'Item' in item:
if item['Item'].get('owner') == user_id:
item['Item']['id'] = task_id
task = cls(**item['Item'])
return task
else:
raise NotTaskOwnerError
else:
raise TaskNotFoundError
except TaskNotFoundError as e:
logging.error(e)
raise e
except ClientError as e:
logging.error(e)
raise e
except Exception as e:
raise e
def to_returnable_object(self):
if hasattr(self, 'created_at') and self.created_at is not None:
self.created_at = float(self.created_at)
if hasattr(self, 'updated_at') and self.updated_at is not None:
self.updated_at = float(self.updated_at)
return vars(self)
def to_savable_object(self):
if hasattr(self, 'created_at') and self.created_at is not None:
self.created_at = Decimal(str(self.created_at))
if hasattr(self, 'updated_at') and self.updated_at is not None:
self.updated_at = Decimal(str(self.updated_at))
if hasattr(self, 'content') and self.content == '':
self.content = None
return vars(self)
def delete(self, user_id: str = None):
if self.owner != user_id:
raise NotTaskOwnerError
response = table.delete_item(
Key={
'id': "Task:{}".format(self.id),
'meta': 'latest'
},
ReturnValues='ALL_OLD'
)
if 'id' not in response['Attributes']:
raise TaskNotFoundError
response['Attributes']['id'] = response['Attributes']['id'].split(
"Task:")[-1]
deleted_task = Task(**response['Attributes'])
return deleted_task.to_returnable_object()
@classmethod
def __convert_is_done(cls, is_done: str):
if is_done == 'false':
return False
elif is_done == 'true':
return True
@classmethod
def search(
cls,
user_id: str,
freeword: str = None,
is_done: str = 'both',
priority: str = None
):
accumed_response = []
params = {
'IndexName': 'owner-meta-index',
'KeyConditionExpression': Key('owner').eq(str(user_id)) & Key('meta').eq('latest')
}
filter_info = None
if freeword is not None:
if filter_info is None:
filter_info = Attr('title').contains(
freeword) & Attr('content').contains(freeword)
else:
filter_info = filter_info & Attr('title').contains(
freeword) & Attr('content').contains(freeword)
if is_done != 'both':
converted_is_done = cls.__convert_is_done(is_done)
if type(converted_is_done) == bool:
if filter_info is None:
filter_info = Attr('is_done').eq(converted_is_done)
else:
filter_info = filter_info & Attr(
'is_done').eq(converted_is_done)
if priority is not None:
if filter_info is None:
filter_info = Attr('priority').eq(priority)
else:
filter_info = filter_info & Attr('priority').eq(priority)
if filter_info is not None:
params['FilterExpression'] = filter_info
response = table.query(**params)
accumed_response += response['Items']
while 'LastEvaluatedKey' in response:
params["ExclusiveStartKey"] = response['LastEvaluatedKey']
response = table.query(**params)
accumed_response += response['Items']
tasks = sorted(accumed_response, key=lambda x: x['id'])
returnable_tasks = []
for each_task in tasks:
each_task['id'] = each_task['id'].split('Task:')[-1]
returnable_tasks.append(Task(**each_task).to_returnable_object())
return returnable_tasks
|
[] |
[] |
[
"TABLE_NAME"
] |
[]
|
["TABLE_NAME"]
|
python
| 1 | 0 | |
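Task above is stored under the composite key id='Task:<ulid>', meta='latest', and search() builds its DynamoDB FilterExpression incrementally from the optional arguments. Hypothetical call sites (TABLE_NAME must be set before the import, the module directory must be on sys.path, and the table needs the owner-meta-index GSI for real calls to succeed):

import os
os.environ.setdefault("TABLE_NAME", "tasks-table")   # placeholder; read at import time
from task import Task                                # assuming function/src/task is importable

task = Task(title="Buy milk", owner="user-123", priority="high")
created = task.save()            # first save assigns a ULID id plus created_at/updated_at

matches = Task.search(
    user_id="user-123",
    freeword="milk",             # must appear in both title and content
    is_done="false",             # 'both' would skip the is_done filter entirely
    priority="high",
)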
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend ProjectCoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a projectcoind or projectcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the projectcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/ProjectCoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "ProjectCoin")
return os.path.expanduser("~/.projectcoin")
def read_bitcoin_config(dbdir):
"""Read the projectcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "projectcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a projectcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 11944 if testnet else 1944
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the projectcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(projectcoind):
info = projectcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
projectcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = projectcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(projectcoind):
address_summary = dict()
address_to_account = dict()
for info in projectcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = projectcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = projectcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-projectcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(projectcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(projectcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to projectcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = projectcoind.createrawtransaction(inputs, outputs)
signed_rawtx = projectcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(projectcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = projectcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(projectcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = projectcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(projectcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
fee = total_in - total_out # fee actually paid by this transaction
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get ProjectCoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send ProjectCoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of projectcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
projectcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(projectcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(projectcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(projectcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(projectcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = projectcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
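select_coins above is a greedy pass over the available outputs: it keeps appending inputs until the requested amount plus fee is covered, and returns the surplus as change sent back to the last from-address. A worked example of that arithmetic (the txids are dummies):

from decimal import Decimal

inputs = [
    {"txid": "aa" * 32, "vout": 0, "amount": Decimal("5.0")},
    {"txid": "bb" * 32, "vout": 1, "amount": Decimal("7.5")},
]
needed = Decimal("11.0") + Decimal("0.001")    # amount + fee
# select_coins(needed, inputs) would take both outputs (5.0 + 7.5 = 12.5)
# and report 12.5 - 11.001 = 1.499 as change.
print(Decimal("12.5") - needed)                # 1.499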
internal/querynode/param_table.go
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package querynode
import (
"fmt"
"os"
"path"
"strconv"
"sync"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/paramtable"
)
type ParamTable struct {
paramtable.BaseTable
PulsarAddress string
EtcdAddress string
MetaRootPath string
QueryNodeIP string
QueryNodePort int64
QueryNodeID UniqueID
QueryNodeNum int
QueryTimeTickChannelName string
FlowGraphMaxQueueLength int32
FlowGraphMaxParallelism int32
// minio
MinioEndPoint string
MinioAccessKeyID string
MinioSecretAccessKey string
MinioUseSSLStr bool
MinioBucketName string
// search
SearchChannelNames []string
SearchResultChannelNames []string
SearchReceiveBufSize int64
SearchPulsarBufSize int64
SearchResultReceiveBufSize int64
// stats
StatsPublishInterval int
StatsChannelName string
GracefulTime int64
MsgChannelSubName string
SliceIndex int
Log log.Config
}
var Params ParamTable
var once sync.Once
func (p *ParamTable) Init() {
once.Do(func() {
p.BaseTable.Init()
err := p.LoadYaml("advanced/query_node.yaml")
if err != nil {
panic(err)
}
queryNodeIDStr := os.Getenv("QUERY_NODE_ID")
if queryNodeIDStr == "" {
queryNodeIDList := p.QueryNodeIDList()
if len(queryNodeIDList) <= 0 {
queryNodeIDStr = "0"
} else {
queryNodeIDStr = strconv.Itoa(int(queryNodeIDList[0]))
}
}
err = p.Save("_queryNodeID", queryNodeIDStr)
if err != nil {
panic(err)
}
p.initQueryNodeID()
p.initQueryNodeNum()
//p.initQueryTimeTickChannelName()
p.initMinioEndPoint()
p.initMinioAccessKeyID()
p.initMinioSecretAccessKey()
p.initMinioUseSSLStr()
p.initMinioBucketName()
p.initPulsarAddress()
p.initEtcdAddress()
p.initMetaRootPath()
p.initGracefulTime()
p.initMsgChannelSubName()
p.initSliceIndex()
p.initFlowGraphMaxQueueLength()
p.initFlowGraphMaxParallelism()
p.initSearchReceiveBufSize()
p.initSearchPulsarBufSize()
p.initSearchResultReceiveBufSize()
p.initStatsPublishInterval()
//p.initStatsChannelName()
p.initLogCfg()
})
}
// ---------------------------------------------------------- query node
func (p *ParamTable) initQueryNodeID() {
queryNodeID, err := p.Load("_queryNodeID")
if err != nil {
panic(err)
}
id, err := strconv.Atoi(queryNodeID)
if err != nil {
panic(err)
}
p.QueryNodeID = UniqueID(id)
}
func (p *ParamTable) initQueryNodeNum() {
p.QueryNodeNum = len(p.QueryNodeIDList())
}
func (p *ParamTable) initQueryTimeTickChannelName() {
ch, err := p.Load("msgChannel.chanNamePrefix.queryTimeTick")
if err != nil {
log.Error(err.Error())
}
p.QueryTimeTickChannelName = ch
}
// ---------------------------------------------------------- minio
func (p *ParamTable) initMinioEndPoint() {
url, err := p.Load("_MinioAddress")
if err != nil {
panic(err)
}
p.MinioEndPoint = url
}
func (p *ParamTable) initMinioAccessKeyID() {
id, err := p.Load("minio.accessKeyID")
if err != nil {
panic(err)
}
p.MinioAccessKeyID = id
}
func (p *ParamTable) initMinioSecretAccessKey() {
key, err := p.Load("minio.secretAccessKey")
if err != nil {
panic(err)
}
p.MinioSecretAccessKey = key
}
func (p *ParamTable) initMinioUseSSLStr() {
ssl, err := p.Load("minio.useSSL")
if err != nil {
panic(err)
}
sslBoolean, err := strconv.ParseBool(ssl)
if err != nil {
panic(err)
}
p.MinioUseSSLStr = sslBoolean
}
func (p *ParamTable) initMinioBucketName() {
bucketName, err := p.Load("minio.bucketName")
if err != nil {
panic(err)
}
p.MinioBucketName = bucketName
}
func (p *ParamTable) initPulsarAddress() {
url, err := p.Load("_PulsarAddress")
if err != nil {
panic(err)
}
p.PulsarAddress = url
}
// advanced params
// stats
func (p *ParamTable) initStatsPublishInterval() {
p.StatsPublishInterval = p.ParseInt("queryNode.stats.publishInterval")
}
// dataSync:
func (p *ParamTable) initFlowGraphMaxQueueLength() {
p.FlowGraphMaxQueueLength = p.ParseInt32("queryNode.dataSync.flowGraph.maxQueueLength")
}
func (p *ParamTable) initFlowGraphMaxParallelism() {
p.FlowGraphMaxParallelism = p.ParseInt32("queryNode.dataSync.flowGraph.maxParallelism")
}
// msgStream
func (p *ParamTable) initSearchReceiveBufSize() {
p.SearchReceiveBufSize = p.ParseInt64("queryNode.msgStream.search.recvBufSize")
}
func (p *ParamTable) initSearchPulsarBufSize() {
p.SearchPulsarBufSize = p.ParseInt64("queryNode.msgStream.search.pulsarBufSize")
}
func (p *ParamTable) initSearchResultReceiveBufSize() {
p.SearchResultReceiveBufSize = p.ParseInt64("queryNode.msgStream.searchResult.recvBufSize")
}
func (p *ParamTable) initEtcdAddress() {
EtcdAddress, err := p.Load("_EtcdAddress")
if err != nil {
panic(err)
}
p.EtcdAddress = EtcdAddress
}
func (p *ParamTable) initMetaRootPath() {
rootPath, err := p.Load("etcd.rootPath")
if err != nil {
panic(err)
}
subPath, err := p.Load("etcd.metaSubPath")
if err != nil {
panic(err)
}
p.MetaRootPath = rootPath + "/" + subPath
}
func (p *ParamTable) initGracefulTime() {
p.GracefulTime = p.ParseInt64("queryNode.gracefulTime")
}
func (p *ParamTable) initMsgChannelSubName() {
// TODO: subName = namePrefix + "-" + queryNodeID, queryNodeID is assigned by master
name, err := p.Load("msgChannel.subNamePrefix.queryNodeSubNamePrefix")
if err != nil {
log.Error(err.Error())
}
queryNodeIDStr, err := p.Load("_QueryNodeID")
if err != nil {
panic(err)
}
p.MsgChannelSubName = name + "-" + queryNodeIDStr
}
func (p *ParamTable) initStatsChannelName() {
channels, err := p.Load("msgChannel.chanNamePrefix.queryNodeStats")
if err != nil {
panic(err)
}
p.StatsChannelName = channels
}
func (p *ParamTable) initSliceIndex() {
queryNodeID := p.QueryNodeID
queryNodeIDList := p.QueryNodeIDList()
for i := 0; i < len(queryNodeIDList); i++ {
if queryNodeID == queryNodeIDList[i] {
p.SliceIndex = i
return
}
}
p.SliceIndex = -1
}
func (p *ParamTable) initLogCfg() {
p.Log = log.Config{}
format, err := p.Load("log.format")
if err != nil {
panic(err)
}
p.Log.Format = format
level, err := p.Load("log.level")
if err != nil {
panic(err)
}
p.Log.Level = level
devStr, err := p.Load("log.dev")
if err != nil {
panic(err)
}
dev, err := strconv.ParseBool(devStr)
if err != nil {
panic(err)
}
p.Log.Development = dev
p.Log.File.MaxSize = p.ParseInt("log.file.maxSize")
p.Log.File.MaxBackups = p.ParseInt("log.file.maxBackups")
p.Log.File.MaxDays = p.ParseInt("log.file.maxAge")
rootPath, err := p.Load("log.file.rootPath")
if err != nil {
panic(err)
}
if len(rootPath) != 0 {
p.Log.File.Filename = path.Join(rootPath, fmt.Sprintf("querynode-%d.log", p.QueryNodeID))
} else {
p.Log.File.Filename = ""
}
}
|
[
"\"QUERY_NODE_ID\""
] |
[] |
[
"QUERY_NODE_ID"
] |
[]
|
["QUERY_NODE_ID"]
|
go
| 1 | 0 | |
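Init and initQueryNodeID above resolve the node ID with a simple precedence: the QUERY_NODE_ID environment variable wins, otherwise the first entry of the configured ID list, otherwise "0". A compact sketch of that precedence:

import os

def resolve_query_node_id(configured_ids):
    # Mirrors the precedence in Init(): env var, then first configured ID, then "0".
    env = os.getenv("QUERY_NODE_ID", "")
    if env:
        return env
    return str(configured_ids[0]) if configured_ids else "0"

print(resolve_query_node_id([]))      # "0" unless QUERY_NODE_ID is set
print(resolve_query_node_id([3, 7]))  # "3" unless QUERY_NODE_ID is set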