filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
fall_web_app.py | import streamlit as st
from streamlit_player import st_player
import cv2
import numpy as np
import tempfile
import time
from PIL import Image
############################################################
############################################################
import os
import collections
# comment out below line to enable tensorflow logging outputs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import tensorflow as tf
import core.yolov4
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow._api.v2.compat.v1 import ConfigProto
from tensorflow._api.v2.compat.v1 import InteractiveSession
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
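# DeepSORT association parameters: detections whose appearance embedding differs from all of a
# track's stored features by more than max_cosine_distance are left unmatched, nn_budget caps the
# number of stored features per track (None keeps them all), and nms_max_overlap of 1.0 effectively
# disables the extra non-max-suppression pass over tracked boxes.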
max_cosine_distance = 0.4
nn_budget = None
nms_max_overlap = 1.0
# initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES = np.array(cfg.YOLO.STRIDES)
ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS_TINY, True)
NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
XYSCALE = cfg.YOLO.XYSCALE
FRAMEWORK = 'tf'
input_size = 416
video_path = './data/video/fall_sample2.mp4'
saved_model_loaded = tf.saved_model.load('./checkpoints/yolov4-416', tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
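# The YOLOv4 SavedModel (expected under ./checkpoints/yolov4-416) is loaded here, outside the
# frame loop, and its 'serving_default' signature is reused for every frame processed below.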
############################################################
############################################################
DEMO_VIDEO = 'demo_video.mp4'
st.title('Fall Detection Application Using YOLO')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
st.sidebar.title('Menu')
# st.sidebar.subheader('Parameters')
@st.cache()
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation=inter)
# return the resized image
return resized
app_mode = st.sidebar.selectbox('Please Select',
['About', 'Sample Videos', 'Help', 'Run on Video']
)
if app_mode =='About':
st.markdown('''
This is an application for fall detection of individuals based on the **YOLO V.4** object detection algorithm.
The method used in this algorithm is suitable for detecting falls from a standing position or while walking. \n
This method is based on the proposed method in **Lu, K. L., & Chu, E. T. H. (2018).
An image-based fall detection system for the elderly. Applied Sciences, 8(10), 1995.**
''')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
st.image('TEAM_LOGO.jpg')
elif app_mode == 'Sample Videos':
st.video('demo1.mp4', format='video/mp4', start_time=0)
st.video('demo2.mp4', format='video/mp4', start_time=0)
st.video('demo3.mp4', format='video/mp4', start_time=0)
st.video('demo4.mp4', format='video/mp4', start_time=0)
elif app_mode == 'Help':
st.markdown('''
- The Ratio Factor is multiplied by the height of the person's bounding box
from 1.5 seconds before the current moment. If the bounding box height at the current
moment is less than that product, the algorithm reports a falling-down occurrence.
The suggested value is 5.5, but values between 5 and 7 are good choices. Higher values lead to more
conservative results. \n
''')
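# For reference, the check described above is applied later in the video loop, roughly as:
#   fall = current_box_height < (ratio / 8) * box_height_1_5_seconds_ago
# where both heights belong to the same DeepSORT track id.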
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
####################################################################
####################################################################
elif app_mode == 'Run on Video':
st.set_option('deprecation.showfileUploaderEncoding', False)
st.sidebar.markdown('---')
ratio = st.sidebar.slider('Ratio', min_value=1.0, max_value=8.0, value=5.5, step=0.5)
st.sidebar.markdown('---')
st.markdown(' ## Output')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
stframe = st.empty()
video_file_buffer = st.sidebar.file_uploader("Upload a video", type=['mp4'])
tffile = tempfile.NamedTemporaryFile(delete=False)
if not video_file_buffer:
vid = cv2.VideoCapture(DEMO_VIDEO)
tffile.name = DEMO_VIDEO
else:
tffile.write(video_file_buffer.read())
vid = cv2.VideoCapture(tffile.name)
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps_input = int(vid.get(cv2.CAP_PROP_FPS))
# codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output_res.avi', codec, fps_input, (width, height))
st.sidebar.text('Input Video')
st.sidebar.video(tffile.name)
fps = 0
i = 0
kpi1, kpi2, kpi3 = st.beta_columns(3)
with kpi1:
kpi1 = st.markdown("**Frame Rate**")
kpi1_text = st.markdown("0")
with kpi2:
st.markdown("**Tracked Individuals**")
kpi2_text = st.markdown("0")
with kpi3:
st.markdown("**Fall Detection Status**")
kpi3_text = st.markdown('')
kpi3_text.write(f"<h1 style='text-align: center; color: green;'>{'No Fall'}</h1>", unsafe_allow_html=True)
st.markdown("<hr/>", unsafe_allow_html=True)
###################################################
###################################################
frame_num = 0
# while video is running
# DEFINING A DICTIONARY FOR TRACKING
id_Locs = collections.defaultdict(list) # FOR METHOD THREE
id_ylocs = collections.defaultdict(list) # FOR METHOD ONE
yLocs = []
falls = 0
track_dict = dict()
frame_list = []
while vid.isOpened():
i += 1
ret, frame = vid.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
print('Video has ended or failed, try a different video format!')
break
frame_num += 1
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
start_time = time.time()
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=0.3,
score_threshold=0.2
)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0]
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# by default allow all classes in .names file
# allowed_classes = list(class_names.values())
# custom allowed classes (uncomment line below to customize tracker for only people)
allowed_classes = ['person']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
# cv2.putText(frame, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2,
# (0, 255, 0), 2)
# print("Objects being tracked: {}".format(count))
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = encoder(frame, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in
zip(bboxes, scores, names, features)]
# initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
# run non-maxima suppression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
# update tracks
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
# draw bbox on screen
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(track.track_id))) * 17, int(bbox[1])), color, -1)
# cv2.circle(frame, (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)), 5, color, -1)
# cv2.circle(frame, (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)), 15, (0, 255, 0), -1)
cv2.putText(frame, class_name + "-" + str(track.track_id), (int(bbox[0]), int(bbox[1] - 10)), 0, 0.75,
(255, 255, 255), 2)
#################################################
## PAPER METHOD FOR FALL DETECTION #############
#################################################
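# NOTE: the frame rate is assumed to be 25 fps here; fps_input (read from the video above)
# could be used instead so the 1.5-second comparison window stays correct for other frame rates.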
frameRate = 25
id_Locs[track.track_id].append([int(bbox[3] - bbox[1]), int(bbox[2] - bbox[0])])
for key, value in id_Locs.items():
if len(value) > int(np.floor(frameRate * 1.5)): # 1.5 seconds after detecting a person
# if value[-1][0] < (7/8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
# if value[-1][0] < (5.5 / 8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
if value[-1][0] < (ratio / 8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
print("Fall Detected")
cv2.putText(frame, "Person " + str(key) + " Fell Down", (70, 250), cv2.FONT_HERSHEY_PLAIN, 2,
(0, 0, 255), 3)
falls += 1
########################################################
# if enable, then print details about each track
# print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id),
# class_name, (
# int(bbox[0]),
# int(bbox[1]),
# int(bbox[2]),
# int(bbox[3]))))
each_id_list = [frame_num, str(track.track_id), int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)]
frame_list.append(each_id_list)
# calculate frames per second of running detections
fps = 1.0 / (time.time() - start_time)
kpi1_text.write(f"<h1 style='text-align: center; color: red;'>{round(fps, 1)}</h1>", unsafe_allow_html=True)
kpi2_text.write(f"<h1 style='text-align: center; color: red;'>{count}</h1>", unsafe_allow_html=True)
if falls > 0:
cv2.putText(frame, "Fall Detected", (50, 100), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 0), 5)
kpi3_text.write(f"<h1 style='text-align: center; color: red;'>{'Fall Detected'}</h1>", unsafe_allow_html=True)
# write the full-size annotated frame to disk first: the VideoWriter was opened with the
# original resolution and expects BGR channel order, while the preview below is resized RGB
out.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
frame = image_resize(image=frame, width=640)
stframe.image(frame, channels='RGB', use_column_width=True)
vid.release()
out.release()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
kafka/channel/pkg/dispatcher/dispatcher_it_test.go | /*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dispatcher
import (
"context"
"net/http"
"net/http/httptest"
"net/http/httputil"
"os"
"sync"
"testing"
"time"
"github.com/cloudevents/sdk-go/v2/binding"
"github.com/cloudevents/sdk-go/v2/binding/transformer"
protocolhttp "github.com/cloudevents/sdk-go/v2/protocol/http"
"github.com/cloudevents/sdk-go/v2/test"
"go.uber.org/zap"
eventingduck "knative.dev/eventing/pkg/apis/duck/v1beta1"
"knative.dev/eventing/pkg/kncloudevents"
"knative.dev/eventing/pkg/tracing"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
tracingconfig "knative.dev/pkg/tracing/config"
"knative.dev/eventing-contrib/kafka/channel/pkg/utils"
)
// This test covers the full integration of the dispatcher code with Kafka.
// It doesn't run on CI because the unit-test script doesn't start a Kafka cluster.
// Use it in emergency situations when you can't reproduce the e2e test failures and the failure might be
// in the dispatcher code.
// Start a kafka cluster with docker: docker run --rm --net=host -e ADV_HOST=localhost -e SAMPLEDATA=0 lensesio/fast-data-dev
// Keep also the port 8080 free for the MessageReceiver
func TestDispatcher(t *testing.T) {
if os.Getenv("CI") == "true" {
t.Skipf("This test can't run in CI")
}
logger, err := zap.NewDevelopment(zap.AddStacktrace(zap.WarnLevel))
if err != nil {
t.Fatal(err)
}
tracing.SetupStaticPublishing(logger.Sugar(), "localhost", &tracingconfig.Config{
Backend: tracingconfig.Zipkin,
Debug: true,
SampleRate: 1.0,
ZipkinEndpoint: "http://localhost:9411/api/v2/spans",
})
dispatcherArgs := KafkaDispatcherArgs{
KnCEConnectionArgs: nil,
ClientID: "testing",
Brokers: []string{"localhost:9092"},
TopicFunc: utils.TopicName,
Logger: logger,
}
// Create the dispatcher. At this point, if Kafka is not up, this thing fails
dispatcher, err := NewDispatcher(context.Background(), &dispatcherArgs)
if err != nil {
t.Skipf("no dispatcher: %v", err)
}
// Start the dispatcher
go func() {
if err := dispatcher.Start(context.Background()); err != nil {
t.Error(err)
}
}()
time.Sleep(1 * time.Second)
// We need proxies for channela and channelb to handle the Host header correctly
channelAProxy := httptest.NewServer(createReverseProxy(t, "channela.svc"))
defer channelAProxy.Close()
channelBProxy := httptest.NewServer(createReverseProxy(t, "channelb.svc"))
defer channelBProxy.Close()
// Start a bunch of test servers to simulate the various services
transformationsWg := sync.WaitGroup{}
transformationsWg.Add(1)
transformationsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer transformationsWg.Done()
message := protocolhttp.NewMessageFromHttpRequest(r)
defer message.Finish(nil)
err := protocolhttp.WriteResponseWriter(context.Background(), message, 200, w, transformer.AddExtension("transformed", "true"))
if err != nil {
w.WriteHeader(500)
t.Fatal(err)
}
}))
defer transformationsServer.Close()
receiverWg := sync.WaitGroup{}
receiverWg.Add(1)
receiverServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer receiverWg.Done()
transformed := r.Header.Get("ce-transformed")
if transformed != "true" {
w.WriteHeader(500)
t.Fatalf("Expecting ce-transformed: true, found %s", transformed)
}
}))
defer receiverServer.Close()
transformationsFailureWg := sync.WaitGroup{}
transformationsFailureWg.Add(1)
transformationsFailureServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer transformationsFailureWg.Done()
w.WriteHeader(500)
}))
defer transformationsFailureServer.Close()
deadLetterWg := sync.WaitGroup{}
deadLetterWg.Add(1)
deadLetterServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer deadLetterWg.Done()
transformed := r.Header.Get("ce-transformed")
if transformed != "" {
w.WriteHeader(500)
t.Fatalf("Not expecting ce-transformed, found %s", transformed)
}
}))
defer deadLetterServer.Close()
logger.Debug("Test servers",
zap.String("transformations server", transformationsServer.URL),
zap.String("transformations failure server", transformationsFailureServer.URL),
zap.String("receiver server", receiverServer.URL),
zap.String("dead letter server", deadLetterServer.URL),
)
// send -> channela -> sub with transformationServer and reply to channelb -> channelb -> sub with receiver -> receiver
config := Config{
ChannelConfigs: []ChannelConfig{
{
Namespace: "default",
Name: "channela",
HostName: "channela.svc",
Subscriptions: []Subscription{
{
SubscriberSpec: eventingduck.SubscriberSpec{
UID: "aaaa",
Generation: 1,
SubscriberURI: mustParseUrl(t, transformationsServer.URL),
ReplyURI: mustParseUrl(t, channelBProxy.URL),
},
},
{
SubscriberSpec: eventingduck.SubscriberSpec{
UID: "cccc",
Generation: 1,
SubscriberURI: mustParseUrl(t, transformationsFailureServer.URL),
ReplyURI: mustParseUrl(t, channelBProxy.URL),
Delivery: &eventingduck.DeliverySpec{
DeadLetterSink: &duckv1.Destination{URI: mustParseUrl(t, deadLetterServer.URL)},
},
},
},
},
},
{
Namespace: "default",
Name: "channelb",
HostName: "channelb.svc",
Subscriptions: []Subscription{
{
SubscriberSpec: eventingduck.SubscriberSpec{
UID: "bbbb",
Generation: 1,
SubscriberURI: mustParseUrl(t, receiverServer.URL),
},
},
},
},
},
}
err = dispatcher.UpdateHostToChannelMap(&config)
if err != nil {
t.Fatal(err)
}
failed, err := dispatcher.UpdateKafkaConsumers(&config)
if err != nil {
t.Fatal(err)
}
if len(failed) != 0 {
t.Fatal(err)
}
time.Sleep(5 * time.Second)
// Ok now everything should be ready to send the event
httpsender, err := kncloudevents.NewHttpMessageSender(nil, channelAProxy.URL)
if err != nil {
t.Fatal(err)
}
req, err := httpsender.NewCloudEventRequest(context.Background())
if err != nil {
t.Fatal(err)
}
event := test.FullEvent()
_ = protocolhttp.WriteRequest(context.Background(), binding.ToMessage(&event), req)
res, err := httpsender.Send(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 202 {
t.Fatalf("Expected 202, Have %d", res.StatusCode)
}
transformationsFailureWg.Wait()
deadLetterWg.Wait()
transformationsWg.Wait()
receiverWg.Wait()
// Try to close consumer groups
err = dispatcher.UpdateHostToChannelMap(&Config{})
if err != nil {
t.Fatal(err)
}
failed, err = dispatcher.UpdateKafkaConsumers(&Config{})
if err != nil {
t.Fatal(err)
}
if len(failed) != 0 {
t.Fatal(err)
}
}
func createReverseProxy(t *testing.T, host string) *httputil.ReverseProxy {
director := func(req *http.Request) {
target := mustParseUrl(t, "http://localhost:8080")
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = target.Path
req.Host = host
}
return &httputil.ReverseProxy{Director: director}
}
func mustParseUrl(t *testing.T, str string) *apis.URL {
url, err := apis.ParseURL(str)
if err != nil {
t.Fatal(err)
}
return url
}
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
secrets/vault/vault_test.go | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vault
import (
"context"
"errors"
"os"
"testing"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/transit"
vhttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
"gocloud.dev/secrets"
"gocloud.dev/secrets/driver"
"gocloud.dev/secrets/drivertest"
)
const (
keyID1 = "test-secrets"
keyID2 = "test-secrets2"
apiAddress = "http://127.0.0.1:0"
)
type harness struct {
client *api.Client
close func()
}
func (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, driver.Keeper, error) {
return &keeper{keyID: keyID1, client: h.client}, &keeper{keyID: keyID2, client: h.client}, nil
}
func (h *harness) Close() {
h.close()
}
func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
// Start a new test server.
c, cleanup := testVaultServer(t)
// Enable the Transit Secrets Engine to use Vault as an Encryption as a Service.
c.Logical().Write("sys/mounts/transit", map[string]interface{}{
"type": "transit",
})
return &harness{
client: c,
close: cleanup,
}, nil
}
func testVaultServer(t *testing.T) (*api.Client, func()) {
coreCfg := &vault.CoreConfig{
DisableMlock: true,
DisableCache: true,
// Enable the testing transit backend.
LogicalBackends: map[string]logical.Factory{
"transit": transit.Factory,
},
}
cluster := vault.NewTestCluster(t, coreCfg, &vault.TestClusterOptions{
HandlerFunc: vhttp.Handler,
})
cluster.Start()
tc := cluster.Cores[0]
vault.TestWaitActive(t, tc.Core)
tc.Client.SetToken(cluster.RootToken)
return tc.Client, cluster.Cleanup
}
func TestConformance(t *testing.T) {
drivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyAs{}})
}
type verifyAs struct{}
func (v verifyAs) Name() string {
return "verify As function"
}
func (v verifyAs) ErrorCheck(k *secrets.Keeper, err error) error {
var s string
if k.ErrorAs(err, &s) {
return errors.New("Keeper.ErrorAs expected to fail")
}
return nil
}
// Vault-specific tests.
func TestNoSessionProvidedError(t *testing.T) {
if _, err := Dial(context.Background(), nil); err == nil {
t.Error("got nil, want no auth Config provided")
}
}
func TestNoConnectionError(t *testing.T) {
ctx := context.Background()
// Dial calls vault's NewClient method, which doesn't make the connection. Try
// doing encryption which should fail by no connection.
client, err := Dial(ctx, &Config{
Token: "<Client (Root) Token>",
APIConfig: api.Config{
Address: apiAddress,
},
})
if err != nil {
t.Fatal(err)
}
keeper := OpenKeeper(client, "my-key", nil)
defer keeper.Close()
if _, err := keeper.Encrypt(ctx, []byte("test")); err == nil {
t.Error("got nil, want connection refused")
}
}
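// fakeConnectionStringInEnv sets fake VAULT_SERVER_URL / VAULT_SERVER_TOKEN values and returns a
// restore function that callers are expected to defer.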
func fakeConnectionStringInEnv() func() {
oldURLVal := os.Getenv("VAULT_SERVER_URL")
oldTokenVal := os.Getenv("VAULT_SERVER_TOKEN")
os.Setenv("VAULT_SERVER_URL", "http://myvaultserver")
os.Setenv("VAULT_SERVER_TOKEN", "faketoken")
return func() {
os.Setenv("VAULT_SERVER_URL", oldURLVal)
os.Setenv("VAULT_SERVER_TOKEN", oldTokenVal)
}
}
func TestOpenKeeper(t *testing.T) {
cleanup := fakeConnectionStringInEnv()
defer cleanup()
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"vault://mykey", false},
// Invalid parameter.
{"vault://mykey?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
keeper, err := secrets.OpenKeeper(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
if err == nil {
if err = keeper.Close(); err != nil {
t.Errorf("%s: got error during close: %v", test.URL, err)
}
}
}
}
| [
"\"VAULT_SERVER_URL\"",
"\"VAULT_SERVER_TOKEN\""
]
| []
| [
"VAULT_SERVER_URL",
"VAULT_SERVER_TOKEN"
]
| [] | ["VAULT_SERVER_URL", "VAULT_SERVER_TOKEN"] | go | 2 | 0 | |
cmd/clean.go | /*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/sealyun/cloud-kernel-rootfs/pkg/ecs"
"github.com/sealyun/cloud-kernel-rootfs/pkg/logger"
"github.com/sealyun/cloud-kernel-rootfs/pkg/vars"
"os"
"github.com/spf13/cobra"
)
var cloud string
var instanceIds []string
// cleanCmd represents the clean command
var cleanCmd = &cobra.Command{
Use: "clean",
Short: "delete ECS instances that have already been created",
Run: func(cmd *cobra.Command, args []string) {
var c ecs.Cloud
switch cloud {
case "aliyun":
c = &ecs.AliyunEcs{}
case "huaweiyun":
c = &ecs.HuaweiEcs{}
default:
logger.Fatal("不支持该类型的云厂商")
_ = cmd.Help()
os.Exit(0)
}
c.Delete(instanceIds, 10)
},
PreRun: func(cmd *cobra.Command, args []string) {
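// Fall back to the ECS_AKID / ECS_AKSK environment variables when the corresponding flags are not set.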
if vars.AkID == "" {
if v := os.Getenv("ECS_AKID"); v != "" {
vars.AkID = v
}
}
if vars.AkSK == "" {
if v := os.Getenv("ECS_AKSK"); v != "" {
vars.AkSK = v
}
}
if vars.AkID == "" {
logger.Fatal("云厂商的akId为空,无法清空虚拟机")
cmd.Help()
os.Exit(-1)
}
if vars.AkSK == "" {
logger.Fatal("云厂商的akSK为空,无法清空虚拟机")
_ = cmd.Help()
os.Exit(0)
}
if len(instanceIds) == 0 {
logger.Fatal("instance id为空,无法清空虚拟机")
cmd.Help()
os.Exit(0)
}
},
}
func init() {
rootCmd.AddCommand(cleanCmd)
// Here you will define your flags and configuration settings.
cleanCmd.Flags().StringVar(&vars.AkID, "akid", "", "云厂商的 akId")
cleanCmd.Flags().StringVar(&vars.AkSK, "aksk", "", "云厂商的 akSK")
cleanCmd.Flags().StringVar(&cloud, "cloud", "aliyun", "云厂商类型(aliyun,huaweiyun)")
cleanCmd.Flags().StringSliceVar(&instanceIds, "instance", []string{}, "删除ecs的instanceID")
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// cleanCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// cleanCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
| [
"\"ECS_AKID\"",
"\"ECS_AKSK\""
]
| []
| [
"ECS_AKSK",
"ECS_AKID"
]
| [] | ["ECS_AKSK", "ECS_AKID"] | go | 2 | 0 | |
simple_service_registration/vendor/fetchai/connections/p2p_libp2p/libp2p_node/aea/api.go | /* -*- coding: utf-8 -*-
* ------------------------------------------------------------------------------
*
* Copyright 2018-2019 Fetch.AI Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* ------------------------------------------------------------------------------
*/
package aea
import (
"errors"
"log"
"net"
"os"
"strconv"
"strings"
"time"
"github.com/joho/godotenv"
"github.com/rs/zerolog"
proto "google.golang.org/protobuf/proto"
)
// code redundancy to avoid an import cycle
var logger zerolog.Logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: false,
TimeFormat: "15:04:05.000",
}).
With().Timestamp().
Str("package", "AeaApi").
Logger()
type Pipe interface {
Connect() error
Read() ([]byte, error)
Write(data []byte) error
Close() error
}
// Needed to break import cycle
type AgentRecord struct {
ServiceId string
LedgerId string
Address string
PublicKey string
PeerPublicKey string
Signature string
}
/*
AeaApi type
*/
type AeaApi struct {
msgin_path string
msgout_path string
agent_addr string
agent_record *AgentRecord
id string
entry_peers []string
host string
port uint16
host_public string
port_public uint16
host_delegate string
port_delegate uint16
host_monitoring string
port_monitoring uint16
registrationDelay float64
recordsStoragePath string
pipe Pipe
out_queue chan *Envelope
closing bool
connected bool
sandbox bool
standalone bool
}
func (aea AeaApi) AeaAddress() string {
return aea.agent_addr
}
func (aea AeaApi) PrivateKey() string {
return aea.id
}
func (aea AeaApi) Address() (string, uint16) {
return aea.host, aea.port
}
func (aea AeaApi) PublicAddress() (string, uint16) {
return aea.host_public, aea.port_public
}
func (aea AeaApi) DelegateAddress() (string, uint16) {
return aea.host_delegate, aea.port_delegate
}
func (aea AeaApi) MonitoringAddress() (string, uint16) {
return aea.host_monitoring, aea.port_monitoring
}
func (aea AeaApi) EntryPeers() []string {
return aea.entry_peers
}
func (aea AeaApi) AgentRecord() *AgentRecord {
return aea.agent_record
}
func (aea AeaApi) RegistrationDelayInSeconds() float64 {
return aea.registrationDelay
}
func (aea AeaApi) RecordStoragePath() string {
return aea.recordsStoragePath
}
func (aea AeaApi) Put(envelope *Envelope) error {
if aea.standalone {
errorMsg := "node running in standalone mode"
logger.Warn().Msgf(errorMsg)
return errors.New(errorMsg)
}
return write_envelope(aea.pipe, envelope)
}
func (aea *AeaApi) Get() *Envelope {
if aea.standalone {
errorMsg := "node running in standalone mode"
logger.Warn().Msgf(errorMsg)
return nil
}
return <-aea.out_queue
}
func (aea *AeaApi) Queue() <-chan *Envelope {
return aea.out_queue
}
func (aea *AeaApi) Connected() bool {
return aea.connected || aea.standalone
}
func (aea *AeaApi) Stop() {
aea.closing = true
aea.stop()
close(aea.out_queue)
}
func (aea *AeaApi) Init() error {
zerolog.TimeFieldFormat = time.RFC3339Nano
if aea.sandbox {
return nil
}
if aea.connected {
return nil
}
aea.connected = false
env_file := os.Args[1]
logger.Debug().Msgf("env_file: %s", env_file)
// get config
err := godotenv.Overload(env_file)
if err != nil {
log.Fatal("Error loading env file")
}
aea.msgin_path = os.Getenv("AEA_TO_NODE")
aea.msgout_path = os.Getenv("NODE_TO_AEA")
aea.agent_addr = os.Getenv("AEA_AGENT_ADDR")
aea.id = os.Getenv("AEA_P2P_ID")
entry_peers := os.Getenv("AEA_P2P_ENTRY_URIS")
uri := os.Getenv("AEA_P2P_URI")
uri_public := os.Getenv("AEA_P2P_URI_PUBLIC")
uri_delegate := os.Getenv("AEA_P2P_DELEGATE_URI")
uri_monitoring := os.Getenv("AEA_P2P_URI_MONITORING")
por_address := os.Getenv("AEA_P2P_POR_ADDRESS")
if por_address != "" {
record := &AgentRecord{Address: por_address}
record.PublicKey = os.Getenv("AEA_P2P_POR_PUBKEY")
record.PeerPublicKey = os.Getenv("AEA_P2P_POR_PEER_PUBKEY")
record.Signature = os.Getenv("AEA_P2P_POR_SIGNATURE")
record.ServiceId = os.Getenv("AEA_P2P_POR_SERVICE_ID")
record.LedgerId = os.Getenv("AEA_P2P_POR_LEDGER_ID")
aea.agent_record = record
}
registrationDelay := os.Getenv("AEA_P2P_CFG_REGISTRATION_DELAY")
aea.recordsStoragePath = os.Getenv("AEA_P2P_CFG_STORAGE_PATH")
logger.Debug().Msgf("msgin_path: %s", aea.msgin_path)
logger.Debug().Msgf("msgout_path: %s", aea.msgout_path)
logger.Debug().Msgf("id: %s", aea.id)
logger.Debug().Msgf("addr: %s", aea.agent_addr)
logger.Debug().Msgf("entry_peers: %s", entry_peers)
logger.Debug().Msgf("uri: %s", uri)
logger.Debug().Msgf("uri public: %s", uri_public)
logger.Debug().Msgf("uri delegate service: %s", uri_delegate)
if aea.id == "" || uri == "" {
err := errors.New("couldn't get AEA configuration: key and uri are required")
logger.Error().Str("err", err.Error()).Msg("")
return err
}
if aea.msgin_path == "" && aea.msgout_path == "" && aea.agent_addr == "" {
aea.standalone = true
} else if aea.msgin_path == "" || aea.msgout_path == "" || aea.agent_addr == "" {
err := errors.New("couldn't get AEA configuration: pipes paths are required when agent address is provided")
logger.Error().Str("err", err.Error()).Msg("")
return err
}
// parse uri
parts := strings.SplitN(uri, ":", -1)
if len(parts) < 2 {
err := errors.New("malformed Uri " + uri)
logger.Error().Str("err", err.Error()).Msg("")
return err
}
aea.host = parts[0]
port, _ := strconv.ParseUint(parts[1], 10, 16)
aea.port = uint16(port)
// hack: test if port is taken
addr, err := net.ResolveTCPAddr("tcp", uri)
if err != nil {
return err
}
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
logger.Error().Str("err", err.Error()).Msgf("Uri already taken %s", uri)
return err
}
listener.Close()
// parse public uri
if uri_public != "" {
parts = strings.SplitN(uri_public, ":", -1)
if len(parts) < 2 {
err := errors.New("malformed Uri " + uri_public)
logger.Error().Str("err", err.Error()).Msg("")
return err
}
aea.host_public = parts[0]
port, _ = strconv.ParseUint(parts[1], 10, 16)
aea.port_public = uint16(port)
} else {
aea.host_public = ""
aea.port_public = 0
}
// parse delegate uri
if uri_delegate != "" {
parts = strings.SplitN(uri_delegate, ":", -1)
if len(parts) < 2 {
err := errors.New("malformed Uri " + uri_delegate)
logger.Error().Str("err", err.Error()).Msg("")
return err
}
aea.host_delegate = parts[0]
port, _ = strconv.ParseUint(parts[1], 10, 16)
aea.port_delegate = uint16(port)
} else {
aea.host_delegate = ""
aea.port_delegate = 0
}
// parse monitoring uri
if uri_monitoring != "" {
parts = strings.SplitN(uri_monitoring, ":", -1)
if len(parts) < 2 {
err := errors.New("malformed Uri " + uri_monitoring)
logger.Error().Str("err", err.Error()).Msg("")
return err
}
aea.host_monitoring = parts[0]
port, _ = strconv.ParseUint(parts[1], 10, 16)
aea.port_monitoring = uint16(port)
} else {
aea.host_monitoring = ""
aea.port_monitoring = 0
}
// parse entry peers multiaddrs
if len(entry_peers) > 0 {
aea.entry_peers = strings.SplitN(entry_peers, ",", -1)
}
// parse registration delay
if registrationDelay == "" {
aea.registrationDelay = 0.0
} else {
delay, err := strconv.ParseFloat(registrationDelay, 32)
if err != nil {
logger.Error().Str("err", err.Error()).Msgf("malformed RegistrationDelay value")
return err
}
aea.registrationDelay = delay
}
// setup pipe
if !aea.standalone {
aea.pipe = NewPipe(aea.msgin_path, aea.msgout_path)
}
return nil
}
func (aea *AeaApi) Connect() error {
if aea.standalone {
logger.Info().Msg("Successfully running in standalone mode")
return nil
}
// open pipes
err := aea.pipe.Connect()
if err != nil {
logger.Error().Str("err", err.Error()).
Msg("while connecting to pipe")
return err
}
aea.closing = false
//TOFIX(LR) trade-offs between buffered vs unbuffered channel
aea.out_queue = make(chan *Envelope, 10)
go aea.listen_for_envelopes()
logger.Info().Msg("connected to agent")
aea.connected = true
return nil
}
func UnmarshalEnvelope(buf []byte) (*Envelope, error) {
envelope := &Envelope{}
err := proto.Unmarshal(buf, envelope)
return envelope, err
}
func (aea *AeaApi) listen_for_envelopes() {
//TOFIX(LR) add an exit strategy
for {
envel, err := read_envelope(aea.pipe)
if err != nil {
logger.Error().Str("err", err.Error()).Msg("while receiving envelope")
logger.Info().Msg("disconnecting")
// TOFIX(LR) see above
if !aea.closing {
aea.stop()
}
return
}
if envel.Sender != aea.agent_record.Address {
logger.Error().
Str("err", "Sender ("+envel.Sender+") must match registered address").
Msg("while processing envelope")
// TODO send error back to agent
continue
}
logger.Debug().Msgf("received envelope from agent")
aea.out_queue <- envel
if aea.closing {
return
}
}
}
func (aea *AeaApi) stop() {
aea.pipe.Close()
}
/*
Pipes helpers
*/
func write_envelope(pipe Pipe, envelope *Envelope) error {
data, err := proto.Marshal(envelope)
if err != nil {
logger.Error().Str("err", err.Error()).Msgf("while serializing envelope: %s", envelope)
return err
}
return pipe.Write(data)
}
func read_envelope(pipe Pipe) (*Envelope, error) {
envelope := &Envelope{}
data, err := pipe.Read()
if err != nil {
logger.Error().Str("err", err.Error()).Msg("while receiving data")
return envelope, err
}
err = proto.Unmarshal(data, envelope)
return envelope, err
}
| [
"\"AEA_TO_NODE\"",
"\"NODE_TO_AEA\"",
"\"AEA_AGENT_ADDR\"",
"\"AEA_P2P_ID\"",
"\"AEA_P2P_ENTRY_URIS\"",
"\"AEA_P2P_URI\"",
"\"AEA_P2P_URI_PUBLIC\"",
"\"AEA_P2P_DELEGATE_URI\"",
"\"AEA_P2P_URI_MONITORING\"",
"\"AEA_P2P_POR_ADDRESS\"",
"\"AEA_P2P_POR_PUBKEY\"",
"\"AEA_P2P_POR_PEER_PUBKEY\"",
"\"AEA_P2P_POR_SIGNATURE\"",
"\"AEA_P2P_POR_SERVICE_ID\"",
"\"AEA_P2P_POR_LEDGER_ID\"",
"\"AEA_P2P_CFG_REGISTRATION_DELAY\"",
"\"AEA_P2P_CFG_STORAGE_PATH\""
]
| []
| [
"AEA_AGENT_ADDR",
"AEA_P2P_POR_PEER_PUBKEY",
"AEA_P2P_POR_LEDGER_ID",
"AEA_P2P_DELEGATE_URI",
"AEA_P2P_POR_SIGNATURE",
"AEA_P2P_URI_MONITORING",
"AEA_P2P_ID",
"AEA_P2P_POR_SERVICE_ID",
"NODE_TO_AEA",
"AEA_P2P_CFG_STORAGE_PATH",
"AEA_TO_NODE",
"AEA_P2P_URI",
"AEA_P2P_POR_PUBKEY",
"AEA_P2P_CFG_REGISTRATION_DELAY",
"AEA_P2P_URI_PUBLIC",
"AEA_P2P_POR_ADDRESS",
"AEA_P2P_ENTRY_URIS"
]
| [] | ["AEA_AGENT_ADDR", "AEA_P2P_POR_PEER_PUBKEY", "AEA_P2P_POR_LEDGER_ID", "AEA_P2P_DELEGATE_URI", "AEA_P2P_POR_SIGNATURE", "AEA_P2P_URI_MONITORING", "AEA_P2P_ID", "AEA_P2P_POR_SERVICE_ID", "NODE_TO_AEA", "AEA_P2P_CFG_STORAGE_PATH", "AEA_TO_NODE", "AEA_P2P_URI", "AEA_P2P_POR_PUBKEY", "AEA_P2P_CFG_REGISTRATION_DELAY", "AEA_P2P_URI_PUBLIC", "AEA_P2P_POR_ADDRESS", "AEA_P2P_ENTRY_URIS"] | go | 17 | 0 | |
handlers/configuration.go | package handlers
import (
"os"
"strconv"
)
// LookupCredFile looks up the credential file
func LookupCredFile() string {
val, found := os.LookupEnv("CRED_FILE")
if !found {
val = "/credentials.json"
}
return val
}
// GetMaxJobConfig returns the maximum number of jobs allowed
// to run simultaneously
func GetMaxJobConfig() int {
maxJobNum, err := strconv.Atoi(os.Getenv("JOB_NUM_MAX"))
// set to 10 if there is no env variable
if err != nil {
maxJobNum = 10
}
return maxJobNum
}
// GetCleanupTime returns the cleanup time for completed jobs
func GetCleanupTime() int {
cleanupTime, err := strconv.Atoi(os.Getenv("CLEANUP_TIME"))
if err != nil {
cleanupTime = 120
}
return cleanupTime
}
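// Hypothetical usage sketch (runningJobs, errTooManyJobs and cleanupCompletedJob are
// illustrative names, not part of this package):
//
//	if runningJobs >= GetMaxJobConfig() {
//		return errTooManyJobs
//	}
//	time.AfterFunc(time.Duration(GetCleanupTime())*time.Second, cleanupCompletedJob)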
const (
GRACE_PERIOD int64 = 1 // grace period in seconds before a job is deleted
)
| [
"\"JOB_NUM_MAX\"",
"\"CLEANUP_TIME\""
]
| []
| [
"JOB_NUM_MAX",
"CLEANUP_TIME"
]
| [] | ["JOB_NUM_MAX", "CLEANUP_TIME"] | go | 2 | 0 | |
main.go | package main
import (
"crypto/tls"
"log"
"net"
"net/http"
"net/url"
"os"
"time"
"github.com/codegangsta/negroni"
"github.com/lair-framework/api-server/app"
"gopkg.in/mgo.v2"
)
// TLSDial sets up a TLS connection to MongoDb.
func TLSDial(addr net.Addr) (net.Conn, error) {
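// NOTE: InsecureSkipVerify disables server certificate validation; acceptable for
// self-signed development clusters, but worth revisiting for production deployments.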
return tls.Dial(addr.Network(), addr.String(), &tls.Config{InsecureSkipVerify: true})
}
func main() {
murl := os.Getenv("MONGO_URL")
if murl == "" {
log.Fatal("MONGO_URL environment variable not set")
}
apiListener := os.Getenv("API_LISTENER")
if apiListener == "" {
log.Fatal("API_LISTENER environment variable not set")
}
u, err := url.Parse(murl)
if err != nil {
log.Fatal("Erorr parsing MONGO_URL", err.Error())
}
q, err := url.ParseQuery(u.RawQuery)
if err != nil {
log.Fatal("Error parsing query parameters", err.Error())
}
dname := u.Path[1:]
s := &mgo.Session{}
if opt, ok := q["ssl"]; ok && opt[0] == "true" {
var user, pass string
if u.User != nil {
user = u.User.Username()
p, set := u.User.Password()
if set {
pass = p
}
}
d := &mgo.DialInfo{
Addrs: []string{u.Host},
Direct: true,
Database: dname,
Username: user,
Password: pass,
Dial: TLSDial,
Timeout: time.Duration(10) * time.Second,
}
s, err = mgo.DialWithInfo(d)
if err != nil {
log.Fatal("Could not connect to database. Error: ", err.Error())
}
} else {
s, err = mgo.Dial(murl)
if err != nil {
log.Fatal("Could not connect to database. Error: ", err.Error())
}
}
a := app.New(&app.O{
S: s,
DName: dname,
TransformDirectory: os.Getenv("TRANSFORM_DIR"),
})
db := s.DB(dname)
defer s.Close()
db.C(a.C.Hosts).EnsureIndexKey("projectId", "ipv4")
db.C(a.C.Services).EnsureIndexKey("projectId", "hostId", "port", "protocol")
db.C(a.C.Issues).EnsureIndexKey("projectId", "pluginIds")
db.C(a.C.WebDirectories).EnsureIndexKey("projectId", "hostId", "path", "port")
os.Mkdir(a.Filepath, 0775)
rec := negroni.NewRecovery()
rec.PrintStack = false
n := negroni.New(
negroni.NewLogger(),
rec,
)
n.UseHandler(a.Router())
log.Printf("Listening on %s", apiListener)
log.Fatal(http.ListenAndServe(apiListener, n))
}
| [
"\"MONGO_URL\"",
"\"API_LISTENER\"",
"\"TRANSFORM_DIR\""
]
| []
| [
"TRANSFORM_DIR",
"MONGO_URL",
"API_LISTENER"
]
| [] | ["TRANSFORM_DIR", "MONGO_URL", "API_LISTENER"] | go | 3 | 0 | |
cmd/sonarExecuteScan_test.go | package cmd
import (
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
piperHttp "github.com/SAP/jenkins-library/pkg/http"
"github.com/SAP/jenkins-library/pkg/mock"
FileUtils "github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
//TODO: extract to mock package
type mockDownloader struct {
shouldFail bool
requestedURL []string
requestedFile []string
}
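// DownloadFile records every requested URL and target filename so tests can assert on them;
// it never touches the network or the filesystem.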
func (m *mockDownloader) DownloadFile(url, filename string, header http.Header, cookies []*http.Cookie) error {
m.requestedURL = append(m.requestedURL, url)
m.requestedFile = append(m.requestedFile, filename)
if m.shouldFail {
return errors.New("something happened")
}
return nil
}
func (m *mockDownloader) SetOptions(options piperHttp.ClientOptions) {}
func mockFileUtilsExists(exists bool) func(string) (bool, error) {
return func(filename string) (bool, error) {
if exists {
return true, nil
}
return false, errors.New("something happened")
}
}
func mockExecLookPath(executable string) (string, error) {
if executable == "local-sonar-scanner" {
return "/usr/bin/sonar-scanner", nil
}
return "", errors.New("something happened")
}
func mockFileUtilsUnzip(t *testing.T, expectSrc string) func(string, string) ([]string, error) {
return func(src, dest string) ([]string, error) {
assert.Equal(t, filepath.Join(dest, expectSrc), src)
return []string{}, nil
}
}
func mockOsRename(t *testing.T, expectOld, expectNew string) func(string, string) error {
return func(old, new string) error {
assert.Regexp(t, expectOld, old)
assert.Equal(t, expectNew, new)
return nil
}
}
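// createTaskReportFile writes a minimal .scannerwork/report-task.txt so the code under test can
// pick up Sonar task metadata without a real sonar-scanner run.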
func createTaskReportFile(t *testing.T, workingDir string) {
require.NoError(t, os.MkdirAll(filepath.Join(workingDir, ".scannerwork"), 0755))
require.NoError(t, ioutil.WriteFile(filepath.Join(workingDir, ".scannerwork", "report-task.txt"), []byte("projectKey=piper-test\nserverUrl=https://sonarcloud.io\nserverVersion=8.0.0.12345\ndashboardUrl=https://sonarcloud.io/dashboard/index/piper-test\nceTaskId=AXERR2JBbm9IiM5TEST\nceTaskUrl=https://sonarcloud.io/api/ce/task?id=AXERR2JBbm9IiMTEST"), 0755))
require.FileExists(t, filepath.Join(workingDir, ".scannerwork", "report-task.txt"))
}
func TestRunSonar(t *testing.T) {
mockRunner := mock.ExecMockRunner{}
mockClient := mockDownloader{shouldFail: false}
t.Run("default", func(t *testing.T) {
// init
tmpFolder, err := ioutil.TempDir(".", "test-sonar-")
require.NoError(t, err)
defer os.RemoveAll(tmpFolder)
createTaskReportFile(t, tmpFolder)
sonar = sonarSettings{
workingDir: tmpFolder,
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
CustomTLSCertificateLinks: "",
Token: "secret-ABC",
Host: "https://sonar.sap.com",
Organization: "SAP",
ProjectVersion: "1.2.3",
}
fileUtilsExists = mockFileUtilsExists(true)
os.Setenv("PIPER_SONAR_LOAD_CERTIFICATES", "true")
require.Equal(t, "true", os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must be set")
defer func() {
fileUtilsExists = FileUtils.FileExists
os.Unsetenv("PIPER_SONAR_LOAD_CERTIFICATES")
}()
// test
err = runSonar(options, &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "-Dsonar.projectVersion=1.2.3")
assert.Contains(t, sonar.options, "-Dsonar.organization=SAP")
assert.Contains(t, sonar.environment, "SONAR_HOST_URL=https://sonar.sap.com")
assert.Contains(t, sonar.environment, "SONAR_TOKEN=secret-ABC")
assert.Contains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+filepath.Join(getWorkingDir(), ".certificates", "cacerts"))
assert.FileExists(t, filepath.Join(sonar.workingDir, "sonarExecuteScan_reports.json"))
assert.FileExists(t, filepath.Join(sonar.workingDir, "sonarExecuteScan_links.json"))
})
t.Run("with custom options", func(t *testing.T) {
// init
tmpFolder, err := ioutil.TempDir(".", "test-sonar-")
require.NoError(t, err)
defer os.RemoveAll(tmpFolder)
createTaskReportFile(t, tmpFolder)
sonar = sonarSettings{
workingDir: tmpFolder,
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
Options: []string{"-Dsonar.projectKey=piper"},
}
fileUtilsExists = mockFileUtilsExists(true)
defer func() {
fileUtilsExists = FileUtils.FileExists
}()
// test
err = runSonar(options, &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "-Dsonar.projectKey=piper")
})
}
func TestSonarHandlePullRequest(t *testing.T) {
t.Run("default", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
ChangeID: "123",
PullRequestProvider: "GitHub",
ChangeBranch: "feat/bogus",
ChangeTarget: "master",
Owner: "SAP",
Repository: "jenkins-library",
}
// test
err := handlePullRequest(options)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "sonar.pullrequest.key=123")
assert.Contains(t, sonar.options, "sonar.pullrequest.provider=github")
assert.Contains(t, sonar.options, "sonar.pullrequest.base=master")
assert.Contains(t, sonar.options, "sonar.pullrequest.branch=feat/bogus")
assert.Contains(t, sonar.options, "sonar.pullrequest.github.repository=SAP/jenkins-library")
})
t.Run("unsupported scm provider", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
ChangeID: "123",
PullRequestProvider: "Gerrit",
}
// test
err := handlePullRequest(options)
// assert
assert.Error(t, err)
assert.Equal(t, "Pull-Request provider 'gerrit' is not supported!", err.Error())
})
t.Run("legacy", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
options := sonarExecuteScanOptions{
LegacyPRHandling: true,
ChangeID: "123",
Owner: "SAP",
Repository: "jenkins-library",
GithubToken: "some-token",
DisableInlineComments: true,
}
// test
err := handlePullRequest(options)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.options, "sonar.analysis.mode=preview")
assert.Contains(t, sonar.options, "sonar.github.pullRequest=123")
assert.Contains(t, sonar.options, "sonar.github.oauth=some-token")
assert.Contains(t, sonar.options, "sonar.github.repository=SAP/jenkins-library")
assert.Contains(t, sonar.options, "sonar.github.disableInlineComments=true")
})
}
func TestSonarLoadScanner(t *testing.T) {
mockClient := mockDownloader{shouldFail: false}
t.Run("use preinstalled sonar-scanner", func(t *testing.T) {
// init
ignore := ""
sonar = sonarSettings{
binary: "local-sonar-scanner",
environment: []string{},
options: []string{},
}
execLookPath = mockExecLookPath
defer func() { execLookPath = exec.LookPath }()
// test
err := loadSonarScanner(ignore, &mockClient)
// assert
assert.NoError(t, err)
assert.Equal(t, "local-sonar-scanner", sonar.binary)
})
t.Run("use downloaded sonar-scanner", func(t *testing.T) {
// init
url := "https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.3.0.2102-linux.zip"
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
execLookPath = mockExecLookPath
fileUtilsUnzip = mockFileUtilsUnzip(t, "sonar-scanner-cli-4.3.0.2102-linux.zip")
osRename = mockOsRename(t, "sonar-scanner-4.3.0.2102-linux", ".sonar-scanner")
defer func() {
execLookPath = exec.LookPath
fileUtilsUnzip = FileUtils.Unzip
osRename = os.Rename
}()
// test
err := loadSonarScanner(url, &mockClient)
// assert
assert.NoError(t, err)
assert.Equal(t, url, mockClient.requestedURL[0])
assert.Regexp(t, "sonar-scanner-cli-4.3.0.2102-linux.zip$", mockClient.requestedFile[0])
assert.Equal(t, filepath.Join(getWorkingDir(), ".sonar-scanner", "bin", "sonar-scanner"), sonar.binary)
})
}
func TestSonarLoadCertificates(t *testing.T) {
mockRunner := mock.ExecMockRunner{}
mockClient := mockDownloader{shouldFail: false}
t.Run("use local trust store", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(true)
defer func() { fileUtilsExists = FileUtils.FileExists }()
// test
err := loadCertificates("", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Contains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+filepath.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("use local trust store with downloaded certificates", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(false)
os.Setenv("PIPER_SONAR_LOAD_CERTIFICATES", "true")
require.Equal(t, "true", os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must be set")
defer func() {
fileUtilsExists = FileUtils.FileExists
os.Unsetenv("PIPER_SONAR_LOAD_CERTIFICATES")
}()
// test
err := loadCertificates("https://sap.com/custom-1.crt,https://sap.com/custom-2.crt", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Equal(t, "https://sap.com/custom-1.crt", mockClient.requestedURL[0])
assert.Equal(t, "https://sap.com/custom-2.crt", mockClient.requestedURL[1])
assert.Regexp(t, "custom-1.crt$", mockClient.requestedFile[0])
assert.Regexp(t, "custom-2.crt$", mockClient.requestedFile[1])
assert.Contains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+filepath.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("use local trust store with downloaded certificates - deactivated", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(false)
require.Empty(t, os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must not be set")
defer func() { fileUtilsExists = FileUtils.FileExists }()
// test
err := loadCertificates("any-certificate-url", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.NotContains(t, sonar.environment, "SONAR_SCANNER_OPTS=-Djavax.net.ssl.trustStore="+filepath.Join(getWorkingDir(), ".certificates", "cacerts"))
})
t.Run("use no trust store", func(t *testing.T) {
// init
sonar = sonarSettings{
binary: "sonar-scanner",
environment: []string{},
options: []string{},
}
fileUtilsExists = mockFileUtilsExists(false)
os.Setenv("PIPER_SONAR_LOAD_CERTIFICATES", "true")
require.Equal(t, "true", os.Getenv("PIPER_SONAR_LOAD_CERTIFICATES"), "PIPER_SONAR_LOAD_CERTIFICATES must be set")
defer func() {
fileUtilsExists = FileUtils.FileExists
os.Unsetenv("PIPER_SONAR_LOAD_CERTIFICATES")
}()
// test
err := loadCertificates("", &mockClient, &mockRunner)
// assert
assert.NoError(t, err)
assert.Empty(t, sonar.environment)
})
}
| [
"\"PIPER_SONAR_LOAD_CERTIFICATES\"",
"\"PIPER_SONAR_LOAD_CERTIFICATES\"",
"\"PIPER_SONAR_LOAD_CERTIFICATES\"",
"\"PIPER_SONAR_LOAD_CERTIFICATES\""
]
| []
| [
"PIPER_SONAR_LOAD_CERTIFICATES"
]
| [] | ["PIPER_SONAR_LOAD_CERTIFICATES"] | go | 1 | 0 | |
pkg/ddevapp/providerPantheon.go | package ddevapp
import (
"github.com/drud/ddev/pkg/globalconfig"
"io/ioutil"
"os"
"path/filepath"
"strings"
"fmt"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
"github.com/drud/go-pantheon/pkg/pantheon"
"gopkg.in/yaml.v2"
)
// PantheonProvider provides pantheon-specific import functionality.
type PantheonProvider struct {
ProviderType string `yaml:"provider"`
app *DdevApp `yaml:"-"`
Sitename string `yaml:"site"`
site pantheon.Site `yaml:"-"`
siteEnvironments pantheon.EnvironmentList `yaml:"-"`
EnvironmentName string `yaml:"environment"`
environment pantheon.Environment `yaml:"-"`
}
// Init handles loading data from saved config.
func (p *PantheonProvider) Init(app *DdevApp) error {
var err error
p.app = app
configPath := app.GetConfigPath("import.yaml")
if fileutil.FileExists(configPath) {
err = p.Read(configPath)
}
p.ProviderType = ProviderPantheon
return err
}
// ValidateField provides field level validation for config settings. This is
// used any time a field is set via `ddev config` on the primary app config, and
// allows provider plugins to have additional validation for top level config
// settings.
func (p *PantheonProvider) ValidateField(field, value string) error {
switch field {
case "Name":
_, err := findPantheonSite(value)
if err != nil {
p.Sitename = value
}
return err
}
return nil
}
// SetSiteNameAndEnv sets the environment of the provider (dev/test/live)
func (p *PantheonProvider) SetSiteNameAndEnv(environment string) {
p.Sitename = p.app.Name
p.EnvironmentName = environment
}
// PromptForConfig provides interactive configuration prompts when running `ddev config pantheon`
func (p *PantheonProvider) PromptForConfig() error {
for {
p.SetSiteNameAndEnv("dev")
err := p.environmentPrompt()
if err == nil {
return nil
}
output.UserOut.Errorf("%v\n", err)
}
}
// GetBackup will download the most recent backup specified by backupType in the given environment. If no environment
// is supplied, the configured environment will be used. Valid values for backupType are "database" or "files".
func (p *PantheonProvider) GetBackup(backupType, environment string) (fileLocation string, importPath string, err error) {
if backupType != "database" && backupType != "files" {
return "", "", fmt.Errorf("could not get backup: %s is not a valid backup type", backupType)
}
// If the user hasn't defined an environment override, use the configured value.
if environment == "" {
environment = p.EnvironmentName
}
// Set the import path blank to use the root of the archive by default.
importPath = ""
err = p.environmentExists(environment)
if err != nil {
return "", "", err
}
session := getPantheonSession()
// Find either a files or database backup, depending on what was asked for.
bl := pantheon.NewBackupList(p.site.ID, environment)
err = session.Request("GET", bl)
if err != nil {
return "", "", err
}
backup, err := p.getPantheonBackupLink(backupType, bl, session, environment)
if err != nil {
return "", "", err
}
p.prepDownloadDir()
destFile := filepath.Join(p.getDownloadDir(), backup.FileName)
// Check to see if this file has been downloaded previously.
// Attempt a new download If we can't stat the file or we get a mismatch on the filesize.
stat, err := os.Stat(destFile)
if err != nil || stat.Size() != int64(backup.Size) {
err = util.DownloadFile(destFile, backup.DownloadURL, true)
if err != nil {
return "", "", err
}
}
if backupType == "files" {
importPath = fmt.Sprintf("files_%s", environment)
}
return destFile, importPath, nil
}
// prepDownloadDir ensures the download cache directories are created and writeable.
func (p *PantheonProvider) prepDownloadDir() {
destDir := p.getDownloadDir()
err := os.MkdirAll(destDir, 0755)
util.CheckErr(err)
}
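// getDownloadDir returns the per-project backup cache directory under the global ddev config
// directory, so repeated imports can reuse previously downloaded archives.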
func (p *PantheonProvider) getDownloadDir() string {
globalDir := globalconfig.GetGlobalDdevDir()
destDir := filepath.Join(globalDir, "pantheon", p.app.Name)
return destDir
}
// getPantheonBackupLink returns a download link for the most recent backup of archiveType that exists within the specified BackupList.
func (p *PantheonProvider) getPantheonBackupLink(archiveType string, bl *pantheon.BackupList, session *pantheon.AuthSession, environment string) (*pantheon.Backup, error) {
latestBackup := pantheon.Backup{}
for i, backup := range bl.Backups {
if backup.ArchiveType == archiveType && backup.Timestamp > latestBackup.Timestamp {
latestBackup = bl.Backups[i]
}
}
if latestBackup.Timestamp != 0 {
// Get a time-limited backup URL from Pantheon. This requires a POST of the backup type to their API.
err := session.Request("POST", &latestBackup)
if err != nil {
return &pantheon.Backup{}, fmt.Errorf("could not get backup URL: %v", err)
}
return &latestBackup, nil
}
// If no matches were found, just return an empty backup along with an error.
return &pantheon.Backup{}, fmt.Errorf("could not find a backup of type %s. Please visit your pantheon dashboard and ensure the '%s' environment has a backup available", archiveType, environment)
}
// environmentPrompt contains the user prompts for interactive configuration of the pantheon environment.
func (p *PantheonProvider) environmentPrompt() error {
_, err := p.GetEnvironments()
if err != nil {
return err
}
if p.EnvironmentName == "" {
p.EnvironmentName = "dev"
}
fmt.Println("\nConfigure import environment:")
keys := make([]string, 0, len(p.siteEnvironments.Environments))
for k := range p.siteEnvironments.Environments {
keys = append(keys, k)
}
fmt.Println("\n\t- " + strings.Join(keys, "\n\t- ") + "\n")
var environmentPrompt = "Type the name to select an environment to import from"
if p.EnvironmentName != "" {
environmentPrompt = fmt.Sprintf("%s (%s)", environmentPrompt, p.EnvironmentName)
}
fmt.Print(environmentPrompt + ": ")
envName := util.GetInput(p.EnvironmentName)
_, ok := p.siteEnvironments.Environments[envName]
if !ok {
return fmt.Errorf("could not find an environment named '%s'", envName)
}
p.SetSiteNameAndEnv(envName)
return nil
}
// Write the pantheon provider configuration to a specified location on disk.
func (p *PantheonProvider) Write(configPath string) error {
err := PrepDdevDirectory(filepath.Dir(configPath))
if err != nil {
return err
}
cfgbytes, err := yaml.Marshal(p)
if err != nil {
return err
}
err = ioutil.WriteFile(configPath, cfgbytes, 0644)
if err != nil {
return err
}
return nil
}
// Read pantheon provider configuration from a specified location on disk.
func (p *PantheonProvider) Read(configPath string) error {
source, err := ioutil.ReadFile(configPath)
if err != nil {
return err
}
// Read config values from file.
err = yaml.Unmarshal(source, p)
if err != nil {
return err
}
return nil
}
// GetEnvironments will return a list of environments for the currently configured upstream pantheon site.
func (p *PantheonProvider) GetEnvironments() (pantheon.EnvironmentList, error) {
var el *pantheon.EnvironmentList
// If we've got an already populated environment list, then just use that.
if len(p.siteEnvironments.Environments) > 0 {
return p.siteEnvironments, nil
}
// Otherwise we need to find our environments.
session := getPantheonSession()
if p.site.ID == "" {
site, err := findPantheonSite(p.Sitename)
if err != nil {
return p.siteEnvironments, err
}
p.site = site
}
// Get a list of all active environments for the current site.
el = pantheon.NewEnvironmentList(p.site.ID)
err := session.Request("GET", el)
p.siteEnvironments = *el
return *el, err
}
// Validate ensures that the current configuration is valid (i.e. the configured pantheon site/environment exists)
func (p *PantheonProvider) Validate() error {
return p.environmentExists(p.EnvironmentName)
}
// environmentExists ensures the currently configured pantheon site & environment exists.
func (p *PantheonProvider) environmentExists(environment string) error {
_, err := p.GetEnvironments()
if err != nil {
return err
}
if _, ok := p.siteEnvironments.Environments[environment]; !ok {
return fmt.Errorf("could not find an environment named '%s'", environment)
}
return nil
}
// findPantheonSite ensures the pantheon site specified by name exists, and the current user has access to it.
func findPantheonSite(name string) (pantheon.Site, error) {
session := getPantheonSession()
// Get a list of all sites the current user has access to. Ensure we can find the site which was used in the CLI arguments in that list.
sl := &pantheon.SiteList{}
err := session.Request("GET", sl)
if err != nil {
return pantheon.Site{}, err
}
// Get a list of environments for a given site.
for i, site := range sl.Sites {
if site.Site.Name == name {
return sl.Sites[i], nil
}
}
return pantheon.Site{}, fmt.Errorf("could not find a pantheon site named %s", name)
}
// getPantheonSession loads the pantheon API config from disk and returns a pantheon session struct.
func getPantheonSession() *pantheon.AuthSession {
globalDir := globalconfig.GetGlobalDdevDir()
sessionLocation := filepath.Join(globalDir, "pantheonconfig.json")
// Generate a session object based on the DDEV_PANTHEON_API_TOKEN environment var.
session := &pantheon.AuthSession{}
// Read a previously saved session.
err := session.Read(sessionLocation)
if err != nil {
// If we can't read a previous session fall back to using the API token.
apiToken := os.Getenv("DDEV_PANTHEON_API_TOKEN")
if apiToken == "" {
util.Failed("No saved session could be found and the environment variable DDEV_PANTHEON_API_TOKEN is not set. Please use ddev auth-pantheon or set a DDEV_PANTHEON_API_TOKEN. https://pantheon.io/docs/machine-tokens/ provides instructions on creating a token.")
}
session = pantheon.NewAuthSession(os.Getenv("DDEV_PANTHEON_API_TOKEN"))
}
err = session.Auth()
if err != nil {
output.UserOut.Fatalf("Could not authenticate with pantheon: %v", err)
}
err = session.Write(sessionLocation)
util.CheckErr(err)
return session
}
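// Rough flow of the helper above (illustrative only): getPantheonSession first
// tries to reuse pantheonconfig.json from the global ddev directory and only
// falls back to the DDEV_PANTHEON_API_TOKEN environment variable, e.g.
//
//	// export DDEV_PANTHEON_API_TOKEN=<machine token>
//	session := getPantheonSession() // authenticates and caches the session on disk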
| [
"\"DDEV_PANTHEON_API_TOKEN\"",
"\"DDEV_PANTHEON_API_TOKEN\""
]
| []
| [
"DDEV_PANTHEON_API_TOKEN"
]
| [] | ["DDEV_PANTHEON_API_TOKEN"] | go | 1 | 0 | |
conf/setting.go | package conf
import (
"encoding/json"
"errors"
"fmt"
"os"
"path"
"strings"
"time"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
"gonelist/pkg/file"
)
// Server settings
type Server struct {
ReadTimeout time.Duration `yaml:"read_timeout"`
WriteTimeout time.Duration `yaml:"write_timeout"`
	BindGlobal   bool          `json:"bind_global" yaml:"bind_global"` // whether to bind to 0.0.0.0
	DistPATH     string        `json:"dist_path" yaml:"dist_path"`     // static file directory
	Gzip         bool          `json:"gzip" yaml:"gzip"`               // whether to enable Gzip acceleration
	Port         int           `json:"port" yaml:"port"`               // port to bind
	SiteUrl      string        `json:"site_url" yaml:"site_url"`       // site URL, e.g. https://gonelist.cugxuan.cn
}
var defaultServerSetting = &Server{
ReadTimeout: 60,
WriteTimeout: 60,
BindGlobal: true,
DistPATH: "./dist/",
Port: 8000,
Gzip: true,
SiteUrl: "https://gonelist.cugxuan.cn",
}
type Admin struct {
	EnableWrite     bool   `json:"enable_write" yaml:"enable_write"`           // whether clients may write to the OneDrive backend; writes include creating folders, uploading files and deleting files
	Secret          string `json:"secret" yaml:"secret"`                       // secret required for write access; the frontend needs it to elevate permissions, so changing the default secret is recommended
	UploadSliceSize int    `json:"upload_slice_size" yaml:"upload_slice_size"` // slice size for chunked uploads of large files; defaults to 32MB, a value of 1 means 320KB
}
// Local
// configuration for mounting a local directory
type Local struct {
	Enable bool   `json:"enable" yaml:"enable"` // whether local mounting is enabled
	Name   string `json:"name" yaml:"name"`     // display name of the mounted local directory in gonelist
	Path   string `json:"path" yaml:"path"`     // path of the local directory to mount
}
type Onedrive struct {
// Remote to load RemoteConf
Remote string `json:"remote" yaml:"remote"`
RemoteConf Remote `json:"-" yaml:"-"`
	// refresh settings
	Model       string `json:"model" yaml:"model"`               // refresh mode
	Level       int    `json:"level" yaml:"level"`               // refresh depth (levels)
	RefreshTime int    `json:"refresh_time" yaml:"refresh_time"` // auto-refresh interval, in minutes
	// obtaining the authorization code
	ResponseType string `json:"-" yaml:"-"` // value is "code"
	ClientID     string `json:"client_id" yaml:"client_id"`
	RedirectURL  string `json:"redirect_url" yaml:"redirect_url"`
	State        string `json:"state" yaml:"state"` // identifier set by the user
	// obtaining the access_token
	ClientSecret string `json:"client_secret" yaml:"client_secret"`
	Code         string `json:"-" yaml:"-"`                   // intermediate code received by the server
	GrantType    string `json:"-" yaml:"-"`                   // value is "authorization_code"
	Scope        string `json:"-" yaml:"-"`                   // value is "offline_access files.readwrite.all"
	AccessToken  string `json:"-" yaml:"-"`                   // access token
	RefreshToken string `json:"-" yaml:"-"`                   // refresh token
	TokenPath    string `json:"token_path" yaml:"token_path"` // location of the token file
	// other settings
	FolderSub              string `json:"folder_sub" yaml:"folder_sub"`                             // OneDrive subfolder to serve
	DownloadRedirectPrefix string `json:"download_redirect_prefix" yaml:"download_redirect_prefix"` // download redirect prefix
	// directory passwords
PassList []*Pass `json:"pass_list" yaml:"pass_list"`
}
// user settings
type AllSet struct {
	Name      string `json:"name" yaml:"name"`
	PageTitle string `json:"page_title" yaml:"page_title"`
	Version   string `json:"version" yaml:"version"`
	// Server config, defines how the service behaves
	Server *Server `json:"server" yaml:"server"`
	// type of drive to mount
	ListType string `json:"list_type" yaml:"list_type"`
	// Onedrive
	Onedrive *Onedrive `json:"onedrive" yaml:"onedrive"`
	// permission management
	Admin *Admin `json:"admin" yaml:"admin"`
	// local directory mount
	Local *Local `json:"local" yaml:"local"`
}
var UserSet = &AllSet{}
func LoadUserConfig(configPath string) error {
var (
content []byte
err error
)
if len(configPath) == 0 {
return errors.New("配置文件名不能为空")
}
envValue := os.Getenv("CONF_PATH")
if envValue != "" {
configPath = envValue
}
log.Infof("当前使用的配置文件为:%s", configPath)
if content, err = file.ReadFromFile(configPath); err != nil {
return fmt.Errorf("read config err,path: %s", configPath)
}
err = yaml.Unmarshal(content, &UserSet)
if err != nil {
return fmt.Errorf("导入用户配置出现错误: %w", err)
}
	// Server settings
if UserSet.Server == nil {
return fmt.Errorf("Server 设置读取出现错误")
}
switch UserSet.ListType {
case "onedrive":
		// resolve the Remote endpoint address
switch UserSet.Onedrive.Remote {
case "onedrive":
UserSet.Onedrive.RemoteConf = OneDrive
case "chinacloud":
UserSet.Onedrive.RemoteConf = ChinaCloud
}
		// PassList settings
if UserSet.Onedrive.FolderSub == "" {
UserSet.Onedrive.FolderSub = "/"
}
if UserSet.Onedrive.PassList == nil {
UserSet.Onedrive.PassList = defaultPassListSetting
}
		// If TokenPath is not "", the token is saved in the user-configured directory;
		// otherwise the token is saved in the directory containing the user's config.yml
if UserSet.Onedrive.TokenPath == "" {
UserSet.Onedrive.TokenPath = GetTokenPath(configPath)
} else {
			// users usually configure a directory here, so convert it into a file path
if !strings.HasSuffix(UserSet.Onedrive.TokenPath, ".token") {
UserSet.Onedrive.TokenPath = path.Join(UserSet.Onedrive.TokenPath, ".token")
}
}
		// set the slice size for large-file uploads
if UserSet.Admin.UploadSliceSize == 0 {
UserSet.Admin.UploadSliceSize = 100
}
default:
return fmt.Errorf("不支持的网盘挂载类型")
}
setting, _ := json.Marshal(UserSet)
log.Infof("gonelist 监听端口:%v,成功导入用户配置:%+v", UserSet.Server.Port, string(setting))
return nil
}
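// Illustrative usage (not part of gonelist itself): the path passed to
// LoadUserConfig can be overridden with the CONF_PATH environment variable.
//
//	// CONF_PATH=/etc/gonelist/config.yml ./gonelist
//	if err := conf.LoadUserConfig("config.yml"); err != nil {
//		log.Fatal(err)
//	}
//	addr := conf.GetBindAddr(conf.UserSet.Server.BindGlobal, conf.UserSet.Server.Port)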
// return the refresh time from the settings
func GetRefreshTime() time.Duration {
return time.Duration(UserSet.Onedrive.RefreshTime) * time.Minute
}
func GetBindAddr(bind bool, port int) string {
var prefix string
	if !bind {
prefix = "127.0.0.1"
}
return fmt.Sprintf("%s:%d", prefix, port)
}
func GetDistPATH() string {
return UserSet.Server.DistPATH
}
func GetTokenPath(configPath string) string {
lastIndex := strings.LastIndex(configPath, string(os.PathSeparator))
return configPath[:lastIndex+1] + ".token"
}
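// Example of the derivation above (hypothetical input): for
// configPath = "/home/user/gonelist/config.yml" the token file becomes
// "/home/user/gonelist/.token" — the final path component is replaced by a
// hidden ".token" file in the same directory.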
type Pass struct {
Path string `json:"path"`
Pass string `json:"pass"`
}
var defaultPassListSetting = []*Pass{
{
Path: "",
Pass: "",
},
}
| [
"\"CONF_PATH\""
]
| []
| [
"CONF_PATH"
]
| [] | ["CONF_PATH"] | go | 1 | 0 | |
notebooks/util.py | import os
import time
from collections.abc import Iterable
import cftime
import dask
import intake
import numpy as np
import xarray as xr
import yaml
from dask.distributed import Client
from dask_jobqueue import PBSCluster
path_to_here = os.path.dirname(os.path.realpath(__file__))
USER = os.environ['USER']
PBS_PROJECT = 'NCGD0011'
def attrs_label(attrs):
"""generate a label from long_name and units"""
da_name = ''
if isinstance(attrs, xr.DataArray):
da_name = attrs.name
attrs = attrs.attrs
name = da_name if 'long_name' not in attrs else attrs['long_name']
if len(name) > 30:
name = '\n'.join([name[:30], name[30:]])
units = '' if 'units' not in attrs else f' [{attrs["units"]}]'
return name + units
def label_plots(fig, axs, xoff=-0.04, yoff=0.02):
alp = [chr(i).upper() for i in range(97, 97 + 26)]
for i, ax in enumerate(axs):
p = ax.get_position()
x = p.x0 + xoff
y = p.y1 + yoff
fig.text(x, y, f'{alp[i]}', fontsize=14, fontweight='semibold')
def get_ClusterClient(memory='25GB'):
"""get cluster and client"""
cluster = PBSCluster(
cores=1,
memory=memory,
processes=1,
queue='casper',
local_directory=f'/glade/scratch/{USER}/dask-workers',
log_directory=f'/glade/scratch/{USER}/dask-workers',
resource_spec=f'select=1:ncpus=1:mem={memory}',
project=PBS_PROJECT,
walltime='06:00:00',
interface='ib0',
)
jupyterhub_server_name = os.environ.get('JUPYTERHUB_SERVER_NAME', None)
dashboard_link = 'https://jupyterhub.hpc.ucar.edu/stable/user/{USER}/proxy/{port}/status'
if jupyterhub_server_name:
dashboard_link = (
'https://jupyterhub.hpc.ucar.edu/stable/user/'
+ '{USER}'
+ f'/{jupyterhub_server_name}/proxy/'
+ '{port}/status'
)
dask.config.set({'distributed.dashboard.link': dashboard_link})
client = Client(cluster)
return cluster, client
class timer(object):
"""support reporting timing info with named tasks"""
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tic = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print(f'[{self.name}]: ', end='')
toc = time.time() - self.tic
print(f'{toc:0.5f}s')
def to_datenum(y, m, d, time_units='days since 0001-01-01 00:00:00'):
"""convert year, month, day to number"""
return cftime.date2num(cftime.datetime(y, m, d), units=time_units)
def nday_per_year(year):
return 365
def year_frac(time):
"""compute year fraction"""
year = [d.year for d in time.values]
month = [d.month for d in time.values]
day = [d.day for d in time.values]
t0_year = np.array([to_datenum(y, 1, 1) - 1 for y in year])
t_year = np.array([to_datenum(y, m, d) for y, m, d in zip(year, month, day)])
nday_year = np.array([nday_per_year(y) for y in year])
return year + (t_year - t0_year) / nday_year
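# Worked example for year_frac (illustrative; nday_per_year assumes a 365-day
# year, so a non-leap year keeps the arithmetic exact): for 2001-07-02,
# t_year - t0_year = 183 days, giving 2001 + 183/365 ≈ 2001.501.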
def pop_add_cyclic(ds):
"""Make POP grid easily plottable"""
ni = ds.TLONG.shape[1]
xL = int(ni / 2 - 1)
xR = int(xL + ni)
tlon = ds.TLONG.data
tlat = ds.TLAT.data
tlon = np.where(np.greater_equal(tlon, min(tlon[:, 0])), tlon - 360.0, tlon)
lon = np.concatenate((tlon, tlon + 360.0), 1)
lon = lon[:, xL:xR]
if ni == 320:
lon[367:-3, 0] = lon[367:-3, 0] + 360.0
lon = lon - 360.0
lon = np.hstack((lon, lon[:, 0:1] + 360.0))
if ni == 320:
lon[367:, -1] = lon[367:, -1] - 360.0
# -- trick cartopy into doing the right thing:
# it gets confused when the cyclic coords are identical
lon[:, 0] = lon[:, 0] - 1e-8
# -- periodicity
lat = np.concatenate((tlat, tlat), 1)
lat = lat[:, xL:xR]
lat = np.hstack((lat, lat[:, 0:1]))
TLAT = xr.DataArray(lat, dims=('nlat', 'nlon'))
TLONG = xr.DataArray(lon, dims=('nlat', 'nlon'))
dso = xr.Dataset({'TLAT': TLAT, 'TLONG': TLONG})
# copy vars
varlist = [v for v in ds.data_vars if v not in ['TLAT', 'TLONG']]
for v in varlist:
v_dims = ds[v].dims
if not ('nlat' in v_dims and 'nlon' in v_dims):
dso[v] = ds[v]
else:
# determine and sort other dimensions
other_dims = set(v_dims) - {'nlat', 'nlon'}
other_dims = tuple([d for d in v_dims if d in other_dims])
lon_dim = ds[v].dims.index('nlon')
field = ds[v].data
field = np.concatenate((field, field), lon_dim)
field = field[..., :, xL:xR]
field = np.concatenate((field, field[..., :, 0:1]), lon_dim)
dso[v] = xr.DataArray(field, dims=other_dims + ('nlat', 'nlon'), attrs=ds[v].attrs)
# copy coords
for v, da in ds.coords.items():
if not ('nlat' in da.dims and 'nlon' in da.dims):
dso = dso.assign_coords(**{v: da})
return dso
class curator_local_assets(object):
"""Curate an intake catalog with locally-cached assets"""
def __init__(self):
cache_dir = 'data/cache'
os.makedirs(cache_dir, exist_ok=True)
self.catalog_file = f'{path_to_here}/data/catalogs/catalog-local.yml'
if os.path.exists(self.catalog_file):
with open(self.catalog_file, 'r') as fid:
self.catalog = yaml.safe_load(fid)
else:
self.catalog = yaml.safe_load(
"""
description: Local assets
plugins:
source:
- module: intake_xarray
sources: {}
"""
)
def add_source(self, key, urlpath, description, driver='netcdf', overwrite=False, **kwargs):
"""add a new source to the catalog"""
if key in self.catalog['sources']:
if not overwrite:
raise ValueError(f'source {key} exists; set `overwrite` to true to overwrite')
else:
print(f'overwriting "{key}" key in "sources"')
args = dict(urlpath=urlpath)
args.update(kwargs)
self.catalog['sources'][key] = dict(
driver=driver,
description=description,
args=args,
)
self.persist()
def persist(self):
"""write the catalog to disk"""
with open(self.catalog_file, 'w') as fid:
yaml.dump(self.catalog, fid)
def open_catalog(self):
"""return as intake catalog"""
return intake.open_catalog(self.catalog_file)
def __repr__(self):
return self.catalog.__repr__()
def infer_lat_name(ds):
lat_names = ['latitude', 'lat']
for n in lat_names:
if n in ds:
return n
raise ValueError('could not determine lat name')
def infer_lon_name(ds):
lon_names = ['longitude', 'lon']
for n in lon_names:
if n in ds:
return n
raise ValueError('could not determine lon name')
def lat_weights_regular_grid(lat):
"""
Generate latitude weights for equally spaced (regular) global grids.
Weights are computed as sin(lat+dlat/2)-sin(lat-dlat/2) and sum to 2.0.
"""
dlat = np.abs(np.diff(lat))
np.testing.assert_almost_equal(dlat, dlat[0])
w = np.abs(np.sin(np.radians(lat + dlat[0] / 2.0)) - np.sin(np.radians(lat - dlat[0] / 2.0)))
if np.abs(lat[0]) > 89.9999:
w[0] = np.abs(1.0 - np.sin(np.radians(np.pi / 2 - dlat[0])))
if np.abs(lat[-1]) > 89.9999:
w[-1] = np.abs(1.0 - np.sin(np.radians(np.pi / 2 - dlat[0])))
return w
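# Quick sanity check (illustrative): on a regular 1-degree latitude axis the
# weights telescope to sin(+90°) - sin(-90°) and therefore sum to ~2.0.
#
#   lat = np.arange(-89.5, 90, 1.0)
#   w = lat_weights_regular_grid(lat)
#   assert abs(w.sum() - 2.0) < 1e-6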
def compute_grid_area(ds, check_total=True):
"""Compute the area of grid cells.
Parameters
----------
ds : xarray.Dataset
Input dataset with latitude and longitude fields
check_total : Boolean, optional
Test that total area is equal to area of the sphere.
Returns
-------
area : xarray.DataArray
DataArray with area field.
"""
radius_earth = 6.37122e6 # m, radius of Earth
    area_earth = 4.0 * np.pi * radius_earth ** 2  # area of earth [m^2]
lon_name = infer_lon_name(ds)
lat_name = infer_lat_name(ds)
weights = lat_weights_regular_grid(ds[lat_name])
area = weights + 0.0 * ds[lon_name] # add 'lon' dimension
area = (area_earth / area.sum(dim=(lat_name, lon_name))) * area
if check_total:
np.testing.assert_approx_equal(np.sum(area), area_earth)
return xr.DataArray(
area, dims=(lat_name, lon_name), attrs={'units': 'm^2', 'long_name': 'area'}
)
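# Hypothetical usage of compute_grid_area: given a dataset with regular
# 'lat'/'lon' (or 'latitude'/'longitude') coordinates, the returned DataArray
# can serve as weights for area-averaged reductions ('tas' is a placeholder
# variable name):
#
#   area = compute_grid_area(ds)
#   global_mean = (ds['tas'] * area).sum(('lat', 'lon')) / area.sum()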
| []
| []
| [
"USER",
"JUPYTERHUB_SERVER_NAME"
]
| [] | ["USER", "JUPYTERHUB_SERVER_NAME"] | python | 2 | 0 | |
helper/config.py | """
Responsible for reading in configuration files, validating the proper
format and providing sane defaults for parts that don't have any.
"""
import json
import logging
import logging.config
import os
from os import path
import sys
try:
from urllib import parse
except ImportError: # Python 2.7 support
import urlparse as parse
import flatdict
import yaml
LOGGER = logging.getLogger(__name__)
APPLICATION = {'wake_interval': 60}
DAEMON = {'user': None,
'group': None,
'pidfile': None,
'prevent_core': True}
LOGGING_FORMAT = ('%(levelname) -10s %(asctime)s %(process)-6d '
'%(processName) -20s %(threadName)-12s %(name) -30s '
'%(funcName) -25s L%(lineno)-6d: %(message)s')
LOGGING = {
'disable_existing_loggers': True,
'filters': {},
'formatters': {
'verbose': {
'datefmt': '%Y-%m-%d %H:%M:%S',
'format': LOGGING_FORMAT
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'root': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'level': logging.CRITICAL
}
},
'incremental': False,
'loggers': {},
'root': {
'handlers': ['root']
},
'version': 1}
class Config(object):
"""The Config object holds the current state of the configuration for an
application. If no configuration file is provided, it will used a set of
defaults with very basic behavior for logging and daemonization.
"""
def __init__(self, file_path=None):
"""Create a new instance of the configuration object, passing in the
path to the configuration file.
:param str file_path: The path to the configuration file
:raises: ValueError
"""
self._values = self._default_configuration()
self._file_path = self._normalize_file_path(file_path)
if self._file_path:
self._values.update(self._load_config_file())
def get(self, name, default=None):
"""Return the value for key if key is in the configuration, else default.
:param str name: The key name to return
:param mixed default: The default value for the key
:return: mixed
"""
return self._values.get(name, default)
@property
def application(self):
return self._values['Application'].as_dict()
@property
def daemon(self):
return self._values['Daemon'].as_dict()
@property
def logging(self):
return self._values['Logging'].as_dict()
def reload(self):
"""Reload the configuration from disk returning True if the
configuration has changed from the previous values.
"""
config = self._default_configuration()
if self._file_path:
config.update(self._load_config_file())
if config != self._values:
self._values = config
return True
return False
@staticmethod
def _default_configuration():
"""Return the default configuration for Helper
:rtype: dict
"""
return flatdict.FlatDict({
'Application': APPLICATION,
'Daemon': DAEMON,
'Logging': LOGGING
})
def _load_config_file(self):
"""Load the configuration file into memory, returning the content.
"""
LOGGER.info('Loading configuration from %s', self._file_path)
if self._file_path.endswith('json'):
config = self._load_json_config()
else:
config = self._load_yaml_config()
for key, value in [(k, v) for k, v in config.items()]:
if key.title() != key:
config[key.title()] = value
del config[key]
return flatdict.FlatDict(config)
def _load_json_config(self):
"""Load the configuration file in JSON format
:rtype: dict
"""
try:
return json.loads(self._read_config())
except ValueError as error:
raise ValueError(
'Could not read configuration file: {}'.format(error))
def _load_yaml_config(self):
"""Loads the configuration file from a .yaml or .yml file
:type: dict
"""
try:
config = self._read_config()
except OSError as error:
raise ValueError('Could not read configuration file: %s' % error)
try:
return yaml.safe_load(config)
except yaml.YAMLError as error:
message = '\n'.join([' > %s' % line
for line in str(error).split('\n')])
sys.stderr.write('\n\n Error in the configuration file:\n\n'
'{}\n\n'.format(message))
sys.stderr.write(' Configuration should be a valid YAML file.\n')
sys.stderr.write(' YAML format validation available at '
'http://yamllint.com\n')
raise ValueError(error)
@staticmethod
def _normalize_file_path(file_path):
"""Normalize the file path value.
:param str file_path: The file path as passed in
:rtype: str
"""
if not file_path:
return None
elif file_path.startswith('s3://') or \
file_path.startswith('http://') or \
file_path.startswith('https://'):
return file_path
return path.abspath(file_path)
def _read_config(self):
"""Read the configuration from the various places it may be read from.
:rtype: str
:raises: ValueError
"""
if not self._file_path:
return None
elif self._file_path.startswith('s3://'):
return self._read_s3_config()
elif self._file_path.startswith('http://') or \
self._file_path.startswith('https://'):
return self._read_remote_config()
elif not path.exists(self._file_path):
raise ValueError(
'Configuration file not found: {}'.format(self._file_path))
with open(self._file_path, 'r') as handle:
return handle.read()
def _read_remote_config(self):
"""Read a remote config via URL.
:rtype: str
:raises: ValueError
"""
try:
import requests
except ImportError:
requests = None
if not requests:
raise ValueError(
'Remote config URL specified but requests not installed')
result = requests.get(self._file_path)
if not result.ok:
raise ValueError(
'Failed to retrieve remote config: {}'.format(
result.status_code))
return result.text
def _read_s3_config(self):
"""Read in the value of the configuration file in Amazon S3.
:rtype: str
:raises: ValueError
"""
try:
import boto3
import botocore.exceptions
except ImportError:
boto3, botocore = None, None
if not boto3:
raise ValueError(
's3 URL specified for configuration but boto3 not installed')
parsed = parse.urlparse(self._file_path)
try:
response = boto3.client(
's3', endpoint_url=os.environ.get('S3_ENDPOINT')).get_object(
Bucket=parsed.netloc, Key=parsed.path.lstrip('/'))
except botocore.exceptions.ClientError as e:
raise ValueError(
'Failed to download configuration from S3: {}'.format(e))
return response['Body'].read().decode('utf-8')
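# Minimal usage sketch for Config (illustrative; 'app.yml' is a hypothetical
# file). Remote "s3://", "http://" and "https://" paths are also accepted, in
# which case boto3 / requests must be installed.
#
#   cfg = Config('app.yml')
#   wake = cfg.application.get('wake_interval', 60)
#   if cfg.reload():
#       print('configuration changed on disk')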
class LoggingConfig(object):
"""The Logging class is used for abstracting away dictConfig logging
semantics and can be used by sub-processes to ensure consistent logging
rule application.
"""
DEBUG_ONLY = 'debug_only'
HANDLERS = 'handlers'
LOGGERS = 'loggers'
def __init__(self, configuration, debug=None):
"""Create a new instance of the Logging object passing in the
DictConfig syntax logging configuration and a debug flag.
:param dict configuration: The logging configuration
:param bool debug: Toggles use of debug_only loggers
"""
# Force a NullLogger for some libraries that require it
root_logger = logging.getLogger()
root_logger.addHandler(logging.NullHandler())
self.config = dict(configuration)
self.debug = debug
self.configure()
def update(self, configuration, debug=None):
"""Update the internal configuration values, removing debug_only
handlers if debug is False. Returns True if the configuration has
changed from previous configuration values.
:param dict configuration: The logging configuration
:param bool debug: Toggles use of debug_only loggers
:rtype: bool
"""
if self.config != dict(configuration) and debug != self.debug:
self.config = dict(configuration)
self.debug = debug
self.configure()
return True
return False
def configure(self):
"""Configure the Python stdlib logger"""
if self.debug is not None and not self.debug:
self._remove_debug_handlers()
self._remove_debug_only()
logging.config.dictConfig(self.config)
try:
logging.captureWarnings(True)
except AttributeError:
pass
def _remove_debug_handlers(self):
"""Remove any handlers with an attribute of debug_only that is True and
remove the references to said handlers from any loggers that are
referencing them.
"""
remove = list()
for handler in self.config[self.HANDLERS]:
if self.config[self.HANDLERS][handler].get('debug_only'):
remove.append(handler)
for handler in remove:
del self.config[self.HANDLERS][handler]
for logger in self.config[self.LOGGERS].keys():
logger = self.config[self.LOGGERS][logger]
if handler in logger[self.HANDLERS]:
logger[self.HANDLERS].remove(handler)
self._remove_debug_only()
def _remove_debug_only(self):
"""Iterate through each handler removing the invalid dictConfig key of
debug_only.
"""
LOGGER.debug('Removing debug only from handlers')
for handler in self.config[self.HANDLERS]:
if self.DEBUG_ONLY in self.config[self.HANDLERS][handler]:
del self.config[self.HANDLERS][handler][self.DEBUG_ONLY]
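# Illustrative pairing of the two classes above: the dictConfig-style logging
# section of a Config can be handed straight to LoggingConfig, which strips
# debug_only handlers unless debug is truthy.
#
#   cfg = Config('app.yml')                    # hypothetical config file
#   LoggingConfig(cfg.logging, debug=False)    # configures stdlib logging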
| []
| []
| [
"S3_ENDPOINT"
]
| [] | ["S3_ENDPOINT"] | python | 1 | 0 | |
flash/text/seq2seq/core/model.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from typing import Any, Callable, List, Mapping, Optional, Sequence, Type, Union
import pytorch_lightning as pl
import torch
from pytorch_lightning.utilities import rank_zero_info
from torch import Tensor
from flash.core.finetuning import FlashBaseFinetuning
from flash.core.model import Task
from flash.core.utilities.imports import _TEXT_AVAILABLE
from flash.text.seq2seq.core.finetuning import Seq2SeqFreezeEmbeddings
if _TEXT_AVAILABLE:
from transformers import AutoModelForSeq2SeqLM, PreTrainedTokenizerBase
else:
AutoModelForSeq2SeqLM, PreTrainedTokenizerBase = None, None
def _pad_tensors_to_max_len(model_cfg, tensor, max_length):
pad_token_id = model_cfg.pad_token_id if model_cfg.pad_token_id else model_cfg.eos_token_id
if pad_token_id is None:
raise ValueError(
f"Make sure that either `config.pad_token_id` or `config.eos_token_id` "
f"is defined if tensor has to be padded to `max_length`={max_length}"
)
padded_tensor = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
padded_tensor[:, :tensor.shape[-1]] = tensor
return padded_tensor
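# Small illustration of the padding helper above (assumed shapes): a (2, 5)
# tensor of generated token ids padded to max_length=8 becomes (2, 8), with the
# last three columns filled with the model's pad (or eos) token id.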
class Seq2SeqTask(Task):
"""General Task for Sequence2Sequence.
Args:
loss_fn: Loss function for training
optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`.
metrics: Metrics to compute for training and evaluation.
        learning_rate: Learning rate to use for training, defaults to `5e-5`
        val_target_max_length: Maximum length of targets in validation; defaults to `None` (falls back to the model config)
        num_beams: Number of beams to use in validation when generating predictions; defaults to `None` (falls back to the model config)
"""
def __init__(
self,
backbone: str = 't5-small',
loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,
optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
metrics: Union[pl.metrics.Metric, Mapping, Sequence, None] = None,
learning_rate: float = 5e-5,
val_target_max_length: Optional[int] = None,
num_beams: Optional[int] = None,
):
if not _TEXT_AVAILABLE:
raise ModuleNotFoundError("Please, pip install 'lightning-flash[text]'")
os.environ["TOKENIZERS_PARALLELISM"] = "TRUE"
        # disable the large volume of HF warnings
        warnings.simplefilter("ignore")
        # set os environ variable for multiprocessing workers
        os.environ["PYTHONWARNINGS"] = "ignore"
super().__init__(loss_fn=loss_fn, optimizer=optimizer, metrics=metrics, learning_rate=learning_rate)
self.model = AutoModelForSeq2SeqLM.from_pretrained(backbone)
self.val_target_max_length = val_target_max_length
self.num_beams = num_beams
self._initialize_model_specific_parameters()
def forward(self, x: Any) -> Any:
max_length = self.val_target_max_length if self.val_target_max_length else self.model.config.max_length
num_beams = self.num_beams if self.num_beams else self.model.config.num_beams
generated_tokens = self.model.generate(
input_ids=x['input_ids'], attention_mask=x['attention_mask'], max_length=max_length, num_beams=num_beams
)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < max_length:
generated_tokens = _pad_tensors_to_max_len(
model_cfg=self.model.config, tensor=generated_tokens, max_length=max_length
)
return generated_tokens
def training_step(self, batch: Any, batch_idx: int) -> Tensor:
outputs = self.model(**batch)
loss = outputs[0]
self.log("train_loss", loss)
return loss
def common_step(self, prefix: str, batch: Any) -> torch.Tensor:
generated_tokens = self(batch)
self.compute_metrics(generated_tokens, batch, prefix)
def validation_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0):
self.common_step("val", batch)
def test_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0):
self.common_step("test", batch)
def compute_metrics(self, generated_tokens, batch, prefix):
pass
@property
def task(self) -> Optional[str]:
"""
Override to define AutoConfig task specific parameters stored within the model.
"""
return
def _initialize_model_specific_parameters(self):
task_specific_params = self.model.config.task_specific_params
if task_specific_params:
pars = task_specific_params.get(self.task, {})
rank_zero_info(f"Overriding model paramameters for {self.task} as defined within the model:\n {pars}")
self.model.config.update(pars)
@property
def tokenizer(self) -> 'PreTrainedTokenizerBase':
return self.data_pipeline.data_source.tokenizer
def tokenize_labels(self, labels: Tensor) -> List[str]:
label_str = self.tokenizer.batch_decode(labels, skip_special_tokens=True)
return [str.strip(s) for s in label_str]
def configure_finetune_callback(self) -> List[FlashBaseFinetuning]:
return [Seq2SeqFreezeEmbeddings(self.model.config.model_type, train_bn=True)]
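# Hedged usage sketch (subclasses such as the summarization/translation tasks
# normally supply `task` and `compute_metrics`; the parameter values here are
# just examples, not defaults):
#
#   task = Seq2SeqTask(backbone="t5-small", learning_rate=5e-5, num_beams=4)
#   # flash.Trainer(max_epochs=1).finetune(task, datamodule=dm)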
| []
| []
| [
"PYTHONWARNINGS",
"TOKENIZERS_PARALLELISM"
]
| [] | ["PYTHONWARNINGS", "TOKENIZERS_PARALLELISM"] | python | 2 | 0 | |
github/client.go | package github
import (
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/github/hub/version"
)
const (
GitHubHost string = "github.com"
GitHubApiHost string = "api.github.com"
OAuthAppURL string = "http://hub.github.com/"
)
var UserAgent = "Hub " + version.Version
func NewClient(h string) *Client {
return NewClientWithHost(&Host{Host: h})
}
func NewClientWithHost(host *Host) *Client {
return &Client{host}
}
type Client struct {
Host *Host
}
func (client *Client) FetchPullRequests(project *Project, filterParams map[string]interface{}, limit int, filter func(*PullRequest) bool) (pulls []PullRequest, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
path := fmt.Sprintf("repos/%s/%s/pulls?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
if filterParams != nil {
query := url.Values{}
for key, value := range filterParams {
switch v := value.(type) {
case string:
query.Add(key, v)
}
}
path += "&" + query.Encode()
}
pulls = []PullRequest{}
var res *simpleResponse
for path != "" {
res, err = api.Get(path)
if err = checkStatus(200, "fetching pull requests", res, err); err != nil {
return
}
path = res.Link("next")
pullsPage := []PullRequest{}
if err = res.Unmarshal(&pullsPage); err != nil {
return
}
for _, pr := range pullsPage {
if filter == nil || filter(&pr) {
pulls = append(pulls, pr)
if limit > 0 && len(pulls) == limit {
path = ""
break
}
}
}
}
return
}
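// Illustrative call of the paginated fetch above (names are placeholders):
//
//	pulls, err := client.FetchPullRequests(project,
//		map[string]interface{}{"state": "open"}, 10,
//		func(pr *PullRequest) bool { return pr.User.Login == "octocat" },
//	)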
func (client *Client) PullRequest(project *Project, id string) (pr *PullRequest, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Get(fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id))
if err = checkStatus(200, "getting pull request", res, err); err != nil {
return
}
pr = &PullRequest{}
err = res.Unmarshal(pr)
return
}
func (client *Client) PullRequestPatch(project *Project, id string) (patch io.ReadCloser, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.GetFile(fmt.Sprintf("repos/%s/%s/pulls/%s", project.Owner, project.Name, id), patchMediaType)
if err = checkStatus(200, "getting pull request patch", res, err); err != nil {
return
}
return res.Body, nil
}
func (client *Client) CreatePullRequest(project *Project, params map[string]interface{}) (pr *PullRequest, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/pulls", project.Owner, project.Name), params)
if err = checkStatus(201, "creating pull request", res, err); err != nil {
if res != nil && res.StatusCode == 404 {
projectUrl := strings.SplitN(project.WebURL("", "", ""), "://", 2)[1]
err = fmt.Errorf("%s\nAre you sure that %s exists?", err, projectUrl)
}
return
}
pr = &PullRequest{}
err = res.Unmarshal(pr)
return
}
func (client *Client) RequestReview(project *Project, prNumber int, params map[string]interface{}) (err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", project.Owner, project.Name, prNumber), params)
if err = checkStatus(201, "requesting reviewer", res, err); err != nil {
return
}
res.Body.Close()
return
}
func (client *Client) CommitPatch(project *Project, sha string) (patch io.ReadCloser, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s", project.Owner, project.Name, sha), patchMediaType)
if err = checkStatus(200, "getting commit patch", res, err); err != nil {
return
}
return res.Body, nil
}
type Gist struct {
Files map[string]GistFile `json:"files"`
}
type GistFile struct {
RawUrl string `json:"raw_url"`
}
func (client *Client) GistPatch(id string) (patch io.ReadCloser, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Get(fmt.Sprintf("gists/%s", id))
if err = checkStatus(200, "getting gist patch", res, err); err != nil {
return
}
gist := Gist{}
if err = res.Unmarshal(&gist); err != nil {
return
}
rawUrl := ""
for _, file := range gist.Files {
rawUrl = file.RawUrl
break
}
res, err = api.GetFile(rawUrl, textMediaType)
if err = checkStatus(200, "getting gist patch", res, err); err != nil {
return
}
return res.Body, nil
}
func (client *Client) Repository(project *Project) (repo *Repository, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Get(fmt.Sprintf("repos/%s/%s", project.Owner, project.Name))
if err = checkStatus(200, "getting repository info", res, err); err != nil {
return
}
repo = &Repository{}
err = res.Unmarshal(&repo)
return
}
func (client *Client) CreateRepository(project *Project, description, homepage string, isPrivate bool) (repo *Repository, err error) {
repoURL := "user/repos"
if project.Owner != client.Host.User {
repoURL = fmt.Sprintf("orgs/%s/repos", project.Owner)
}
params := map[string]interface{}{
"name": project.Name,
"description": description,
"homepage": homepage,
"private": isPrivate,
}
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PostJSON(repoURL, params)
if err = checkStatus(201, "creating repository", res, err); err != nil {
return
}
repo = &Repository{}
err = res.Unmarshal(repo)
return
}
func (client *Client) DeleteRepository(project *Project) error {
api, err := client.simpleApi()
if err != nil {
return err
}
repoURL := fmt.Sprintf("repos/%s/%s", project.Owner, project.Name)
res, err := api.Delete(repoURL)
return checkStatus(204, "deleting repository", res, err)
}
type Release struct {
Name string `json:"name"`
TagName string `json:"tag_name"`
TargetCommitish string `json:"target_commitish"`
Body string `json:"body"`
Draft bool `json:"draft"`
Prerelease bool `json:"prerelease"`
Assets []ReleaseAsset `json:"assets"`
TarballUrl string `json:"tarball_url"`
ZipballUrl string `json:"zipball_url"`
HtmlUrl string `json:"html_url"`
UploadUrl string `json:"upload_url"`
ApiUrl string `json:"url"`
CreatedAt time.Time `json:"created_at"`
PublishedAt time.Time `json:"published_at"`
}
type ReleaseAsset struct {
Name string `json:"name"`
Label string `json:"label"`
DownloadUrl string `json:"browser_download_url"`
ApiUrl string `json:"url"`
}
func (client *Client) FetchReleases(project *Project, limit int, filter func(*Release) bool) (releases []Release, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
path := fmt.Sprintf("repos/%s/%s/releases?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
releases = []Release{}
var res *simpleResponse
for path != "" {
res, err = api.Get(path)
if err = checkStatus(200, "fetching releases", res, err); err != nil {
return
}
path = res.Link("next")
releasesPage := []Release{}
if err = res.Unmarshal(&releasesPage); err != nil {
return
}
for _, release := range releasesPage {
if filter == nil || filter(&release) {
releases = append(releases, release)
if limit > 0 && len(releases) == limit {
path = ""
break
}
}
}
}
return
}
func (client *Client) FetchRelease(project *Project, tagName string) (*Release, error) {
releases, err := client.FetchReleases(project, 100, func(release *Release) bool {
return release.TagName == tagName
})
if err == nil {
if len(releases) < 1 {
return nil, fmt.Errorf("Unable to find release with tag name `%s'", tagName)
} else {
return &releases[0], nil
}
} else {
return nil, err
}
}
func (client *Client) CreateRelease(project *Project, releaseParams *Release) (release *Release, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/releases", project.Owner, project.Name), releaseParams)
if err = checkStatus(201, "creating release", res, err); err != nil {
return
}
release = &Release{}
err = res.Unmarshal(release)
return
}
func (client *Client) EditRelease(release *Release, releaseParams map[string]interface{}) (updatedRelease *Release, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PatchJSON(release.ApiUrl, releaseParams)
if err = checkStatus(200, "editing release", res, err); err != nil {
return
}
updatedRelease = &Release{}
err = res.Unmarshal(updatedRelease)
return
}
func (client *Client) DeleteRelease(release *Release) (err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Delete(release.ApiUrl)
if err = checkStatus(204, "deleting release", res, err); err != nil {
return
}
return
}
func (client *Client) UploadReleaseAsset(release *Release, filename, label string) (asset *ReleaseAsset, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
parts := strings.SplitN(release.UploadUrl, "{", 2)
uploadUrl := parts[0]
uploadUrl += "?name=" + url.QueryEscape(filepath.Base(filename))
if label != "" {
uploadUrl += "&label=" + url.QueryEscape(label)
}
res, err := api.PostFile(uploadUrl, filename)
if err = checkStatus(201, "uploading release asset", res, err); err != nil {
return
}
asset = &ReleaseAsset{}
err = res.Unmarshal(asset)
return
}
func (client *Client) DeleteReleaseAsset(asset *ReleaseAsset) (err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Delete(asset.ApiUrl)
err = checkStatus(204, "deleting release asset", res, err)
return
}
func (client *Client) DownloadReleaseAsset(url string) (asset io.ReadCloser, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
resp, err := api.GetFile(url, "application/octet-stream")
if err = checkStatus(200, "downloading asset", resp, err); err != nil {
return
}
return resp.Body, err
}
type CIStatusResponse struct {
State string `json:"state"`
Statuses []CIStatus `json:"statuses"`
}
type CIStatus struct {
State string `json:"state"`
Context string `json:"context"`
TargetUrl string `json:"target_url"`
}
type CheckRunsResponse struct {
CheckRuns []CheckRun `json:"check_runs"`
}
type CheckRun struct {
Status string `json:"status"`
Conclusion string `json:"conclusion"`
Name string `json:"name"`
HtmlUrl string `json:"html_url"`
}
func (client *Client) FetchCIStatus(project *Project, sha string) (status *CIStatusResponse, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Get(fmt.Sprintf("repos/%s/%s/commits/%s/status", project.Owner, project.Name, sha))
if err = checkStatus(200, "fetching statuses", res, err); err != nil {
return
}
status = &CIStatusResponse{}
if err = res.Unmarshal(status); err != nil {
return
}
res, err = api.GetFile(fmt.Sprintf("repos/%s/%s/commits/%s/check-runs", project.Owner, project.Name, sha), checksType)
if err == nil && (res.StatusCode == 403 || res.StatusCode == 404 || res.StatusCode == 422) {
return
}
if err = checkStatus(200, "fetching checks", res, err); err != nil {
return
}
checks := &CheckRunsResponse{}
if err = res.Unmarshal(checks); err != nil {
return
}
for _, checkRun := range checks.CheckRuns {
state := "pending"
if checkRun.Status == "completed" {
state = checkRun.Conclusion
}
checkStatus := CIStatus{
State: state,
Context: checkRun.Name,
TargetUrl: checkRun.HtmlUrl,
}
status.Statuses = append(status.Statuses, checkStatus)
}
return
}
type Repository struct {
Name string `json:"name"`
FullName string `json:"full_name"`
Parent *Repository `json:"parent"`
Owner *User `json:"owner"`
Private bool `json:"private"`
HasWiki bool `json:"has_wiki"`
Permissions *RepositoryPermissions `json:"permissions"`
HtmlUrl string `json:"html_url"`
DefaultBranch string `json:"default_branch"`
}
type RepositoryPermissions struct {
Admin bool `json:"admin"`
Push bool `json:"push"`
Pull bool `json:"pull"`
}
func (client *Client) ForkRepository(project *Project, params map[string]interface{}) (repo *Repository, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/forks", project.Owner, project.Name), params)
if err = checkStatus(202, "creating fork", res, err); err != nil {
return
}
repo = &Repository{}
err = res.Unmarshal(repo)
return
}
type Issue struct {
Number int `json:"number"`
State string `json:"state"`
Title string `json:"title"`
Body string `json:"body"`
User *User `json:"user"`
PullRequest *PullRequest `json:"pull_request"`
Head *PullRequestSpec `json:"head"`
Base *PullRequestSpec `json:"base"`
MaintainerCanModify bool `json:"maintainer_can_modify"`
Comments int `json:"comments"`
Labels []IssueLabel `json:"labels"`
Assignees []User `json:"assignees"`
Milestone *Milestone `json:"milestone"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
RequestedReviewers []User `json:"requested_reviewers"`
RequestedTeams []Team `json:"requested_teams"`
ApiUrl string `json:"url"`
HtmlUrl string `json:"html_url"`
}
type PullRequest Issue
type PullRequestSpec struct {
Label string `json:"label"`
Ref string `json:"ref"`
Sha string `json:"sha"`
Repo *Repository `json:"repo"`
}
func (pr *PullRequest) IsSameRepo() bool {
return pr.Head != nil && pr.Head.Repo != nil &&
pr.Head.Repo.Name == pr.Base.Repo.Name &&
pr.Head.Repo.Owner.Login == pr.Base.Repo.Owner.Login
}
func (pr *PullRequest) HasRequestedReviewer(name string) bool {
for _, user := range pr.RequestedReviewers {
if strings.EqualFold(user.Login, name) {
return true
}
}
return false
}
func (pr *PullRequest) HasRequestedTeam(name string) bool {
for _, team := range pr.RequestedTeams {
if strings.EqualFold(team.Name, name) {
return true
}
}
return false
}
type IssueLabel struct {
Name string `json:"name"`
Color string `json:"color"`
}
type User struct {
Login string `json:"login"`
}
type Team struct {
Name string `json:"name"`
}
type Milestone struct {
Number int `json:"number"`
Title string `json:"title"`
}
func (client *Client) FetchIssues(project *Project, filterParams map[string]interface{}, limit int, filter func(*Issue) bool) (issues []Issue, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
path := fmt.Sprintf("repos/%s/%s/issues?per_page=%d", project.Owner, project.Name, perPage(limit, 100))
if filterParams != nil {
query := url.Values{}
for key, value := range filterParams {
switch v := value.(type) {
case string:
query.Add(key, v)
}
}
path += "&" + query.Encode()
}
issues = []Issue{}
var res *simpleResponse
for path != "" {
res, err = api.Get(path)
if err = checkStatus(200, "fetching issues", res, err); err != nil {
return
}
path = res.Link("next")
issuesPage := []Issue{}
if err = res.Unmarshal(&issuesPage); err != nil {
return
}
for _, issue := range issuesPage {
if filter == nil || filter(&issue) {
issues = append(issues, issue)
if limit > 0 && len(issues) == limit {
path = ""
break
}
}
}
}
return
}
func (client *Client) CreateIssue(project *Project, params interface{}) (issue *Issue, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PostJSON(fmt.Sprintf("repos/%s/%s/issues", project.Owner, project.Name), params)
if err = checkStatus(201, "creating issue", res, err); err != nil {
return
}
issue = &Issue{}
err = res.Unmarshal(issue)
return
}
func (client *Client) UpdateIssue(project *Project, issueNumber int, params map[string]interface{}) (err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.PatchJSON(fmt.Sprintf("repos/%s/%s/issues/%d", project.Owner, project.Name, issueNumber), params)
if err = checkStatus(200, "updating issue", res, err); err != nil {
return
}
res.Body.Close()
return
}
func (client *Client) FetchLabels(project *Project) (labels []IssueLabel, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
path := fmt.Sprintf("repos/%s/%s/labels?per_page=100", project.Owner, project.Name)
labels = []IssueLabel{}
var res *simpleResponse
for path != "" {
res, err = api.Get(path)
if err = checkStatus(200, "fetching labels", res, err); err != nil {
return
}
path = res.Link("next")
labelsPage := []IssueLabel{}
if err = res.Unmarshal(&labelsPage); err != nil {
return
}
labels = append(labels, labelsPage...)
}
return
}
func (client *Client) FetchMilestones(project *Project) (milestones []Milestone, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
path := fmt.Sprintf("repos/%s/%s/milestones?per_page=100", project.Owner, project.Name)
milestones = []Milestone{}
var res *simpleResponse
for path != "" {
res, err = api.Get(path)
if err = checkStatus(200, "fetching milestones", res, err); err != nil {
return
}
path = res.Link("next")
milestonesPage := []Milestone{}
if err = res.Unmarshal(&milestonesPage); err != nil {
return
}
milestones = append(milestones, milestonesPage...)
}
return
}
func (client *Client) CurrentUser() (user *User, err error) {
api, err := client.simpleApi()
if err != nil {
return
}
res, err := api.Get("user")
if err = checkStatus(200, "getting current user", res, err); err != nil {
return
}
user = &User{}
err = res.Unmarshal(user)
return
}
type AuthorizationEntry struct {
Token string `json:"token"`
}
func isToken(api *simpleClient, password string) bool {
api.PrepareRequest = func(req *http.Request) {
req.Header.Set("Authorization", "token "+password)
}
res, _ := api.Get("user")
if res != nil && res.StatusCode == 200 {
return true
}
return false
}
func (client *Client) FindOrCreateToken(user, password, twoFactorCode string) (token string, err error) {
api := client.apiClient()
if len(password) >= 40 && isToken(api, password) {
return password, nil
}
params := map[string]interface{}{
"scopes": []string{"repo"},
"note_url": OAuthAppURL,
}
api.PrepareRequest = func(req *http.Request) {
req.SetBasicAuth(user, password)
if twoFactorCode != "" {
req.Header.Set("X-GitHub-OTP", twoFactorCode)
}
}
count := 1
maxTries := 9
for {
params["note"], err = authTokenNote(count)
if err != nil {
return
}
res, postErr := api.PostJSON("authorizations", params)
if postErr != nil {
err = postErr
break
}
if res.StatusCode == 201 {
auth := &AuthorizationEntry{}
if err = res.Unmarshal(auth); err != nil {
return
}
token = auth.Token
break
} else if res.StatusCode == 422 && count < maxTries {
count++
} else {
errInfo, e := res.ErrorInfo()
if e == nil {
err = errInfo
} else {
err = e
}
return
}
}
return
}
func (client *Client) ensureAccessToken() (err error) {
if client.Host.AccessToken == "" {
host, err := CurrentConfig().PromptForHost(client.Host.Host)
if err == nil {
client.Host = host
}
}
return
}
func (client *Client) simpleApi() (c *simpleClient, err error) {
err = client.ensureAccessToken()
if err != nil {
return
}
c = client.apiClient()
c.PrepareRequest = func(req *http.Request) {
req.Header.Set("Authorization", "token "+client.Host.AccessToken)
}
return
}
func (client *Client) apiClient() *simpleClient {
httpClient := newHttpClient(os.Getenv("HUB_TEST_HOST"), os.Getenv("HUB_VERBOSE") != "")
apiRoot := client.absolute(normalizeHost(client.Host.Host))
if client.Host != nil && client.Host.Host != GitHubHost {
apiRoot.Path = "/api/v3/"
}
return &simpleClient{
httpClient: httpClient,
rootUrl: apiRoot,
}
}
func (client *Client) absolute(host string) *url.URL {
u, _ := url.Parse("https://" + host + "/")
if client.Host != nil && client.Host.Protocol != "" {
u.Scheme = client.Host.Protocol
}
return u
}
func normalizeHost(host string) string {
host = strings.ToLower(host)
if host == "" {
host = GitHubHost
}
if host == GitHubHost {
host = GitHubApiHost
}
return host
}
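// Examples of the normalization above: normalizeHost("") and
// normalizeHost("GitHub.com") both yield "api.github.com", while an enterprise
// host such as "ghe.example.com" is only lower-cased.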
func checkStatus(expectedStatus int, action string, response *simpleResponse, err error) error {
if err != nil {
return fmt.Errorf("Error %s: %s", action, err.Error())
} else if response.StatusCode != expectedStatus {
errInfo, err := response.ErrorInfo()
if err == nil {
return FormatError(action, errInfo)
} else {
return fmt.Errorf("Error %s: %s (HTTP %d)", action, err.Error(), response.StatusCode)
}
} else {
return nil
}
}
func FormatError(action string, err error) (ee error) {
switch e := err.(type) {
default:
ee = err
case *errorInfo:
statusCode := e.Response.StatusCode
var reason string
if s := strings.SplitN(e.Response.Status, " ", 2); len(s) >= 2 {
reason = strings.TrimSpace(s[1])
}
errStr := fmt.Sprintf("Error %s: %s (HTTP %d)", action, reason, statusCode)
var errorSentences []string
for _, err := range e.Errors {
switch err.Code {
case "custom":
errorSentences = append(errorSentences, err.Message)
case "missing_field":
errorSentences = append(errorSentences, fmt.Sprintf("Missing field: \"%s\"", err.Field))
case "already_exists":
errorSentences = append(errorSentences, fmt.Sprintf("Duplicate value for \"%s\"", err.Field))
case "invalid":
errorSentences = append(errorSentences, fmt.Sprintf("Invalid value for \"%s\"", err.Field))
case "unauthorized":
errorSentences = append(errorSentences, fmt.Sprintf("Not allowed to change field \"%s\"", err.Field))
}
}
var errorMessage string
if len(errorSentences) > 0 {
errorMessage = strings.Join(errorSentences, "\n")
} else {
errorMessage = e.Message
}
if errorMessage != "" {
errStr = fmt.Sprintf("%s\n%s", errStr, errorMessage)
}
ee = fmt.Errorf(errStr)
}
return
}
func authTokenNote(num int) (string, error) {
n := os.Getenv("USER")
if n == "" {
n = os.Getenv("USERNAME")
}
if n == "" {
whoami := exec.Command("whoami")
whoamiOut, err := whoami.Output()
if err != nil {
return "", err
}
n = strings.TrimSpace(string(whoamiOut))
}
h, err := os.Hostname()
if err != nil {
return "", err
}
if num > 1 {
return fmt.Sprintf("hub for %s@%s %d", n, h, num), nil
}
return fmt.Sprintf("hub for %s@%s", n, h), nil
}
func perPage(limit, max int) int {
if limit > 0 {
limit = limit + (limit / 2)
if limit < max {
return limit
}
}
return max
}
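// perPage examples (behavior of the helper above): perPage(10, 100) == 15,
// perPage(0, 100) == 100, and anything whose padded value reaches the cap
// returns the cap, e.g. perPage(80, 100) == 100.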
| [
"\"HUB_TEST_HOST\"",
"\"HUB_VERBOSE\"",
"\"USER\"",
"\"USERNAME\""
]
| []
| [
"USER",
"USERNAME",
"HUB_VERBOSE",
"HUB_TEST_HOST"
]
| [] | ["USER", "USERNAME", "HUB_VERBOSE", "HUB_TEST_HOST"] | go | 4 | 0 | |
astrodata/doc/ad_UserManual/conf.py | #
# Astrodata User Manual build configuration file, created
# from team template.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- Setting up path to import modules ---------------------------------------
on_rtd = os.environ.get('READTHEDOCS') == 'True'
print(' Printing current working directory for debugging:')
print((' ' + os.getcwd()))
if on_rtd:
sys.path.insert(0, os.path.abspath('./../../../'))
else:
sys.path.insert(0, os.path.abspath('./../../../'))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Astrodata User Manual'
copyright = '2020, Association of Universities for Research in Astronomy'
# Note that AURA owns the Copyright, not you.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = 'April 2020'
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AstrodataUserManual'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# This will remove blank pages.
'classoptions': ',openany,oneside',
'babel': '\\usepackage[english]{babel}',
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
'preamble': '\\usepackage{appendix} \\setcounter{tocdepth}{0}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-latex', 'AstrodataUserManual.tex', 'Astrodata User Manual',
'Kathleen Labrie', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_logo = 'images/GeminiLogo_new_2014.jpg'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'astrodatausermanual', 'Astrodata User Manual',
['Kathleen Labrie'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AstrodataUserManual', 'Astrodata User Manual',
'Kathleen Labrie', 'AstrodataUserManual',
'User manual for the astrodata package.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'astropy': ('https://docs.astropy.org/en/stable/', None),
'gemini_instruments': ('https://dragons-recipe-system-programmers-manual.readthedocs.io/en/latest/', None),
'geminidr': ('https://dragons-recipe-system-programmers-manual.readthedocs.io/en/latest/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'python': ('https://docs.python.org/3', None),
}
# Activate the todos
todo_include_todos = True
# -- Automatically generate API documentation --------------------------------
# -- Enable autoapi ----------------------------------------------------------
def run_api_doc(_):
"""
Automatic API generator
This method is used to generate API automatically by importing all the
modules and sub-modules inside a package.
It is equivalent to run:
>>> sphinx-apidoc --force --no-toc --separate --module --output-dir api/ ../../ ../../cal_service
It is useful because it creates .rst files on the fly.
NOTE
----
This does not work with PyCharm default build. If you want to trigger
this function, use the standard `$ make html` in the command line.
The .rst files will be generated. After that, you can use PyCharm's
build helper.
"""
build_packages = [
'astrodata',
'gemini_instruments'
]
current_path = os.getcwd()
relative_path = "../../../"
print(("Current Path:", current_path))
for p in build_packages:
build_path = os.path.join(current_path, relative_path, p)
ignore_paths = [
'doc',
'test',
'tests',
]
ignore_paths = [os.path.join(build_path, i) for i in ignore_paths]
argv = [
"--force",
"--no-toc",
# "--separate",
"--module",
"--output-dir", "api/",
build_path
] + ignore_paths
sys.path.insert(0, build_path)
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
# -- Finishing with a setup that will run always -----------------------------
def setup(app):
# Adding style in order to have the todos show up in a red box.
app.add_css_file('todo-styles.css')
app.add_css_file('rtd_theme_overrides.css')
app.add_css_file('css/rtd_theme_overrides_references.css')
# Automatic API generation
app.connect('builder-inited', run_api_doc)
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
consumers/neo4j_connector.py | import os
import json
import logging
from neo4j import GraphDatabase
from datetime import datetime, timedelta
logging.basicConfig()
logger = logging.getLogger("neo4j_connector")
logger.setLevel(logging.INFO)
NEO4J_QUERIES_FILES = [
'queries.json',
]
class NeoDB(object):
"""
Neo4j Wrapper around `neo4j.GraphDatabase`,
which is in charge of establishing a connection with
the backend Neo4j database.
This should never be instantiated directly.
"""
def __init__(self):
self._parse_config()
self._connect()
def _parse_config(self):
"""
uri: The URI of Neo4j (e.g., bolt://neo4j-bolt-service:7687)
username: Username for Neo4j
password: Password for Neo4j
If no config has been passed to __init__,
fetch the connection string from environment variables
"""
self._neo4j_uri = os.environ['NEO4J_URI']
self._neo4j_user = os.environ['NEO4J_USER']
self._neo4j_password = os.environ['NEO4J_SECRETS_PASSWORD']
def _connect(self):
"""
Instantiate the Neo4j python driver
"""
self._driver = GraphDatabase.driver(self._neo4j_uri,
auth=(self._neo4j_user, self._neo4j_password))
logger.info('Neo4J Client instantiated: {}'.format(self._neo4j_uri))
@staticmethod
def _exec_query(tx, query, kwargs):
if kwargs:
result = tx.run(query, **kwargs)
else:
result = tx.run(query)
values = [record.data() for record in result]
return values
def query(self, q, kwargs=None):
with self._driver.session() as session:
return session.read_transaction(self._exec_query, q, kwargs)
def close(self):
self._driver.close()
class Neo4jConnector(object):
"""
Main connector which abstracts over the actual execution of queries,
and provides an interface to run queries and obtain results
"""
def __init__(self):
# Initialize DB
self.db = NeoDB()
# Load the queries file into memory
self._load_queries()
def _load_queries(self):
extracted = []
for fname in NEO4J_QUERIES_FILES:
path = os.path.join("/app/", fname)
if not os.path.isfile(path):
logger.warning('File "{}" not found. Skipping...'.format(path))
continue
with open(path, 'r') as fp:
logger.debug('Loading queries file: {}'.format(path))
body = fp.read()
temp = body.strip()[1:-1]
extracted.append(temp)
queries_str = "[%s]" % (",".join(extracted))
self.QUERIES = json.loads(queries_str)
logger.info(f"{len(self.QUERIES)} queries loaded")
#
# UTILS
#
@staticmethod
def _n_recent_days(N):
return (datetime.utcnow() - timedelta(days=N))
def _parse_dynamic_params(self, q):
params = q.get('params', '')
kwargs = ""
if params:
# Iterate through the parameters and verify if one matches the supported types
for p in params.keys():
kwargs = {}
# The query has a parameter specifying to
# retrieve the assets for the N most recent days
if p == "n_recent_days":
kwargs[params[p]["param_name"]] = \
str(self._n_recent_days(params[p]["param_value"]))
return kwargs
#
# FILTERS
#
def _filter_by_tags(self, queries, tags):
"""
Returns all the queries which contain *all* the tags provided
(it is an AND)
"""
if type(tags) is not list:
tags = list(tags)
return [q for q in queries if all(elem in q['tags'] for elem in tags)]
def _filter_by_account(self, cypher, account):
if account:
if 'WHERE' in cypher:
cypher = cypher.replace(
' WHERE ', ' WHERE a.name = "{}" and '.format(account))
else:
cypher = cypher.replace(
' RETURN ', ' WHERE a.name = "{}" RETURN '.format(account))
return cypher
#
# EXECUTE QUERIES
#
def query_raw(self, cypher):
logger.info("Executing a raw query: {}".format(cypher))
return self.db.query(cypher)
def _execute_queries(self, queries, account):
queries_result = []
for q in queries:
# Parse optional dynamic parameters
kwargs = self._parse_dynamic_params(q)
# If an account is provided, inject a WHERE clause to filter by account
cypher = self._filter_by_account(q['query'], account)
# Add return clause
cypher = "{} {}".format(cypher, q['return'])
# Execute the query and parse results as dictionaries
logger.debug(f"Running query: {cypher}")
records = self.db.query(cypher, kwargs)
# Add records to result list
temp = {}
temp['name'] = q['name']
temp['description'] = q['description']
temp['headers'] = q['result_headers']
temp['result'] = records
logger.debug(f"Result: {len(records)} records")
queries_result.append(temp)
return queries_result
def query_by_tag(self, tags, account=None):
logger.info("Executing queries by tag: {}".format(tags))
# Filter queries
selected_queries = self._filter_by_tags(self.QUERIES, tags)
# Run queries
return self._execute_queries(selected_queries, account)
if __name__ == '__main__':
Neo4jConnector()
| []
| []
| [
"NEO4J_SECRETS_PASSWORD",
"NEO4J_URI",
"NEO4J_USER"
]
| [] | ["NEO4J_SECRETS_PASSWORD", "NEO4J_URI", "NEO4J_USER"] | python | 3 | 0 | |
manage.py | import os
from flask_script import Manager
from bdc_geoserver import app
manager = Manager(app)
@manager.command
def run():
HOST = os.environ.get('SERVER_HOST', '0.0.0.0')
try:
PORT = int(os.environ.get('PORT', '5000'))
except ValueError:
PORT = 5000
app.run(HOST, PORT)
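# Example invocation (illustrative values): SERVER_HOST=127.0.0.1 PORT=8080 python manage.py run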
@manager.command
def test():
"""Run the unit tests."""
import pytest
pytest.main(["-s", "tests/"])
if __name__ == '__main__':
manager.run() | []
| []
| [
"PORT",
"SERVER_HOST"
]
| [] | ["PORT", "SERVER_HOST"] | python | 2 | 0 | |
irregularquiz/irregular_verb.py | #!/usr/bin/env python
# -*- mode:python; coding:utf-8; -*-
# author: Ruslan Pisarev
import os
import random
from json import dumps
from sqlite3 import connect
from bottle import route, run, request, redirect
@route('/')
def main_page_irregular():
return open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
"template/main.html")).read()
@route('/en/irregular/verb')
def irr_page_open():
return open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
"template/irregular_verb.html")).read()
@route('/contacts')
def contacts():
return redirect('/')
def check_verb(verb, sample):
return any([verb == v.strip() for v in sample.split(",")])
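# Illustrative examples: check_verb("ran", "ran, run") -> True; check_verb("goed", "went") -> False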
def gen_correct_tr(correct_verbs, user_verbs, rights):
tr = """<tr>\n<td>{}</td>\n<td>{}</td>\n<td>{}</td>\n</tr>\n"""
tds = []
for x in range(3):
if rights[x]:
content = """<span style="color:green">{}</span>""".format(user_verbs[x])
else:
content = """<span style="color:red">{}</span>
<br><span style="color:blue">{}</span>""".format(
user_verbs[x], correct_verbs[x])
tds.append(content)
return tr.format(tds[0], tds[1], tds[2])
@route('/check/en/irregular_verb', method='POST')
def check_irregular_verb():
position = int(request.forms.get("position"))
num = request.forms.get("verbid")
input1 = request.forms.get("input1")
input2 = request.forms.get("input2")
main = request.forms.get("main")
sqlite_connect = connect(os.path.join(os.path.abspath(os.path.dirname(__file__)),
"irregular_verb_en.db"))
c = sqlite_connect.cursor()
query = "select `infinitive`, `past_simple`, `past_participle` from irregular where id={}"
c.execute(query.format(num))
res = c.fetchall()[0]
c.close()
user_verbs = []
if position == 0:
first = True
user_verbs.append(main)
else:
first = check_verb(input1, res[0])
user_verbs.append(input1)
if position == 1:
user_verbs.append(main)
second = True
elif position == 0:
second = check_verb(input1, res[1])
user_verbs.append(input1)
else:
second = check_verb(input2, res[1])
user_verbs.append(input2)
if position == 2:
third = True
user_verbs.append(main)
else:
third = check_verb(input2, res[2])
user_verbs.append(input2)
verbs_correct = (first, second, third)
checkpoints = len([v for v in verbs_correct if v]) - 1
tr = gen_correct_tr(res, user_verbs, verbs_correct)
return dumps({"tr": tr + create_quiz_tr(),
"checkpoints": checkpoints})
@route('/start/en/irregular_verb', method='POST')
def start_irregular_verb():
return create_quiz_tr()
def irregular(infinitive, kind, forms):
return "<tr><td>{}</td><td>{}</td><td>{}</td></tr>".format(
infinitive, forms[0], forms[1])
def choice_verb(position):
verb_form = {0: "infinitive",
1: "past_simple",
2: "past_participle"}[position]
verb_number = random.choice(range(188))
sqlite_connect = connect(os.path.join(os.path.abspath(os.path.dirname(__file__)),
"irregular_verb_en.db"))
c = sqlite_connect.cursor()
query = "select `{}` from irregular where id = {}"
c.execute(query.format(verb_form, verb_number))
res = c.fetchall()[0][0]
c.close()
return res.split(",")[random.choice(range(2))] if "," in res else res, verb_number
def create_quiz_tr():
position = random.choice(range(3))
verb, num = choice_verb(position)
tr = """<tr data-position="{}">\n<td>{}</td>\n<td>{}</td>\n<td>{}</td>\n</tr>\n"""
tds = []
input_count = 1
for td in range(3):
if td == position:
content = """<span id="main_value" data-num="{}">{}</span>""".format(
num, verb)
else:
content = """<input type="text" size="20" id="input{}">""".format(
input_count)
input_count += 1
tds.append(content)
return tr.format(position, tds[0], tds[1], tds[2])
run(host='0.0.0.0', port=int(os.environ.get("PORT", 5000)))
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
pkg/utils/rbacevaluate.go | // SPDX-FileCopyrightText: 2020-present Open Networking Foundation <[email protected]>
//
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"os"
"strings"
)
const semicolon = ";" // From grpcinterceptors.go in onos-lib-go
// TemporaryEvaluate - simple evaluation of rules until OpenPolicyAgent is added
// This is so that aether-config can be deployed to the cloud in 2021 Q1 with simple RBAC
// It applies to Set (gnmi) and CompactChanges(admin) and RollbackNetworkChange(admin)
// TODO replace the following with fine grained RBAC using OpenPolicyAgent Rego in 2021 Q2
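// Illustrative example (group names below are assumptions, not taken from a real deployment):
// with ADMINGROUPS="AetherROCAdmin;mixedGroup" and request metadata groups="OtherGroup;AetherROCAdmin",
// the second group matches and the call is allowed; with no match the call returns Unauthenticated.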
func TemporaryEvaluate(md metautils.NiceMD) error {
adminGroups := os.Getenv("ADMINGROUPS")
var match bool
for _, g := range strings.Split(md.Get("groups"), semicolon) {
if strings.Contains(adminGroups, g) {
match = true
break
}
}
if !match {
return status.Errorf(codes.Unauthenticated, "Set allowed only for %s", adminGroups)
}
return nil
}
| [
"\"ADMINGROUPS\""
]
| []
| [
"ADMINGROUPS"
]
| [] | ["ADMINGROUPS"] | go | 1 | 0 | |
example_apps/basic_app.py | import os
from flask import Flask, render_template_string
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, UserManager, UserMixin, SQLAlchemyAdapter
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///basic_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
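# Example environment overrides for this config (illustrative values, not required):
#   export SECRET_KEY='replace-this-insecure-secret'
#   export DATABASE_URL='sqlite:///basic_app.sqlite'
#   export MAIL_SERVER='smtp.example.com' MAIL_PORT='465' MAIL_USE_SSL='1'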
def create_app():
""" Flask application factory """
# Setup Flask app and app.config
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
mail = Mail(app) # Initialize Flask-Mail
# Define the User data model. Make sure to add flask_user UserMixin !!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User) # Register the User model
user_manager = UserManager(db_adapter, app) # Initialize Flask-User
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def members_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
| []
| []
| [
"MAIL_SERVER",
"MAIL_PASSWORD",
"DATABASE_URL",
"MAIL_PORT",
"MAIL_DEFAULT_SENDER",
"SECRET_KEY",
"MAIL_USERNAME",
"MAIL_USE_SSL"
]
| [] | ["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "MAIL_DEFAULT_SENDER", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_SSL"] | python | 8 | 0 | |
middleware/middleware.go | package middleware
import (
"context"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"os"
"time"
api "github.com/kubesure/sidecar-security/api"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
var customerDataSvc = os.Getenv("CUSTOMER_DATA_SVC")
var customerDataSvcPort = os.Getenv("CUSTOMER_DATA_SVC_Port")
var fraudCheckTCPSvc = os.Getenv("FRAUD_CHECK_SVC")
var fraudCheckTCPSvcPort = os.Getenv("FRAUD_CHECK_SVC_Port")
//initializes logrus with info level
func init() {
log.SetFormatter(&log.JSONFormatter{})
log.SetOutput(os.Stdout)
log.SetLevel(log.InfoLevel)
}
//Customer type represents a customer
type Customer struct {
accountNumer string
CIF int64
}
//fraudCheck message is sent to the fraud checker to run fraud checks on the request.
type fraudCheck struct {
smhSegIDVersion string
smhMsgVersion string
smhTranType string
smhCustType string
smhActType string
smhSource string
fromAccount string
clientIP string
customerID string
}
//TCP message from FraudCheck is parsed into fraudCheckRes
type fraudCheckRes struct {
isOk bool
}
//TimeoutHandler is a custom timeout handler which returns 504 when
//middlewares or the origin do not respond within http.Server.WriteTimeout
func TimeoutHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusGatewayTimeout)
})
}
//Logger middleware logs the origin's request
func Logger(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Infof("logging: request middleware")
next.ServeHTTP(w, r)
log.Infof("logging: response middleware")
})
}
//Auth middleware authenticates request
func Auth(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Infof("Authenticating request: ")
if r.Header.Get("user") != "foo" {
w.WriteHeader(http.StatusUnauthorized)
return
}
log.Infof("Auth: Pass")
next.ServeHTTP(w, r)
})
}
//Final middleware forwards the request to the origin
func Final(proxy *httputil.ReverseProxy) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Infof("Passing call to origin")
proxy.ServeHTTP(w, r)
})
}
//FraudChecker middleware checks if the request is fradulent.
func FraudChecker(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Infof("Fraud checking request...")
//retrieve customer data from the gRPC service Customer.getCustomer
c, cerr := customerData(r)
if cerr != nil {
log.Errorf("Error getting customer data %v", cerr)
w.WriteHeader(http.StatusServiceUnavailable)
return
}
//Make the message for Fraud checking TCP service
msg, merr := makeFTCPMessage(r, c)
if merr != nil {
log.Errorf("error while making tcp message %v", merr)
w.WriteHeader(http.StatusServiceUnavailable)
return
}
conn, ferr := fraudSrvConn()
if ferr != nil {
log.Errorf("Error while connecting to Fraud Server %v", cerr)
w.WriteHeader(http.StatusServiceUnavailable)
return
}
defer conn.Close()
_, werr := conn.Write([]byte(*msg))
if werr != nil {
log.Errorf("Error while sending message to TCP server %v", werr)
w.WriteHeader(http.StatusServiceUnavailable)
return
}
//Read message from TCP service until EOF
tcpmsg, rerr := ioutil.ReadAll(conn)
if rerr != nil {
log.Errorf("Error while reading message to TCP server %v", rerr)
w.WriteHeader(http.StatusServiceUnavailable)
return
}
//Parse response from fraud check service
fcheck, ferr := parseFTCPResponse(string(tcpmsg))
if ferr != nil {
log.Errorf("Error while reading message to TCP server %v", rerr)
w.WriteHeader(http.StatusServiceUnavailable)
return
}
//Check if request is fraudulent
if !fcheck.isOk {
log.Infof("Fraudulent request received")
w.WriteHeader(http.StatusUnauthorized)
return
}
//Dispatch to next middleware check
next.ServeHTTP(w, r)
})
}
//make the TCP Fraud check message from request and Customer data
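//The message appears to be pipe-delimited key:value pairs; a fuller message might look like
//(illustrative only, keys and values below are assumptions based on the fraudCheck struct fields):
// "smh_seg_id_version:000004|smh_msg_version:1|smh_source:web|from_account:12345|client_ip:10.0.0.1"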
func makeFTCPMessage(r *http.Request, c *Customer) (*string, error) {
message := string("smh_seg_id_version:000004|smh_source:")
return &message, nil
}
//Pulls customer data from the Customer.getCustomer gRPC service
func customerData(r *http.Request) (*Customer, error) {
conn, derr := cdataGrpcSrvConn()
if derr != nil {
return nil, derr
}
defer conn.Close()
client := api.NewCustomerClient(conn)
customer, merr := makeCustomerData(r, client)
if merr != nil {
return nil, merr
}
return customer, nil
}
//Makes the customer data using the customer data gRPC service; the input for the
//service is read from the request header and body. The gRPC service returns data cached in a transient store.
func makeCustomerData(r *http.Request, client api.CustomerClient) (*Customer, error) {
req := &api.CustomerRequest{Version: "v1", AccountNumber: "12345"}
res, cerr := client.GetCustomer(context.Background(), req)
if cerr != nil {
return nil, cerr
}
c := &Customer{}
c.CIF = res.CIF
return c, nil
}
//Parses the TCP response from the fraud check TCP service
func parseFTCPResponse(msg string) (*fraudCheckRes, error) {
log.Infof("parsing %v", msg)
return &fraudCheckRes{isOk: true}, nil
}
//Create a gRPC connection to the Customer Data Service
func cdataGrpcSrvConn() (*grpc.ClientConn, error) {
//Change to return error instead of defaulting
if len(customerDataSvcPort) == 0 {
customerDataSvcPort = "50051"
}
conn, derr := grpc.Dial(customerDataSvc+":"+customerDataSvcPort, grpc.WithInsecure())
if derr != nil {
return nil, derr
}
return conn, nil
}
//Create a TCP connection to Fraud checking service
func fraudSrvConn() (net.Conn, error) {
//Change to return error instead of defaulting
if len(fraudCheckTCPSvcPort) == 0 {
fraudCheckTCPSvcPort = "8090"
}
log.Infof("connecting to fraud server %v", fraudCheckTCPSvc+":"+fraudCheckTCPSvcPort)
d := net.Dialer{Timeout: 2 * time.Second}
conn, cerr := d.Dial("tcp", fraudCheckTCPSvc+":"+fraudCheckTCPSvcPort)
if cerr != nil {
return nil, cerr
}
return conn, nil
}
| [
"\"CUSTOMER_DATA_SVC\"",
"\"CUSTOMER_DATA_SVC_Port\"",
"\"FRAUD_CHECK_SVC\"",
"\"FRAUD_CHECK_SVC_Port\""
]
| []
| [
"FRAUD_CHECK_SVC",
"CUSTOMER_DATA_SVC_Port",
"CUSTOMER_DATA_SVC",
"FRAUD_CHECK_SVC_Port"
]
| [] | ["FRAUD_CHECK_SVC", "CUSTOMER_DATA_SVC_Port", "CUSTOMER_DATA_SVC", "FRAUD_CHECK_SVC_Port"] | go | 4 | 0 | |
src/test/java/org/mariadb/r2dbc/integration/ConnectionMetadataTest.java | /*
* Copyright 2020 MariaDB Ab.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mariadb.r2dbc.integration;
import static org.junit.jupiter.api.Assertions.*;
import io.r2dbc.spi.ConnectionFactoryMetadata;
import io.r2dbc.spi.ConnectionMetadata;
import org.junit.jupiter.api.Test;
import org.mariadb.r2dbc.BaseConnectionTest;
import org.mariadb.r2dbc.api.MariadbConnectionMetadata;
public class ConnectionMetadataTest extends BaseConnectionTest {
@Test
void connectionMeta() {
ConnectionMetadata meta = sharedConn.getMetadata();
assertTrue(
meta.getDatabaseProductName().equals("MariaDB")
|| meta.getDatabaseProductName().equals("MySQL"));
if (isMariaDBServer()) {
assertTrue(meta.getDatabaseVersion().contains("10."));
} else {
assertTrue(
meta.getDatabaseVersion().contains("5.") || meta.getDatabaseVersion().contains("8."));
}
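// The DB env var is expected as "<type>:<version>" on Travis (e.g. "mariadb:10.6") and as just a
// version string on AppVeyor, where only MariaDB is tested; the example values are illustrative.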
String value = System.getenv("DB");
if (value != null) {
String type;
String version;
// testing env
if (System.getenv("TRAVIS") != null) {
type = value.substring(0, value.indexOf(":"));
version = value.substring(value.indexOf(":") + 1);
} else {
// AppVeyor tests only MariaDB
type = "MariaDB";
version = value;
}
assertTrue(meta.getDatabaseVersion().contains(version));
assertEquals(type.toLowerCase(), meta.getDatabaseProductName().toLowerCase());
}
}
@Test
void factoryMeta() {
ConnectionFactoryMetadata meta = factory.getMetadata();
assertEquals("MariaDB", meta.getName());
}
@Test
void metadataInfo() {
MariadbConnectionMetadata meta = sharedConn.getMetadata();
assertTrue(meta.getMajorVersion() >= 5);
assertTrue(meta.getMinorVersion() > -1);
assertTrue(meta.getPatchVersion() > -1);
}
}
| [
"\"DB\"",
"\"TRAVIS\""
]
| []
| [
"DB",
"TRAVIS"
]
| [] | ["DB", "TRAVIS"] | java | 2 | 0 | |
src/art/asgi.py | """
ASGI config for art project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'art.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cli/commands.go | // Copyright (c) 2014-2019 Ludovic Fauvet
// Licensed under the MIT license
package cli
import (
"bufio"
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"reflect"
"sort"
"strings"
"sync"
"text/tabwriter"
"time"
"github.com/Myself5/mirrorbits/core"
"github.com/Myself5/mirrorbits/filesystem"
"github.com/Myself5/mirrorbits/mirrors"
"github.com/Myself5/mirrorbits/rpc"
"github.com/Myself5/mirrorbits/utils"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/howeyc/gopass"
"github.com/op/go-logging"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"gopkg.in/yaml.v2"
)
const (
commentSeparator = "##### Comments go below this line #####"
defaultRPCTimeout = time.Second * 10
)
var (
log = logging.MustGetLogger("main")
)
type cli struct {
sync.Mutex
rpcconn *grpc.ClientConn
creds *loginCreds
}
// ParseCommands parses the command line and calls the appropriate functions
func ParseCommands(args ...string) error {
c := &cli{
creds: &loginCreds{
Password: core.RPCPassword,
},
}
if len(args) > 0 && args[0] != "help" {
method, exists := c.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return c.CmdHelp()
}
if len(c.creds.Password) == 0 && core.RPCAskPass {
fmt.Print("Password: ")
passwd, err := gopass.GetPasswdMasked()
if err != nil {
return err
}
c.creds.Password = string(passwd)
}
ret := method.Func.CallSlice([]reflect.Value{
reflect.ValueOf(c),
reflect.ValueOf(args[1:]),
})[0].Interface()
if c.rpcconn != nil {
c.rpcconn.Close()
}
if ret == nil {
return nil
}
return ret.(error)
}
return c.CmdHelp()
}
func (c *cli) getMethod(name string) (reflect.Method, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
return reflect.TypeOf(c).MethodByName(methodName)
}
func (c *cli) CmdHelp() error {
help := fmt.Sprintf("Usage: mirrorbits [OPTIONS] COMMAND [arg...]\n\nA smart download redirector.\n\n")
help += fmt.Sprintf("Server commands:\n %-10.10s%s\n\n", "daemon", "Start the server")
help += fmt.Sprintf("CLI commands:\n")
for _, command := range [][]string{
{"add", "Add a new mirror"},
{"disable", "Disable a mirror"},
{"edit", "Edit a mirror"},
{"enable", "Enable a mirror"},
{"export", "Export the mirror database"},
{"list", "List all mirrors"},
{"logs", "Print logs of a mirror"},
{"refresh", "Refresh the local repository"},
{"reload", "Reload configuration"},
{"remove", "Remove a mirror"},
{"scan", "(Re-)Scan a mirror"},
{"show", "Print a mirror configuration"},
{"stats", "Show download stats"},
{"upgrade", "Seamless binary upgrade"},
{"version", "Print version information"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
fmt.Fprintf(os.Stderr, "%s\n", help)
return nil
}
// SubCmd creates the FlagSet for a subcommand and sets up its usage output
func SubCmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(os.Stderr, "\nUsage: mirrorbits %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
}
return flags
}
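// ByDate sorts mirrors by their StateSince timestamp, most recent first.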
type ByDate []*rpc.Mirror
func (d ByDate) Len() int { return len(d) }
func (d ByDate) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByDate) Less(i, j int) bool { return d[i].StateSince.Seconds > d[j].StateSince.Seconds }
func (c *cli) CmdList(args ...string) error {
cmd := SubCmd("list", "", "Get the list of mirrors")
http := cmd.Bool("http", false, "Print HTTP addresses")
rsync := cmd.Bool("rsync", false, "Print rsync addresses")
ftp := cmd.Bool("ftp", false, "Print FTP addresses")
location := cmd.Bool("location", false, "Print the country and continent code")
state := cmd.Bool("state", true, "Print the state of the mirror")
score := cmd.Bool("score", false, "Print the score of the mirror")
disabled := cmd.Bool("disabled", false, "List disabled mirrors only")
enabled := cmd.Bool("enabled", false, "List enabled mirrors only")
down := cmd.Bool("down", false, "List only mirrors currently down")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
list, err := client.List(ctx, &empty.Empty{})
if err != nil {
log.Fatal("list error:", err)
}
sort.Sort(ByDate(list.Mirrors))
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
fmt.Fprint(w, "Identifier ")
if *score == true {
fmt.Fprint(w, "\tSCORE")
}
if *http == true {
fmt.Fprint(w, "\tHTTP ")
}
if *rsync == true {
fmt.Fprint(w, "\tRSYNC ")
}
if *ftp == true {
fmt.Fprint(w, "\tFTP ")
}
if *location == true {
fmt.Fprint(w, "\tLOCATION ")
}
if *state == true {
fmt.Fprint(w, "\tSTATE\tSINCE")
}
fmt.Fprint(w, "\n")
for _, mirror := range list.Mirrors {
if *disabled == true {
if mirror.Enabled == true {
continue
}
}
if *enabled == true {
if mirror.Enabled == false {
continue
}
}
if *down == true {
if mirror.Up == true {
continue
}
}
stateSince, err := ptypes.Timestamp(mirror.StateSince)
if err != nil {
log.Fatal("list error:", err)
}
fmt.Fprintf(w, "%s ", mirror.Name)
if *score == true {
fmt.Fprintf(w, "\t%d ", mirror.Score)
}
if *http == true {
fmt.Fprintf(w, "\t%s ", mirror.HttpURL)
}
if *rsync == true {
fmt.Fprintf(w, "\t%s ", mirror.RsyncURL)
}
if *ftp == true {
fmt.Fprintf(w, "\t%s ", mirror.FtpURL)
}
if *location == true {
countries := strings.Split(mirror.CountryCodes, " ")
countryCode := "/"
if len(countries) >= 1 {
countryCode = countries[0]
}
fmt.Fprintf(w, "\t%s (%s) ", countryCode, mirror.ContinentCode)
}
if *state == true {
if mirror.Enabled == false {
fmt.Fprintf(w, "\tdisabled")
} else if mirror.Up == true {
fmt.Fprintf(w, "\tup")
} else {
fmt.Fprintf(w, "\tdown")
}
fmt.Fprintf(w, " \t(%s)", stateSince.Format(time.RFC1123))
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
func (c *cli) CmdAdd(args ...string) error {
cmd := SubCmd("add", "[OPTIONS] IDENTIFIER", "Add a new mirror")
http := cmd.String("http", "", "HTTP base URL")
rsync := cmd.String("rsync", "", "RSYNC base URL (for scanning only)")
ftp := cmd.String("ftp", "", "FTP base URL (for scanning only)")
sponsorName := cmd.String("sponsor-name", "", "Name of the sponsor")
sponsorURL := cmd.String("sponsor-url", "", "URL of the sponsor")
sponsorLogo := cmd.String("sponsor-logo", "", "URL of a logo to display for this mirror")
adminName := cmd.String("admin-name", "", "Admin's name")
adminEmail := cmd.String("admin-email", "", "Admin's email")
customData := cmd.String("custom-data", "", "Associated data to return when the mirror is selected (i.e. json document)")
continentOnly := cmd.Bool("continent-only", false, "The mirror should only handle its continent")
countryOnly := cmd.Bool("country-only", false, "The mirror should only handle its country")
asOnly := cmd.Bool("as-only", false, "The mirror should only handle clients in the same AS number")
score := cmd.Int("score", 0, "Weight to give to the mirror during selection")
comment := cmd.String("comment", "", "Comment")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
if strings.Contains(cmd.Arg(0), " ") {
fmt.Fprintf(os.Stderr, "The identifier cannot contain a space\n")
os.Exit(-1)
}
if *http == "" {
fmt.Fprintf(os.Stderr, "You *must* pass at least an HTTP URL\n")
os.Exit(-1)
}
if !strings.HasPrefix(*http, "http://") && !strings.HasPrefix(*http, "https://") {
*http = "http://" + *http
}
_, err := url.Parse(*http)
if err != nil {
fmt.Fprintf(os.Stderr, "Can't parse url\n")
os.Exit(-1)
}
mirror := &mirrors.Mirror{
Name: cmd.Arg(0),
HttpURL: *http,
RsyncURL: *rsync,
FtpURL: *ftp,
SponsorName: *sponsorName,
SponsorURL: *sponsorURL,
SponsorLogoURL: *sponsorLogo,
AdminName: *adminName,
AdminEmail: *adminEmail,
CustomData: *customData,
ContinentOnly: *continentOnly,
CountryOnly: *countryOnly,
ASOnly: *asOnly,
Score: *score,
Comment: *comment,
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
m, err := rpc.MirrorToRPC(mirror)
if err != nil {
log.Fatal("edit error:", err)
}
reply, err := client.AddMirror(ctx, m)
if err != nil {
if err.Error() == rpc.ErrNameAlreadyTaken.Error() {
log.Fatalf("Mirror %s already exists!\n", mirror.Name)
}
log.Fatal("edit error:", err)
}
for i := 0; i < len(reply.Warnings); i++ {
fmt.Println(reply.Warnings[i])
if i == len(reply.Warnings)-1 {
fmt.Println("")
}
}
if reply.Country != "" {
fmt.Println("Mirror location:")
fmt.Printf("Latitude: %.4f\n", reply.Latitude)
fmt.Printf("Longitude: %.4f\n", reply.Longitude)
fmt.Printf("Continent: %s\n", reply.Continent)
fmt.Printf("Country: %s\n", reply.Country)
fmt.Printf("ASN: %s\n", reply.ASN)
fmt.Println("")
}
fmt.Printf("Mirror '%s' added successfully\n", mirror.Name)
fmt.Printf("Enable this mirror using\n $ mirrorbits enable %s\n", mirror.Name)
return nil
}
func (c *cli) CmdRemove(args ...string) error {
cmd := SubCmd("remove", "IDENTIFIER", "Remove an existing mirror")
force := cmd.Bool("f", false, "Never prompt for confirmation")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
id, name := c.matchMirror(cmd.Arg(0))
if *force == false {
fmt.Printf("Removing %s, are you sure? [y/N]", name)
reader := bufio.NewReader(os.Stdin)
s, _ := reader.ReadString('\n')
switch s[0] {
case 'y', 'Y':
break
default:
return nil
}
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.RemoveMirror(ctx, &rpc.MirrorIDRequest{
ID: int32(id),
})
if err != nil {
log.Fatal("remove error:", err)
}
fmt.Printf("Mirror '%s' removed successfully\n", name)
return nil
}
func (c *cli) CmdScan(args ...string) error {
cmd := SubCmd("scan", "[IDENTIFIER]", "(Re-)Scan a mirror")
enable := cmd.Bool("enable", false, "Enable the mirror automatically if the scan is successful")
all := cmd.Bool("all", false, "Scan all mirrors at once")
ftp := cmd.Bool("ftp", false, "Force a scan using FTP")
rsync := cmd.Bool("rsync", false, "Force a scan using rsync")
timeout := cmd.Uint("timeout", 0, "Timeout in seconds")
if err := cmd.Parse(args); err != nil {
return nil
}
if !*all && cmd.NArg() != 1 || *all && cmd.NArg() != 0 {
cmd.Usage()
return nil
}
client := c.GetRPC()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
list := make(map[int]string)
// Get the list of mirrors to scan
if *all == true {
reply, err := client.MatchMirror(ctx, &rpc.MatchRequest{
Pattern: "", // Match all of them
})
if err != nil {
return errors.New("Cannot fetch the list of mirrors")
}
for _, m := range reply.Mirrors {
list[int(m.ID)] = m.Name
}
} else {
// Single mirror
id, name := c.matchMirror(cmd.Arg(0))
list[id] = name
}
// Set the method of the scan (if not default)
var method rpc.ScanMirrorRequest_Method
if *ftp == false && *rsync == false {
method = rpc.ScanMirrorRequest_ALL
} else if *rsync == true {
method = rpc.ScanMirrorRequest_RSYNC
} else if *ftp == true {
method = rpc.ScanMirrorRequest_FTP
}
for id, name := range list {
if *timeout > 0 {
ctx, cancel = context.WithTimeout(context.Background(), time.Duration(*timeout)*time.Second)
defer cancel()
}
fmt.Printf("Scanning %s... ", name)
reply, err := client.ScanMirror(ctx, &rpc.ScanMirrorRequest{
ID: int32(id),
AutoEnable: *enable,
Protocol: method,
})
if err != nil {
s := status.Convert(err)
if s.Code() == codes.FailedPrecondition || len(list) == 1 {
return errors.New("\nscan error: " + grpc.ErrorDesc(err))
}
fmt.Println("scan error:", grpc.ErrorDesc(err))
continue
} else {
fmt.Printf("%d files indexed, %d known and %d removed\n", reply.FilesIndexed, reply.KnownIndexed, reply.Removed)
if reply.GetTZOffsetMs() != 0 {
fmt.Printf(" ∟ Timezone offset detected and corrected: %d milliseconds\n", reply.TZOffsetMs)
}
if reply.Enabled {
fmt.Println(" ∟ Enabled")
}
}
}
return nil
}
func (c *cli) CmdRefresh(args ...string) error {
cmd := SubCmd("refresh", "", "Scan the local repository")
rehash := cmd.Bool("rehash", false, "Force a rehash of the files")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
fmt.Print("Refreshing the local repository... ")
client := c.GetRPC()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := client.RefreshRepository(ctx, &rpc.RefreshRepositoryRequest{
Rehash: *rehash,
})
if err != nil {
fmt.Println("")
log.Fatal(err)
}
fmt.Println("done")
return nil
}
func (c *cli) matchMirror(pattern string) (id int, name string) {
if len(pattern) == 0 {
return -1, ""
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
reply, err := client.MatchMirror(ctx, &rpc.MatchRequest{
Pattern: pattern,
})
if err != nil {
fmt.Fprintf(os.Stderr, "mirror matching: %s\n", err)
os.Exit(1)
}
switch len(reply.Mirrors) {
case 0:
fmt.Fprintf(os.Stderr, "No match for '%s'\n", pattern)
os.Exit(1)
case 1:
id, name, err := GetSingle(reply.Mirrors)
if err != nil {
log.Fatal("unexpected error:", err)
}
return id, name
default:
fmt.Fprintln(os.Stderr, "Multiple match:")
for _, mirror := range reply.Mirrors {
fmt.Fprintf(os.Stderr, " %s\n", mirror.Name)
}
os.Exit(1)
}
return
}
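// GetSingle returns the ID and name of the only mirror in list; it returns an error
// if the list is empty or contains more than one entry.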
func GetSingle(list []*rpc.MirrorID) (int, string, error) {
if len(list) == 0 {
return -1, "", errors.New("list is empty")
} else if len(list) > 1 {
return -1, "", errors.New("too many results")
}
return int(list[0].ID), list[0].Name, nil
}
func (c *cli) CmdEdit(args ...string) error {
cmd := SubCmd("edit", "[IDENTIFIER]", "Edit a mirror")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
// Find the editor to use
editor := os.Getenv("EDITOR")
if editor == "" {
log.Fatal("Environment variable $EDITOR not set")
}
id, _ := c.matchMirror(cmd.Arg(0))
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
rpcm, err := client.MirrorInfo(ctx, &rpc.MirrorIDRequest{
ID: int32(id),
})
if err != nil {
log.Fatal("edit error:", err)
}
mirror, err := rpc.MirrorFromRPC(rpcm)
if err != nil {
log.Fatal("edit error:", err)
}
// Generate a yaml configuration string from the struct
out, err := yaml.Marshal(mirror)
// Open a temporary file
f, err := ioutil.TempFile(os.TempDir(), "edit")
if err != nil {
log.Fatal("Cannot create temporary file:", err)
}
defer os.Remove(f.Name())
f.WriteString("# You can now edit this mirror configuration.\n" +
"# Just save and quit when you're done.\n\n")
f.WriteString(string(out))
f.WriteString(fmt.Sprintf("\n%s\n\n%s\n", commentSeparator, mirror.Comment))
f.Close()
// Checksum the original file
chk, _ := filesystem.Sha256sum(f.Name())
reopen:
// Launch the editor with the filename as first parameter
exe := exec.Command(editor, f.Name())
exe.Stdin = os.Stdin
exe.Stdout = os.Stdout
exe.Stderr = os.Stderr
err = exe.Run()
if err != nil {
log.Fatal(err)
}
// Read the file back
out, err = ioutil.ReadFile(f.Name())
if err != nil {
log.Fatal("Cannot read file", f.Name())
}
// Checksum the file back and compare
chk2, _ := filesystem.Sha256sum(f.Name())
if bytes.Compare(chk, chk2) == 0 {
fmt.Println("Aborted - settings are unmodified, so there is nothing to change.")
return nil
}
var comment string
yamlstr := string(out)
commentIndex := strings.Index(yamlstr, commentSeparator)
if commentIndex > 0 {
comment = strings.TrimSpace(yamlstr[commentIndex+len(commentSeparator):])
yamlstr = yamlstr[:commentIndex]
}
reopen := func(err error) bool {
eagain:
fmt.Printf("%s\nRetry? [Y/n]", err.Error())
reader := bufio.NewReader(os.Stdin)
s, _ := reader.ReadString('\n')
switch s[0] {
case 'y', 'Y', 10:
return true
case 'n', 'N':
fmt.Println("Aborted")
return false
default:
goto eagain
}
}
// Fill the struct from the yaml
err = yaml.Unmarshal([]byte(yamlstr), &mirror)
if err != nil {
switch reopen(err) {
case true:
goto reopen
case false:
return nil
}
}
mirror.Comment = comment
ctx, cancel = context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
m, err := rpc.MirrorToRPC(mirror)
if err != nil {
log.Fatal("edit error:", err)
}
reply, err := client.UpdateMirror(ctx, m)
if err != nil {
if err.Error() == rpc.ErrNameAlreadyTaken.Error() {
switch reopen(errors.New("Name already taken")) {
case true:
goto reopen
case false:
return nil
}
}
log.Fatal("edit error:", err)
}
if len(reply.Diff) > 0 {
fmt.Println(reply.Diff)
}
fmt.Printf("Mirror '%s' edited successfully\n", mirror.Name)
return nil
}
func (c *cli) CmdShow(args ...string) error {
cmd := SubCmd("show", "[IDENTIFIER]", "Print a mirror configuration")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
id, _ := c.matchMirror(cmd.Arg(0))
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
rpcm, err := client.MirrorInfo(ctx, &rpc.MirrorIDRequest{
ID: int32(id),
})
if err != nil {
log.Fatal("edit error:", err)
}
mirror, err := rpc.MirrorFromRPC(rpcm)
if err != nil {
log.Fatal("edit error:", err)
}
// Generate a yaml configuration string from the struct
out, err := yaml.Marshal(mirror)
if err != nil {
log.Fatal("show error:", err)
}
fmt.Printf("%s\nComment:\n%s\n", out, mirror.Comment)
return nil
}
func (c *cli) CmdExport(args ...string) error {
cmd := SubCmd("export", "[format]", "Export the mirror database.\n\nAvailable formats: mirmon")
rsync := cmd.Bool("rsync", true, "Export rsync URLs")
http := cmd.Bool("http", true, "Export http URLs")
ftp := cmd.Bool("ftp", true, "Export ftp URLs")
disabled := cmd.Bool("disabled", true, "Export disabled mirrors")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if cmd.Arg(0) != "mirmon" {
fmt.Fprintf(os.Stderr, "Unsupported format\n")
cmd.Usage()
return nil
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
list, err := client.List(ctx, &empty.Empty{})
if err != nil {
log.Fatal("export error:", err)
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
for _, m := range list.Mirrors {
if *disabled == false {
if m.Enabled == false {
continue
}
}
ccodes := strings.Fields(m.CountryCodes)
urls := make([]string, 0, 3)
if *rsync == true && m.RsyncURL != "" {
urls = append(urls, m.RsyncURL)
}
if *http == true && m.HttpURL != "" {
urls = append(urls, m.HttpURL)
}
if *ftp == true && m.FtpURL != "" {
urls = append(urls, m.FtpURL)
}
for _, u := range urls {
fmt.Fprintf(w, "%s\t%s\t%s\n", ccodes[0], u, m.AdminEmail)
}
}
w.Flush()
return nil
}
func (c *cli) CmdEnable(args ...string) error {
cmd := SubCmd("enable", "[IDENTIFIER]", "Enable a mirror")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
c.changeStatus(cmd.Arg(0), true)
return nil
}
func (c *cli) CmdDisable(args ...string) error {
cmd := SubCmd("disable", "[IDENTIFIER]", "Disable a mirror")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
c.changeStatus(cmd.Arg(0), false)
return nil
}
func (c *cli) changeStatus(pattern string, enabled bool) {
id, name := c.matchMirror(pattern)
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.ChangeStatus(ctx, &rpc.ChangeStatusRequest{
ID: int32(id),
Enabled: enabled,
})
if err != nil {
if enabled {
log.Fatalf("Couldn't enable mirror '%s': %s\n", name, err)
} else {
log.Fatalf("Couldn't disable mirror '%s': %s\n", name, err)
}
}
if enabled {
fmt.Printf("Mirror '%s' enabled successfully\n", name)
} else {
fmt.Printf("Mirror '%s' disabled successfully\n", name)
}
return
}
func (c *cli) CmdStats(args ...string) error {
cmd := SubCmd("stats", "[OPTIONS] [mirror|file] [IDENTIFIER|PATTERN]", "Show download stats for a particular mirror or a file pattern")
dateStart := cmd.String("start-date", "", "Starting date (format YYYY-MM-DD)")
dateEnd := cmd.String("end-date", "", "Ending date (format YYYY-MM-DD)")
human := cmd.Bool("h", true, "Human readable version")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 || (cmd.Arg(0) != "mirror" && cmd.Arg(0) != "file") {
cmd.Usage()
return nil
}
start, err := time.Parse("2006-1-2", *dateStart)
if err != nil {
start = time.Now()
}
startproto, _ := ptypes.TimestampProto(start)
end, err := time.Parse("2006-1-2", *dateEnd)
if err != nil {
end = time.Now()
}
endproto, _ := ptypes.TimestampProto(end)
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
if cmd.Arg(0) == "file" {
// File stats
reply, err := client.StatsFile(ctx, &rpc.StatsFileRequest{
Pattern: cmd.Arg(1),
DateStart: startproto,
DateEnd: endproto,
})
if err != nil {
log.Fatal("file stats error:", err)
}
// Format the results
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
// Sort keys and count requests
var keys []string
var requests int64
for k, req := range reply.Files {
requests += req
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprintf(w, "%s:\t%d\n", k, reply.Files[k])
}
if len(keys) > 0 {
// Add a line separator
fmt.Fprintf(w, "\t\n")
}
fmt.Fprintf(w, "Total download requests: \t%d\n", requests)
w.Flush()
} else if cmd.Arg(0) == "mirror" {
// Mirror stats
id, name := c.matchMirror(cmd.Arg(1))
reply, err := client.StatsMirror(ctx, &rpc.StatsMirrorRequest{
ID: int32(id),
DateStart: startproto,
DateEnd: endproto,
})
if err != nil {
log.Fatal("mirror stats error:", err)
}
// Format the results
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
fmt.Fprintf(w, "Identifier:\t%s\n", name)
if !reply.Mirror.Enabled {
fmt.Fprintf(w, "Status:\tdisabled\n")
} else if reply.Mirror.Up {
fmt.Fprintf(w, "Status:\tup\n")
} else {
fmt.Fprintf(w, "Status:\tdown\n")
}
fmt.Fprintf(w, "Download requests:\t%d\n", reply.Requests)
fmt.Fprint(w, "Bytes transferred:\t")
if *human {
fmt.Fprintln(w, utils.ReadableSize(reply.Bytes))
} else {
fmt.Fprintln(w, reply.Bytes)
}
w.Flush()
}
return nil
}
func (c *cli) CmdLogs(args ...string) error {
cmd := SubCmd("logs", "[IDENTIFIER]", "Print logs of a mirror")
maxResults := cmd.Uint("l", 500, "Maximum number of logs to return")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
id, name := c.matchMirror(cmd.Arg(0))
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
resp, err := client.GetMirrorLogs(ctx, &rpc.GetMirrorLogsRequest{
ID: int32(id),
MaxResults: int32(*maxResults),
})
if err != nil {
log.Fatal("logs error:", err)
}
if len(resp.Line) == 0 {
fmt.Printf("No logs for %s\n", name)
return nil
}
fmt.Printf("Printing logs for %s:\n", name)
for _, l := range resp.Line {
fmt.Println(l)
}
return nil
}
func (c *cli) CmdReload(args ...string) error {
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.Reload(ctx, &empty.Empty{})
if err != nil {
log.Fatal("upgrade error:", err)
}
return nil
}
func (c *cli) CmdUpgrade(args ...string) error {
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.Upgrade(ctx, &empty.Empty{})
if err != nil {
log.Fatal("upgrade error:", err)
}
return nil
}
func (c *cli) CmdVersion(args ...string) error {
fmt.Printf("Client:\n")
core.PrintVersion(core.GetVersionInfo())
fmt.Println()
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
reply, err := client.GetVersion(ctx, &empty.Empty{})
if err != nil {
s := status.Convert(err)
return errors.Wrap(s.Err(), "version error")
}
if reply.Version != "" {
fmt.Printf("Server:\n")
core.PrintVersion(core.VersionInfo{
Version: reply.Version,
Build: reply.Build,
GoVersion: reply.GoVersion,
OS: reply.OS,
Arch: reply.Arch,
GoMaxProcs: int(reply.GoMaxProcs),
})
}
return nil
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
train_model.py | # -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
from PIL import Image
import random
import os
from sample import sample_conf
from tensorflow.python.framework.errors_impl import NotFoundError
# Set the environment variables below to enable CPU-only execution
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class TrainError(Exception):
pass
class TrainModel(object):
def __init__(self, img_path, char_set, model_save_dir, verify=False):
# Model path
self.model_save_dir = model_save_dir
# Shuffle the file order + verify the image format
self.img_path = img_path
self.img_list = os.listdir(img_path)
# Verify the format
if verify:
self.confirm_image_suffix()
# Shuffle the file order
random.seed(time.time())
random.shuffle(self.img_list)
# Get basic info: image width/height and label length
label, captcha_array = self.gen_captcha_text_image(self.img_list[0])
captcha_shape = captcha_array.shape
captcha_shape_len = len(captcha_shape)
if captcha_shape_len == 3:
image_height, image_width, channel = captcha_shape
self.channel = channel
elif captcha_shape_len == 2:
image_height, image_width = captcha_shape
else:
raise TrainError("图片转换为矩阵时出错,请检查图片格式")
# Initialize variables
# Image size
self.image_height = image_height
self.image_width = image_width
# Captcha length (number of characters)
self.max_captcha = len(label)
# Captcha character classes
self.char_set = char_set
self.char_set_len = len(char_set)
# Print related information
print("-->图片尺寸: {} X {}".format(image_height, image_width))
print("-->验证码长度: {}".format(self.max_captcha))
print("-->验证码共{}类 {}".format(self.char_set_len, char_set))
print("-->使用测试集为 {}".format(img_path))
# Initialize tf placeholders
self.X = tf.placeholder(tf.float32, [None, image_height * image_width])  # feature vector
self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len])  # labels
self.keep_prob = tf.placeholder(tf.float32)  # dropout value
self.w_alpha = 0.01
self.b_alpha = 0.1
# test model input and output
print(">>> Start model test")
batch_x, batch_y = self.get_batch(0, size=100)
print(">>> input batch images shape: {}".format(batch_x.shape))
print(">>> input batch labels shape: {}".format(batch_y.shape))
def gen_captcha_text_image(self, img_name):
"""
返回一个验证码的array形式和对应的字符串标签
:return:tuple (str, numpy.array)
"""
# 标签
label = img_name.split("_")[0]
# 文件
img_file = os.path.join(self.img_path, img_name)
captcha_image = Image.open(img_file)
captcha_array = np.array(captcha_image) # 向量化
return label, captcha_array
@staticmethod
def convert2gray(img):
"""
图片转为灰度图,如果是3通道图则计算,单通道图则直接返回
:param img:
:return:
"""
if len(img.shape) > 2:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
else:
return img
def text2vec(self, text):
"""
转标签为oneHot编码
:param text: str
:return: numpy.array
"""
text_len = len(text)
if text_len > self.max_captcha:
            raise ValueError('Captcha labels may contain at most {} characters'.format(self.max_captcha))
vector = np.zeros(self.max_captcha * self.char_set_len)
for i, ch in enumerate(text):
idx = i * self.char_set_len + self.char_set.index(ch)
vector[idx] = 1
return vector
def get_batch(self, n, size=128):
        batch_x = np.zeros([size, self.image_height * self.image_width])  # initialize feature batch
        batch_y = np.zeros([size, self.max_captcha * self.char_set_len])  # initialize label batch
max_batch = int(len(self.img_list) / size)
# print(max_batch)
if max_batch - 1 < 0:
raise TrainError("训练集图片数量需要大于每批次训练的图片数量")
if n > max_batch - 1:
n = n % max_batch
s = n * size
e = (n + 1) * size
this_batch = self.img_list[s:e]
# print("{}:{}".format(s, e))
for i, img_name in enumerate(this_batch):
label, image_array = self.gen_captcha_text_image(img_name)
            image_array = self.convert2gray(image_array)  # convert the image to grayscale
            batch_x[i, :] = image_array.flatten() / 255  # flatten to 1-D and scale to [0, 1]
            batch_y[i, :] = self.text2vec(label)  # one-hot encode the label
return batch_x, batch_y
def confirm_image_suffix(self):
        # Validate the suffix of every file before training
        print("Start validating all image suffixes")
for index, img_name in enumerate(self.img_list):
print("{} image pass".format(index), end='\r')
if not img_name.endswith(sample_conf['image_suffix']):
raise TrainError('confirm images suffix:you request [.{}] file but get file [{}]'
.format(sample_conf['image_suffix'], img_name))
print("所有图片格式校验通过")
def model(self):
x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1])
print(">>> input x: {}".format(x))
        # Convolutional layer 1
wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc1 = tf.Variable(self.b_alpha * tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, self.keep_prob)
        # Convolutional layer 2
wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc2 = tf.Variable(self.b_alpha * tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, self.keep_prob)
        # Convolutional layer 3
wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc3 = tf.Variable(self.b_alpha * tf.random_normal([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, self.keep_prob)
print(">>> convolution 3: ", conv3.shape)
next_shape = conv3.shape[1] * conv3.shape[2] * conv3.shape[3]
        # Fully connected layer 1
wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024]))
dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
dense = tf.nn.dropout(dense, self.keep_prob)
        # Fully connected layer 2 (output layer)
        wout = tf.get_variable('wout', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len]))
y_predict = tf.add(tf.matmul(dense, wout), bout)
return y_predict
def train_cnn(self):
y_predict = self.model()
print(">>> input batch predict shape: {}".format(y_predict.shape))
print(">>> End model test")
        # Loss: sigmoid cross-entropy against the one-hot labels
        cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y))
        # Gradient descent (Adam optimizer)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
        # Accuracy
        predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len])  # predictions
        max_idx_p = tf.argmax(predict, 2)  # predicted character indices
        max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2)  # label character indices
        # A captcha counts as correct only if every character position matches (hence reduce_min over axis 1)
        correct_pred = tf.equal(max_idx_p, max_idx_l)
        accuracy = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1))
        # Saver object for checkpointing the model
        saver = tf.train.Saver()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
            # Restore a previously saved model if one exists
            if os.path.exists(self.model_save_dir):
                try:
                    saver.restore(sess, self.model_save_dir)
                # Catch the error raised when the model directory has no checkpoint files
                except NotFoundError:
                    print("The model directory is empty; a new model will be created")
else:
pass
step = 1
for i in range(3000):
batch_x, batch_y = self.get_batch(i, size=128)
_, cost_ = sess.run([optimizer, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75})
if step % 10 == 0:
batch_x_test, batch_y_test = self.get_batch(i, size=100)
acc = sess.run(accuracy, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})
print("第{}次训练 >>> 准确率为 {} >>> loss {}".format(step, acc, cost_))
# 准确率达到99%后保存并停止
if acc > 0.99:
saver.save(sess, self.model_save_dir)
break
# 每训练500轮就保存一次
if i % 500 == 0:
saver.save(sess, self.model_save_dir)
step += 1
saver.save(sess, self.model_save_dir)
def recognize_captcha(self):
label, captcha_array = self.gen_captcha_text_image(random.choice(self.img_list))
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9, "origin:" + label, ha='center', va='center', transform=ax.transAxes)
plt.imshow(captcha_array)
        # Predict the image
image = self.convert2gray(captcha_array)
image = image.flatten() / 255
y_predict = self.model()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.model_save_dir)
predict = tf.argmax(tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]), 2)
text_list = sess.run(predict, feed_dict={self.X: [image], self.keep_prob: 1.})
predict_text = text_list[0].tolist()
print("正确: {} 预测: {}".format(label, predict_text))
# 显示图片和预测结果
p_text = ""
for p in predict_text:
p_text += str(self.char_set[p])
print(p_text)
plt.text(20, 1, 'predict:{}'.format(p_text))
plt.show()
def main():
train_image_dir = sample_conf["train_image_dir"]
char_set = sample_conf["char_set"]
model_save_dir = sample_conf["model_save_dir"]
tm = TrainModel(train_image_dir, char_set, model_save_dir, verify=False)
    tm.train_cnn()  # start training the model
    # tm.recognize_captcha()  # example: recognize a single image
if __name__ == '__main__':
main()
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
cmd/e2e/e2e.go | /*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"io/ioutil"
"os"
"runtime"
"strconv"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)
var (
authConfig = flag.String("auth_config", os.Getenv("HOME")+"/.kubernetes_auth", "Path to the auth info file.")
host = flag.String("host", "", "The host to connect to")
)
func waitForPodRunning(c *client.Client, id string) {
for {
time.Sleep(5 * time.Second)
pod, err := c.Pods(api.NamespaceDefault).Get(id)
if err != nil {
glog.Warningf("Get pod failed: %v", err)
continue
}
if pod.CurrentState.Status == api.PodRunning {
break
}
glog.Infof("Waiting for pod status to be running (%s)", pod.CurrentState.Status)
}
}
func loadObjectOrDie(filePath string) interface{} {
data, err := ioutil.ReadFile(filePath)
if err != nil {
glog.Fatalf("Failed to read pod: %v", err)
}
obj, err := latest.Codec.Decode(data)
if err != nil {
glog.Fatalf("Failed to decode pod: %v", err)
}
return obj
}
func loadPodOrDie(filePath string) *api.Pod {
obj := loadObjectOrDie(filePath)
pod, ok := obj.(*api.Pod)
if !ok {
glog.Fatalf("Failed to load pod: %v", obj)
}
return pod
}
func loadClientOrDie() *client.Client {
config := client.Config{
Host: *host,
}
auth, err := clientauth.LoadFromFile(*authConfig)
if err != nil {
glog.Fatalf("Error loading auth: %v", err)
}
config, err = auth.MergeWithConfig(config)
if err != nil {
glog.Fatalf("Error creating client")
}
c, err := client.New(&config)
if err != nil {
glog.Fatalf("Error creating client")
}
return c
}
func TestKubernetesROService(c *client.Client) bool {
svc := api.ServiceList{}
err := c.Get().
Namespace("default").
AbsPath("/api/v1beta1/proxy/services/kubernetes-ro/api/v1beta1/services").
Do().
Into(&svc)
if err != nil {
glog.Errorf("unexpected error listing services using ro service: %v", err)
return false
}
var foundRW, foundRO bool
for i := range svc.Items {
if svc.Items[i].Name == "kubernetes" {
foundRW = true
}
if svc.Items[i].Name == "kubernetes-ro" {
foundRO = true
}
}
if !foundRW {
glog.Error("no RW service found")
}
if !foundRO {
glog.Error("no RO service found")
}
if !foundRW || !foundRO {
return false
}
return true
}
func TestPodUpdate(c *client.Client) bool {
podClient := c.Pods(api.NamespaceDefault)
pod := loadPodOrDie("./api/examples/pod.json")
value := strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value
_, err := podClient.Create(pod)
if err != nil {
glog.Errorf("Failed to create pod: %v", err)
return false
}
defer podClient.Delete(pod.Name)
waitForPodRunning(c, pod.Name)
	pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
	if err != nil {
		glog.Errorf("Failed to list pods: %v", err)
		return false
	}
	if len(pods.Items) != 1 {
		glog.Errorf("Failed to find the correct pod")
		return false
	}
podOut, err := podClient.Get(pod.Name)
if err != nil {
glog.Errorf("Failed to get pod: %v", err)
return false
}
value = "time" + value
pod.Labels["time"] = value
pod.ResourceVersion = podOut.ResourceVersion
pod.DesiredState.Manifest.UUID = podOut.DesiredState.Manifest.UUID
pod, err = podClient.Update(pod)
if err != nil {
glog.Errorf("Failed to update pod: %v", err)
return false
}
waitForPodRunning(c, pod.Name)
	pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
	if err != nil {
		glog.Errorf("Failed to list pods after update: %v", err)
		return false
	}
	if len(pods.Items) != 1 {
		glog.Errorf("Failed to find the correct pod after update.")
		return false
	}
glog.Infof("pod update OK")
return true
}
func main() {
flag.Parse()
runtime.GOMAXPROCS(runtime.NumCPU())
util.ReallyCrash = true
util.InitLogs()
defer util.FlushLogs()
go func() {
defer util.FlushLogs()
time.Sleep(3 * time.Minute)
glog.Fatalf("This test has timed out.")
}()
c := loadClientOrDie()
tests := []func(c *client.Client) bool{
TestKubernetesROService,
// TODO(brendandburns): fix this test and re-add it: TestPodUpdate,
}
passed := true
for _, test := range tests {
testPassed := test(c)
if !testPassed {
passed = false
}
}
if !passed {
glog.Fatalf("Tests failed")
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
misc/cgo/testshared/shared_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package shared_test
import (
"bufio"
"bytes"
"debug/elf"
"encoding/binary"
"errors"
"flag"
"fmt"
"go/build"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
var gopathInstallDir, gorootInstallDir, suffix string
// This is the smallest set of packages we can link into a shared
// library (runtime/cgo is built implicitly).
var minpkgs = []string{"runtime", "sync/atomic"}
var soname = "libruntime,sync-atomic.so"
// run runs a command and calls t.Errorf if it fails.
func run(t *testing.T, msg string, args ...string) {
c := exec.Command(args[0], args[1:]...)
if output, err := c.CombinedOutput(); err != nil {
t.Errorf("executing %s (%s) failed %s:\n%s", strings.Join(args, " "), msg, err, output)
}
}
// goCmd invokes the go tool with the installsuffix set up by TestMain. It calls
// t.Fatalf if the command fails.
func goCmd(t *testing.T, args ...string) string {
newargs := []string{args[0], "-installsuffix=" + suffix}
if testing.Verbose() {
newargs = append(newargs, "-x")
}
newargs = append(newargs, args[1:]...)
c := exec.Command("go", newargs...)
stderr := new(strings.Builder)
var output []byte
var err error
if testing.Verbose() {
fmt.Printf("+ go %s\n", strings.Join(args, " "))
c.Stderr = os.Stderr
stderr.WriteString("(output above)")
} else {
c.Stderr = stderr
}
output, err = c.Output()
if err != nil {
if t != nil {
t.Helper()
t.Fatalf("executing %s failed %v:\n%s", strings.Join(c.Args, " "), err, stderr)
} else {
log.Fatalf("executing %s failed %v:\n%s", strings.Join(c.Args, " "), err, stderr)
}
}
return string(bytes.TrimSpace(output))
}
// TestMain calls testMain so that the latter can use defer (TestMain exits with os.Exit).
func testMain(m *testing.M) (int, error) {
// Because go install -buildmode=shared $standard_library_package always
// installs into $GOROOT, here are some gymnastics to come up with a unique
// installsuffix to use in this test that we can clean up afterwards.
myContext := build.Default
runtimeP, err := myContext.Import("runtime", ".", build.ImportComment)
if err != nil {
return 0, fmt.Errorf("import failed: %v", err)
}
for i := 0; i < 10000; i++ {
try := fmt.Sprintf("%s_%d_dynlink", runtimeP.PkgTargetRoot, rand.Int63())
err = os.Mkdir(try, 0700)
if os.IsExist(err) {
continue
}
if err == nil {
gorootInstallDir = try
}
break
}
if err != nil {
return 0, fmt.Errorf("can't create temporary directory: %v", err)
}
if gorootInstallDir == "" {
return 0, errors.New("could not create temporary directory after 10000 tries")
}
if testing.Verbose() {
fmt.Printf("+ mkdir -p %s\n", gorootInstallDir)
}
defer os.RemoveAll(gorootInstallDir)
// Some tests need to edit the source in GOPATH, so copy this directory to a
// temporary directory and chdir to that.
gopath, err := ioutil.TempDir("", "testshared")
if err != nil {
return 0, fmt.Errorf("TempDir failed: %v", err)
}
if testing.Verbose() {
fmt.Printf("+ mkdir -p %s\n", gopath)
}
defer os.RemoveAll(gopath)
modRoot := filepath.Join(gopath, "src", "testshared")
if err := overlayDir(modRoot, "testdata"); err != nil {
return 0, err
}
if testing.Verbose() {
fmt.Printf("+ cd %s\n", modRoot)
}
os.Chdir(modRoot)
os.Setenv("PWD", modRoot)
if err := ioutil.WriteFile("go.mod", []byte("module testshared\n"), 0666); err != nil {
return 0, err
}
os.Setenv("GOPATH", gopath)
if testing.Verbose() {
fmt.Printf("+ export GOPATH=%s\n", gopath)
}
myContext.GOPATH = gopath
// All tests depend on runtime being built into a shared library. Because
// that takes a few seconds, do it here and have all tests use the version
// built here.
suffix = strings.Split(filepath.Base(gorootInstallDir), "_")[2]
goCmd(nil, append([]string{"install", "-buildmode=shared"}, minpkgs...)...)
myContext.InstallSuffix = suffix + "_dynlink"
depP, err := myContext.Import("./depBase", ".", build.ImportComment)
if err != nil {
return 0, fmt.Errorf("import failed: %v", err)
}
if depP.PkgTargetRoot == "" {
gopathInstallDir = filepath.Dir(goCmd(nil, "list", "-buildmode=shared", "-f", "{{.Target}}", "./depBase"))
} else {
gopathInstallDir = filepath.Join(depP.PkgTargetRoot, "testshared")
}
return m.Run(), nil
}
func TestMain(m *testing.M) {
log.SetFlags(log.Lshortfile)
flag.Parse()
// Some of the tests install binaries into a custom GOPATH.
// That won't work if GOBIN is set.
os.Unsetenv("GOBIN")
exitCode, err := testMain(m)
if err != nil {
log.Fatal(err)
}
os.Exit(exitCode)
}
// The shared library was built at the expected location.
func TestSOBuilt(t *testing.T) {
_, err := os.Stat(filepath.Join(gorootInstallDir, soname))
if err != nil {
t.Error(err)
}
}
func hasDynTag(f *elf.File, tag elf.DynTag) bool {
ds := f.SectionByType(elf.SHT_DYNAMIC)
if ds == nil {
return false
}
d, err := ds.Data()
if err != nil {
return false
}
for len(d) > 0 {
var t elf.DynTag
switch f.Class {
case elf.ELFCLASS32:
t = elf.DynTag(f.ByteOrder.Uint32(d[0:4]))
d = d[8:]
case elf.ELFCLASS64:
t = elf.DynTag(f.ByteOrder.Uint64(d[0:8]))
d = d[16:]
}
if t == tag {
return true
}
}
return false
}
// The shared library does not have relocations against the text segment.
func TestNoTextrel(t *testing.T) {
sopath := filepath.Join(gorootInstallDir, soname)
f, err := elf.Open(sopath)
if err != nil {
t.Fatal("elf.Open failed: ", err)
}
defer f.Close()
if hasDynTag(f, elf.DT_TEXTREL) {
t.Errorf("%s has DT_TEXTREL set", soname)
}
}
// The shared library does not contain symbols called ".dup"
func TestNoDupSymbols(t *testing.T) {
sopath := filepath.Join(gorootInstallDir, soname)
f, err := elf.Open(sopath)
if err != nil {
t.Fatal("elf.Open failed: ", err)
}
defer f.Close()
syms, err := f.Symbols()
if err != nil {
t.Errorf("error reading symbols %v", err)
return
}
for _, s := range syms {
if s.Name == ".dup" {
t.Fatalf("%s contains symbol called .dup", sopath)
}
}
}
// The install command should have created a "shlibname" file for the
// listed packages (and runtime/cgo, and math on arm) indicating the
// name of the shared library containing it.
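// For example, with the minimal shared library built by TestMain, the
// runtime.shlibname file should contain (modulo surrounding whitespace) the single
// line:
//
//	libruntime,sync-atomic.so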
func TestShlibnameFiles(t *testing.T) {
pkgs := append([]string{}, minpkgs...)
pkgs = append(pkgs, "runtime/cgo")
if runtime.GOARCH == "arm" {
pkgs = append(pkgs, "math")
}
for _, pkg := range pkgs {
shlibnamefile := filepath.Join(gorootInstallDir, pkg+".shlibname")
contentsb, err := ioutil.ReadFile(shlibnamefile)
if err != nil {
t.Errorf("error reading shlibnamefile for %s: %v", pkg, err)
continue
}
contents := strings.TrimSpace(string(contentsb))
if contents != soname {
t.Errorf("shlibnamefile for %s has wrong contents: %q", pkg, contents)
}
}
}
// Is a given offset into the file contained in a loaded segment?
func isOffsetLoaded(f *elf.File, offset uint64) bool {
for _, prog := range f.Progs {
if prog.Type == elf.PT_LOAD {
if prog.Off <= offset && offset < prog.Off+prog.Filesz {
return true
}
}
}
return false
}
func rnd(v int32, r int32) int32 {
if r <= 0 {
return v
}
v += r - 1
c := v % r
if c < 0 {
c += r
}
v -= c
return v
}
func readwithpad(r io.Reader, sz int32) ([]byte, error) {
data := make([]byte, rnd(sz, 4))
_, err := io.ReadFull(r, data)
if err != nil {
return nil, err
}
data = data[:sz]
return data, nil
}
type note struct {
name string
tag int32
desc string
section *elf.Section
}
// Read all notes from f. As ELF section names are not supposed to be special, one
// looks for a particular note by scanning all SHT_NOTE sections looking for a note
// with a particular "name" and "tag".
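// For reference, each note record is expected to follow the usual layout that the
// parsing below assumes:
//
//	int32 namesize
//	int32 descsize
//	int32 tag
//	name (namesize bytes, padded to a 4-byte boundary)
//	desc (descsize bytes, padded to a 4-byte boundary)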
func readNotes(f *elf.File) ([]*note, error) {
var notes []*note
for _, sect := range f.Sections {
if sect.Type != elf.SHT_NOTE {
continue
}
r := sect.Open()
for {
var namesize, descsize, tag int32
err := binary.Read(r, f.ByteOrder, &namesize)
if err != nil {
if err == io.EOF {
break
}
return nil, fmt.Errorf("read namesize failed: %v", err)
}
err = binary.Read(r, f.ByteOrder, &descsize)
if err != nil {
return nil, fmt.Errorf("read descsize failed: %v", err)
}
err = binary.Read(r, f.ByteOrder, &tag)
if err != nil {
return nil, fmt.Errorf("read type failed: %v", err)
}
name, err := readwithpad(r, namesize)
if err != nil {
return nil, fmt.Errorf("read name failed: %v", err)
}
desc, err := readwithpad(r, descsize)
if err != nil {
return nil, fmt.Errorf("read desc failed: %v", err)
}
			notes = append(notes, &note{name: string(name), tag: tag, desc: string(desc), section: sect})
}
}
return notes, nil
}
func dynStrings(t *testing.T, path string, flag elf.DynTag) []string {
t.Helper()
f, err := elf.Open(path)
if err != nil {
t.Fatalf("elf.Open(%q) failed: %v", path, err)
}
defer f.Close()
dynstrings, err := f.DynString(flag)
if err != nil {
t.Fatalf("DynString(%s) failed on %s: %v", flag, path, err)
}
return dynstrings
}
func AssertIsLinkedToRegexp(t *testing.T, path string, re *regexp.Regexp) {
t.Helper()
for _, dynstring := range dynStrings(t, path, elf.DT_NEEDED) {
if re.MatchString(dynstring) {
return
}
}
t.Errorf("%s is not linked to anything matching %v", path, re)
}
func AssertIsLinkedTo(t *testing.T, path, lib string) {
t.Helper()
AssertIsLinkedToRegexp(t, path, regexp.MustCompile(regexp.QuoteMeta(lib)))
}
func AssertHasRPath(t *testing.T, path, dir string) {
t.Helper()
for _, tag := range []elf.DynTag{elf.DT_RPATH, elf.DT_RUNPATH} {
for _, dynstring := range dynStrings(t, path, tag) {
for _, rpath := range strings.Split(dynstring, ":") {
if filepath.Clean(rpath) == filepath.Clean(dir) {
return
}
}
}
}
t.Errorf("%s does not have rpath %s", path, dir)
}
// Build a trivial program that links against the shared runtime and check it runs.
func TestTrivialExecutable(t *testing.T) {
goCmd(t, "install", "-linkshared", "./trivial")
run(t, "trivial executable", "../../bin/trivial")
AssertIsLinkedTo(t, "../../bin/trivial", soname)
AssertHasRPath(t, "../../bin/trivial", gorootInstallDir)
}
// Build a trivial program in PIE mode that links against the shared runtime and check it runs.
func TestTrivialExecutablePIE(t *testing.T) {
goCmd(t, "build", "-buildmode=pie", "-o", "trivial.pie", "-linkshared", "./trivial")
run(t, "trivial executable", "./trivial.pie")
AssertIsLinkedTo(t, "./trivial.pie", soname)
AssertHasRPath(t, "./trivial.pie", gorootInstallDir)
}
// Build a division test program and check it runs.
func TestDivisionExecutable(t *testing.T) {
goCmd(t, "install", "-linkshared", "./division")
run(t, "division executable", "../../bin/division")
}
// Build an executable that uses cgo linked against the shared runtime and check it
// runs.
func TestCgoExecutable(t *testing.T) {
goCmd(t, "install", "-linkshared", "./execgo")
run(t, "cgo executable", "../../bin/execgo")
}
func checkPIE(t *testing.T, name string) {
f, err := elf.Open(name)
if err != nil {
t.Fatal("elf.Open failed: ", err)
}
defer f.Close()
if f.Type != elf.ET_DYN {
t.Errorf("%s has type %v, want ET_DYN", name, f.Type)
}
if hasDynTag(f, elf.DT_TEXTREL) {
t.Errorf("%s has DT_TEXTREL set", name)
}
}
func TestTrivialPIE(t *testing.T) {
name := "trivial_pie"
goCmd(t, "build", "-buildmode=pie", "-o="+name, "./trivial")
defer os.Remove(name)
run(t, name, "./"+name)
checkPIE(t, name)
}
func TestCgoPIE(t *testing.T) {
name := "cgo_pie"
goCmd(t, "build", "-buildmode=pie", "-o="+name, "./execgo")
defer os.Remove(name)
run(t, name, "./"+name)
checkPIE(t, name)
}
// Build a GOPATH package into a shared library that links against the goroot runtime
// and an executable that links against both.
func TestGopathShlib(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
shlib := goCmd(t, "list", "-f", "{{.Shlib}}", "-buildmode=shared", "-linkshared", "./depBase")
AssertIsLinkedTo(t, shlib, soname)
goCmd(t, "install", "-linkshared", "./exe")
AssertIsLinkedTo(t, "../../bin/exe", soname)
AssertIsLinkedTo(t, "../../bin/exe", filepath.Base(shlib))
AssertHasRPath(t, "../../bin/exe", gorootInstallDir)
AssertHasRPath(t, "../../bin/exe", filepath.Dir(gopathInstallDir))
// And check it runs.
run(t, "executable linked to GOPATH library", "../../bin/exe")
}
// The shared library contains a note listing the packages it contains in a section
// that is not mapped into memory.
func testPkgListNote(t *testing.T, f *elf.File, note *note) {
if note.section.Flags != 0 {
t.Errorf("package list section has flags %v, want 0", note.section.Flags)
}
if isOffsetLoaded(f, note.section.Offset) {
t.Errorf("package list section contained in PT_LOAD segment")
}
if note.desc != "testshared/depBase\n" {
t.Errorf("incorrect package list %q, want %q", note.desc, "testshared/depBase\n")
}
}
// The shared library contains a note containing the ABI hash that is mapped into
// memory and there is a local symbol called go.link.abihashbytes that points 16
// bytes into it.
func testABIHashNote(t *testing.T, f *elf.File, note *note) {
if note.section.Flags != elf.SHF_ALLOC {
t.Errorf("abi hash section has flags %v, want SHF_ALLOC", note.section.Flags)
}
if !isOffsetLoaded(f, note.section.Offset) {
t.Errorf("abihash section not contained in PT_LOAD segment")
}
var hashbytes elf.Symbol
symbols, err := f.Symbols()
if err != nil {
t.Errorf("error reading symbols %v", err)
return
}
for _, sym := range symbols {
if sym.Name == "go.link.abihashbytes" {
hashbytes = sym
}
}
if hashbytes.Name == "" {
t.Errorf("no symbol called go.link.abihashbytes")
return
}
if elf.ST_BIND(hashbytes.Info) != elf.STB_LOCAL {
t.Errorf("%s has incorrect binding %v, want STB_LOCAL", hashbytes.Name, elf.ST_BIND(hashbytes.Info))
}
if f.Sections[hashbytes.Section] != note.section {
t.Errorf("%s has incorrect section %v, want %s", hashbytes.Name, f.Sections[hashbytes.Section].Name, note.section.Name)
}
if hashbytes.Value-note.section.Addr != 16 {
t.Errorf("%s has incorrect offset into section %d, want 16", hashbytes.Name, hashbytes.Value-note.section.Addr)
}
}
// A Go shared library contains a note indicating which other Go shared libraries it
// was linked against in an unmapped section.
func testDepsNote(t *testing.T, f *elf.File, note *note) {
if note.section.Flags != 0 {
t.Errorf("package list section has flags %v, want 0", note.section.Flags)
}
if isOffsetLoaded(f, note.section.Offset) {
t.Errorf("package list section contained in PT_LOAD segment")
}
// libdepBase.so just links against the lib containing the runtime.
if note.desc != soname {
t.Errorf("incorrect dependency list %q, want %q", note.desc, soname)
}
}
// The shared library contains notes with defined contents; see above.
func TestNotes(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
shlib := goCmd(t, "list", "-f", "{{.Shlib}}", "-buildmode=shared", "-linkshared", "./depBase")
f, err := elf.Open(shlib)
if err != nil {
t.Fatal(err)
}
defer f.Close()
notes, err := readNotes(f)
if err != nil {
t.Fatal(err)
}
pkgListNoteFound := false
abiHashNoteFound := false
depsNoteFound := false
for _, note := range notes {
if note.name != "Go\x00\x00" {
continue
}
switch note.tag {
case 1: // ELF_NOTE_GOPKGLIST_TAG
if pkgListNoteFound {
t.Error("multiple package list notes")
}
testPkgListNote(t, f, note)
pkgListNoteFound = true
case 2: // ELF_NOTE_GOABIHASH_TAG
if abiHashNoteFound {
t.Error("multiple abi hash notes")
}
testABIHashNote(t, f, note)
abiHashNoteFound = true
case 3: // ELF_NOTE_GODEPS_TAG
if depsNoteFound {
t.Error("multiple dependency list notes")
}
testDepsNote(t, f, note)
depsNoteFound = true
}
}
if !pkgListNoteFound {
t.Error("package list note not found")
}
if !abiHashNoteFound {
t.Error("abi hash note not found")
}
if !depsNoteFound {
t.Error("deps note not found")
}
}
// Build a GOPATH package (depBase) into a shared library that links against the goroot
// runtime, another package (dep2) that links against the first, and an
// executable that links against dep2.
func TestTwoGopathShlibs(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./dep2")
goCmd(t, "install", "-linkshared", "./exe2")
run(t, "executable linked to GOPATH library", "../../bin/exe2")
}
func TestThreeGopathShlibs(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./dep2")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./dep3")
goCmd(t, "install", "-linkshared", "./exe3")
run(t, "executable linked to GOPATH library", "../../bin/exe3")
}
// If gccgo is not available or not new enough, call t.Skip.
func requireGccgo(t *testing.T) {
t.Helper()
gccgoName := os.Getenv("GCCGO")
if gccgoName == "" {
gccgoName = "gccgo"
}
gccgoPath, err := exec.LookPath(gccgoName)
if err != nil {
t.Skip("gccgo not found")
}
cmd := exec.Command(gccgoPath, "-dumpversion")
output, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("%s -dumpversion failed: %v\n%s", gccgoPath, err, output)
}
if string(output) < "5" {
t.Skipf("gccgo too old (%s)", strings.TrimSpace(string(output)))
}
gomod, err := exec.Command("go", "env", "GOMOD").Output()
if err != nil {
t.Fatalf("go env GOMOD: %v", err)
}
if len(bytes.TrimSpace(gomod)) > 0 {
t.Skipf("gccgo not supported in module mode; see golang.org/issue/30344")
}
}
// Build a GOPATH package into a shared library with gccgo and an executable that
// links against it.
func TestGoPathShlibGccgo(t *testing.T) {
requireGccgo(t)
libgoRE := regexp.MustCompile("libgo.so.[0-9]+")
goCmd(t, "install", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "./depBase")
// Run 'go list' after 'go install': with gccgo, we apparently don't know the
// shlib location until after we've installed it.
shlib := goCmd(t, "list", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "-f", "{{.Shlib}}", "./depBase")
AssertIsLinkedToRegexp(t, shlib, libgoRE)
goCmd(t, "install", "-compiler=gccgo", "-linkshared", "./exe")
AssertIsLinkedToRegexp(t, "../../bin/exe", libgoRE)
AssertIsLinkedTo(t, "../../bin/exe", filepath.Base(shlib))
AssertHasRPath(t, "../../bin/exe", filepath.Dir(shlib))
// And check it runs.
run(t, "gccgo-built", "../../bin/exe")
}
// The gccgo version of TestTwoGopathShlibs: build a GOPATH package into a shared
// library with gccgo, another GOPATH package that depends on the first and an
// executable that links the second library.
func TestTwoGopathShlibsGccgo(t *testing.T) {
requireGccgo(t)
libgoRE := regexp.MustCompile("libgo.so.[0-9]+")
goCmd(t, "install", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "./depBase")
goCmd(t, "install", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "./dep2")
goCmd(t, "install", "-compiler=gccgo", "-linkshared", "./exe2")
// Run 'go list' after 'go install': with gccgo, we apparently don't know the
// shlib location until after we've installed it.
dep2 := goCmd(t, "list", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "-f", "{{.Shlib}}", "./dep2")
depBase := goCmd(t, "list", "-compiler=gccgo", "-buildmode=shared", "-linkshared", "-f", "{{.Shlib}}", "./depBase")
AssertIsLinkedToRegexp(t, depBase, libgoRE)
AssertIsLinkedToRegexp(t, dep2, libgoRE)
AssertIsLinkedTo(t, dep2, filepath.Base(depBase))
AssertIsLinkedToRegexp(t, "../../bin/exe2", libgoRE)
AssertIsLinkedTo(t, "../../bin/exe2", filepath.Base(dep2))
AssertIsLinkedTo(t, "../../bin/exe2", filepath.Base(depBase))
// And check it runs.
run(t, "gccgo-built", "../../bin/exe2")
}
// Testing rebuilding of shared libraries when they are stale is a bit more
// complicated than it seems like it should be. First, we make everything "old": but
// only a few seconds old, or it might be older than gc (or the runtime source) and
// everything will get rebuilt. Then define a timestamp slightly newer than this
// time, which is what we set a file's mtime to in order to make it be seen as new,
// and finally another slightly even newer one that we can compare files against to
// see if they have been rebuilt.
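// As a rough sketch of the timeline used below:
//
//	oldTime (now-9s) < nearlyNew (now-6s) < stampTime (now-3s) < now
//
// resetFileStamps sets everything to oldTime, touch marks a file as nearlyNew,
// and AssertRebuilt/AssertNotRebuilt compare modification times against stampTime.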
var oldTime = time.Now().Add(-9 * time.Second)
var nearlyNew = time.Now().Add(-6 * time.Second)
var stampTime = time.Now().Add(-3 * time.Second)
// resetFileStamps makes "everything" (bin, src, pkg from GOPATH and the
// test-specific parts of GOROOT) appear old.
func resetFileStamps() {
chtime := func(path string, info os.FileInfo, err error) error {
return os.Chtimes(path, oldTime, oldTime)
}
reset := func(path string) {
if err := filepath.Walk(path, chtime); err != nil {
log.Fatalf("resetFileStamps failed: %v", err)
}
}
reset("../../bin")
reset("../../pkg")
reset("../../src")
reset(gorootInstallDir)
}
// touch changes path and returns a function that changes it back.
// It also sets the time of the file, so that we can see if it is rewritten.
func touch(t *testing.T, path string) (cleanup func()) {
data, err := ioutil.ReadFile(path)
if err != nil {
t.Fatal(err)
}
old := make([]byte, len(data))
copy(old, data)
if bytes.HasPrefix(data, []byte("!<arch>\n")) {
// Change last digit of build ID.
// (Content ID in the new content-based build IDs.)
const marker = `build id "`
i := bytes.Index(data, []byte(marker))
if i < 0 {
t.Fatal("cannot find build id in archive")
}
j := bytes.IndexByte(data[i+len(marker):], '"')
if j < 0 {
t.Fatal("cannot find build id in archive")
}
i += len(marker) + j - 1
if data[i] == 'a' {
data[i] = 'b'
} else {
data[i] = 'a'
}
} else {
// assume it's a text file
data = append(data, '\n')
}
if err := ioutil.WriteFile(path, data, 0666); err != nil {
t.Fatal(err)
}
if err := os.Chtimes(path, nearlyNew, nearlyNew); err != nil {
t.Fatal(err)
}
return func() {
if err := ioutil.WriteFile(path, old, 0666); err != nil {
t.Fatal(err)
}
}
}
// isNew returns if the path is newer than the time stamp used by touch.
func isNew(t *testing.T, path string) bool {
t.Helper()
fi, err := os.Stat(path)
if err != nil {
t.Fatal(err)
}
return fi.ModTime().After(stampTime)
}
// Fail unless path has been rebuilt (i.e. is newer than the time stamp used by
// isNew)
func AssertRebuilt(t *testing.T, msg, path string) {
t.Helper()
if !isNew(t, path) {
t.Errorf("%s was not rebuilt (%s)", msg, path)
}
}
// Fail if path has been rebuilt (i.e. is newer than the time stamp used by isNew)
func AssertNotRebuilt(t *testing.T, msg, path string) {
t.Helper()
if isNew(t, path) {
t.Errorf("%s was rebuilt (%s)", msg, path)
}
}
func TestRebuilding(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
goCmd(t, "install", "-linkshared", "./exe")
info := strings.Fields(goCmd(t, "list", "-buildmode=shared", "-linkshared", "-f", "{{.Target}} {{.Shlib}}", "./depBase"))
if len(info) != 2 {
t.Fatalf("go list failed to report Target and/or Shlib")
}
target := info[0]
shlib := info[1]
// If the source is newer than both the .a file and the .so, both are rebuilt.
t.Run("newsource", func(t *testing.T) {
resetFileStamps()
cleanup := touch(t, "./depBase/dep.go")
defer func() {
cleanup()
goCmd(t, "install", "-linkshared", "./exe")
}()
goCmd(t, "install", "-linkshared", "./exe")
AssertRebuilt(t, "new source", target)
AssertRebuilt(t, "new source", shlib)
})
// If the .a file is newer than the .so, the .so is rebuilt (but not the .a)
t.Run("newarchive", func(t *testing.T) {
resetFileStamps()
AssertNotRebuilt(t, "new .a file before build", target)
goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "./depBase")
AssertNotRebuilt(t, "new .a file before build", target)
cleanup := touch(t, target)
defer func() {
cleanup()
goCmd(t, "install", "-v", "-linkshared", "./exe")
}()
goCmd(t, "install", "-v", "-linkshared", "./exe")
AssertNotRebuilt(t, "new .a file", target)
AssertRebuilt(t, "new .a file", shlib)
})
}
func appendFile(t *testing.T, path, content string) {
t.Helper()
f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0660)
if err != nil {
t.Fatalf("os.OpenFile failed: %v", err)
}
defer func() {
err := f.Close()
if err != nil {
t.Fatalf("f.Close failed: %v", err)
}
}()
_, err = f.WriteString(content)
if err != nil {
t.Fatalf("f.WriteString failed: %v", err)
}
}
func createFile(t *testing.T, path, content string) {
t.Helper()
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
if err != nil {
t.Fatalf("os.OpenFile failed: %v", err)
}
_, err = f.WriteString(content)
if closeErr := f.Close(); err == nil {
err = closeErr
}
if err != nil {
t.Fatalf("WriteString failed: %v", err)
}
}
func TestABIChecking(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
goCmd(t, "install", "-linkshared", "./exe")
// If we make an ABI-breaking change to depBase and rebuild libp.so but not exe,
// exe will abort with a complaint on startup.
// This assumes adding an exported function breaks ABI, which is not true in
// some senses but suffices for the narrow definition of ABI compatibility the
// toolchain uses today.
resetFileStamps()
createFile(t, "./depBase/break.go", "package depBase\nfunc ABIBreak() {}\n")
defer os.Remove("./depBase/break.go")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
c := exec.Command("../../bin/exe")
output, err := c.CombinedOutput()
if err == nil {
t.Fatal("executing exe did not fail after ABI break")
}
scanner := bufio.NewScanner(bytes.NewReader(output))
foundMsg := false
const wantPrefix = "abi mismatch detected between the executable and lib"
for scanner.Scan() {
if strings.HasPrefix(scanner.Text(), wantPrefix) {
foundMsg = true
break
}
}
if err = scanner.Err(); err != nil {
t.Errorf("scanner encountered error: %v", err)
}
if !foundMsg {
t.Fatalf("exe failed, but without line %q; got output:\n%s", wantPrefix, output)
}
// Rebuilding exe makes it work again.
goCmd(t, "install", "-linkshared", "./exe")
run(t, "rebuilt exe", "../../bin/exe")
// If we make a change which does not break ABI (such as adding an unexported
// function) and rebuild libdepBase.so, exe still works, even if new function
// is in a file by itself.
resetFileStamps()
createFile(t, "./depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./depBase")
run(t, "after non-ABI breaking change", "../../bin/exe")
}
// If a package 'explicit' imports a package 'implicit', building
// 'explicit' into a shared library implicitly includes implicit in
// the shared library. Building an executable that imports both
// explicit and implicit builds the code from implicit into the
// executable rather than fetching it from the shared library. The
// link still succeeds and the executable still runs though.
func TestImplicitInclusion(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./explicit")
goCmd(t, "install", "-linkshared", "./implicitcmd")
run(t, "running executable linked against library that contains same package as it", "../../bin/implicitcmd")
}
// Tests to make sure that the type fields of empty interfaces and itab
// fields of nonempty interfaces are unique even across modules,
// so that interface equality works correctly.
func TestInterface(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./iface_a")
// Note: iface_i gets installed implicitly as a dependency of iface_a.
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./iface_b")
goCmd(t, "install", "-linkshared", "./iface")
run(t, "running type/itab uniqueness tester", "../../bin/iface")
}
// Access a global variable from a library.
func TestGlobal(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./globallib")
goCmd(t, "install", "-linkshared", "./global")
run(t, "global executable", "../../bin/global")
AssertIsLinkedTo(t, "../../bin/global", soname)
AssertHasRPath(t, "../../bin/global", gorootInstallDir)
}
// Run a test using -linkshared of an installed shared package.
// Issue 26400.
func TestTestInstalledShared(t *testing.T) {
goCmd(nil, "test", "-linkshared", "-test.short", "sync/atomic")
}
// Test generated pointer method with -linkshared.
// Issue 25065.
func TestGeneratedMethod(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue25065")
}
| [
"\"GCCGO\""
]
| []
| [
"GCCGO"
]
| [] | ["GCCGO"] | go | 1 | 0 | |
gorush/notification_apns_test.go | package gorush
import (
"context"
"encoding/json"
"log"
"net/http"
"net/url"
"os"
"testing"
"time"
"github.com/appleboy/gorush/config"
"github.com/buger/jsonparser"
"github.com/sideshow/apns2"
"github.com/stretchr/testify/assert"
)
const certificateValidP12 = `MIIKlgIBAzCCClwGCSqGSIb3DQEHAaCCCk0EggpJMIIKRTCCBMcGCSqGSIb3DQEHBqCCBLgwggS0AgEAMIIErQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQID/GJtcRhjvwCAggAgIIEgE5ralQoQBDgHgdp5+EwBaMjcZEJUXmYRdVCttIwfN2OxlIs54tob3/wpUyWGqJ+UXy9X+4EsWpDPUfTN/w88GMgj0kftpTqG0+3Hu/9pkZO4pdLCiyMGOJnXCOdhHFirtTXAR3QvnKKIpXIKrmZ4rcr/24Uvd/u669Tz8VDgcGOQazKeyvtdW7TJBxMFRv+IsQi/qCj5PkQ0jBbZ1LAc4C8mCMwOcH+gi/e471mzPWihQmynH2yJlZ4jb+taxQ/b8Dhlni2vcIMn+HknRk3Cyo8jfFvvO0BjvVvEAPxPJt7X96VFFS2KlyXjY3zt0siGrzQpczgPB/1vTqhQUvoOBw6kcXWgOjwt+gR8Mmo2DELnQqGhbYuWu52doLgVvD+zGr5vLYXHz6gAXnI6FVyHb+oABeBet3cer3EzGR7r+VoLmWSBm8SyRHwi0mxE63S7oD1j22jaTo7jnQBFZaY+cPaATcFjqW67x4j8kXh9NRPoINSgodLJrgmet2D1iOKuLTkCWf0UTi2HUkn9Zf0y+IIViZaVE4mWaGb9xTBClfa4KwM5gSz3jybksFKbtnzzPFuzClu+2mdthJs/58Ao40eyaykNmzSPhDv1F8Mai8bfaAqSdcBl5ZB2PF33xhuNSS4j2uIh1ICGv9DueyN507iEMQO2yCcaQTMKejV7/52h9LReS5/QPXDJhWMVpTb5FGCP7EmO0lZTeBNO5MlDzDQfz5xcFqHqfoby2sfAMU8HNB8wzdcwHtacgKGLBjLkapxyTsqYE5Kry6UxclvF4soR8TZoQ69E7WsKZLmTaw2+msmnDJubpY0NqkRqkVk7umtVC0D+w6AIKDrY58HMlm80/ImgGXwybA1kuZMxqMzaH/xFiAHOSIGuVPtGgGFYNEdGbfOryuhFo9l1nSECWm8MN9hYwB1Rn9p6rkd+zrvbU1zv13drtrZ/vL0NlT02tlkS8NdWLGJkZhWgc2c89GyRb7mjuHRHu/BWGED3y7vjHo/lnkPsLJXw0ovIlqhtW0BtN/xSpGg0phDbn0Et5jb7Xmc+fWimgbtIUHcnJOV5QSYFzlR+kbzx0oKRARU4B3CWkdPeaXkrmw0IriS6vOdZcM8YBJ6BtXEDLsrSH7tHxeknYHLEl0uy9Oc1+Huyrz8j7Zxo8SQj9H+RX0HeMl8YB3HUBLHYcqCEBjm7mHI4rP8ULVkC5oCA5w3tJfMyvS/jZRiwMUyr0tiWhrh/AM3wPPX54cqozefojWKrqGtK9I+n0cfwW9rU3FsUcpMTo9uQ27O7NejKP2X/LLMZkQvWUEabZNjNrWsbp6d51/frfIR7kRlZAmmt2yS23h6w6RvKTAVUrNatEyzokfNAIDml6lYLweNJATZU08BznhPpuvh3bKOSos5uaJBYpsOYexoMGnAig428qypw0cmv6sCjO/xdIL86COVNQp/UtjcXJ9/E0bnVmzfpgA3WCy+29YXPx7DZ1U+bQ9jOO/P9pwqLwTH+gpcZiVm3ru1Tmiq6iZ8cG7tMLfTBNXljvtlDzCCBXYGCSqGSIb3DQEHAaCCBWcEggVjMIIFXzCCBVsGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAgCvAo2HCM89AICCAAEggTIOcfaF6qWYXlo+BNBjYIllg0VwQSJXZmcqj2vXlDPIPrTuQ+QDmGnhYR6hVbcMrk3o7eQhH3ThyHM+KEzkYx1IAYCOdEQXYcFguoDG1CxHrgE1Y0H8yndc/yPw2tqkx6X9ZemdYp3welXZjYgUi9MKvGbN6lZ0cFTU+2+0+H/IyKQ3OUjDNymhOxypOPBaK2eQsJ7XumgJ6nLvNZDRx/f277J+LD/z0pOhzUOljhvA3dkBMpEvomX4erZihErunqP1jbH9O3eIYq9J7czGS2xuckolW19KqWOyWh8KRI/LnAqiEh2e0hZ7lpltj79PenO66VGPbn2f85A6b6PD4kipgoMB2IRibkoodyn/oo3WizO386fqtEfUlbFmxI4y4utobWe7nZ2VuBLgA/mgyyxqAJK1erM98NDWB/Njo1CPsaMl9ubXKPOyIZG0fOLUa23DfkJUEiCb839yKc2oEJkI0wtrvbeh1TAPv4vL4TxiXdiJ/6YrSa0/FQh6nqk1jiK+p22MzvEIkDOyPqk/GsAlc/k2kQ/M86tF50wtc08wnXv8+G8k6qTZ7VCluffzAUt64La47qj8XIfh7tKleznzQSbyjlNX8DsFVzGbCg9G4PKxrLAVnKEgIK1kOopSF1UUMqSKE0D3s5AURQhX8/Cf9h+WtNsWK+y7EMOntsBc2op0M7fQ9Jm73NF7CCYeqb0W7sziJSzqJsJgNp0+ArAcZQExeltxAb6kye3Z5JtP/oaB+jmcHKy9l/nhzKA3MzJwCZ5Q3oviPlNqJvFVBmGEEvC6iULLuv6VSxNdB2uH3Tsfa1TMOOHOadBTcyWatjscYS9ynkXuw1+8+FvEu3EV0UwopZmlSaYfMKQ2jshT4Cgg1zy15uKjomojtAaaF+D/U6KZVQk/7rzdaDmvkJvNtc5n9BW96tmrOhI6L+/WihS570qaitQUsHBBTOetlHXYEPiOkH8BhjzNHXLH9YpC8OEQOhO+1jEninDKNdbU7SCqV0+YE6kfR5Bfkw2MxoIQLtUnHjK6GR/q3fxo1TirbTe8c8dp907wgcXkT/rONX/iG1JTjxV2ixR1oM68LYI3eJzY801/xBSnmOjdzOPUHXCNHDTf9kPjkOtZWkGbZugf4ckRH/L8dK2Vo4QpFUN8AZjomanzLxjQZ+DVFNoPDT2K+0pezsMiwSJlyBGoIQHN0/2zVNVLo/KfARIOac1iC8+duj5S/1c52+PvP7FkMe72QUV0KUQ7AJHXUvQtFZx4Ny579/B/3c4D72CFSydhw3/+nL9+Nz956UafZ6G7HZ96frMTgajMcXQe1uXwgN2iTnnNtLdcC/ARHS1RkjgXHohO+VGuQxOo23PPABVaxex2SGGXX7Fc4MI2Xr4uaimZIzcUkuHUnhZQGkcFlVekZ/wJXookq0Fv8DuPuv7mGCx6BKERU9I+NMU6xLNe6VsfkS8t5uVq1EIINnddGl9VGpqOPN8EgU47gh6CcDkP8sxXsT8pZ1vQyJrUlWGYp68/okoQ+7lqnd06wzVDIwAE/+pq9PUxLdNvYE0sNe4JrEcKO0xp/zxCqLjHLT+rB896v2OsU0BA5tPQA7xkKp4PuQr6qO8fTVyfhImVmoFX6b9VgtLHIlJMVowIwYJKoZIhvcNAQkVMRYEFIwanwBmvSRCuV0e6/5ei8oEPXODMDMGCSqGSIb3DQEJFDEmHiQAQQBQAE4AUwAvADIAIABQAH
IAaQB2AGEAdABlACAASwBlAHkwMTAhMAkGBSsOAwIaBQAEFK7XWCbKGSKmxNqE2E8dmCfwhaQxBAjPcbkv12ro6gICCAA=`
const certificateValidPEM = `QmFnIEF0dHJpYnV0ZXMKICAgIGxvY2FsS2V5SUQ6IDhDIDFBIDlGIDAwIDY2IEJEIDI0IDQyIEI5IDVEIDFFIEVCIEZFIDVFIDhCIENBIDA0IDNEIDczIDgzIAogICAgZnJpZW5kbHlOYW1lOiBBUE5TLzIgUHJpdmF0ZSBLZXkKc3ViamVjdD0vQz1OWi9TVD1XZWxsaW5ndG9uL0w9V2VsbGluZ3Rvbi9PPUludGVybmV0IFdpZGdpdHMgUHR5IEx0ZC9PVT05WkVINjJLUlZWL0NOPUFQTlMvMiBEZXZlbG9wbWVudCBJT1MgUHVzaCBTZXJ2aWNlczogY29tLnNpZGVzaG93LkFwbnMyCmlzc3Vlcj0vQz1OWi9TVD1XZWxsaW5ndG9uL0w9V2VsbGluZ3Rvbi9PPUFQTlMvMiBJbmMuL09VPUFQTlMvMiBXb3JsZHdpZGUgRGV2ZWxvcGVyIFJlbGF0aW9ucy9DTj1BUE5TLzIgV29ybGR3aWRlIERldmVsb3BlciBSZWxhdGlvbnMgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkKLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ2ekNDQXRNQ0FRSXdEUVlKS29aSWh2Y05BUUVMQlFBd2djTXhDekFKQmdOVkJBWVRBazVhTVJNd0VRWUQKVlFRSUV3cFhaV3hzYVc1bmRHOXVNUk13RVFZRFZRUUhFd3BYWld4c2FXNW5kRzl1TVJRd0VnWURWUVFLRXd0QgpVRTVUTHpJZ1NXNWpMakV0TUNzR0ExVUVDeE1rUVZCT1V5OHlJRmR2Y214a2QybGtaU0JFWlhabGJHOXdaWElnClVtVnNZWFJwYjI1ek1VVXdRd1lEVlFRREV6eEJVRTVUTHpJZ1YyOXliR1IzYVdSbElFUmxkbVZzYjNCbGNpQlMKWld4aGRHbHZibk1nUTJWeWRHbG1hV05oZEdsdmJpQkJkWFJvYjNKcGRIa3dIaGNOTVRZd01UQTRNRGd6TkRNdwpXaGNOTWpZd01UQTFNRGd6TkRNd1dqQ0JzakVMTUFrR0ExVUVCaE1DVGxveEV6QVJCZ05WQkFnVENsZGxiR3hwCmJtZDBiMjR4RXpBUkJnTlZCQWNUQ2xkbGJHeHBibWQwYjI0eElUQWZCZ05WQkFvVEdFbHVkR1Z5Ym1WMElGZHAKWkdkcGRITWdVSFI1SUV4MFpERVRNQkVHQTFVRUN4TUtPVnBGU0RZeVMxSldWakZCTUQ4R0ExVUVBeE00UVZCTwpVeTh5SUVSbGRtVnNiM0J0Wlc1MElFbFBVeUJRZFhOb0lGTmxjblpwWTJWek9pQmpiMjB1YzJsa1pYTm9iM2N1ClFYQnVjekl3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRRFkwYzFUS0I1b1pQd1EKN3QxQ3dNSXJ2cUI2R0lVM3RQeTZSaGNrWlhUa09COFllQldKN1VLZkN6OEhHSEZWb21CUDBUNU9VYmVxUXpxVwpZSmJRelo4YTZaTXN6YkwwbE80WDkrKzNPaTUvVHRBd09VT0s4ck9GTjI1bTJLZnNheUhRWi80dldTdEsyRndtCjVhSmJHTGxwSC9iLzd6MUQ0dmhtTWdvQnVUMUl1eWhHaXlGeGxaOUV0VGxvRnZzcU0xRTVmWVpPU1pBQ3lYVGEKSzR2ZGdiUU1nVVZzSTcxNEZBZ0xUbEswVWVpUmttS20zcGRidGZWYnJ0aHpJK0lIWEtJdFVJeStGbjIwUFJNaApkU25henRTejd0Z0JXQ0l4MjJxdmNZb2dIV2lPZ1VZSU03NzJ6RTJ5OFVWT3I4RHNpUmxzT0hTQTdFSTRNSmNRCkcyRlVxMlovQWdNQkFBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBR3lmeU8ySE1nY2RlQmN6M2J0NUJJTFgKZjdSQTIvVW1WSXdjS1IxcW90VHNGK1BuQm1jSUxleU9RZ0RlOXRHVTVjUmM3OWtEdDNKUm1NWVJPRklNZ0ZSZgpXZjIydU9LdGhvN0dRUWFLdkcrYmtnTVZkWUZSbEJIbkYrS2VxS0g4MXFiOXArQ1Q0SXcwR2VoSUwxRGlqRkxSClZJQUlCWXB6NG9CUENJRTFJU1ZUK0ZnYWYzSkFoNTlrYlBiTnc5QUlEeGFCdFA4RXV6U1ROd2ZieG9HYkNvYlMKV2kxVThJc0N3UUZ0OHRNMW00WlhEMUNjWklyR2RyeWVBaFZrdktJSlJpVTVRWVdJMm5xWk4rSnFRdWNtOWFkMAptWU81bUprSW9iVWE0K1pKaENQS0VkbWdwRmJSR2swd1Z1YURNOUN2NlAyc3JzWUFqYU80eTNWUDBHdk5LUkk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KQmFnIEF0dHJpYnV0ZXMKICAgIGxvY2FsS2V5SUQ6IDhDIDFBIDlGIDAwIDY2IEJEIDI0IDQyIEI5IDVEIDFFIEVCIEZFIDVFIDhCIENBIDA0IDNEIDczIDgzIAogICAgZnJpZW5kbHlOYW1lOiBBUE5TLzIgUHJpdmF0ZSBLZXkKS2V5IEF0dHJpYnV0ZXM6IDxObyBBdHRyaWJ1dGVzPgotLS0tLUJFR0lOIFJTQSBQUklWQVRFIEtFWS0tLS0tCk1JSUVvd0lCQUFLQ0FRRUEyTkhOVXlnZWFHVDhFTzdkUXNEQ0s3NmdlaGlGTjdUOHVrWVhKR1YwNURnZkdIZ1YKaWUxQ253cy9CeGh4VmFKZ1Q5RStUbEczcWtNNmxtQ1cwTTJmR3VtVExNMnk5SlR1Ri9mdnR6b3VmMDdRTURsRAppdkt6aFRkdVp0aW43R3NoMEdmK0wxa3JTdGhjSnVXaVd4aTVhUi8yLys4OVErTDRaaklLQWJrOVNMc29Sb3NoCmNaV2ZSTFU1YUJiN0tqTlJPWDJHVGttUUFzbDAyaXVMM1lHMERJRkZiQ085ZUJRSUMwNVN0Rkhva1pKaXB0NlgKVzdYMVc2N1ljeVBpQjF5aUxWQ012aFo5dEQwVElYVXAyczdVcys3WUFWZ2lNZHRxcjNHS0lCMW9qb0ZHQ0RPKwo5c3hOc3ZGRlRxL0E3SWtaYkRoMGdPeENPRENYRUJ0aFZLdG1md0lEQVFBQkFvSUJBUUNXOFpDSStPQWFlMXRFCmlwWjlGMmJXUDNMSExYVG84RllWZENBK1ZXZUlUazNQb2lJVWtKbVYwYVdDVWhEc3RndG81ZG9EZWo1c0NUdXIKWHZqL3luYWVyTWVxSkZZV2tld2p3WmNnTHlBWnZ3dU8xdjdmcDlFMHgvOVRHRGZuampuUE5lYXVuZHhXMGNOdAp6T1kzbDBIVkhzeTlKcGUzUURjQUpvdnk0VHY1K2hGWTRrRHhVQkdzeWp2aFNjVmdLZzV0TGtKY2xtM3NPdS9MCkd5THFwd05JM09KQWRNSXVWRD
ROMkJaMWFPRWFwNm1wMnk4SWUwL1I0WVdjYVo1QTRQdzd4VVBsNlNYYzl1dWEKLzc4UVRFUnRQQzZlanlDQmlFMDVhOG0zUTNpdWQzWHRubHl3czJLd2hnQkFmRTZNNHpSL2YzT1FCN1pJWE1oeQpacG1aWnc1eEFvR0JBUFluODRJcmxJUWV0V1FmdlBkTTdLemdoNlVESEN1Z25sQ0RnaHdZcFJKR2k4aE1mdVpWCnhOSXJZQUp6TFlEUTAxbEZKUkpnV1hUY2JxejlOQnoxbmhnK2NOT3oxL0tZKzM4ZXVkZWU2RE5ZbXp0UDdqRFAKMmpuYVMrZHRqQzhoQVhPYm5GcUcrTmlsTURMTHU2YVJtckphSW1ialNyZnlMaUU2bXZKN3U4MW5Bb0dCQU9GOQpnOTN3WjBtTDFyazJzNVd3SEdUTlUvSGFPdG1XUzR6N2tBN2Y0UWFSdWIrTXdwcFptbURaUEhwaVpYN0JQY1p6CmlPUFFoK3huN0lxUkdvUVdCTHlrQlZ0OHpaRm9MWkpvQ1IzbjYzbGV4NUE0cC8wUHAxZ0ZaclIreFg4UFlWb3MKM3llZWlXeVBLc1hYTmMwczVRd0haY1g2V2I4RUhUaFRYR0NCZXRjcEFvR0FNZVFKQzlJUGFQUGNhZTJ3M0NMQQpPWTNNa0ZwZ0JFdXFxc0RzeHdzTHNmZVFiMGxwMHYrQlErTzhzdUpyVDVlRHJxMUFCVWgzK1NLUVlBbDEzWVMrCnhVVXFrdzM1YjljbjZpenRGOUhDV0YzV0lLQmpzNHI5UFFxTXBkeGpORTRwUUNoQytXb3YxNkVyY3JBdVdXVmIKaUZpU2JtNFUvOUZiSGlzRnFxMy9jM01DZ1lCK3Z6U3VQZ0Z3MzcrMG9FRFZ0UVpneXVHU29wNU56Q052ZmIvOQovRzNhYVhORmJuTzhtdjBoenpvbGVNV2dPRExuSis0Y1VBejNIM3RnY0N1OWJ6citaaHYwenZRbDlhOFlDbzZGClZ1V1BkVzByYmcxUE84dE91TXFBVG5ubzc5WkMvOUgzelM5bDdCdVkxVjJTbE5leXFUM1Z5T0ZGYzZTUkVwcHMKVEp1bDhRS0JnQXhuUUI4TUE3elBVTHUxY2x5YUpMZHRFZFJQa0tXTjdsS1lwdGMwZS9WSGZTc0t4c2VXa2ZxaQp6Z1haNTFrUVRyVDZaYjZIWVJmd0MxbU1YSFdSS1J5WWpBbkN4VmltNllRZCtLVlQ0OWlSRERBaUlGb01HQTRpCnZ2Y0lsbmVxT1paUERJb0tKNjBJak8vRFpIV2t3NW1MamFJclQrcVEzWEFHZEpBMTNoY20KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K`
const authkeyInvalidP8 = `TUlHSEFnRUFNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQkcwd2F3SUJBUVFnRWJWemZQblpQeGZBeXhxRQpaVjA1bGFBb0pBbCsvNlh0Mk80bU9CNjExc09oUkFOQ0FBU2dGVEtqd0pBQVU5NWcrKy92ektXSGt6QVZtTk1JCnRCNXZUalpPT0l3bkViNzBNc1daRkl5VUZEMVA5R3dzdHo0K2FrSFg3dkk4Qkg2aEhtQm1mWlpaCg==`
const authkeyValidP8 = `LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ0ViVnpmUG5aUHhmQXl4cUUKWlYwNWxhQW9KQWwrLzZYdDJPNG1PQjYxMXNPaFJBTkNBQVNnRlRLandKQUFVOTVnKysvdnpLV0hrekFWbU5NSQp0QjV2VGpaT09Jd25FYjcwTXNXWkZJeVVGRDFQOUd3c3R6NCtha0hYN3ZJOEJINmhIbUJtZmVRbAotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==`
func TestDisabledAndroidIosConf(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Android.Enabled = false
PushConf.Huawei.Enabled = false
err := CheckPushConf()
assert.Error(t, err)
assert.Equal(t, "Please enable iOS, Android or Huawei config in yml config", err.Error())
}
func TestMissingIOSCertificate(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = ""
PushConf.Ios.KeyBase64 = ""
err := CheckPushConf()
assert.Error(t, err)
assert.Equal(t, "Missing iOS certificate key", err.Error())
PushConf.Ios.KeyPath = "test.pem"
err = CheckPushConf()
assert.Error(t, err)
assert.Equal(t, "certificate file does not exist", err.Error())
}
func TestIOSNotificationStructure(t *testing.T) {
var dat map[string]interface{}
var unix = time.Now().Unix()
test := "test"
expectBadge := 0
message := "Welcome notification Server"
expiration := int64(time.Now().Unix())
req := PushNotification{
ApnsID: test,
Topic: test,
Expiration: &expiration,
Priority: "normal",
Message: message,
Badge: &expectBadge,
Sound: Sound{
Critical: 1,
Name: test,
Volume: 1.0,
},
ContentAvailable: true,
Data: D{
"key1": "test",
"key2": 2,
},
Category: test,
URLArgs: []string{"a", "b"},
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
panic(err)
}
alert, _ := jsonparser.GetString(data, "aps", "alert")
badge, _ := jsonparser.GetInt(data, "aps", "badge")
soundName, _ := jsonparser.GetString(data, "aps", "sound", "name")
soundCritical, _ := jsonparser.GetInt(data, "aps", "sound", "critical")
soundVolume, _ := jsonparser.GetFloat(data, "aps", "sound", "volume")
contentAvailable, _ := jsonparser.GetInt(data, "aps", "content-available")
category, _ := jsonparser.GetString(data, "aps", "category")
key1 := dat["key1"].(interface{})
key2 := dat["key2"].(interface{})
aps := dat["aps"].(map[string]interface{})
urlArgs := aps["url-args"].([]interface{})
assert.Equal(t, test, notification.ApnsID)
assert.Equal(t, test, notification.Topic)
assert.Equal(t, unix, notification.Expiration.Unix())
assert.Equal(t, ApnsPriorityLow, notification.Priority)
assert.Equal(t, message, alert)
assert.Equal(t, expectBadge, int(badge))
assert.Equal(t, expectBadge, *req.Badge)
assert.Equal(t, test, soundName)
assert.Equal(t, 1.0, soundVolume)
assert.Equal(t, int64(1), soundCritical)
assert.Equal(t, 1, int(contentAvailable))
assert.Equal(t, "test", key1)
assert.Equal(t, 2, int(key2.(float64)))
assert.Equal(t, test, category)
assert.Contains(t, urlArgs, "a")
assert.Contains(t, urlArgs, "b")
}
func TestIOSSoundAndVolume(t *testing.T) {
var dat map[string]interface{}
test := "test"
message := "Welcome notification Server"
req := PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Message: message,
Sound: Sound{
Critical: 3,
Name: test,
Volume: 4.5,
},
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
panic(err)
}
alert, _ := jsonparser.GetString(data, "aps", "alert")
soundName, _ := jsonparser.GetString(data, "aps", "sound", "name")
soundCritical, _ := jsonparser.GetInt(data, "aps", "sound", "critical")
soundVolume, _ := jsonparser.GetFloat(data, "aps", "sound", "volume")
assert.Equal(t, test, notification.ApnsID)
assert.Equal(t, test, notification.Topic)
assert.Equal(t, ApnsPriorityLow, notification.Priority)
assert.Equal(t, message, alert)
assert.Equal(t, test, soundName)
assert.Equal(t, 4.5, soundVolume)
assert.Equal(t, int64(3), soundCritical)
req.SoundName = "foobar"
req.SoundVolume = 5.5
notification = GetIOSNotification(req)
dump, _ = json.Marshal(notification.Payload)
data = []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
panic(err)
}
soundName, _ = jsonparser.GetString(data, "aps", "sound", "name")
soundVolume, _ = jsonparser.GetFloat(data, "aps", "sound", "volume")
soundCritical, _ = jsonparser.GetInt(data, "aps", "sound", "critical")
assert.Equal(t, 5.5, soundVolume)
assert.Equal(t, int64(1), soundCritical)
assert.Equal(t, "foobar", soundName)
req = PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Message: message,
Sound: map[string]interface{}{
"critical": 3,
"name": "test",
"volume": 4.5,
},
}
notification = GetIOSNotification(req)
dump, _ = json.Marshal(notification.Payload)
data = []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
panic(err)
}
soundName, _ = jsonparser.GetString(data, "aps", "sound", "name")
soundVolume, _ = jsonparser.GetFloat(data, "aps", "sound", "volume")
soundCritical, _ = jsonparser.GetInt(data, "aps", "sound", "critical")
assert.Equal(t, 4.5, soundVolume)
assert.Equal(t, int64(3), soundCritical)
assert.Equal(t, "test", soundName)
req = PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Message: message,
Sound: "default",
}
notification = GetIOSNotification(req)
dump, _ = json.Marshal(notification.Payload)
data = []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
panic(err)
}
soundName, _ = jsonparser.GetString(data, "aps", "sound")
assert.Equal(t, "default", soundName)
}
func TestIOSSummaryArg(t *testing.T) {
var dat map[string]interface{}
test := "test"
message := "Welcome notification Server"
req := PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Message: message,
Alert: Alert{
SummaryArg: "test",
SummaryArgCount: 3,
},
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
panic(err)
}
assert.Equal(t, test, notification.ApnsID)
assert.Equal(t, test, notification.Topic)
assert.Equal(t, ApnsPriorityLow, notification.Priority)
assert.Equal(t, "test", dat["aps"].(map[string]interface{})["alert"].(map[string]interface{})["summary-arg"])
assert.Equal(t, float64(3), dat["aps"].(map[string]interface{})["alert"].(map[string]interface{})["summary-arg-count"])
}
// Silent Notification: the payload’s aps dictionary must not contain the alert, sound, or badge keys.
// ref: https://goo.gl/m9xyqG
func TestSendZeroValueForBadgeKey(t *testing.T) {
var dat map[string]interface{}
test := "test"
message := "Welcome notification Server"
req := PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Message: message,
Sound: test,
ContentAvailable: true,
MutableContent: true,
ThreadID: test,
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
alert, _ := jsonparser.GetString(data, "aps", "alert")
badge, _ := jsonparser.GetInt(data, "aps", "badge")
sound, _ := jsonparser.GetString(data, "aps", "sound")
threadID, _ := jsonparser.GetString(data, "aps", "thread-id")
contentAvailable, _ := jsonparser.GetInt(data, "aps", "content-available")
mutableContent, _ := jsonparser.GetInt(data, "aps", "mutable-content")
if req.Badge != nil {
t.Errorf("req.Badge must be nil")
}
assert.Equal(t, test, notification.ApnsID)
assert.Equal(t, test, notification.Topic)
assert.Equal(t, ApnsPriorityLow, notification.Priority)
assert.Equal(t, message, alert)
assert.Equal(t, 0, int(badge))
assert.Equal(t, test, sound)
assert.Equal(t, test, threadID)
assert.Equal(t, 1, int(contentAvailable))
assert.Equal(t, 1, int(mutableContent))
	// Add Badge
expectBadge := 10
req.Badge = &expectBadge
notification = GetIOSNotification(req)
dump, _ = json.Marshal(notification.Payload)
data = []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
if req.Badge == nil {
t.Errorf("req.Badge must be equal %d", *req.Badge)
}
badge, _ = jsonparser.GetInt(data, "aps", "badge")
assert.Equal(t, expectBadge, *req.Badge)
assert.Equal(t, expectBadge, int(badge))
}
// Silent Notification:
// The payload’s aps dictionary must include the content-available key with a value of 1.
// The payload’s aps dictionary must not contain the alert, sound, or badge keys.
// ref: https://goo.gl/m9xyqG
func TestCheckSilentNotification(t *testing.T) {
var dat map[string]interface{}
test := "test"
req := PushNotification{
ApnsID: test,
Topic: test,
CollapseID: test,
Priority: "normal",
ContentAvailable: true,
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
assert.Equal(t, test, notification.CollapseID)
assert.Equal(t, test, notification.ApnsID)
assert.Equal(t, test, notification.Topic)
assert.Nil(t, dat["aps"].(map[string]interface{})["alert"])
assert.Nil(t, dat["aps"].(map[string]interface{})["sound"])
assert.Nil(t, dat["aps"].(map[string]interface{})["badge"])
}
// URL: https://goo.gl/5xFo3C
// Example 2
// {
// "aps" : {
// "alert" : {
// "title" : "Game Request",
// "body" : "Bob wants to play poker",
// "action-loc-key" : "PLAY"
// },
// "badge" : 5
// },
// "acme1" : "bar",
// "acme2" : [ "bang", "whiz" ]
// }
func TestAlertStringExample2ForIos(t *testing.T) {
var dat map[string]interface{}
test := "test"
title := "Game Request"
body := "Bob wants to play poker"
actionLocKey := "PLAY"
req := PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Alert: Alert{
Title: title,
Body: body,
ActionLocKey: actionLocKey,
},
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
assert.Equal(t, title, dat["aps"].(map[string]interface{})["alert"].(map[string]interface{})["title"])
assert.Equal(t, body, dat["aps"].(map[string]interface{})["alert"].(map[string]interface{})["body"])
assert.Equal(t, actionLocKey, dat["aps"].(map[string]interface{})["alert"].(map[string]interface{})["action-loc-key"])
}
// URL: https://goo.gl/5xFo3C
// Example 3
// {
// "aps" : {
// "alert" : "You got your emails.",
// "badge" : 9,
// "sound" : "bingbong.aiff"
// },
// "acme1" : "bar",
// "acme2" : 42
// }
func TestAlertStringExample3ForIos(t *testing.T) {
var dat map[string]interface{}
test := "test"
badge := 9
sound := "bingbong.aiff"
req := PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
ContentAvailable: true,
Message: test,
Badge: &badge,
Sound: sound,
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
assert.Equal(t, sound, dat["aps"].(map[string]interface{})["sound"])
assert.Equal(t, float64(badge), dat["aps"].(map[string]interface{})["badge"].(float64))
assert.Equal(t, test, dat["aps"].(map[string]interface{})["alert"])
}
func TestMessageAndTitle(t *testing.T) {
var dat map[string]interface{}
test := "test"
message := "Welcome notification Server"
title := "Welcome notification Server title"
req := PushNotification{
ApnsID: test,
Topic: test,
Priority: "normal",
Message: message,
Title: title,
ContentAvailable: true,
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
alert, _ := jsonparser.GetString(data, "aps", "alert")
alertBody, _ := jsonparser.GetString(data, "aps", "alert", "body")
alertTitle, _ := jsonparser.GetString(data, "aps", "alert", "title")
assert.Equal(t, test, notification.ApnsID)
assert.Equal(t, ApnsPriorityLow, notification.Priority)
assert.Equal(t, message, alertBody)
assert.Equal(t, title, alertTitle)
assert.NotEqual(t, message, alert)
// Add alert body
messageOverride := "Welcome notification Server overridden"
req.Alert.Body = messageOverride
notification = GetIOSNotification(req)
dump, _ = json.Marshal(notification.Payload)
data = []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
alertBodyOverridden, _ := jsonparser.GetString(data, "aps", "alert", "body")
alertTitle, _ = jsonparser.GetString(data, "aps", "alert", "title")
assert.Equal(t, messageOverride, alertBodyOverridden)
assert.NotEqual(t, message, alertBodyOverridden)
assert.Equal(t, title, alertTitle)
}
func TestIOSAlertNotificationStructure(t *testing.T) {
var dat map[string]interface{}
test := "test"
req := PushNotification{
Message: "Welcome",
Title: test,
Alert: Alert{
Action: test,
ActionLocKey: test,
Body: test,
LaunchImage: test,
LocArgs: []string{"a", "b"},
LocKey: test,
Subtitle: test,
TitleLocArgs: []string{"a", "b"},
TitleLocKey: test,
},
}
notification := GetIOSNotification(req)
dump, _ := json.Marshal(notification.Payload)
data := []byte(string(dump))
if err := json.Unmarshal(data, &dat); err != nil {
log.Println(err)
panic(err)
}
action, _ := jsonparser.GetString(data, "aps", "alert", "action")
actionLocKey, _ := jsonparser.GetString(data, "aps", "alert", "action-loc-key")
body, _ := jsonparser.GetString(data, "aps", "alert", "body")
launchImage, _ := jsonparser.GetString(data, "aps", "alert", "launch-image")
locKey, _ := jsonparser.GetString(data, "aps", "alert", "loc-key")
title, _ := jsonparser.GetString(data, "aps", "alert", "title")
subtitle, _ := jsonparser.GetString(data, "aps", "alert", "subtitle")
titleLocKey, _ := jsonparser.GetString(data, "aps", "alert", "title-loc-key")
aps := dat["aps"].(map[string]interface{})
alert := aps["alert"].(map[string]interface{})
titleLocArgs := alert["title-loc-args"].([]interface{})
locArgs := alert["loc-args"].([]interface{})
assert.Equal(t, test, action)
assert.Equal(t, test, actionLocKey)
assert.Equal(t, test, body)
assert.Equal(t, test, launchImage)
assert.Equal(t, test, locKey)
assert.Equal(t, test, title)
assert.Equal(t, test, subtitle)
assert.Equal(t, test, titleLocKey)
assert.Contains(t, titleLocArgs, "a")
assert.Contains(t, titleLocArgs, "b")
assert.Contains(t, locArgs, "a")
assert.Contains(t, locArgs, "b")
}
func TestDisabledIosNotifications(t *testing.T) {
ctx := context.Background()
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = false
PushConf.Ios.KeyPath = "../certificate/certificate-valid.pem"
err := InitAPNSClient()
assert.Nil(t, err)
PushConf.Android.Enabled = true
PushConf.Android.APIKey = os.Getenv("ANDROID_API_KEY")
androidToken := os.Getenv("ANDROID_TEST_TOKEN")
req := RequestPush{
Notifications: []PushNotification{
//ios
{
Tokens: []string{"11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7"},
Platform: PlatFormIos,
Message: "Welcome",
},
// android
{
Tokens: []string{androidToken, androidToken + "_"},
Platform: PlatFormAndroid,
Message: "Welcome",
},
},
}
count, logs := queueNotification(ctx, req)
assert.Equal(t, 2, count)
assert.Equal(t, 0, len(logs))
}
func TestWrongIosCertificateExt(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "test"
err := InitAPNSClient()
assert.Error(t, err)
assert.Equal(t, "wrong certificate key extension", err.Error())
PushConf.Ios.KeyPath = ""
PushConf.Ios.KeyBase64 = "abcd"
PushConf.Ios.KeyType = "abcd"
err = InitAPNSClient()
assert.Error(t, err)
assert.Equal(t, "wrong certificate key type", err.Error())
}
func TestAPNSClientDevHost(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/certificate-valid.p12"
err := InitAPNSClient()
assert.Nil(t, err)
assert.Equal(t, apns2.HostDevelopment, ApnsClient.Host)
PushConf.Ios.KeyPath = ""
PushConf.Ios.KeyBase64 = certificateValidP12
PushConf.Ios.KeyType = "p12"
err = InitAPNSClient()
assert.Nil(t, err)
assert.Equal(t, apns2.HostDevelopment, ApnsClient.Host)
}
func TestAPNSClientProdHost(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.Production = true
PushConf.Ios.KeyPath = "../certificate/certificate-valid.pem"
err := InitAPNSClient()
assert.Nil(t, err)
assert.Equal(t, apns2.HostProduction, ApnsClient.Host)
PushConf.Ios.KeyPath = ""
PushConf.Ios.KeyBase64 = certificateValidPEM
PushConf.Ios.KeyType = "pem"
err = InitAPNSClient()
assert.Nil(t, err)
assert.Equal(t, apns2.HostProduction, ApnsClient.Host)
}
func TestAPNSClientInvaildToken(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/authkey-invalid.p8"
err := InitAPNSClient()
assert.Error(t, err)
PushConf.Ios.KeyPath = ""
PushConf.Ios.KeyBase64 = authkeyInvalidP8
PushConf.Ios.KeyType = "p8"
err = InitAPNSClient()
assert.Error(t, err)
//empty key-id or team-id
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/authkey-valid.p8"
err = InitAPNSClient()
assert.Error(t, err)
PushConf.Ios.KeyID = "key-id"
PushConf.Ios.TeamID = ""
err = InitAPNSClient()
assert.Error(t, err)
PushConf.Ios.KeyID = ""
PushConf.Ios.TeamID = "team-id"
err = InitAPNSClient()
assert.Error(t, err)
}
func TestAPNSClientVaildToken(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/authkey-valid.p8"
PushConf.Ios.KeyID = "key-id"
PushConf.Ios.TeamID = "team-id"
err := InitAPNSClient()
assert.NoError(t, err)
assert.Equal(t, apns2.HostDevelopment, ApnsClient.Host)
PushConf.Ios.Production = true
err = InitAPNSClient()
assert.NoError(t, err)
assert.Equal(t, apns2.HostProduction, ApnsClient.Host)
// test base64
PushConf.Ios.Production = false
PushConf.Ios.KeyPath = ""
PushConf.Ios.KeyBase64 = authkeyValidP8
PushConf.Ios.KeyType = "p8"
err = InitAPNSClient()
assert.NoError(t, err)
assert.Equal(t, apns2.HostDevelopment, ApnsClient.Host)
PushConf.Ios.Production = true
err = InitAPNSClient()
assert.NoError(t, err)
assert.Equal(t, apns2.HostProduction, ApnsClient.Host)
}
func TestAPNSClientUseProxy(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/certificate-valid.p12"
PushConf.Core.HTTPProxy = "http://127.0.0.1:8080"
_ = SetProxy(PushConf.Core.HTTPProxy)
err := InitAPNSClient()
assert.Nil(t, err)
assert.Equal(t, apns2.HostDevelopment, ApnsClient.Host)
req, _ := http.NewRequest("GET", apns2.HostDevelopment, nil)
actualProxyURL, err := ApnsClient.HTTPClient.Transport.(*http.Transport).Proxy(req)
assert.Nil(t, err)
expectedProxyURL, _ := url.ParseRequestURI(PushConf.Core.HTTPProxy)
assert.Equal(t, expectedProxyURL, actualProxyURL)
PushConf.Ios.KeyPath = "../certificate/authkey-valid.p8"
PushConf.Ios.TeamID = "example.team"
PushConf.Ios.KeyID = "example.key"
err = InitAPNSClient()
assert.Nil(t, err)
assert.Equal(t, apns2.HostDevelopment, ApnsClient.Host)
assert.NotNil(t, ApnsClient.Token)
req, _ = http.NewRequest("GET", apns2.HostDevelopment, nil)
actualProxyURL, err = ApnsClient.HTTPClient.Transport.(*http.Transport).Proxy(req)
assert.Nil(t, err)
expectedProxyURL, _ = url.ParseRequestURI(PushConf.Core.HTTPProxy)
assert.Equal(t, expectedProxyURL, actualProxyURL)
http.DefaultTransport.(*http.Transport).Proxy = nil
}
func TestPushToIOS(t *testing.T) {
PushConf, _ = config.LoadConf("")
MaxConcurrentIOSPushes = make(chan struct{}, PushConf.Ios.MaxConcurrentPushes)
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/certificate-valid.pem"
err := InitAPNSClient()
assert.Nil(t, err)
err = InitAppStatus()
assert.Nil(t, err)
req := PushNotification{
Tokens: []string{"11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7", "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef1"},
Platform: 1,
Message: "Welcome",
}
// send fail
PushToIOS(req)
}
func TestApnsHostFromRequest(t *testing.T) {
PushConf, _ = config.LoadConf("")
PushConf.Ios.Enabled = true
PushConf.Ios.KeyPath = "../certificate/certificate-valid.pem"
err := InitAPNSClient()
assert.Nil(t, err)
err = InitAppStatus()
assert.Nil(t, err)
req := PushNotification{
Production: true,
}
client := getApnsClient(req)
assert.Equal(t, apns2.HostProduction, client.Host)
req = PushNotification{
Development: true,
}
client = getApnsClient(req)
assert.Equal(t, apns2.HostDevelopment, client.Host)
req = PushNotification{}
PushConf.Ios.Production = true
client = getApnsClient(req)
assert.Equal(t, apns2.HostProduction, client.Host)
PushConf.Ios.Production = false
client = getApnsClient(req)
assert.Equal(t, apns2.HostDevelopment, client.Host)
}
| [
"\"ANDROID_API_KEY\"",
"\"ANDROID_TEST_TOKEN\""
]
| []
| [
"ANDROID_TEST_TOKEN",
"ANDROID_API_KEY"
]
| [] | ["ANDROID_TEST_TOKEN", "ANDROID_API_KEY"] | go | 2 | 0 | |
pkg/testutils/integrationtestharness.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutils
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/golang/glog"
"io/ioutil"
"k8s.io/kops/cloudmock/aws/mockec2"
"k8s.io/kops/cloudmock/aws/mockroute53"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/util/pkg/vfs"
"os"
"path"
"path/filepath"
"testing"
)
type IntegrationTestHarness struct {
TempDir string
T *testing.T
// The original kops DefaultChannelBase value, restored on Close
originalDefaultChannelBase string
}
func NewIntegrationTestHarness(t *testing.T) *IntegrationTestHarness {
h := &IntegrationTestHarness{}
tempDir, err := ioutil.TempDir("", "test")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
h.TempDir = tempDir
vfs.Context.ResetMemfsContext(true)
// Replace the default channel path with a local filesystem path, so we don't try to retrieve it from a server
{
channelPath, err := filepath.Abs(path.Join("../../channels/"))
if err != nil {
t.Fatalf("error resolving stable channel path: %v", err)
}
channelPath += "/"
h.originalDefaultChannelBase = kops.DefaultChannelBase
kops.DefaultChannelBase = "file://" + channelPath
}
return h
}
func (h *IntegrationTestHarness) Close() {
if h.TempDir != "" {
if os.Getenv("KEEP_TEMP_DIR") != "" {
glog.Infof("NOT removing temp directory, because KEEP_TEMP_DIR is set: %s", h.TempDir)
} else {
err := os.RemoveAll(h.TempDir)
if err != nil {
h.T.Fatalf("failed to remove temp dir %q: %v", h.TempDir, err)
}
}
}
if h.originalDefaultChannelBase != "" {
kops.DefaultChannelBase = h.originalDefaultChannelBase
}
}
func (h *IntegrationTestHarness) SetupMockAWS() {
cloud := awsup.InstallMockAWSCloud("us-test-1", "abc")
mockEC2 := &mockec2.MockEC2{}
cloud.MockEC2 = mockEC2
mockRoute53 := &mockroute53.MockRoute53{}
cloud.MockRoute53 = mockRoute53
mockRoute53.MockCreateZone(&route53.HostedZone{
Id: aws.String("/hostedzone/Z1AFAKE1ZON3YO"),
Name: aws.String("example.com."),
Config: &route53.HostedZoneConfig{
PrivateZone: aws.Bool(false),
},
}, nil)
mockRoute53.MockCreateZone(&route53.HostedZone{
Id: aws.String("/hostedzone/Z2AFAKE1ZON3NO"),
Name: aws.String("internal.example.com."),
Config: &route53.HostedZoneConfig{
PrivateZone: aws.Bool(true),
},
}, []*route53.VPC{{
VPCId: aws.String("vpc-234"),
}})
mockRoute53.MockCreateZone(&route53.HostedZone{
Id: aws.String("/hostedzone/Z3AFAKE1ZOMORE"),
Name: aws.String("private.example.com."),
Config: &route53.HostedZoneConfig{
PrivateZone: aws.Bool(true),
},
}, []*route53.VPC{{
VPCId: aws.String("vpc-123"),
}})
mockEC2.Images = append(mockEC2.Images, &ec2.Image{
ImageId: aws.String("ami-12345678"),
Name: aws.String("k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21"),
OwnerId: aws.String(awsup.WellKnownAccountKopeio),
RootDeviceName: aws.String("/dev/xvda"),
})
mockEC2.Images = append(mockEC2.Images, &ec2.Image{
ImageId: aws.String("ami-15000000"),
Name: aws.String("k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09"),
OwnerId: aws.String(awsup.WellKnownAccountKopeio),
RootDeviceName: aws.String("/dev/xvda"),
})
}
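// Illustrative sketch (not part of the original file): how the harness above is
// typically wired into a test. The test body itself is hypothetical.
//
//	func TestSomething(t *testing.T) {
//	    h := NewIntegrationTestHarness(t)
//	    defer h.Close()
//	    h.SetupMockAWS()
//	    // ... exercise code that talks to the mocked EC2/Route53 clients ...
//	}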
| [
"\"KEEP_TEMP_DIR\""
]
| []
| [
"KEEP_TEMP_DIR"
]
| [] | ["KEEP_TEMP_DIR"] | go | 1 | 0 | |
pilot/pkg/serviceregistry/kube/client.go | // Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package kube implements the shared and reusable library for Kubernetes
package kube
import (
"fmt"
"os"
// TODO(nmittler): Remove this
_ "github.com/golang/glog"
multierror "github.com/hashicorp/go-multierror"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"os/user"
"istio.io/istio/pkg/log"
// import GKE cluster authentication plugin
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// import OIDC cluster authentication plugin, e.g. for Tectonic
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
// ResolveConfig checks whether to use the in-cluster or out-of-cluster config
func ResolveConfig(kubeconfig string) (string, error) {
// Consistency with kubectl
if kubeconfig == "" {
kubeconfig = os.Getenv("KUBECONFIG")
}
if kubeconfig == "" {
usr, err := user.Current()
if err == nil {
defaultCfg := usr.HomeDir + "/.kube/config"
_, err := os.Stat(kubeconfig)
if err != nil {
kubeconfig = defaultCfg
}
}
}
if kubeconfig != "" {
info, err := os.Stat(kubeconfig)
if err != nil {
if os.IsNotExist(err) {
err = fmt.Errorf("kubernetes configuration file %q does not exist", kubeconfig)
} else {
err = multierror.Append(err, fmt.Errorf("kubernetes configuration file %q", kubeconfig))
}
return "", err
}
// if it's an empty file, switch to in-cluster config
if info.Size() == 0 {
log.Info("using in-cluster configuration")
return "", nil
}
}
return kubeconfig, nil
}
// CreateInterface is a helper function to create Kubernetes interface
func CreateInterface(kubeconfig string) (*rest.Config, kubernetes.Interface, error) {
kube, err := ResolveConfig(kubeconfig)
if err != nil {
return nil, nil, err
}
config, err := clientcmd.BuildConfigFromFlags("", kube)
if err != nil {
return nil, nil, err
}
client, err := kubernetes.NewForConfig(config)
return config, client, err
}
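// Illustrative sketch (not part of the original file): resolving the kubeconfig and
// building a client in one step; the flag variable name is hypothetical.
//
//	_, client, err := CreateInterface(kubeconfigFlag)
//	if err != nil {
//	    log.Errorf("failed to create kube client: %v", err)
//	    return
//	}
//	_ = client // use the kubernetes.Interface as needed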
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
AMmodel/__init__.py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def transfer_weights(source_model: tf.keras.Model,
target_model: tf.keras.Model):
"""
Function to transfer weights from trained model to other one
Args:
source_model: trained `tf.keras.Model`
target_model: target `tf.keras.Model`
Returns:
trained target_model
"""
target_model.set_weights(source_model.get_weights())
return target_model
def load_from_saved_model(model: tf.keras.Model,
saved_path: str):
"""
Load model from saved model path
Args:
model: newly built `tf.keras.Model`
saved_path: `str` path to saved model
Returns:
loaded model
"""
try:
saved_model = tf.keras.models.load_model(saved_path)
except Exception as e:
raise e
model = transfer_weights(saved_model, model)
return model
def load_from_weights(model: tf.keras.Model,
saved_path: str):
"""
Load model from saved weights path
Args:
model: newly built `tf.keras.Model`
saved_path: `str` path to saved weights
Returns:
loaded model
"""
try:
model.load_weights(saved_path)
except Exception as e:
raise e
return model
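# Illustrative sketch (not part of the original module): typical use of the helpers
# above. `build_model()` and the paths are hypothetical placeholders.
#
#   model = build_model()                                      # same architecture as the trained one
#   model = load_from_weights(model, "checkpoints/latest.h5")  # restore from a weights file
#   # or, when a full saved model directory is available:
#   model = load_from_saved_model(model, "exported/am_model")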
| []
| []
| []
| [] | [] | python | null | null | null |
internal/cmdutil/cmdutil.go | package cmdutil
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/planetscale/cli/internal/config"
"github.com/planetscale/cli/internal/printer"
ps "github.com/planetscale/planetscale-go/planetscale"
"github.com/spf13/cobra"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
exec "golang.org/x/sys/execabs"
)
const WarnAuthMessage = "not authenticated yet. Please run 'pscale auth login'" +
"or create a service token with 'pscale service-token create'"
// Helper is passed to every single command and is used by individual
// subcommands.
type Helper struct {
// Config contains globally sourced configuration
Config *config.Config
ConfigFS *config.ConfigFS
// Client returns the PlanetScale API client
Client func() (*ps.Client, error)
// Printer is used to print output of a command to stdout.
Printer *printer.Printer
	// debug defines the debug mode
debug *bool
}
func (h *Helper) SetDebug(debug *bool) {
h.debug = debug
}
func (h *Helper) Debug() bool { return *h.debug }
// RequiredArgs returns a cobra.PositionalArgs validator that errors when the
// required arguments are not available.
func RequiredArgs(reqArgs ...string) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
n := len(reqArgs)
if len(args) >= n {
return nil
}
missing := reqArgs[len(args):]
a := fmt.Sprintf("arguments <%s>", strings.Join(missing, ", "))
if len(missing) == 1 {
a = fmt.Sprintf("argument <%s>", missing[0])
}
return fmt.Errorf("missing %s \n\n%s", a, cmd.UsageString())
}
}
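// Illustrative sketch (not from the original source): wiring RequiredArgs into a
// cobra command so missing positional arguments fail fast. The command and
// argument names are hypothetical.
//
//	cmd := &cobra.Command{
//	    Use:  "branch delete <database> <branch>",
//	    Args: RequiredArgs("database", "branch"),
//	    RunE: func(cmd *cobra.Command, args []string) error { return nil },
//	}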
// CheckAuthentication checks whether the user is authenticated and returns an
// actionable error message.
func CheckAuthentication(cfg *config.Config) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
if cfg.IsAuthenticated() {
return nil
}
return errors.New(WarnAuthMessage)
}
}
// NewZapLogger returns a logger to be used with the sql-proxy. By default it
// only outputs error-level messages, unless debug is true.
func NewZapLogger(debug bool) *zap.Logger {
encoderCfg := zapcore.EncoderConfig{
MessageKey: "msg",
LevelKey: "level",
NameKey: "logger",
TimeKey: "T",
EncodeLevel: zapcore.LowercaseColorLevelEncoder,
EncodeTime: zapcore.RFC3339TimeEncoder,
EncodeDuration: zapcore.StringDurationEncoder,
}
level := zap.ErrorLevel
if debug {
level = zap.DebugLevel
}
logger := zap.New(zapcore.NewCore(zapcore.NewConsoleEncoder(encoderCfg), os.Stdout, level))
return logger
}
// IsUnderHomebrew checks whether the given binary is under the homebrew path.
// copied from: https://github.com/cli/cli/blob/trunk/cmd/gh/main.go#L298
func IsUnderHomebrew(binpath string) bool {
if binpath == "" {
return false
}
brewExe, err := exec.LookPath("brew")
if err != nil {
return false
}
brewPrefixBytes, err := exec.Command(brewExe, "--prefix").Output()
if err != nil {
return false
}
brewBinPrefix := filepath.Join(strings.TrimSpace(string(brewPrefixBytes)), "bin") + string(filepath.Separator)
return strings.HasPrefix(binpath, brewBinPrefix)
}
// HasHomebrew checks whether the user has installed brew
func HasHomebrew() bool {
_, err := exec.LookPath("brew")
return err == nil
}
// MySQLClientPath checks whether the 'mysql' client exists and returns the
// path to the binary. The returned error contains instructions to install the
// client.
func MySQLClientPath() (string, error) {
// 'brew install mysql-client' installs the client into an unusual path
// https://docs.brew.sh/FAQ#why-should-i-install-homebrew-in-the-default-location
var homebrewPrefix string
switch runtime.GOOS {
case "darwin":
homebrewPrefix = "/usr/local"
if runtime.GOARCH == "arm64" {
homebrewPrefix = "/opt/homebrew"
}
case "linux":
homebrewPrefix = "/home/linuxbrew/.linuxbrew"
}
oldpath := os.Getenv("PATH")
newpath := homebrewPrefix + "/opt/mysql-client/bin/" + string(os.PathListSeparator) + oldpath
defer func() {
if err := os.Setenv("PATH", oldpath); err != nil {
fmt.Println("failed to restore PATH", err)
}
}()
if err := os.Setenv("PATH", newpath); err != nil {
return "", err
}
path, err := exec.LookPath("mysql")
if err == nil {
return path, nil
}
msg := "couldn't find the 'mysql' command-line tool required to run this command."
installURL := "https://docs.planetscale.com/reference/planetscale-environment-setup"
switch runtime.GOOS {
case "darwin":
if HasHomebrew() {
return "", fmt.Errorf("%s\nTo install, run: brew install mysql-client", msg)
}
installURL = "https://docs.planetscale.com/reference/planetscale-environment-setup#macos-instructions"
case "linux":
installURL = "https://docs.planetscale.com/reference/planetscale-environment-setup#linux-instructions"
case "windows":
installURL = "https://docs.planetscale.com/reference/planetscale-environment-setup#windows-instructions"
}
return "", fmt.Errorf("%s\nTo install, follow the instructions: %s", msg, installURL)
}
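// Illustrative sketch (not from the original source): a caller could resolve the
// client once and hand it to exec.Command; the arguments shown are hypothetical.
//
//	mysqlPath, err := MySQLClientPath()
//	if err != nil {
//	    return err
//	}
//	c := exec.Command(mysqlPath, "--version")
//	out, err := c.CombinedOutput()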
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
tools/openvino_dev/setup.py | #!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation SPDX-License-Identifier: Apache-2.0
""" Use this script to create a openvino-dev wheel package:
$ python3 setup.py bdist_wheel
"""
# pylint: disable-msg=line-too-long
import os
import sys
import platform
import subprocess # nosec
import shutil
from distutils import log
from distutils.command.build import build
from distutils.command.clean import clean
from pathlib import Path
from fnmatch import fnmatchcase
import pkg_resources
from setuptools.command.install import install
from setuptools import setup, find_namespace_packages
PYTHON_VERSION = f'python{sys.version_info.major}.{sys.version_info.minor}'
SCRIPT_DIR = Path(__file__).resolve().parents[0]
OPENVINO_DIR = Path(__file__).resolve().parents[2]
SRC_DIR = SCRIPT_DIR / 'src'
PKG_INSTALL_CFG = {
'openvino-mo': {
'src_dir': OPENVINO_DIR / 'model-optimizer',
'black_list': ['*unit_tests*'],
'prefix': 'mo',
'extract_entry_points': True,
'extract_requirements': True,
'extract_extras': True,
},
'benchmark_tool': {
'src_dir': OPENVINO_DIR / 'tools' / 'benchmark_tool',
'black_list': [],
'prefix': 'benchmark_tool',
'extract_entry_points': True,
},
"accuracy_checker": {
'src_dir': OPENVINO_DIR / 'tools' / 'pot' / 'thirdparty' / 'open_model_zoo' / 'tools' / 'accuracy_checker', # noqa:E501
'black_list': ['*tests*'],
'prefix': 'accuracy_checker',
'extract_entry_points': True,
},
"omz_tools": {
'src_dir': OPENVINO_DIR / 'tools' / 'pot' / 'thirdparty' / 'open_model_zoo' / 'tools' / 'model_tools', # noqa:E501
'black_list': [],
'prefix': 'omz_tools',
'extract_requirements': True,
'extract_entry_points': True,
'extract_extras': True,
},
"pot": {
'src_dir': OPENVINO_DIR / 'tools' / 'pot',
'black_list': ['*tests*'],
'prefix': 'pot',
'extract_entry_points': True,
},
}
def ignore_patterns(*patterns):
"""
Filter names by given patterns
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
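# For example (illustrative, not part of the original file):
#   exclude = ignore_patterns('*tests*', '*__pycache__*')
#   exclude('pot/tests/test_api.py')   # True  -> the path is skipped
#   exclude('pot/api/engine.py')       # False -> the path is kept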
class CustomBuild(build):
"""Custom implementation of build"""
def run(self):
# pylint: disable-msg=too-many-locals
self.announce('Installing packages', level=log.INFO)
for cmp, cmp_data in PKG_INSTALL_CFG.items():
self.announce(f'Processing package: {cmp}', level=log.INFO)
subprocess.call([sys.executable, 'setup.py', 'install',
'--root', str(SCRIPT_DIR),
'--prefix', str(cmp_data.get("prefix"))],
cwd=str(cmp_data.get('src_dir')))
# grab installed modules
lib_dir = 'lib/site-packages' if platform.system() == 'Windows' else f'lib/{PYTHON_VERSION}/site-packages'
src = SCRIPT_DIR / cmp_data.get('prefix') / lib_dir
egg_info = list(src.glob('**/*.egg-info'))
if egg_info:
def raw_req(req):
req.marker = None
return str(req)
distributions = pkg_resources.find_distributions(str(Path(egg_info[0]).parent))
for dist in distributions:
self.announce(f'Distribution: {dist.egg_name()}', level=log.INFO)
# load install_requires list
install_requires = list(sorted(map(raw_req, dist.requires())))
self.announce(f'Install requires: {install_requires}', level=log.INFO)
if cmp_data.get("extract_requirements"):
self.distribution.install_requires.extend(install_requires)
# load extras_require
if cmp_data.get("extract_extras"):
for extra in dist.extras:
if extra not in self.distribution.extras_require:
self.distribution.extras_require[extra] = []
extras_require = set(map(raw_req, dist.requires((extra,))))
self.announce(f'Extras: {extra}:{extras_require}', level=log.INFO)
self.distribution.extras_require[extra].extend(extras_require)
# extract console scripts
if cmp_data.get("extract_entry_points"):
for console_scripts in dist.get_entry_map('console_scripts'):
self.announce(f'Entry point: {console_scripts}', level=log.INFO)
entry = dist.get_entry_info('console_scripts', console_scripts)
self.distribution.entry_points['console_scripts'].append(str(entry))
# copy modules to the build directory
dst = Path(self.build_lib)
black_list = cmp_data.get('black_list')
exclude = ignore_patterns('*ez_setup*', '*__pycache__*', '*.egg-info*', *black_list)
for path in src.glob('**/*'):
if path.is_dir() or exclude(str(path)):
continue
path_rel = path.relative_to(src)
(dst / path_rel.parent).mkdir(exist_ok=True, parents=True)
shutil.copyfile(path, dst / path_rel)
# add dependecy on runtime package
runtime_req = [f'openvino=={self.distribution.get_version()}']
self.distribution.install_requires.extend(runtime_req)
self.announce(f'{self.distribution.install_requires}', level=log.DEBUG)
self.announce(f'{self.distribution.extras_require}', level=log.DEBUG)
self.announce(f'{self.distribution.entry_points}', level=log.DEBUG)
class CustomInstall(install):
"""Enable build_clib during the installation"""
def run(self):
self.run_command('build')
install.run(self)
class CustomClean(clean):
"""Clean up staging directories"""
def clean(self, install_cfg):
"""Clean components staging directories"""
for comp, comp_data in install_cfg.items():
install_prefix = comp_data.get('prefix')
self.announce(f'Cleaning {comp}: {install_prefix}', level=log.INFO)
if os.path.exists(install_prefix):
shutil.rmtree(install_prefix)
def run(self):
self.clean(PKG_INSTALL_CFG)
for pattern in './build ./dist **/*.pyc **/*.tgz **/*.egg-info'.split(' '):
paths = SCRIPT_DIR.glob(pattern)
for path in paths:
if path.is_file() and path.exists():
path = path.parent
self.announce(f'Cleaning: {path}', level=log.INFO)
shutil.rmtree(path)
clean.run(self)
def get_description(desc_file_path):
"""read description from README.md"""
with open(desc_file_path, 'r', encoding='utf-8') as fstream:
description = fstream.read()
return description
with (SCRIPT_DIR / 'requirements.txt').open() as requirements:
install_reqs = [
str(requirement)
for requirement
in pkg_resources.parse_requirements(requirements)
]
setup(
name='openvino-dev',
version=os.getenv('OPENVINO_VERSION', '0.0.0'),
author='Intel® Corporation',
license='OSI Approved :: Apache Software License',
author_email='[email protected]',
url='https://docs.openvinotoolkit.org/latest/index.html',
download_url='https://github.com/openvinotoolkit/openvino/tags',
description='OpenVINO™ Developer Package',
long_description=get_description(SCRIPT_DIR.parents[1] / 'docs/install_guides/pypi-openvino-dev.md'),
long_description_content_type='text/markdown',
classifiers=[
'Programming Language :: Python :: 3',
'OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
cmdclass={
'build': CustomBuild,
'install': CustomInstall,
'clean': CustomClean,
},
entry_points = {
'console_scripts': [],
},
install_requires=install_reqs,
packages=find_namespace_packages(where=str(SRC_DIR)),
package_dir={'': str(SRC_DIR)},
)
| []
| []
| [
"OPENVINO_VERSION"
]
| [] | ["OPENVINO_VERSION"] | python | 1 | 0 | |
search-spaces/3rd-round.py | # search space for hyperparameter optimization
xgb_space = {'model': xgb.XGBClassifier,
'params': {'n_estimators' : hp.normal('xgb_n', 500, 100),
'learning_rate' : hp.uniform('xgb_eta', 0.01, 0.03),
'max_depth' : hp.quniform('xgb_max_depth', 2, 8, 1),
'min_child_weight' : hp.quniform('xgb_min_child_weight', 1, 6, 1),
'subsample' : hp.uniform('xgb_subsample', 0.8, 1),
'gamma' : hp.uniform('xgb_gamma', 0.0, 0.4),
'colsample_bytree' : hp.uniform('xgb_colsample_bytree', 0.2, 0.8),
'objective': hp.choice('xgb_obj', ['binary:logistic']),
'scale_pos_weight': hp.uniform('xgb_w', 1.0, 4.0)
},
'preproc': {'na_input': {'strategy': 'mean'},
'var_thres': {'threshold': 0.0},
'sel_perc': {'score_func': hp.choice('sel_sf', [f_classif, chi2]),
'percentile': hp.quniform('sel_p', 50, 100, 5)}
},
'resmpl': hp.choice('resmpl', [{'method': False, 'params': False},
{'method': SMOTE,
'params': {'ratio': hp.uniform('rat_s', 1, 24),
'verbose': False,
'kind': 'regular'}},
{'method': NearMiss,
'params': {'ratio': hp.uniform('rat_nm', 1, 24),
'verbose': False,
'version': hp.choice('k_n',[1, 2, 3])}},
]),
'data': hp.choice('dc',[{'real': 'data/engineered-real/train.csv',
'cat': 'data/engineered-cat/train',
'ground-truth': 'data/target.csv'},
{'real': 'data/selected/st-train.csv',
'cat': None,
'ground-truth': 'data/target.csv'}]),
'feat_exp': {'n': 0}, #hp.quniform('exp_n', 0, 100, 20)
'fit_params': {'eval_metric': 'auc'},
'y_transf': hp.choice('trf', [None]),
}
# over and undersampling did not improve results
# just testing stability selection
# search space for hyperparameter optimization
xgb_space = {'model': xgb.XGBClassifier,
'params': {'n_estimators' : hp.normal('xgb_n', 500, 100),
'learning_rate' : hp.uniform('xgb_eta', 0.01, 0.03),
'max_depth' : hp.quniform('xgb_max_depth', 2, 8, 1),
'min_child_weight' : hp.quniform('xgb_min_child_weight', 1, 6, 1),
'subsample' : hp.uniform('xgb_subsample', 0.8, 1),
'gamma' : hp.uniform('xgb_gamma', 0.0, 0.4),
'colsample_bytree' : hp.uniform('xgb_colsample_bytree', 0.2, 0.8),
'objective': hp.choice('xgb_obj', ['binary:logistic']),
'scale_pos_weight': hp.uniform('xgb_w', 1.0, 4.0)
},
'preproc': {'na_input': {'strategy': 'mean'},
'var_thres': {'threshold': 0.0},
                         'sel_perc': False  # percentile selection disabled (features already selected upstream)
},
'resmpl': hp.choice('resmpl', [{'method': False, 'params': False}]),
'data': hp.choice('dc',[{'real': 'data/selected/st-train.csv',
'cat': None,
'ground-truth': 'data/target.csv'}]),
'feat_exp': {'n': 0}, #hp.quniform('exp_n', 0, 100, 20)
'fit_params': {'eval_metric': 'auc'},
'y_transf': hp.choice('trf', [None]),
}
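# Illustrative sketch (not part of the original file): a space like the one above is
# usually handed to hyperopt's fmin. `objective` is a hypothetical function that
# trains the configured pipeline and returns {'loss': 1 - auc, 'status': STATUS_OK}.
#
#   from hyperopt import fmin, tpe, Trials
#
#   trials = Trials()
#   best = fmin(fn=objective, space=xgb_space, algo=tpe.suggest,
#               max_evals=100, trials=trials)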
| []
| []
| []
| [] | [] | python | null | null | null |
peer/common/ordererenv.go | /*
Copyright IBM Corp. 2016-2017 All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package common
import (
"os"
"time"
"deepchain/common/flogging"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
OrderingEndpoint string
tlsEnabled bool
clientAuth bool
caFile string
keyFile string
certFile string
ordererTLSHostnameOverride string
connTimeout time.Duration
)
// SetOrdererEnv adds orderer-specific settings to the global Viper environment
func SetOrdererEnv(cmd *cobra.Command, args []string) {
// read in the legacy logging level settings and, if set,
// notify users of the FABRIC_LOGGING_SPEC env variable
var loggingLevel string
if viper.GetString("logging_level") != "" {
loggingLevel = viper.GetString("logging_level")
} else {
loggingLevel = viper.GetString("logging.level")
}
if loggingLevel != "" {
mainLogger.Warning("CORE_LOGGING_LEVEL is no longer supported, please use the FABRIC_LOGGING_SPEC environment variable")
}
// need to init logging here as cobra does not currently support
// chaining PersistentPreRun functions
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
flogging.InitFromSpec(loggingSpec)
// set the orderer environment from flags
viper.Set("orderer.tls.rootcert.file", caFile)
viper.Set("orderer.tls.clientKey.file", keyFile)
viper.Set("orderer.tls.clientCert.file", certFile)
viper.Set("orderer.address", OrderingEndpoint)
viper.Set("orderer.tls.serverhostoverride", ordererTLSHostnameOverride)
viper.Set("orderer.tls.enabled", tlsEnabled)
viper.Set("orderer.tls.clientAuthRequired", clientAuth)
viper.Set("orderer.client.connTimeout", connTimeout)
}
// AddOrdererFlags adds flags for orderer-related commands
func AddOrdererFlags(cmd *cobra.Command) {
flags := cmd.PersistentFlags()
flags.StringVarP(&OrderingEndpoint, "orderer", "o", "", "Ordering service endpoint")
flags.BoolVarP(&tlsEnabled, "tls", "", false, "Use TLS when communicating with the orderer endpoint")
flags.BoolVarP(&clientAuth, "clientauth", "", false,
"Use mutual TLS when communicating with the orderer endpoint")
flags.StringVarP(&caFile, "cafile", "", "",
"Path to file containing PEM-encoded trusted certificate(s) for the ordering endpoint")
flags.StringVarP(&keyFile, "keyfile", "", "",
"Path to file containing PEM-encoded private key to use for mutual TLS "+
"communication with the orderer endpoint")
flags.StringVarP(&certFile, "certfile", "", "",
"Path to file containing PEM-encoded X509 public key to use for "+
"mutual TLS communication with the orderer endpoint")
flags.StringVarP(&ordererTLSHostnameOverride, "ordererTLSHostnameOverride",
"", "", "The hostname override to use when validating the TLS connection to the orderer.")
flags.DurationVarP(&connTimeout, "connTimeout",
"", 3*time.Second, "Timeout for client to connect")
}
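// Illustrative sketch (not part of the original file): with these flags registered,
// an orderer-facing CLI invocation might look like the following; the endpoint and
// certificate path are hypothetical.
//
//	peer channel fetch config -o orderer.example.com:7050 --tls --cafile /path/to/orderer-ca.crt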
| [
"\"FABRIC_LOGGING_SPEC\""
]
| []
| [
"FABRIC_LOGGING_SPEC"
]
| [] | ["FABRIC_LOGGING_SPEC"] | go | 1 | 0 | |
server.go | package main
import (
"io"
"math/rand"
"net"
"github.com/ttaylorr/minecraft/chat"
"github.com/ttaylorr/minecraft/protocol"
"github.com/ttaylorr/minecraft/protocol/packet"
)
func main() {
conn, _ := net.Listen("tcp", "0.0.0.0:25565")
for {
client, _ := conn.Accept()
go handleConnection(protocol.NewConnection(client))
}
}
func handleConnection(c *protocol.Connection) {
for {
p, err := c.Next()
if err == io.EOF {
return
}
switch t := p.(type) {
case packet.Handshake:
state := protocol.State(uint8(t.NextState))
c.SetState(state)
case packet.StatusRequest:
resp := packet.StatusResponse{}
resp.Status.Version.Name = "1.8.8"
resp.Status.Version.Protocol = 47
resp.Status.Players.Max = rand.Intn(100)
resp.Status.Players.Online = rand.Intn(101)
resp.Status.Description = chat.TextComponent{
"Hello from Golang!",
chat.Component{
Bold: true,
Color: chat.ColorRed,
},
}
c.Write(resp)
case packet.StatusPing:
pong := packet.StatusPong{}
pong.Payload = t.Payload
c.Write(pong)
}
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
doc/integrations/cortx-s3-slack-bot/app.py | import os
import boto3
import asyncio
from csv import reader
from pathlib import Path
from dotenv import load_dotenv
from botocore.exceptions import ClientError
from slack_bolt.async_app import AsyncApp
from slack_sdk.errors import SlackApiError
from elasticsearch_connector import ElasticsearchConnector
from process_resume import process_resume
from upload_file_to_s3 import upload_file_to_s3
from get_file_from_s3 import get_file_from_s3
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
# connections
# Create Elasticsearch connector
es_client = ElasticsearchConnector(
elastic_domain=os.environ.get("ELASTIC_DOMAIN"), elastic_port=os.environ.get("ELASTIC_PORT"))
# Creating a Bolt app
app = AsyncApp(
token=os.environ.get("SLACK_BOT_TOKEN"),
signing_secret=os.environ.get("SLACK_SIGNING_SECRET"),
)
# Creating a CORTX S3 client
s3_client = boto3.client(
's3', endpoint_url=str(os.environ.get('ENDPOINT_URL')),
aws_access_key_id=str(os.environ.get('AWS_ACCESS_KEY_ID')), aws_secret_access_key=str(os.environ.get('AWS_SECRET_ACCESS_KEY'))
)
# Creating an AWS Textract client
textract_client = boto3.client('textract', aws_access_key_id=str(os.environ.get('AMAZON_AWS_ACCESS_KEY_ID')),
aws_secret_access_key=str(os.environ.get('AMAZON_AWS_SECRET_ACCESS_KEY')))
# Creating an AWS Comprehend client
comprehend_client = boto3.client('comprehend', aws_access_key_id=str(os.environ.get('AMAZON_AWS_ACCESS_KEY_ID')),
aws_secret_access_key=str(os.environ.get('AMAZON_AWS_SECRET_ACCESS_KEY')))
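# The lookups above (and below) expect a .env file roughly like this; the values
# shown are illustrative placeholders only.
#
#   ELASTIC_DOMAIN=http://localhost
#   ELASTIC_PORT=9200
#   SLACK_BOT_TOKEN=xoxb-...
#   SLACK_SIGNING_SECRET=...
#   SLACK_USER_TOKEN=xoxp-...
#   ENDPOINT_URL=http://<cortx-s3-endpoint>
#   AWS_ACCESS_KEY_ID=...             # CORTX S3 credentials
#   AWS_SECRET_ACCESS_KEY=...
#   AMAZON_AWS_ACCESS_KEY_ID=...      # AWS credentials for Textract/Comprehend
#   AMAZON_AWS_SECRET_ACCESS_KEY=...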
@app.middleware # or app.use(log_request)
async def log_request(body, next):
return await next()
# cortx_connector.create_new_bucket("test")
@app.event({
"type": "message",
"subtype": "file_share"
})
async def file_shared(event, say, ack):
"""Whenever a new file is shared in any slack channel, add it to cortx"""
await ack()
channel_id = event["channel"]
user_id = event['user']
file_data = event['files'][0]
await say(text="{} uploaded to s3 bucket!".format(file_data["name"]), channel=channel_id)
await upload_file_to_s3(s3_client=s3_client, es_client=es_client, file_data=file_data, token=os.environ.get(
"SLACK_USER_TOKEN"))
@app.command('/cortx-s3-get')
async def cortx_s3_get(ack, say, command, payload, respond):
"""
Find a file from s3 and send it to the user privately
"""
await ack()
file_name = command['text']
channel_id = payload["user_id"]
await say(text="Trying to find {} in the S3 bucket ....".format(
file_name), channel=channel_id)
isFileFound = await get_file_from_s3(s3_client, es_client, file_name)
if isFileFound:
isDownloaded = False
counter = 0
file_path = os.path.join(os.getcwd(), 'downloads', file_name)
await say(text="Found {}!!!".format(
file_name), channel=channel_id)
# Call the files.upload method using the WebClient
# Uploading files requires the `files:write` scope
while(isDownloaded == False and counter < 20):
if(os.path.exists(file_path)):
try:
result = await app.client.files_upload(
channels=channel_id,
# initial_comment="{}".format(
# file_name),
file=file_path,
)
isDownloaded = True
except SlackApiError as e:
print("Error uploading file: {}".format(e))
await say(text="There was an error with slack! Please try again in a while".format(
file_name), channel=channel_id)
else:
await asyncio.sleep(1)
counter += 1
# print(result)
if os.path.exists(file_path):
os.remove(path=file_path)
else:
await say(text="Sorry! No file was found with the name {}".format(
file_name), channel=channel_id)
@app.command("/cortx-s3-search")
async def cortx_s3_search(body, ack, respond, client):
"""Search for a file on the s3 bucket. Enter a first three words"""
await ack(
text="Accepted!",
blocks=[
{
"type": "section",
"block_id": "b",
"text": {
"type": "mrkdwn",
"text": "Opening file search dialog",
},
}
],
)
res = await client.views_open(
trigger_id=body["trigger_id"],
view={
"type": "modal",
"callback_id": "search",
"title": {
"type": "plain_text",
"text": "Cortx S3 Bucket"
},
"submit": {
"type": "plain_text",
"text": "Submit",
"emoji": True
},
"close": {
"type": "plain_text",
"text": "Cancel",
"emoji": True
},
"blocks": [
{
"type": "input",
"block_id": "my_block",
"element": {
"type": "external_select",
"action_id": "file_select",
"min_query_length": 3,
"placeholder": {
"type": "plain_text",
"text": "What are you looking for?"
}
},
"label": {
"type": "plain_text",
"text": "Search for your file!",
"emoji": True
}
}
]
}
)
@app.options("file_select")
async def show_options(ack, body, payload):
"""Populates the options with all the search results from elasticsearch
For Example: -
'abc' will give 'abc.csv', 'abcd.csv' ,'abcde.csv' as results
"""
options = []
search_term = body["value"]
files_found = es_client.search(search_term)
for file in files_found:
options.append(
{"text": {"type": "plain_text", "text": file["_id"]}, "value": file["_id"]})
await ack(options=options)
@app.view("search")
async def view_submission(ack, say, body, respond):
await ack()
file_name = body["view"]["state"]["values"]["my_block"]["file_select"]["selected_option"]["value"]
channel_id = body["user"]["id"]
await say(text="Getting {} ....".format(
file_name), channel=channel_id)
isFileFound = await get_file_from_s3(s3_client, es_client, file_name)
if isFileFound:
isDownloaded = False
counter = 0
# Call the files.upload method using the WebClient
# Uploading files requires the `files:write` scope
while(isDownloaded == False and counter < 20):
file_path = os.path.join(os.getcwd(), 'downloads', file_name)
if(os.path.exists(file_path)):
try:
result = await app.client.files_upload(
channels=channel_id,
# initial_comment="{}".format(
# file_name),
file=file_path,
)
isDownloaded = True
except SlackApiError as e:
print("Error uploading file: {}".format(e))
await say(text="There was an error with slack! Please try again in a while".format(
file_name), channel=channel_id)
else:
await asyncio.sleep(1)
counter += 1
else:
await say(text="Sorry! No file was found with the name {}".format(
file_name), channel=channel_id)
@app.command('/cortx-s3-delete')
async def cortx_s3_delete(ack, say, command, payload, respond):
"""
Delete a file from cortx s3 given the filename
"""
file_name = command['text']
channel_id = payload["user_id"]
if(es_client.check_if_doc_exists(file_name=file_name)):
await ack(blocks=[
{
"type": "section",
"block_id": "b",
"text": {
"type": "mrkdwn",
"text": "Are you sure you want to permanently delete {} from the s3 bucket ?".format(file_name),
},
"accessory": {
"type": "button",
"action_id": "delete_file_button",
"text": {"type": "plain_text", "text": "Delete"},
"value": file_name,
"style": "danger"
},
}
])
else:
await ack(
blocks=[
{
"type": "section",
"block_id": "b",
"text": {
"type": "mrkdwn",
"text": ":negative_squared_cross_mark: No file found with the name {}".format(
file_name
),
},
}
],
)
@app.action('delete_file_button')
async def delete_file_button_clicked(ack, body, respond):
"""
This function is called when the delete file button is clicked on slack
"""
file_name = body["actions"][0]["value"]
await ack()
try:
response = s3_client.delete_object(Bucket='testbucket', Key=file_name)
es_client.delete_doc(file_name)
await respond(
blocks=[
{
"type": "section",
"block_id": "b",
"text": {
"type": "mrkdwn",
"text": ":white_check_mark: {} was deleted :wastebasket: !".format(
file_name
),
},
}
],
)
except ClientError as e:
await respond(
blocks=[
{
"type": "section",
"block_id": "b",
"text": {
"type": "mrkdwn",
"text": ":X: Couldn't delete {} from S3".format(
file_name
),
},
}
],
)
print(e)
@app.command('/cortx-s3-resume-data')
async def cortx_s3_resume_data(ack, body, say, respond, payload):
"""Getting resume data from S3 bucket"""
await ack()
csv_file_path = os.path.join(os.getcwd(), 'resume_data', 'resume_data.csv')
fields = []
with open(csv_file_path, 'r') as read_obj:
csv_reader = reader(read_obj)
for count, row in enumerate(csv_reader):
if count == 0:
fields.append({
"type": "mrkdwn",
"text": "*Name*",
})
fields.append(
{
"type": "mrkdwn",
"text": "*Email*"
}
)
else:
fields.append({
"type": "plain_text",
"text": row[0],
"emoji": True
}
)
fields.append(
{
"type": "mrkdwn",
"text": row[1]
}
)
await respond(
blocks=[{
"type": "section",
"fields": fields
}]
)
@app.event({
"type": "message",
"subtype": "channel_join"
})
async def channel_join(ack, event, respond, say):
"""This function is called when we someone joins the cortx-s3-test slack channel"""
await ack()
user_id = event["user"]
joined_channel_id = event["channel"]
result = await app.client.conversations_list()
channels = result["channels"]
channel_list = ['cortx-s3-test']
general_channel = [
channel for channel in channels if channel['name'] in channel_list]
general_channel_id = general_channel[0]['id']
if(joined_channel_id == general_channel_id):
await say(channel=joined_channel_id, text=f"Hey <@{user_id}> welcome to <#{joined_channel_id}>! \n Please share your resume in this channel.\n Use the /cortx-s3-upload-resume command so that we can process it, upload only .jpeg or .pdf files\n")
@app.command('/cortx-s3-upload-resume')
async def cortx_s3_upload_resume(ack, say, command, payload):
"""Uploading a resume to Slack to process it and add it to the resume-data.csv file"""
await ack()
file_name = command['text']
user_id = payload["user_id"]
channel_id = payload["channel_id"]
isFileFound = await get_file_from_s3(s3_client, es_client, file_name)
if isFileFound:
await say(text=f"Thank you <@{user_id}> for uploading your resume!", channel=channel_id)
isDownloaded = False
counter = 0
while(isDownloaded == False and counter < 20):
file_path = os.path.join(os.getcwd(), 'downloads', file_name)
if(os.path.exists(file_path)):
isDownloaded = True
process_resume(textract_client=textract_client, comprehend_client=comprehend_client,
file_name=file_name, s3_client=s3_client, es_client=es_client)
else:
await asyncio.sleep(1)
counter += 1
if os.path.exists(file_path):
os.remove(path=file_path)
else:
await say(text="Sorry! No file was found with the name {}".format(
file_name), channel=channel_id)
@app.event("file_shared")
async def file_was_shared():
pass
@app.event({
"type": "message",
"subtype": "message_deleted"
})
async def file_delete():
pass
if __name__ == '__main__':
app.start(port=int(os.environ.get("PORT", 3000)))
| []
| []
| [
"PORT",
"AMAZON_AWS_SECRET_ACCESS_KEY",
"ENDPOINT_URL",
"AWS_SECRET_ACCESS_KEY",
"AMAZON_AWS_ACCESS_KEY_ID",
"ELASTIC_DOMAIN",
"ELASTIC_PORT",
"SLACK_USER_TOKEN",
"AWS_ACCESS_KEY_ID",
"SLACK_BOT_TOKEN",
"SLACK_SIGNING_SECRET"
]
| [] | ["PORT", "AMAZON_AWS_SECRET_ACCESS_KEY", "ENDPOINT_URL", "AWS_SECRET_ACCESS_KEY", "AMAZON_AWS_ACCESS_KEY_ID", "ELASTIC_DOMAIN", "ELASTIC_PORT", "SLACK_USER_TOKEN", "AWS_ACCESS_KEY_ID", "SLACK_BOT_TOKEN", "SLACK_SIGNING_SECRET"] | python | 11 | 0 | |
ares/static/VolumeGlobal.py | """
IntergalacticMedium.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Fri May 24 11:31:06 2013
Description:
"""
import numpy as np
from ..util.Warnings import *
from ..util import ProgressBar
from ..physics.Constants import *
import types, os, re, sys, pickle
from ..util.Misc import num_freq_bins
from ..physics import SecondaryElectrons
from scipy.integrate import dblquad, romb, simps, quad, trapz
from ..util.Warnings import tau_tab_z_mismatch, tau_tab_E_mismatch
try:
import h5py
have_h5py = True
except ImportError:
have_h5py = False
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
log10 = np.log(10.)
E_th = np.array([13.6, 24.4, 54.4])
defkwargs = \
{
'zf':None,
'xray_flux':None,
'epsilon_X': None,
'Gamma': None,
'gamma': None,
'return_rc': False,
'energy_units':False,
'Emax': None,
#'zxavg':0.0,
#'igm':True,
'xavg': 0.0,
'igm_h_1': 1.0,
'igm_h_2': 0.0,
'igm_he_2': 0.0,
'igm_he_3': 0.0,
'cgm_h_1': 1.0,
'cgm_h_2': 0.0,
'cgm_he_2': 0.0,
'cgm_he_3': 0.0,
'igm_e': 0.0,
}
species_i_to_str = {0:'h_1', 1:'he_1', 2:'he_2'}
class GlobalVolume(object):
def __init__(self, background):
"""
Initialize a GlobalVolume.
Parameters
----------
background : ares.solvers.UniformBackground instance.
"""
self.background = background
self.pf = background.pf
self.grid = background.grid
self.cosm = background.cosm
self.hydr = background.hydr
self.pops = background.pops
self.Npops = len(self.pops)
# Include helium opacities approximately?
self.approx_He = self.pf['include_He'] and self.pf['approx_He']
# Include helium opacities self-consistently?
self.self_consistent_He = self.pf['include_He'] \
and (not self.pf['approx_He'])
self.esec = \
SecondaryElectrons(method=self.pf["secondary_ionization"])
# Choose function for computing bound-free absorption cross-sections
if self.pf['approx_sigma']:
from ..physics.CrossSections import \
ApproximatePhotoIonizationCrossSection as sigma
else:
from ..physics.CrossSections import \
PhotoIonizationCrossSection as sigma
self.sigma = sigma
self.sigma0 = sigma(E_th[0]) # Hydrogen ionization threshold
self._set_integrator()
@property
def rates_no_RT(self):
if not hasattr(self, '_rates_no_RT'):
self._rates_no_RT = \
{'k_ion': np.zeros((self.grid.dims,
self.grid.N_absorbers)),
'k_heat': np.zeros((self.grid.dims,
self.grid.N_absorbers)),
'k_ion2': np.zeros((self.grid.dims,
self.grid.N_absorbers, self.grid.N_absorbers)),
}
return self._rates_no_RT
#def _fetch_tau(self, pop, zpf, Epf):
# """
# Look for optical depth tables. Supply corrected energy and redshift
# arrays if there is a mistmatch between those generated from information
# in the parameter file and those found in the optical depth table.
#
# .. note:: This will only be called from UniformBackground, and on
# populations which are using the generator framework.
#
# Parameters
# ----------
# popid : int
# ID # for population of interest.
# zpf : np.ndarray
# What the redshifts should be according to the parameter file.
# Epf : np.ndarray
# What the energies should be according to the parameter file.
#
# Returns
# -------
# Energies and redshifts, potentially revised from Epf and zpf.
#
# """
#
# for i in range(self.Npops):
# if pop == self.pops[i]:
# band = self.background.bands_by_pop[i]
# break
#
# # First, look in CWD or $ARES (if it exists)
# self.tabname = self._load_tau(pop, pop.pf['tau_prefix'])
#
# if not self.tabname:
# return zpf, Epf, None
#
# # If we made it this far, we found a table that may be suitable
# ztab, Etab, tau = self._read_tau(self.tabname)
#
# # Return right away if there's no potential for conflict
# if (zpf is None) and (Epf is None):
# return ztab, Etab, tau
#
# # Figure out if the tables need fixing
# zmax_ok = \
# (ztab.max() >= zpf.max()) or \
# np.allclose(ztab.max(), zpf.max())
# zmin_ok = \
# (ztab.min() <= zpf.min()) or \
# np.allclose(ztab.min(), zpf.min())
#
# Emin_ok = \
# (Etab.min() <= Epf.min()) or \
# np.allclose(Etab.min(), Epf.min())
#
# # Results insensitive to Emax (so long as its relatively large)
# # so be lenient with this condition (100 eV or 1% difference
# # between parameter file and lookup table)
# Emax_ok = np.allclose(Etab.max(), Epf.max(), atol=100., rtol=1e-2)
#
# # Check redshift bounds
# if not (zmax_ok and zmin_ok):
# if not zmax_ok:
# tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)
# sys.exit(1)
# else:
# if self.pf['verbose']:
# tau_tab_z_mismatch(self, zmin_ok, zmax_ok, ztab)
#
# if not (Emax_ok and Emin_ok):
# if self.pf['verbose']:
# tau_tab_E_mismatch(pop, self.tabname, Emin_ok, Emax_ok, Etab)
#
# if Etab.max() < Epf.max():
# sys.exit(1)
#
# # Correct for inconsistencies between parameter file and table
# # By effectively masking out those elements with tau -> inf
# if Epf.min() > Etab.min():
# Ediff = Etab - Epf.min()
# i_E0 = np.argmin(np.abs(Ediff))
# if Ediff[i_E0] < 0:
# i_E0 += 1
#
# #tau[:,0:i_E0+1] = np.inf
# else:
# i_E0 = 0
#
# if Epf.max() < Etab.max():
# Ediff = Etab - Epf.max()
# i_E1 = np.argmin(np.abs(Ediff))
# if Ediff[i_E1] < 0:
# i_E1 += 1
#
# #tau[:,i_E1+1:] = np.inf
# else:
# i_E1 = None
#
# # We're done!
# return ztab, Etab[i_E0:i_E1], tau[:,i_E0:i_E1]
@property
def E(self):
if not hasattr(self, '_E'):
self._tabulate_atomic_data()
return self._E
@property
def sigma_E(self):
if not hasattr(self, '_sigma_E'):
self._tabulate_atomic_data()
return self._sigma_E
def _tabulate_atomic_data(self):
"""
Pre-compute cross sections and such for each source population.
Returns
-------
Nothing. Sets the following attributes:
sigma_E
log_sigma_E
fheat, flya, fion
"""
# Remember: these will all be [Npops, Nbands/pop, Nenergies/band]
self._E = self.background.energies
self.logE = [[] for k in range(self.Npops)]
self.dlogE = [[] for k in range(self.Npops)]
self.fheat = [[] for k in range(self.Npops)]
self.flya = [[] for k in range(self.Npops)]
# These are species dependent
self._sigma_E = {}
self.fion = {}
for species in ['h_1', 'he_1', 'he_2']:
self._sigma_E[species] = [[] for k in range(self.Npops)]
self.fion[species] = [[] for k in range(self.Npops)]
##
# Note: If secondary_ionization > 1, there will be an ionized fraction
# dimension in fion and fheat.
##
# Loop over populations
for i, pop in enumerate(self.pops):
# This means the population is completely approximate
if not np.any(self.background.solve_rte[i]):
self.logE[i] = [None]
self.dlogE[i] = [None]
self.fheat[i] = [None]
self.flya[i] = [None]
for species in ['h_1', 'he_1', 'he_2']:
self.fion[species][i] = [None]
self._sigma_E[species][i] = [None]
continue
##
# If we make it here, the population has at least one band that
# requires a detailed solution to the RTE
##
Nbands = len(self.background.energies[i])
self.logE[i] = [None for k in range(Nbands)]
self.dlogE[i] = [None for k in range(Nbands)]
self.fheat[i] = [None for k in range(Nbands)]
self.flya[i] = [None for k in range(Nbands)]
for species in ['h_1', 'he_1', 'he_2']:
self.fion[species][i] = [None for k in range(Nbands)]
self._sigma_E[species][i] = [None for k in range(Nbands)]
# Loop over each band for this population
for j, band in enumerate(self.background.bands_by_pop[i]):
if band is None:
continue
need_tab = self.pops[i].is_xray_src \
and np.any(np.array(band) > E_LL)
if (not self.background.solve_rte[i][j]) or \
(not need_tab):
continue
else:
self.fheat[i][j] = \
[np.ones([self.background.energies[i][j].size,
len(self.esec.x)]) \
for j in range(Nbands)]
self.flya[i] = \
[np.ones([self.background.energies[i][j].size,
len(self.esec.x)]) \
for j in range(Nbands)]
for species in ['h_1', 'he_1', 'he_2']:
if self.esec.method > 1:
self._sigma_E[species][i] = \
[np.ones([self.background.energies[i][j].size,
len(self.esec.x)]) \
for j in range(Nbands)]
self.fion[species][i] = \
[np.ones([self.background.energies[i][j].size,
len(self.esec.x)]) \
for j in range(Nbands)]
else:
self._sigma_E[species][i] = [None for k in range(Nbands)]
self.fion[species][i] = [None for k in range(Nbands)]
self.fheat[i] = [None for k in range(Nbands)]
self.flya[i] = [None for k in range(Nbands)]
# More convenient variables
E = self._E[i][j]
N = E.size
# Compute some things we need, like bound-free cross-section
self.logE[i][j] = np.log10(E)
self.dlogE[i][j] = np.diff(self.logE[i][j])
#
for k, species in enumerate(['h_1', 'he_1', 'he_2']):
self._sigma_E[species][i][j] = \
                    np.array([self.sigma(nrg, k) for nrg in E])
# Pre-compute secondary ionization and heating factors
if self.esec.method > 1:
# Don't worry: we'll fill these in in a sec!
self.fheat[i][j] = np.ones([N, len(self.esec.x)])
self.flya[i][j] = np.ones([N, len(self.esec.x)])
# Must evaluate at ELECTRON energy, not photon energy
for k, nrg in enumerate(E - E_th[0]):
self.fheat[i][j][k] = \
self.esec.DepositionFraction(self.esec.x, E=nrg,
channel='heat')
self.fion['h_1'][i][j][k] = \
self.esec.DepositionFraction(self.esec.x, E=nrg,
channel='h_1')
if self.pf['secondary_lya']:
self.flya[i][j][k] = \
self.esec.DepositionFraction(self.esec.x, E=nrg,
channel='lya')
# Helium
if self.pf['include_He'] and not self.pf['approx_He']:
# Don't worry: we'll fill these in in a sec!
self.fion['he_1'][i][j] = np.ones([N, len(self.esec.x)])
self.fion['he_2'][i][j] = np.ones([N, len(self.esec.x)])
for k, nrg in enumerate(E - E_th[1]):
self.fion['he_1'][i][j][k] = \
self.esec.DepositionFraction(self.esec.x,
E=nrg, channel='he_1')
for k, nrg in enumerate(E - E_th[2]):
self.fion['he_2'][i][j][k] = \
self.esec.DepositionFraction(self.esec.x,
E=nrg, channel='he_2')
else:
self.fion['he_1'][i][j] = np.zeros([N, len(self.esec.x)])
self.fion['he_2'][i][j] = np.zeros([N, len(self.esec.x)])
return
def _set_integrator(self):
self.integrator = self.pf["unsampled_integrator"]
self.sampled_integrator = self.pf["sampled_integrator"]
self.rtol = self.pf["integrator_rtol"]
self.atol = self.pf["integrator_atol"]
self.divmax = int(self.pf["integrator_divmax"])
#def _read_tau(self, fn):
# """ Read optical depth table. """
#
# if type(fn) is dict:
#
# E0 = fn['E'].min()
# E1 = fn['E'].max()
# E = fn['E']
# z = fn['z']
# x = z + 1
# N = E.size
#
# R = x[1] / self.x[0]
#
# tau = fn['tau']
#
# elif re.search('hdf5', fn):
#
# f = h5py.File(self.tabname, 'r')
#
# E0 = min(f['photon_energy'].value)
# E1 = max(f['photon_energy'].value)
# E = f['photon_energy'].value
# z = f['redshift'].value
# x = z + 1
# N = E.size
#
# R = x[1] / x[0]
#
# tau = f['tau'].value
# f.close()
#
# elif re.search('npz', fn) or re.search('pkl', fn):
#
# if re.search('pkl', fn):
# f = open(fn, 'rb')
# data = pickle.load(f)
# else:
# f = open(fn, 'r')
# data = dict(np.load(f))
#
# E0 = data['E'].min()
# E1 = data['E'].max()
# E = data['E']
# z = data['z']
# x = z + 1
# N = E.size
#
# R = x[1] / x[0]
#
# tau = tau = data['tau']
# f.close()
# else:
# raise NotImplemented('Don\'t know how to read %s.' % fn)
#
# return z, E, tau
#def _tau_name(self, pop, suffix='hdf5'):
# """
# Return name of table based on its properties.
# """
#
# if not have_h5py:
# suffix == 'pkl'
#
# HorHe = 'He' if self.pf['include_He'] else 'H'
#
# zf = self.pf['final_redshift']
# zi = self.pf['initial_redshift']
#
# L, N = self._tau_shape(pop)
#
# E0 = pop.pf['pop_Emin']
# E1 = pop.pf['pop_Emax']
#
# fn = lambda z1, z2, E1, E2: \
# 'optical_depth_%s_%ix%i_z_%i-%i_logE_%.2g-%.2g.%s' \
# % (HorHe, L, N, z1, z2, E1, E2, suffix)
#
# return fn(zf, zi, np.log10(E0), np.log10(E1)), fn
#def _load_tau(self, pop, prefix=None):
# """
# Find an optical depth table.
# """
#
# fn, fn_func = self._tau_name(pop)
#
# if prefix is None:
# ares_dir = os.environ.get('ARES')
# if not ares_dir:
# print "No ARES environment variable."
# return None
#
# input_dirs = [os.path.join(ares_dir,'input','optical_depth')]
#
# else:
# if type(prefix) is str:
# input_dirs = [prefix]
# else:
# input_dirs = prefix
#
# guess = os.path.join(input_dirs[0], fn)
# if os.path.exists(guess):
# return guess
#
# ## Find exactly what table should be
# zmin, zmax, Nz, lEmin, lEmax, chem, pre, post = self._parse_tab(fn)
#
# ok_matches = []
# perfect_matches = []
#
# # Loop through input directories
# for input_dir in input_dirs:
#
# # Loop over files in input_dir, look for best match
# for fn1 in os.listdir(input_dir):
#
# if re.search('hdf5', fn1) and (not have_h5py):
# continue
#
# tab_name = os.path.join(input_dir, fn1)
#
# try:
# zmin_f, zmax_f, Nz_f, lEmin_f, lEmax_f, chem_f, p1, p2 = \
# self._parse_tab(fn1)
# except:
# continue
#
# # Dealbreakers
# if Nz_f != Nz:
# continue
# if zmax_f < zmax:
# continue
# if chem_f != chem:
# continue
#
# # Continue with possible matches
# for fmt in ['pkl', 'npz', 'hdf5']:
#
# if fn1 == fn and fmt == self.pf['preferred_format']:
# perfect_matches.append(tab_name)
# continue
#
# if c and fmt == self.pf['preferred_format']:
# perfect_matches.append(tab_name)
# continue
#
# # If number of redshift bins and energy range right...
# if re.search(pre, fn1) and re.search(post, fn1):
# if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:
# perfect_matches.append(tab_name)
# else:
# ok_matches.append(tab_name)
#
# # If number of redshift bins is right...
# elif re.search(pre, fn1):
#
# if re.search(fmt, fn1) and fmt == self.pf['preferred_format']:
# perfect_matches.append(tab_name)
# else:
# ok_matches.append(tab_name)
#
# if perfect_matches:
# return perfect_matches[0]
# elif ok_matches:
# return ok_matches[0]
# else:
# return None
#def _parse_tab(self, fn):
#
# tmp1, tmp2 = fn.split('_z_')
# pre = tmp1[0:tmp1.rfind('x')]
# red, tmp3 = fn.split('_logE_')
# post = '_logE_' + tmp3.replace('.hdf5', '')
#
# # Find exactly what table should be
# zmin, zmax = map(float, red[red.rfind('z')+2:].partition('-')[0::2])
# logEmin, logEmax = map(float, tmp3[tmp3.rfind('E')+1:tmp3.rfind('.')].partition('-')[0::2])
#
# Nz = pre[pre.rfind('_')+1:]
#
# # Hack off Nz string and optical_depth_
# chem = pre.strip(Nz)[14:-1]#.strip('optical_depth_')
#
# return zmin, zmax, int(Nz), logEmin, logEmax, chem, pre, post
#
#def _tau_shape(self, pop):
# """
# Determine dimensions of optical depth table.
#
# Unfortunately, this is a bit redundant with the procedure in
# self._init_xrb, but that's the way it goes.
# """
#
# # Set up log-grid in parameter x = 1 + z
# x = np.logspace(np.log10(1+self.pf['final_redshift']),
# np.log10(1+self.pf['initial_redshift']),
# int(pop.pf['pop_tau_Nz']))
# z = x - 1.
# logx = np.log10(x)
# logz = np.log10(z)
#
# # Constant ratio between elements in x-grid
# R = x[1] / x[0]
# logR = np.log10(R)
#
# E0 = pop.pf['pop_Emin']
#
# # Create mapping to frequency space
# E = 1. * E0
# n = 1
# while E < pop.pf['pop_Emax']:
# E = E0 * R**(n - 1)
# n += 1
#
# # Set attributes for dimensions of optical depth grid
# L = len(x)
#
# # Frequency grid must be index 1-based.
# N = num_freq_bins(L, zi=self.pf['initial_redshift'],
# zf=self.pf['final_redshift'], Emin=E0,
# Emax=pop.pf['pop_Emax'])
# N -= 1
#
# return L, N
def RestFrameEnergy(self, z, E, zp):
"""
Return energy of a photon observed at (z, E) and emitted at zp.
"""
return E * (1. + zp) / (1. + z)
def ObserverFrameEnergy(self, z, Ep, zp):
"""
What is the energy of a photon observed at redshift z and emitted
at redshift zp and energy Ep?
"""
return Ep * (1. + z) / (1. + zp)
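    # Worked example (numbers are illustrative only, not from the source): a
    # photon observed at z = 6 with E ~ 636.4 eV was emitted at zp = 10 with
    # rest-frame energy RestFrameEnergy(6, 636.4, 10) = 636.4 * 11 / 7 ~ 1000 eV,
    # and ObserverFrameEnergy(6, 1000., 10) = 1000. * 7 / 11 ~ 636.4 eV inverts it.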
def Jc(self, z, E):
"""
Flux corresponding to one photon per hydrogen atom at redshift z.
"""
return c * self.cosm.nH0 * (1. + z)**3 / 4. / np.pi \
/ (E * erg_per_ev / h)
def rate_to_coefficient(self, z, species=0, zone='igm', **kw):
"""
Convert an ionization/heating rate to a rate coefficient.
Provides units of per atom.
"""
if self.pf['photon_counting']:
prefix = zone
else:
prefix = 'igm'
if species == 0:
weight = 1. / self.cosm.nH(z) / kw['%s_h_1' % prefix]
elif species == 1:
weight = 1. / self.cosm.nHe(z) / kw['%s_he_1' % prefix]
elif species == 2:
weight = 1. / self.cosm.nHe(z) / kw['%s_he_2' % prefix]
return weight
def coefficient_to_rate(self, z, species=0, **kw):
return 1. / self.rate_to_coefficient(z, species, **kw)
def _fix_kwargs(self, functionify=False, popid=0, band=0, **kwargs):
kw = defkwargs.copy()
kw.update(kwargs)
pop = self.pops[popid]
if functionify and type(kw['xavg']) is not types.FunctionType:
tmp = kw['xavg']
kw['xavg'] = lambda z: tmp
if kw['zf'] is None and pop is not None:
kw['zf'] = pop.zform
if not self.background.solve_rte[popid][band]:
pass
elif (kw['Emax'] is None) and self.background.solve_rte[popid][band] and \
np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX']):
kw['Emax'] = self.background.energies[popid][band][-1]
return kw
def HeatingRate(self, z, species=0, popid=0, band=0, **kwargs):
"""
Compute heating rate density due to emission from this population.
Parameters
----------
z : int, float
Redshift of interest.
species : int
Atom whose liberated electrons cause heating.
Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)
===============
relevant kwargs
===============
        fluxes : np.ndarray
Array of fluxes corresponding to photon energies in self.igm.E.
return_rc : bool
Return actual heating rate, or rate coefficient for heating?
Former has units of erg s**-1 cm**-3, latter has units of
erg s**-1 cm**-3 atom**-1.
Returns
-------
        Proper heating rate density in units of erg s**-1 cm**-3 at redshift z,
due to electrons previously bound to input species.
"""
pop = self.pops[popid]
if not pop.pf['pop_heat_src_igm'] or (z >= pop.zform):
return 0.0
if pop.pf['pop_heat_rate'] is not None:
return pop.HeatingRate(z)
# Grab defaults, do some patches if need be
kw = self._fix_kwargs(**kwargs)
species_str = species_i_to_str[species]
if pop.pf['pop_k_heat_igm'] is not None:
return pop.pf['pop_k_heat_igm'](z)
if band is not None:
solve_rte = self.background.solve_rte[popid][band]
else:
solve_rte = False
# Compute fraction of photo-electron energy deposited as heat
if pop.pf['pop_fXh'] is None:
# Interpolate in energy and ionized fraction
if (self.esec.method > 1) and solve_rte:
if kw['igm_e'] <= self.esec.x[0]:
fheat = self.fheat[popid][band][:,0]
else:
i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))
if self.esec.x[i_x] > kw['igm_e']:
i_x -= 1
j = i_x + 1
fheat = self.fheat[popid][band][:,i_x] \
+ (self.fheat[popid][band][:,j] - self.fheat[popid][band][:,i_x]) \
* (kw['igm_e'] - self.esec.x[i_x]) \
/ (self.esec.x[j] - self.esec.x[i_x])
elif self.esec.method > 1:
raise ValueError('Only know how to do advanced secondary ionization with solve_rte=True')
else:
fheat = self.esec.DepositionFraction(kw['igm_e'])[0]
else:
fheat = pop.pf['pop_fXh']
# Assume heating rate density at redshift z is only due to emission
# from sources at redshift z
if not solve_rte:
weight = self.rate_to_coefficient(z, species, **kw)
Lx = pop.LuminosityDensity(z, Emin=pop.pf['pop_Emin_xray'],
Emax=pop.pf['pop_Emax'])
return weight * fheat * Lx * (1. + z)**3
##
# Otherwise, do the full calculation
##
# Re-normalize to help integrator
norm = J21_num * self.sigma0
# Computes excess photo-electron energy due to ionizations by
# photons with energy E (normalized by sigma0 * Jhat)
if kw['fluxes'][popid] is None:
# If we're approximating helium, must add contributions now
# since we'll never explicitly call this method w/ species=1.
if self.approx_He:
integrand = lambda E, zz: \
self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg']) \
* (self.sigma(E) * (E - E_th[0]) \
+ self.cosm.y * self.sigma(E, species=1) * (E - E_th[1])) \
* fheat / norm / ev_per_hz
# Otherwise, just heating via hydrogen photo-electrons
else:
integrand = lambda E, zz: \
self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'],
zxavg=kw['zxavg']) * self.sigma(E, species=1) \
* (E - E_th[species]) * fheat / norm / ev_per_hz
# This means the fluxes have been computed already - integrate
# over discrete set of points
else:
integrand = self.sigma_E[species_str][popid][band] \
* (self._E[popid][band] - E_th[species])
if self.approx_He:
integrand += self.cosm.y * self.sigma_E['he_1'][popid][band] \
* (self._E[popid][band] - E_th[1])
integrand *= kw['fluxes'][popid][band] * fheat / norm / ev_per_hz
# Compute integral over energy
if type(integrand) == types.FunctionType:
heat, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0,
lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol)
else:
if kw['Emax'] is not None:
imax = np.argmin(np.abs(self._E[popid][band] - kw['Emax']))
if imax == 0:
return 0.0
elif imax == (len(self._E[popid][band]) - 1):
imax = None
if self.sampled_integrator == 'romb':
raise ValueError("Romberg's method cannot be used for integrating subintervals.")
heat = romb(integrand[0:imax] * self.E[0:imax],
dx=self.dlogE[0:imax])[0] * log10
else:
heat = simps(integrand[0:imax] * self._E[popid][band][0:imax],
x=self.logE[popid][band][0:imax]) * log10
else:
imin = np.argmin(np.abs(self._E[popid][band] - pop.pf['pop_Emin']))
if self.sampled_integrator == 'romb':
heat = romb(integrand[imin:] * self._E[popid][band][imin:],
dx=self.dlogE[popid][band][imin:])[0] * log10
elif self.sampled_integrator == 'trapz':
heat = np.trapz(integrand[imin:] * self._E[popid][band][imin:],
x=self.logE[popid][band][imin:]) * log10
else:
heat = simps(integrand[imin:] * self._E[popid][band][imin:],
x=self.logE[popid][band][imin:]) * log10
# Re-normalize, get rid of per steradian units
heat *= 4. * np.pi * norm * erg_per_ev
# Currently a rate coefficient, returned value depends on return_rc
if kw['return_rc']:
pass
else:
heat *= self.coefficient_to_rate(z, species, **kw)
return heat
def IonizationRateCGM(self, z, species=0, popid=0, band=0, **kwargs):
"""
Compute growth rate of HII regions.
Parameters
----------
z : float
current redshift
species : int
Ionization rate for what atom?
Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)
===============
relevant kwargs
===============
fluxes : np.ndarray
Array of fluxes corresponding to photon energies in self.igm.E.
return_rc : bool
            Return the actual ionization rate, or the rate coefficient for
            ionization? The former has units of s**-1 cm**-3, the latter
            units of s**-1 atom**-1.
Returns
-------
Ionization rate. Units determined by value of return_rc keyword
argument, which is False by default.
"""
pop = self.pops[popid]
if band is not None:
b = self.background.bands_by_pop[popid][band]
if not np.any(np.array(b) > E_LL):
return 0.0
if not np.allclose(b[0], E_LL, atol=0.1, rtol=0):
return 0.0
else:
b = [13.6, 24.6]
if (not pop.pf['pop_ion_src_cgm']) or (z > pop.zform):
return 0.0
# Need some guidance from 1-D calculations to do this
if species > 0:
return 0.0
if pop.pf['pop_ion_rate'] is not None:
return pop.IonizationRateCGM(z)
kw = defkwargs.copy()
kw.update(kwargs)
if pop.pf['pop_k_ion_cgm'] is not None:
return self.pf['pop_k_ion_cgm'](z)
if kw['return_rc']:
weight = self.rate_to_coefficient(z, species, **kw)
else:
weight = 1.0
Qdot = pop.PhotonLuminosityDensity(z, Emin=13.6, Emax=24.6)
return weight * Qdot * (1. + z)**3
def IonizationRateIGM(self, z, species=0, popid=0, band=0, **kwargs):
"""
Compute volume averaged hydrogen ionization rate.
Parameters
----------
z : float
redshift
species : int
HI, HeI, or HeII (species=0, 1, 2, respectively)
Returns
-------
Volume averaged ionization rate in units of ionizations per
second. If return_rc=True, will be in units of ionizations per
second per atom.
"""
pop = self.pops[popid]
# z between zform, zdead? must be careful for BHs
if (not pop.pf['pop_ion_src_igm']) or (z > pop.zform):
return 0.0
# Grab defaults, do some patches if need be
kw = self._fix_kwargs(**kwargs)
species_str = species_i_to_str[species]
if pop.pf['pop_k_ion_igm'] is not None:
return pop.pf['pop_k_ion_igm'](z)
if band is not None:
solve_rte = self.background.solve_rte[popid][band]
else:
solve_rte = False
if (not solve_rte) or \
(not np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX'])):
Lx = pop.LuminosityDensity(z, Emin=pop.pf['pop_Emin_xray'],
Emax=pop.pf['pop_Emax'])
weight = self.rate_to_coefficient(z, species, **kw)
primary = weight * Lx \
* (1. + z)**3 / pop.pf['pop_Ex'] / erg_per_ev
fion = self.esec.DepositionFraction(kw['igm_e'], channel='h_1')[0]
return primary * (1. + fion) * (pop.pf['pop_Ex'] - E_th[0]) \
/ E_th[0]
# Full calculation - much like computing integrated flux
norm = J21_num * self.sigma0
# Integrate over function
if kw['fluxes'][popid] is None:
integrand = lambda E, zz: \
self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'],
zxavg=kw['zxavg']) * self.sigma(E, species=species) \
/ norm / ev_per_hz
ion, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0,
lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol)
# Integrate over set of discrete points
else:
integrand = self.sigma_E[species_str][popid][band] \
* kw['fluxes'][popid][band] / norm / ev_per_hz
if self.sampled_integrator == 'romb':
ion = romb(integrand * self.E[popid][band],
dx=self.dlogE[popid][band])[0] * log10
else:
ion = simps(integrand * self.E[popid][band],
x=self.logE[popid][band]) * log10
# Re-normalize
ion *= 4. * np.pi * norm
# Currently a rate coefficient, returned value depends on return_rc
if kw['return_rc']:
pass
else:
ion *= self.coefficient_to_rate(z, species, **kw)
return ion
def SecondaryIonizationRateIGM(self, z, species=0, donor=0, popid=0,
band=0, **kwargs):
"""
Compute volume averaged secondary ionization rate.
Parameters
----------
z : float
redshift
species : int
Ionization rate of what atom?
Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)
donor : int
Which atom gave the electron?
Can be 0, 1, or 2 (HI, HeI, and HeII, respectively)
===============
relevant kwargs
===============
fluxes : np.ndarray
Array of fluxes corresponding to photon energies in self.igm.E.
return_rc : bool
            Return the actual ionization rate, or the rate coefficient for
            ionization? The former has units of s**-1 cm**-3, the latter
            units of s**-1 atom**-1.
Returns
-------
Volume averaged ionization rate due to secondary electrons,
in units of ionizations per second.
"""
pop = self.pops[popid]
if self.pf['secondary_ionization'] == 0:
return 0.0
if not pop.pf['pop_ion_src_igm']:
return 0.0
if band is not None:
solve_rte = self.background.solve_rte[popid][band]
else:
solve_rte = False
# Computed in IonizationRateIGM in this case
if not solve_rte:
return 0.0
if not np.any(self.background.bands_by_pop[popid] > pop.pf['pop_EminX']):
return 0.0
if ((donor or species) in [1,2]) and (not self.pf['include_He']):
return 0.0
# Grab defaults, do some patches if need be
kw = self._fix_kwargs(**kwargs)
#if self.pf['gamma_igm'] is not None:
# return self.pf['gamma_igm'](z)
species_str = species_i_to_str[species]
donor_str = species_i_to_str[donor]
if self.esec.method > 1 and solve_rte:
fion_const = 1.
if kw['igm_e'] == 0:
fion = self.fion[species_str][popid][band][:,0]
else:
i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))
if self.esec.x[i_x] > kw['igm_e']:
i_x -= 1
j = i_x + 1
fion = self.fion[species_str][popid][band][:,i_x] \
                    + (self.fion[species_str][popid][band][:,j] - self.fion[species_str][popid][band][:,i_x]) \
* (kw['igm_e'] - self.esec.x[i_x]) \
/ (self.esec.x[j] - self.esec.x[i_x])
elif self.esec.method > 1:
raise ValueError('Only know how to do advanced secondary ionization with solve_rte=True')
else:
fion = 1.0
fion_const = self.esec.DepositionFraction(kw['igm_e'],
channel=species_str)[0]
norm = J21_num * self.sigma0
if kw['fluxes'][popid] is None:
if self.pf['approx_He']: # assumes lower integration limit > 4 Ryd
integrand = lambda E, zz: \
self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'],
zxavg=kw['zxavg']) * (self.sigma(E) * (E - E_th[0]) \
+ self.cosm.y * self.sigma(E, 1) * (E - E_th[1])) \
/ E_th[0] / norm / ev_per_hz
else:
integrand = lambda E, zz: \
self.rb.AngleAveragedFluxSlice(z, E, zz, xavg=kw['xavg'],
zxavg=kw['zxavg']) * self.sigma(E) * (E - E_th[0]) \
/ E_th[0] / norm / ev_per_hz
else:
integrand = fion * self.sigma_E[donor_str][popid][band] \
* (self.E[popid][band] - E_th[donor])
if self.pf['approx_He']:
integrand += self.cosm.y * self.sigma_E['he_1'][popid][band] \
* (self.E[popid][band] - E_th[1])
integrand *= kw['fluxes'][popid][band] / E_th[species] / norm \
/ ev_per_hz
if type(integrand) == types.FunctionType:
ion, err = dblquad(integrand, z, kw['zf'], lambda a: self.E0,
lambda b: kw['Emax'], epsrel=self.rtol, epsabs=self.atol)
else:
if self.sampled_integrator == 'romb':
ion = romb(integrand * self.E[popid][band],
dx=self.dlogE[popid][band])[0] * log10
else:
ion = simps(integrand * self.E[popid][band],
x=self.logE[popid][band]) * log10
# Re-normalize
ion *= 4. * np.pi * norm * fion_const
# Currently a rate coefficient, returned value depends on return_rc
if kw['return_rc']:
pass
else:
ion *= self.coefficient_to_rate(z, species, **kw)
return ion
def DiffuseLymanAlphaFlux(self, z, **kwargs):
"""
Flux of Lyman-alpha photons induced by photo-electron collisions.
"""
        raise NotImplementedError('DiffuseLymanAlphaFlux is not implemented yet.')
if not self.pf['secondary_lya']:
return 0.0
#return 1e-25
# Grab defaults, do some patches if need be
kw = self._fix_kwargs(**kwargs)
# Compute fraction of photo-electron energy deposited as Lya excitation
if self.esec.method > 1 and (kw['fluxes'][popid] is not None):
if kw['igm_e'] == 0:
flya = self.flya[:,0]
else:
i_x = np.argmin(np.abs(kw['igm_e'] - self.esec.x))
if self.esec.x[i_x] > kw['igm_e']:
i_x -= 1
j = i_x + 1
flya = self.flya[:,i_x] \
+ (self.flya[:,j] - self.flya[:,i_x]) \
* (kw['igm_e'] - self.esec.x[i_x]) \
/ (self.esec.x[j] - self.esec.x[i_x])
else:
return 0.0
# Re-normalize to help integrator
norm = J21_num * self.sigma0
# Compute integrand
integrand = self.sigma_E[species_str] * (self.E - E_th[species])
integrand *= kw['fluxes'] * flya / norm / ev_per_hz
if kw['Emax'] is not None:
imax = np.argmin(np.abs(self.E - kw['Emax']))
if imax == 0:
return 0.0
if self.sampled_integrator == 'romb':
raise ValueError("Romberg's method cannot be used for integrating subintervals.")
heat = romb(integrand[0:imax] * self.E[0:imax], dx=self.dlogE[0:imax])[0] * log10
else:
heat = simps(integrand[0:imax] * self.E[0:imax], x=self.logE[0:imax]) * log10
else:
imin = np.argmin(np.abs(self.E - self.pop.pf['source_Emin']))
if self.sampled_integrator == 'romb':
heat = romb(integrand[imin:] * self.E[imin:],
dx=self.dlogE[imin:])[0] * log10
elif self.sampled_integrator == 'trapz':
heat = np.trapz(integrand[imin:] * self.E[imin:],
x=self.logE[imin:]) * log10
else:
heat = simps(integrand[imin:] * self.E[imin:],
x=self.logE[imin:]) * log10
# Re-normalize, get rid of per steradian units
heat *= 4. * np.pi * norm * erg_per_ev
# Currently a rate coefficient, returned value depends on return_rc
if kw['return_rc']:
pass
else:
heat *= self.coefficient_to_rate(z, species, **kw)
return heat
| [] | [] | ["ARES"] | [] | ["ARES"] | python | 1 | 0 |
util.go | package main
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base32"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"github.com/golang/glog"
"github.com/gorilla/mux"
"gopkg.in/yaml.v2"
)
const (
EnvironmentDevelopment string = "dev"
EnvironmentProduction string = "production"
SPECTRE_DEFAULT_BRAND string = "Spectre"
)
type ReadCloser struct {
io.Reader
io.Closer
}
type WriteCloser struct {
io.Writer
io.Closer
}
func constructMAC(message, key []byte) []byte {
mac := hmac.New(sha256.New, key)
mac.Write(message)
return mac.Sum(nil)
}
func checkMAC(message, messageMAC, key []byte) bool {
return hmac.Equal(messageMAC, constructMAC(message, key))
}
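// Illustrative round-trip (a sketch, not part of the original file): a MAC
// produced by constructMAC verifies with checkMAC when message and key match.
//
//	key := []byte("some-secret")
//	tag := constructMAC([]byte("payload"), key)
//	_ = checkMAC([]byte("payload"), tag, key)  // true
//	_ = checkMAC([]byte("tampered"), tag, key) // false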
var base32Encoder = base32.NewEncoding("abcdefghjkmnopqrstuvwxyz23456789")
func generateRandomBytes(nbytes int) ([]byte, error) {
uuid := make([]byte, nbytes)
n, err := rand.Read(uuid)
if n != len(uuid) || err != nil {
return []byte{}, err
}
return uuid, nil
}
func generateRandomBase32String(nbytes, outlen int) (string, error) {
uuid, err := generateRandomBytes(nbytes)
if err != nil {
return "", err
}
s := base32Encoder.EncodeToString(uuid)
if outlen == -1 {
outlen = len(s)
}
return s[0:outlen], nil
}
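// Illustrative usage (a sketch, not part of the original file): 8 random bytes
// rendered with the custom base32 alphabet, truncated to a 5-character ID.
//
//	if id, err := generateRandomBase32String(8, 5); err == nil {
//		fmt.Println(id) // e.g. "k3v9q"
//	}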
func YAMLUnmarshalFile(filename string, i interface{}) error {
path, err := os.Getwd()
if err != nil {
panic(err)
}
yamlFile, err := os.Open(path + "/" + filename)
if err != nil {
return err
}
fi, err := yamlFile.Stat()
if err != nil {
return err
}
yml := make([]byte, fi.Size())
io.ReadFull(yamlFile, yml)
yamlFile.Close()
err = yaml.Unmarshal(yml, i)
if err != nil {
return err
}
return nil
}
func SlurpFile(path string) (out []byte, err error) {
var file *os.File
if file, err = os.Open(path); err == nil {
buf := &bytes.Buffer{}
io.Copy(buf, file)
out = buf.Bytes()
file.Close()
}
return
}
func BaseURLForRequest(r *http.Request) *url.URL {
determinedScheme := "http"
if RequestIsHTTPS(r) {
determinedScheme = "https"
}
return &url.URL{
Scheme: determinedScheme,
User: r.URL.User,
Host: r.Host,
Path: "/",
}
}
func RequestIsHTTPS(r *http.Request) bool {
proto := strings.ToLower(r.Header.Get("X-Forwarded-Proto"))
if proto == "" {
proto = strings.ToLower(r.URL.Scheme)
}
return proto == "https"
}
func SourceIPForRequest(r *http.Request) string {
ip := r.Header.Get("CF-Connecting-IP")
if ip == "" {
		ip = r.Header.Get("X-Forwarded-For")
if ip == "" {
ip = r.RemoteAddr[:strings.LastIndex(r.RemoteAddr, ":")]
}
}
return ip
}
func HTTPSMuxMatcher(r *http.Request, rm *mux.RouteMatch) bool {
return Env() == EnvironmentDevelopment || RequestIsHTTPS(r)
}
func NonHTTPSMuxMatcher(r *http.Request, rm *mux.RouteMatch) bool {
return !HTTPSMuxMatcher(r, rm)
}
type ReloadFunction func()
var reloadFunctions = []ReloadFunction{}
func RegisterReloadFunction(f ReloadFunction) {
reloadFunctions = append(reloadFunctions, f)
}
func ReloadAll() {
for _, f := range reloadFunctions {
f()
}
}
type ByteSize float64
const (
_ = iota // ignore first value by assigning to blank identifier
KB ByteSize = 1 << (10 * iota)
MB
GB
TB
PB
EB
ZB
YB
)
func (b ByteSize) String() string {
switch {
case b >= YB:
return fmt.Sprintf("%.2fYB", b/YB)
case b >= ZB:
return fmt.Sprintf("%.2fZB", b/ZB)
case b >= EB:
return fmt.Sprintf("%.2fEB", b/EB)
case b >= PB:
return fmt.Sprintf("%.2fPB", b/PB)
case b >= TB:
return fmt.Sprintf("%.2fTB", b/TB)
case b >= GB:
return fmt.Sprintf("%.2fGB", b/GB)
case b >= MB:
return fmt.Sprintf("%.2fMB", b/MB)
case b >= KB:
return fmt.Sprintf("%.2fKB", b/KB)
}
return fmt.Sprintf("%.2fB", b)
}
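// Illustrative usage (a sketch, not part of the original file): ByteSize
// implements fmt.Stringer, so values print in human-readable form.
//
//	fmt.Println(ByteSize(1536))            // "1.50KB"
//	fmt.Println(ByteSize(3 * 1024 * 1024)) // "3.00MB"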
var environment string = EnvironmentDevelopment
func Env() string {
return environment
}
func init() {
environment = os.Getenv("SPECTRE_ENV")
if environment != EnvironmentProduction {
environment = EnvironmentDevelopment
}
brand := os.Getenv("SPECTRE_BRAND")
if brand == "" {
brand = SPECTRE_DEFAULT_BRAND
}
RegisterTemplateFunction("env", func() string { return environment })
RegisterTemplateFunction("brand", func() string {
return brand
})
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGHUP)
go func() {
		for range sigChan {
glog.Info("Received SIGHUP")
ReloadAll()
}
}()
}
| ["\"SPECTRE_ENV\"", "\"SPECTRE_BRAND\""] | [] | ["SPECTRE_BRAND", "SPECTRE_ENV"] | [] | ["SPECTRE_BRAND", "SPECTRE_ENV"] | go | 2 | 0 |
course2021/part3_automation/example1/runner.py | # Step 1: establish path to traci
import os, sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
# Step 2: add traci to be able to access its functionality
import traci
# Step 3: compose a sumo command you would like to run
sumo_command = ["sumo", "-n", "network.net.xml", "-r", "demands.rou.xml"]
# Step 4: open connection between sumo and traci
traci.start(sumo_command)
# Step 5: take simulation steps until there are no more vehicles in the network
while traci.simulation.getMinExpectedNumber() > 0:
traci.simulationStep() # move simulation forward 1 step
###
# Here you can decide what to do with simulation data at each step
###
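    # Optional sketch (assumed additions, not part of the original example):
    # inspect vehicles currently in the network via the TraCI vehicle domain.
    # for veh_id in traci.vehicle.getIDList():
    #     print(veh_id, traci.vehicle.getSpeed(veh_id))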
# Step 6: close connection between sumo and traci
traci.close()
| [] | [] | ["SUMO_HOME"] | [] | ["SUMO_HOME"] | python | 1 | 0 |
python/pyspark/pandas/window.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import (
Any,
Callable,
Generic,
List,
Optional,
cast,
)
from pyspark.sql import Window
from pyspark.sql import functions as F
from pyspark.pandas.missing.window import (
MissingPandasLikeRolling,
MissingPandasLikeRollingGroupby,
MissingPandasLikeExpanding,
MissingPandasLikeExpandingGroupby,
)
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import FrameLike
from pyspark.pandas.groupby import GroupBy, DataFrameGroupBy
from pyspark.pandas.internal import NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.utils import scol_for
from pyspark.sql.column import Column
from pyspark.sql.window import WindowSpec
class RollingAndExpanding(Generic[FrameLike], metaclass=ABCMeta):
def __init__(self, window: WindowSpec, min_periods: int):
self._window = window
# This unbounded Window is later used to handle 'min_periods' for now.
self._unbounded_window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
Window.unboundedPreceding, Window.currentRow
)
self._min_periods = min_periods
@abstractmethod
def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:
"""
Wraps a function that handles Spark column in order
to support it in both pandas-on-Spark Series and DataFrame.
Note that the given `func` name should be same as the API's method name.
"""
pass
@abstractmethod
def count(self) -> FrameLike:
pass
def sum(self) -> FrameLike:
def sum(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.sum(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(sum)
def min(self) -> FrameLike:
def min(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.min(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(min)
def max(self) -> FrameLike:
def max(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.max(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(max)
def mean(self) -> FrameLike:
def mean(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.mean(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(mean)
def std(self) -> FrameLike:
def std(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.stddev(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(std)
def var(self) -> FrameLike:
def var(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.variance(scol).over(self._window),
).otherwise(SF.lit(None))
return self._apply_as_series_or_frame(var)
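# A minimal standalone sketch of the masking pattern used above (illustrative
# only; the DataFrame and the column name "v" are assumptions, not part of this
# module): each aggregate is computed over the window, and rows whose running
# row_number() has not yet reached min_periods are masked to null.
#
#   w = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
#       Window.unboundedPreceding, Window.currentRow)
#   masked_sum = F.when(F.row_number().over(w) >= 3,
#                       F.sum("v").over(w)).otherwise(SF.lit(None))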
class RollingLike(RollingAndExpanding[FrameLike]):
def __init__(
self,
window: int,
min_periods: Optional[int] = None,
):
if window < 0:
raise ValueError("window must be >= 0")
if (min_periods is not None) and (min_periods < 0):
raise ValueError("min_periods must be >= 0")
if min_periods is None:
# TODO: 'min_periods' is not equivalent in pandas because it does not count NA as
# a value.
min_periods = window
window_spec = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
Window.currentRow - (window - 1), Window.currentRow
)
super().__init__(window_spec, min_periods)
def count(self) -> FrameLike:
def count(scol: Column) -> Column:
return F.count(scol).over(self._window)
return self._apply_as_series_or_frame(count).astype("float64") # type: ignore[attr-defined]
class Rolling(RollingLike[FrameLike]):
def __init__(
self,
psdf_or_psser: FrameLike,
window: int,
min_periods: Optional[int] = None,
):
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series
super().__init__(window, min_periods)
if not isinstance(psdf_or_psser, (DataFrame, Series)):
raise TypeError(
"psdf_or_psser must be a series or dataframe; however, got: %s"
% type(psdf_or_psser)
)
self._psdf_or_psser = psdf_or_psser
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeRolling, item):
property_or_func = getattr(MissingPandasLikeRolling, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
raise AttributeError(item)
def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:
return cast(
FrameLike,
self._psdf_or_psser._apply_series_op(
lambda psser: psser._with_new_scol(func(psser.spark.column)), # TODO: dtype?
should_resolve=True,
),
)
def count(self) -> FrameLike:
"""
The rolling count of any non-NaN observations inside the window.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
        Returns
        -------
        Series or DataFrame
            Returned object type is determined by the caller of the rolling
            calculation.
        See Also
        --------
        Series.expanding : Calling object with Series data.
        DataFrame.expanding : Calling object with DataFrames.
        Series.count : Count of the full Series.
        DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 3, float("nan"), 10])
>>> s.rolling(1).count()
0 1.0
1 1.0
2 0.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.to_frame().rolling(1).count()
0
0 1.0
1 1.0
2 0.0
3 1.0
>>> s.to_frame().rolling(3).count()
0
0 1.0
1 2.0
2 2.0
3 2.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
Calculate rolling summation of given DataFrame or Series.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
rolling summation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).sum()
0 NaN
1 7.0
2 8.0
3 7.0
4 8.0
dtype: float64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 12.0
3 10.0
4 13.0
dtype: float64
For DataFrame, each rolling summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).sum()
A B
0 NaN NaN
1 7.0 25.0
2 8.0 34.0
3 7.0 29.0
4 8.0 40.0
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 12.0 50.0
3 10.0 38.0
4 13.0 65.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
Calculate the rolling minimum.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with a Series.
DataFrame.rolling : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).min()
0 NaN
1 3.0
2 3.0
3 2.0
4 2.0
dtype: float64
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
For DataFrame, each rolling minimum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).min()
A B
0 NaN NaN
1 3.0 9.0
2 3.0 9.0
3 2.0 4.0
4 2.0 4.0
>>> df.rolling(3).min()
A B
0 NaN NaN
1 NaN NaN
2 3.0 9.0
3 2.0 4.0
4 2.0 4.0
"""
return super().min()
def max(self) -> FrameLike:
"""
Calculate the rolling maximum.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.rolling : Series rolling.
DataFrame.rolling : DataFrame rolling.
Series.max : Similar method for Series.
DataFrame.max : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).max()
0 NaN
1 4.0
2 5.0
3 5.0
4 6.0
dtype: float64
>>> s.rolling(3).max()
0 NaN
1 NaN
2 5.0
3 5.0
4 6.0
dtype: float64
For DataFrame, each rolling maximum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).max()
A B
0 NaN NaN
1 4.0 16.0
2 5.0 25.0
3 5.0 25.0
4 6.0 36.0
>>> df.rolling(3).max()
A B
0 NaN NaN
1 NaN NaN
2 5.0 25.0
3 5.0 25.0
4 6.0 36.0
"""
return super().max()
def mean(self) -> FrameLike:
"""
Calculate the rolling mean of the values.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s
0 4
1 3
2 5
3 2
4 6
dtype: int64
>>> s.rolling(2).mean()
0 NaN
1 3.5
2 4.0
3 3.5
4 4.0
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 4.000000
3 3.333333
4 4.333333
dtype: float64
For DataFrame, each rolling mean is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 4 16
1 3 9
2 5 25
3 2 4
4 6 36
>>> df.rolling(2).mean()
A B
0 NaN NaN
1 3.5 12.5
2 4.0 17.0
3 3.5 14.5
4 4.0 20.0
>>> df.rolling(3).mean()
A B
0 NaN NaN
1 NaN NaN
2 4.000000 16.666667
3 3.333333 12.666667
4 4.333333 21.666667
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate rolling standard deviation.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
For DataFrame, each rolling standard deviation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.rolling(2).std()
A B
0 NaN NaN
1 0.000000 0.000000
2 0.707107 7.778175
3 0.707107 9.192388
4 1.414214 16.970563
5 0.000000 0.000000
6 0.000000 0.000000
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased rolling variance.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
For DataFrame, each unbiased rolling variance is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.rolling(2).var()
A B
0 NaN NaN
1 0.0 0.0
2 0.5 60.5
3 0.5 84.5
4 2.0 288.0
5 0.0 0.0
6 0.0 0.0
"""
return super().var()
class RollingGroupby(RollingLike[FrameLike]):
def __init__(
self,
groupby: GroupBy[FrameLike],
window: int,
min_periods: Optional[int] = None,
):
super().__init__(window, min_periods)
self._groupby = groupby
self._window = self._window.partitionBy(*[ser.spark.column for ser in groupby._groupkeys])
self._unbounded_window = self._unbounded_window.partitionBy(
*[ser.spark.column for ser in groupby._groupkeys]
)
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeRollingGroupby, item):
property_or_func = getattr(MissingPandasLikeRollingGroupby, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
raise AttributeError(item)
def _apply_as_series_or_frame(self, func: Callable[[Column], Column]) -> FrameLike:
"""
Wraps a function that handles Spark column in order
to support it in both pandas-on-Spark Series and DataFrame.
Note that the given `func` name should be same as the API's method name.
"""
from pyspark.pandas import DataFrame
groupby = self._groupby
psdf = groupby._psdf
# Here we need to include grouped key as an index, and shift previous index.
# [index_column0, index_column1] -> [grouped key, index_column0, index_column1]
new_index_scols: List[Column] = []
new_index_spark_column_names = []
new_index_names = []
new_index_fields = []
for groupkey in groupby._groupkeys:
index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))
new_index_scols.append(groupkey.spark.column.alias(index_column_name))
new_index_spark_column_names.append(index_column_name)
new_index_names.append(groupkey._column_label)
new_index_fields.append(groupkey._internal.data_fields[0].copy(name=index_column_name))
for new_index_scol, index_name, index_field in zip(
psdf._internal.index_spark_columns,
psdf._internal.index_names,
psdf._internal.index_fields,
):
index_column_name = SPARK_INDEX_NAME_FORMAT(len(new_index_scols))
new_index_scols.append(new_index_scol.alias(index_column_name))
new_index_spark_column_names.append(index_column_name)
new_index_names.append(index_name)
new_index_fields.append(index_field.copy(name=index_column_name))
if groupby._agg_columns_selected:
agg_columns = groupby._agg_columns
else:
# pandas doesn't keep the groupkey as a column from 1.3 for DataFrameGroupBy
column_labels_to_exclude = groupby._column_labels_to_exclude.copy()
if isinstance(groupby, DataFrameGroupBy):
for groupkey in groupby._groupkeys: # type: ignore[attr-defined]
column_labels_to_exclude.add(groupkey._internal.column_labels[0])
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in column_labels_to_exclude
]
applied = []
for agg_column in agg_columns:
applied.append(agg_column._with_new_scol(func(agg_column.spark.column))) # TODO: dtype?
# Seems like pandas filters out when grouped key is NA.
cond = groupby._groupkeys[0].spark.column.isNotNull()
for c in groupby._groupkeys[1:]:
cond = cond | c.spark.column.isNotNull()
sdf = psdf._internal.spark_frame.filter(cond).select(
new_index_scols + [c.spark.column for c in applied]
)
internal = psdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in new_index_spark_column_names],
index_names=new_index_names,
index_fields=new_index_fields,
column_labels=[c._column_label for c in applied],
data_spark_columns=[
scol_for(sdf, c._internal.data_spark_column_names[0]) for c in applied
],
data_fields=[c._internal.data_fields[0] for c in applied],
)
return groupby._cleanup_and_return(DataFrame(internal))
def count(self) -> FrameLike:
"""
The rolling count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).count().sort_index()
2 0 1.0
1 2.0
3 2 1.0
3 2.0
4 3.0
4 5 1.0
6 2.0
7 3.0
8 3.0
5 9 1.0
10 2.0
dtype: float64
For DataFrame, each rolling count is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 1.0
1 2.0
3 2 1.0
3 2.0
4 2.0
4 5 1.0
6 2.0
7 2.0
8 2.0
5 9 1.0
10 2.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
The rolling summation of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.sum : Sum of the full Series.
DataFrame.sum : Sum of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).sum().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 9.0
4 5 NaN
6 NaN
7 12.0
8 12.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 8.0
3 2 NaN
3 18.0
4 18.0
4 5 NaN
6 32.0
7 32.0
8 32.0
5 9 NaN
10 50.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
The rolling minimum of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.min : Min of the full Series.
DataFrame.min : Min of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).min().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling minimum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).min().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 4.0
3 2 NaN
3 9.0
4 9.0
4 5 NaN
6 16.0
7 16.0
8 16.0
5 9 NaN
10 25.0
"""
return super().min()
def max(self) -> FrameLike:
"""
The rolling maximum of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.max : Max of the full Series.
DataFrame.max : Max of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).max().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling maximum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).max().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 4.0
3 2 NaN
3 9.0
4 9.0
4 5 NaN
6 16.0
7 16.0
8 16.0
5 9 NaN
10 25.0
"""
return super().max()
def mean(self) -> FrameLike:
"""
The rolling mean of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the rolling
calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.mean : Mean of the full Series.
DataFrame.mean : Mean of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).rolling(3).mean().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each rolling mean is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).rolling(2).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 4.0
3 2 NaN
3 9.0
4 9.0
4 5 NaN
6 16.0
7 16.0
8 16.0
5 9 NaN
10 25.0
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate rolling standard deviation.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased rolling variance.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the rolling calculation.
See Also
--------
Series.rolling : Calling object with Series data.
DataFrame.rolling : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
"""
return super().var()
class ExpandingLike(RollingAndExpanding[FrameLike]):
def __init__(self, min_periods: int = 1):
if min_periods < 0:
raise ValueError("min_periods must be >= 0")
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(
Window.unboundedPreceding, Window.currentRow
)
super().__init__(window, min_periods)
def count(self) -> FrameLike:
def count(scol: Column) -> Column:
return F.when(
F.row_number().over(self._unbounded_window) >= self._min_periods,
F.count(scol).over(self._window),
).otherwise(F.lit(None))
return self._apply_as_series_or_frame(count).astype("float64") # type: ignore[attr-defined]
class Expanding(ExpandingLike[FrameLike]):
def __init__(self, psdf_or_psser: FrameLike, min_periods: int = 1):
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.series import Series
super().__init__(min_periods)
if not isinstance(psdf_or_psser, (DataFrame, Series)):
raise TypeError(
"psdf_or_psser must be a series or dataframe; however, got: %s"
% type(psdf_or_psser)
)
self._psdf_or_psser = psdf_or_psser
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeExpanding, item):
property_or_func = getattr(MissingPandasLikeExpanding, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
raise AttributeError(item)
# TODO: when add 'center' and 'axis' parameter, should add to here too.
def __repr__(self) -> str:
return "Expanding [min_periods={}]".format(self._min_periods)
_apply_as_series_or_frame = Rolling._apply_as_series_or_frame
def count(self) -> FrameLike:
"""
The expanding count of any non-NaN observations inside the window.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 3, float("nan"), 10])
>>> s.expanding().count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
>>> s.to_frame().expanding().count()
0
0 1.0
1 2.0
2 2.0
3 3.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
Calculate expanding summation of given DataFrame or Series.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
expanding summation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = ps.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
For DataFrame, each expanding summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.expanding(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 10.0 30.0
4 15.0 55.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
Calculate the expanding minimum.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with a Series.
DataFrame.expanding : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
        Performing an expanding minimum with a window size of 3.
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s.expanding(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
return super().min()
def max(self) -> FrameLike:
"""
Calculate the expanding maximum.
.. note:: the current implementation of this API uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.max : Similar method for Series.
DataFrame.max : Similar method for DataFrame.
Examples
--------
Performing an expanding maximum with a window size of 3.
>>> s = ps.Series([4, 3, 5, 2, 6])
>>> s.expanding(3).max()
0 NaN
1 NaN
2 5.0
3 5.0
4 6.0
dtype: float64
"""
return super().max()
def mean(self) -> FrameLike:
"""
Calculate the expanding mean of the values.
.. note:: the current implementation of this API uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show expanding mean calculations with window sizes of
two and three, respectively.
>>> s = ps.Series([1, 2, 3, 4])
>>> s.expanding(2).mean()
0 NaN
1 1.5
2 2.0
3 2.5
dtype: float64
>>> s.expanding(3).mean()
0 NaN
1 NaN
2 2.0
3 2.5
dtype: float64
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate expanding standard deviation.
.. note:: the current implementation of this API uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
For DataFrame, each expanding standard deviation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.expanding(2).std()
A B
0 NaN NaN
1 0.000000 0.000000
2 0.577350 6.350853
3 0.957427 11.412712
4 0.894427 10.630146
5 0.836660 9.928075
6 0.786796 9.327379
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased expanding variance.
.. note:: the current implementation of this API uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Examples
--------
>>> s = ps.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
For DataFrame, each unbiased expanding variance is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.expanding(2).var()
A B
0 NaN NaN
1 0.000000 0.000000
2 0.333333 40.333333
3 0.916667 130.250000
4 0.800000 113.000000
5 0.700000 98.566667
6 0.619048 87.000000
"""
return super().var()
class ExpandingGroupby(ExpandingLike[FrameLike]):
def __init__(self, groupby: GroupBy[FrameLike], min_periods: int = 1):
super().__init__(min_periods)
self._groupby = groupby
self._window = self._window.partitionBy(*[ser.spark.column for ser in groupby._groupkeys])
self._unbounded_window = self._window.partitionBy(
*[ser.spark.column for ser in groupby._groupkeys]
)
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeExpandingGroupby, item):
property_or_func = getattr(MissingPandasLikeExpandingGroupby, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
raise AttributeError(item)
_apply_as_series_or_frame = RollingGroupby._apply_as_series_or_frame
def count(self) -> FrameLike:
"""
The expanding count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.count : Count of the full Series.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).count().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 3.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding count is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 2.0
3 2 NaN
3 2.0
4 3.0
4 5 NaN
6 2.0
7 3.0
8 4.0
5 9 NaN
10 2.0
"""
return super().count()
def sum(self) -> FrameLike:
"""
Calculate expanding summation of given DataFrame or Series.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
expanding summation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).sum().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 9.0
4 5 NaN
6 NaN
7 12.0
8 16.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding summation is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 8.0
3 2 NaN
3 18.0
4 27.0
4 5 NaN
6 32.0
7 48.0
8 64.0
5 9 NaN
10 50.0
"""
return super().sum()
def min(self) -> FrameLike:
"""
Calculate the expanding minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with a Series.
DataFrame.expanding : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).min().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding minimum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).min().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 4.0
3 2 NaN
3 9.0
4 9.0
4 5 NaN
6 16.0
7 16.0
8 16.0
5 9 NaN
10 25.0
"""
return super().min()
def max(self) -> FrameLike:
"""
Calculate the expanding maximum.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.max : Similar method for Series.
DataFrame.max : Similar method for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).max().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding maximum is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).max().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 4.0
3 2 NaN
3 9.0
4 9.0
4 5 NaN
6 16.0
7 16.0
8 16.0
5 9 NaN
10 25.0
"""
return super().max()
def mean(self) -> FrameLike:
"""
Calculate the expanding mean of the values.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the expanding
calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
>>> s = ps.Series([2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5])
>>> s.groupby(s).expanding(3).mean().sort_index()
2 0 NaN
1 NaN
3 2 NaN
3 NaN
4 3.0
4 5 NaN
6 NaN
7 4.0
8 4.0
5 9 NaN
10 NaN
dtype: float64
For DataFrame, each expanding mean is computed column-wise.
>>> df = ps.DataFrame({"A": s.to_numpy(), "B": s.to_numpy() ** 2})
>>> df.groupby(df.A).expanding(2).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
2 0 NaN
1 4.0
3 2 NaN
3 9.0
4 9.0
4 5 NaN
6 16.0
7 16.0
8 16.0
5 9 NaN
10 25.0
"""
return super().mean()
def std(self) -> FrameLike:
"""
Calculate expanding standard deviation.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
"""
return super().std()
def var(self) -> FrameLike:
"""
Calculate unbiased expanding variance.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the expanding calculation.
See Also
--------
Series.expanding : Calling object with Series data.
DataFrame.expanding : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
"""
return super().var()
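# Note (added for illustration, not part of the upstream docstrings): the std()
# and var() methods directly above document no worked example. A minimal usage
# sketch, assuming a running Spark session and `import pyspark.pandas as ps`:
#
#   s = ps.Series([5, 5, 6, 7, 5, 5, 5])
#   s.expanding(3).std()                          # values as in Expanding.std above
#   s.expanding(3).var()                          # values as in Expanding.var above
#   s.groupby(s).expanding(3).std().sort_index()  # per-group expanding std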
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.window
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.window.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.window tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.window,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| []
| []
| [
"SPARK_HOME"
]
| [] | ["SPARK_HOME"] | python | 1 | 0 | |
pkg/environmentvar/environmentvar_test.go | package environmentvar
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSetEnv(t *testing.T) {
os.Setenv("NODE_ENV", "production")
defer os.Unsetenv("NODE_ENV")
actual := os.Getenv("NODE_ENV")
assert.Equal(t, actual, "production")
}
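// A companion sketch, not in the original test file: when a variable is unset,
// os.Getenv returns the empty string, which is the behaviour that fallback
// helpers typically build on.
func TestUnsetEnvSketch(t *testing.T) {
	os.Unsetenv("NODE_ENV")
	actual := os.Getenv("NODE_ENV")
	assert.Equal(t, actual, "")
}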
| [
"\"NODE_ENV\""
]
| []
| [
"NODE_ENV"
]
| [] | ["NODE_ENV"] | go | 1 | 0 | |
vendor/github.com/armory/dinghy/pkg/settings/load.go | /*
* Copyright 2019 Armory, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package settings is a single place to put all of the application settings.
package settings
import (
"context"
"encoding/json"
"errors"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/armory/go-yaml-tools/pkg/secrets"
"github.com/armory/go-yaml-tools/pkg/tls/server"
"github.com/mitchellh/mapstructure"
"github.com/armory/dinghy/pkg/util"
"github.com/armory/go-yaml-tools/pkg/spring"
"github.com/imdario/mergo"
log "github.com/sirupsen/logrus"
)
const (
// DefaultDinghyPort is the default port that Dinghy will listen on.
DefaultDinghyPort = 8081
)
func NewDefaultSettings() Settings {
dinghyPort, err := strconv.ParseUint(util.GetenvOrDefault("DINGHY_PORT", strconv.Itoa(DefaultDinghyPort)), 10, 32)
if err != nil {
dinghyPort = DefaultDinghyPort
}
return Settings{
DinghyFilename: "dinghyfile",
TemplateRepo: "dinghy-templates",
AutoLockPipelines: "true",
GitHubCredsPath: util.GetenvOrDefault("GITHUB_TOKEN_PATH", os.Getenv("HOME")+"/.armory/cache/github-creds.txt"),
GithubEndpoint: "https://api.github.com",
StashCredsPath: util.GetenvOrDefault("STASH_TOKEN_PATH", os.Getenv("HOME")+"/.armory/cache/stash-creds.txt"),
StashEndpoint: "http://localhost:7990/rest/api/1.0",
Logging: Logging{
File: "",
Level: "INFO",
},
spinnakerSupplied: spinnakerSupplied{
Orca: spinnakerService{
Enabled: "true",
BaseURL: util.GetenvOrDefault("ORCA_BASE_URL", "http://orca:8083"),
},
Front50: spinnakerService{
Enabled: "true",
BaseURL: util.GetenvOrDefault("FRONT50_BASE_URL", "http://front50:8080"),
},
Echo: spinnakerService{
BaseURL: util.GetenvOrDefault("ECHO_BASE_URL", "http://echo:8089"),
},
Fiat: fiat{
spinnakerService: spinnakerService{
Enabled: "false",
BaseURL: util.GetenvOrDefault("FIAT_BASE_URL", "http://fiat:7003"),
},
AuthUser: "",
},
Redis: Redis{
BaseURL: util.GetenvOrDefault("REDIS_HOST", "redis:6379"),
Password: util.GetenvOrDefaultRedact("REDIS_PASSWORD", ""),
},
},
ParserFormat: "json",
RepoConfig: []RepoConfig{},
Server: server.ServerConfig{
Port: uint32(dinghyPort),
},
}
}
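// Illustrative note: util.GetenvOrDefault is defined in the dinghy util package,
// not in this file; it is assumed to behave roughly like the sketch below, which
// is why every BaseURL and credentials path above can be overridden through
// environment variables.
//
//	func getenvOrDefault(key, fallback string) string {
//		if v := os.Getenv(key); v != "" {
//			return v
//		}
//		return fallback
//	}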
// LoadSettings loads the Spring config from the default Spinnaker paths
// and merges default settings with the loaded settings
func LoadSettings() (*Settings, error) {
springConfig, err := loadProfiles()
if err != nil {
return nil, err
}
settings, err := configureSettings(NewDefaultSettings(), springConfig)
if err != nil {
return nil, err
}
if err = settings.Http.Init(); err != nil {
return nil, err
}
return settings, nil
}
func configureSettings(defaultSettings, overrides Settings) (*Settings, error) {
if err := mergo.Merge(&defaultSettings, overrides, mergo.WithOverride); err != nil {
return nil, err
}
// If Github token not passed directly
// Required for backwards compatibility
if defaultSettings.GitHubToken == "" {
// load github api token
if _, err := os.Stat(defaultSettings.GitHubCredsPath); err == nil {
creds, err := ioutil.ReadFile(defaultSettings.GitHubCredsPath)
if err != nil {
return nil, err
}
c := strings.Split(strings.TrimSpace(string(creds)), ":")
if len(c) < 2 {
return nil, errors.New("github creds file should have format 'username:token'")
}
defaultSettings.GitHubToken = c[1]
log.Info("Successfully loaded github api creds")
}
}
// If Stash token not passed directly
// Required for backwards compatibility
if defaultSettings.StashToken == "" || defaultSettings.StashUsername == "" {
// load stash api creds
if _, err := os.Stat(defaultSettings.StashCredsPath); err == nil {
creds, err := ioutil.ReadFile(defaultSettings.StashCredsPath)
if err != nil {
return nil, err
}
c := strings.Split(strings.TrimSpace(string(creds)), ":")
if len(c) < 2 {
return nil, errors.New("stash creds file should have format 'username:token'")
}
defaultSettings.StashUsername = c[0]
defaultSettings.StashToken = c[1]
log.Info("Successfully loaded stash api creds")
}
}
// Required for backwards compatibility
if defaultSettings.Deck.BaseURL == "" && defaultSettings.SpinnakerUIURL != "" {
log.Warn("Spinnaker UI URL should be set with ${services.deck.baseUrl}")
defaultSettings.Deck.BaseURL = defaultSettings.SpinnakerUIURL
}
// Take the FiatUser setting if fiat is enabled (coming from hal settings)
if defaultSettings.Fiat.Enabled == "true" && defaultSettings.FiatUser != "" {
defaultSettings.Fiat.AuthUser = defaultSettings.FiatUser
}
if defaultSettings.ParserFormat == "" {
defaultSettings.ParserFormat = "json"
}
c, _ := json.Marshal(defaultSettings.Redacted())
log.Infof("The following settings have been loaded: %v", string(c))
return &defaultSettings, nil
}
func decodeProfilesToSettings(profiles map[string]interface{}, s *Settings) error {
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
WeaklyTypedInput: true,
Result: s,
})
if err != nil {
return err
}
return decoder.Decode(profiles)
}
func loadProfiles() (Settings, error) {
// var s Settings
var config Settings
propNames := []string{"spinnaker", "dinghy"}
c, err := spring.LoadDefault(propNames)
if err != nil {
return config, err
}
if err := decodeProfilesToSettings(c, &config); err != nil {
return config, err
}
if (Secrets{}) != config.Secrets {
if (secrets.VaultConfig{}) != config.Secrets.Vault {
if err = secrets.RegisterVaultConfig(config.Secrets.Vault); err != nil {
return config, err
}
}
}
if err = decryptSecrets(context.TODO(), &config); err != nil {
log.Fatalf("failed to decrypt secrets: %s", err)
}
return config, nil
}
func decryptSecrets(ctx context.Context, config *Settings) error {
decrypter, err := secrets.NewDecrypter(ctx, config.GitHubToken)
if err != nil {
return err
}
secret, err := decrypter.Decrypt()
if err != nil {
return err
}
config.GitHubToken = secret
decrypter, err = secrets.NewDecrypter(ctx, config.StashToken)
if err != nil {
return err
}
secret, err = decrypter.Decrypt()
if err != nil {
return err
}
config.StashToken = secret
return nil
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
pkg/terraform/install_test.go | package terraform
import (
"bytes"
"io/ioutil"
"os"
"strings"
"testing"
"github.com/deislabs/porter/pkg/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
type InstallTest struct {
expectedCommand string
installStep InstallStep
}
// sad hack: not sure how to make a common test main for all my subpackages
func TestMain(m *testing.M) {
test.TestMainWithMockedCommandHandlers(m)
}
func TestMixin_UnmarshalInstallStep(t *testing.T) {
b, err := ioutil.ReadFile("testdata/install-input.yaml")
require.NoError(t, err)
var action InstallAction
err = yaml.Unmarshal(b, &action)
require.NoError(t, err)
require.Len(t, action.Steps, 1)
step := action.Steps[0]
assert.Equal(t, "Install MySQL", step.Description)
assert.Equal(t, "TRACE", step.LogLevel)
}
func TestMixin_Install(t *testing.T) {
installTests := []InstallTest{
{
expectedCommand: strings.Join([]string{
"terraform init",
"terraform apply -auto-approve -var cool=true -var foo=bar",
}, "\n"),
installStep: InstallStep{
InstallArguments: InstallArguments{
AutoApprove: true,
LogLevel: "TRACE",
Vars: map[string]string{
"cool": "true",
"foo": "bar",
},
},
},
},
}
defer os.Unsetenv(test.ExpectedCommandEnv)
for _, installTest := range installTests {
t.Run(installTest.expectedCommand, func(t *testing.T) {
os.Setenv(test.ExpectedCommandEnv, installTest.expectedCommand)
action := InstallAction{Steps: []InstallStep{installTest.installStep}}
b, err := yaml.Marshal(action)
require.NoError(t, err)
h := NewTestMixin(t)
h.In = bytes.NewReader(b)
// Set up working dir as current dir
h.WorkingDir, err = os.Getwd()
require.NoError(t, err)
err = h.Install()
require.NoError(t, err)
assert.Equal(t, "TRACE", os.Getenv("TF_LOG"))
wd, err := os.Getwd()
require.NoError(t, err)
assert.Equal(t, wd, h.WorkingDir)
})
}
}
| [
"\"TF_LOG\""
]
| []
| [
"TF_LOG"
]
| [] | ["TF_LOG"] | go | 1 | 0 | |
a-plus-rst-tools/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sample documentation build configuration file, created by
# sphinx-quickstart on Tue May 3 14:15:50 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# -- Aplus configuration --------------------------------------------------
course_open_date = '2016-01-01'
course_close_date = '2017-01-01'
questionnaire_default_submissions = 5
program_default_submissions = 10
ae_default_submissions = 0
use_wide_column = True
static_host = os.environ.get('STATIC_CONTENT_HOST', None)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('a-plus-rst-tools'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'aplus_setup',
]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Sample'
copyright = '2016, NN'
author = 'NN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'aplus'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'use_wide_column': use_wide_column,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['a-plus-rst-tools/theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sampledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Sample.tex', 'Sample Documentation',
'NN', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sample', 'Sample Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Sample', 'Sample Documentation',
author, 'Sample', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| []
| []
| [
"STATIC_CONTENT_HOST"
]
| [] | ["STATIC_CONTENT_HOST"] | python | 1 | 0 | |
config/production.py | import os
API_URI = os.environ["WRISTBAND_API_URI"]
SECRET_KEY = os.environ["FLASK_SECRET_KEY"]
| []
| []
| [
"WRISTBAND_API_URI",
"FLASK_SECRET_KEY"
]
| [] | ["WRISTBAND_API_URI", "FLASK_SECRET_KEY"] | python | 2 | 0 | |
resolver/resolver.go | package resolver
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"sort"
"strings"
"time"
"github.com/google/go-github/v28/github"
goSem "github.com/tj/go-semver"
"golang.org/x/oauth2"
"github.com/Masterminds/semver"
)
const DEFAULT_PROXY_URL = "https://proxy.golang.org"
const hashRegex = "^[0-9a-f]{7,40}$"
var gh = &GitHub{}
type Resolver struct {
Pkg string
Value string
Hash bool
ConstraintCheck *semver.Constraints
ghClient *github.Client
}
type GitHub struct {
// Client is the GitHub client.
Client *github.Client
}
type VersionInfo struct {
Version string // version string
Time time.Time // commit time
}
func init() {
ctx := context.Background()
ghClient := oauth2.StaticTokenSource(
&oauth2.Token{
AccessToken: os.Getenv("GITHUB_TOKEN"),
},
)
gh.Client = github.NewClient(oauth2.NewClient(ctx, ghClient))
}
// Resolve the version for the given package by
// checking with the proxy for either the specified version
// or getting the latest version on the proxy
func (v *Resolver) ResolveVersion() (string, error) {
if len(v.Value) == 0 {
version, err := v.ResolveLatestVersion()
if err != nil {
if v.isGithubPKG() {
return v.GithubFallbackResolveVersion()
}
return "", err
}
if len(version.Version) == 0 {
return v.GithubFallbackResolveVersion()
}
return version.Version, err
}
if v.Hash {
return v.Value, nil
}
versionString, err := v.ResolveClosestVersion()
if err != nil {
return "", err
}
if len(versionString) == 0 && v.isGithubPKG() {
return v.GithubFallbackResolveVersion()
}
return versionString, nil
}
func (v *Resolver) isGithubPKG() bool {
parts := strings.Split(v.Pkg, "/")
return parts[0] == "github.com"
}
func (v *Resolver) GithubFallbackResolveVersion() (string, error) {
parts := strings.Split(v.Pkg, "/")
version := "master"
if len(v.Value) == 0 {
version = v.Value
}
resolvedV, err := gh.resolve(parts[1], parts[2], version)
if err != nil {
return "", err
}
return resolvedV, nil
}
// resolve the latest version from the proxy
func (v *Resolver) ResolveLatestVersion() (VersionInfo, error) {
var versionInfo VersionInfo
resp, err := http.Get(getVersionLatestProxyURL(v.Pkg))
if err != nil {
return versionInfo, err
}
defer resp.Body.Close()
respBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return versionInfo, err
}
if err := json.Unmarshal(respBytes, &versionInfo); err != nil {
return versionInfo, err
}
return versionInfo, nil
}
// Parse the given string to be either a semver version string
// or a commit hash
func (v *Resolver) ParseVersion(version string) error {
v.Value = version
// just send back if no version is provided
if len(version) == 0 {
return nil
}
// return the string back if it's a valid hash string
if !isSemver(version) && !isValidSemverConstraint(version) {
matched, err := regexp.MatchString(hashRegex, version)
if matched {
v.Hash = true
return nil
}
// if not a hash or a semver, just return an error
if err != nil {
return err
}
}
if isSemver(version) {
check, err := semver.NewConstraint("= " + version)
if err != nil {
return err
}
v.ConstraintCheck = check
}
if isValidSemverConstraint(version) {
check, err := semver.NewConstraint(version)
if err != nil {
return err
}
v.ConstraintCheck = check
}
return nil
}
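// Hypothetical usage sketch (not referenced elsewhere in this package): it
// assumes network access to the module proxy and elides error handling.
func exampleResolveUsage() {
	r := &Resolver{Pkg: "github.com/pkg/errors"}
	_ = r.ParseVersion("^0.8.0") // a semver version, a constraint, or a 7-40 char hex commit hash
	resolved, _ := r.ResolveVersion()
	fmt.Println(resolved)
}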
// Resolve the closest version to the given semver from the proxy
func (v *Resolver) ResolveClosestVersion() (string, error) {
var versionTags []string
resp, err := http.Get(getVersionListProxyURL(v.Pkg))
if err != nil {
return "", err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
versionTags = strings.Split(string(data), "\n")
matchedVersion := ""
var sortedVersionTags []*semver.Version
for _, versionTag := range versionTags {
if len(versionTag) == 0 {
continue
}
ver, err := semver.NewVersion(versionTag)
if err != nil {
return "", err
}
sortedVersionTags = append(sortedVersionTags, ver)
}
sort.Sort(semver.Collection(sortedVersionTags))
for _, versionTag := range sortedVersionTags {
if !v.ConstraintCheck.Check(
versionTag,
) {
continue
}
matchedVersion = versionTag.String()
break
}
if len(matchedVersion) == 0 {
return "", nil
}
return matchedVersion, nil
}
// isSemver reports whether the given string is a valid semver version string
func isSemver(version string) bool {
_, err := semver.NewVersion(version)
return err == nil
}
func isValidSemverConstraint(version string) bool {
versionRegex := `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
constraintOperations := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
validConstraintRegex := regexp.MustCompile(fmt.Sprintf(
`^(\s*(%s)\s*(%s)\s*\,?)+$`,
constraintOperations,
versionRegex))
return validConstraintRegex.MatchString(version)
}
// normalize the proxy url to
// - not have trailing slashes
func normalizeUrl(url string) string {
if strings.HasSuffix(url, "/") {
ind := strings.LastIndex(url, "/")
if ind == -1 {
return url
}
return strings.Join([]string{url[:ind], "", url[ind+1:]}, "")
}
return url
}
// get the proxy url for the latest version
func getVersionLatestProxyURL(pkg string) string {
urlPrefix := normalizeUrl(DEFAULT_PROXY_URL)
return urlPrefix + "/" + pkg + "/@latest"
}
// get the proxy url for the entire version list
func getVersionListProxyURL(pkg string) string {
urlPrefix := normalizeUrl(DEFAULT_PROXY_URL)
return urlPrefix + "/" + pkg + "/@v/list"
}
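// exampleProxyURLs is an illustrative sketch (not referenced elsewhere) of the
// two endpoints the resolver queries for an all-lowercase module path; module
// paths containing uppercase letters would additionally need the proxy
// protocol's case-encoding, which this file does not implement.
func exampleProxyURLs() (latest, list string) {
	latest = getVersionLatestProxyURL("github.com/pkg/errors") // .../github.com/pkg/errors/@latest
	list = getVersionListProxyURL("github.com/pkg/errors")     // .../github.com/pkg/errors/@v/list
	return latest, list
}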
func (g *GitHub) versions(owner, repo string) (versions []string, err error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
page := 1
for {
options := &github.ListOptions{
Page: page,
PerPage: 100,
}
tags, _, err := g.Client.Repositories.ListTags(ctx, owner, repo, options)
if err != nil {
return nil, fmt.Errorf("listing tags: %w", err)
}
if len(tags) == 0 {
break
}
for _, t := range tags {
versions = append(versions, t.GetName())
}
page++
}
if len(versions) == 0 {
return nil, errors.New("no versions defined")
}
return
}
// Resolve implementation.
func (g *GitHub) resolve(owner, repo, version string) (string, error) {
// fetch tags
tags, err := g.versions(owner, repo)
if err != nil {
return "", err
}
// convert to semver, ignoring malformed
var versions []goSem.Version
for _, t := range tags {
if v, err := goSem.Parse(t); err == nil {
versions = append(versions, v)
}
}
// no versions, it has tags but they're not semver
if len(versions) == 0 {
return "", errors.New("no versions matched")
}
// master special-case
if version == "master" {
return versions[0].String(), nil
}
// match requested semver range
vr, err := goSem.ParseRange(version)
if err != nil {
return "", fmt.Errorf("parsing version range: %w", err)
}
for _, v := range versions {
if vr.Match(v) {
return v.String(), nil
}
}
return "", errors.New("no versions matched")
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
Matrimony/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Matrimony.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
controllers/kustomization_controller.go | /*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/predicates"
"github.com/fluxcd/pkg/untar"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
"github.com/go-logr/logr"
"github.com/hashicorp/go-retryablehttp"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
"sigs.k8s.io/kustomize/api/filesys"
kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta1"
)
// +kubebuilder:rbac:groups=kustomize.toolkit.fluxcd.io,resources=kustomizations,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=kustomize.toolkit.fluxcd.io,resources=kustomizations/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=kustomize.toolkit.fluxcd.io,resources=kustomizations/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets;gitrepositories,verbs=get;list;watch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status;gitrepositories/status,verbs=get
// +kubebuilder:rbac:groups="",resources=secrets;serviceaccounts,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// KustomizationReconciler reconciles a Kustomization object
type KustomizationReconciler struct {
client.Client
httpClient *retryablehttp.Client
requeueDependency time.Duration
Scheme *runtime.Scheme
EventRecorder kuberecorder.EventRecorder
ExternalEventRecorder *events.Recorder
MetricsRecorder *metrics.Recorder
StatusPoller *polling.StatusPoller
}
type KustomizationReconcilerOptions struct {
MaxConcurrentReconciles int
HTTPRetry int
DependencyRequeueInterval time.Duration
}
func (r *KustomizationReconciler) SetupWithManager(mgr ctrl.Manager, opts KustomizationReconcilerOptions) error {
// Index the Kustomizations by the GitRepository references they (may) point at.
if err := mgr.GetCache().IndexField(context.TODO(), &kustomizev1.Kustomization{}, kustomizev1.GitRepositoryIndexKey,
r.indexByGitRepository); err != nil {
return fmt.Errorf("failed setting index fields: %w", err)
}
// Index the Kustomizations by the Bucket references they (may) point at.
if err := mgr.GetCache().IndexField(context.TODO(), &kustomizev1.Kustomization{}, kustomizev1.BucketIndexKey,
r.indexByBucket); err != nil {
return fmt.Errorf("failed setting index fields: %w", err)
}
r.requeueDependency = opts.DependencyRequeueInterval
// Configure the retryable http client used for fetching artifacts.
// By default it retries 10 times within a 3.5-minute window.
httpClient := retryablehttp.NewClient()
httpClient.RetryWaitMin = 5 * time.Second
httpClient.RetryWaitMax = 30 * time.Second
httpClient.RetryMax = opts.HTTPRetry
httpClient.Logger = nil
r.httpClient = httpClient
return ctrl.NewControllerManagedBy(mgr).
For(&kustomizev1.Kustomization{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
)).
Watches(
&source.Kind{Type: &sourcev1.GitRepository{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForGitRepositoryRevisionChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
Watches(
&source.Kind{Type: &sourcev1.Bucket{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForBucketRevisionChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
Complete(r)
}
func (r *KustomizationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := logr.FromContext(ctx)
reconcileStart := time.Now()
var kustomization kustomizev1.Kustomization
if err := r.Get(ctx, req.NamespacedName, &kustomization); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Record suspended status metric
defer r.recordSuspension(ctx, kustomization)
// Add our finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&kustomization, kustomizev1.KustomizationFinalizer) {
controllerutil.AddFinalizer(&kustomization, kustomizev1.KustomizationFinalizer)
if err := r.Update(ctx, &kustomization); err != nil {
log.Error(err, "unable to register finalizer")
return ctrl.Result{}, err
}
}
// Examine if the object is under deletion
if !kustomization.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, kustomization)
}
// Return early if the Kustomization is suspended.
if kustomization.Spec.Suspend {
log.Info("Reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// resolve source reference
source, err := r.getSource(ctx, kustomization)
if err != nil {
if apierrors.IsNotFound(err) {
msg := fmt.Sprintf("Source '%s' not found", kustomization.Spec.SourceRef.String())
kustomization = kustomizev1.KustomizationNotReady(kustomization, "", kustomizev1.ArtifactFailedReason, msg)
if err := r.patchStatus(ctx, req, kustomization.Status); err != nil {
log.Error(err, "unable to update status for source not found")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, kustomization)
log.Info(msg)
// do not requeue immediately, when the source is created the watcher should trigger a reconciliation
return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, nil
} else {
// retry on transient errors
return ctrl.Result{Requeue: true}, err
}
}
if source.GetArtifact() == nil {
msg := "Source is not ready, artifact not found"
kustomization = kustomizev1.KustomizationNotReady(kustomization, "", kustomizev1.ArtifactFailedReason, msg)
if err := r.patchStatus(ctx, req, kustomization.Status); err != nil {
log.Error(err, "unable to update status for artifact not found")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, kustomization)
log.Info(msg)
// do not requeue immediately, when the artifact is created the watcher should trigger a reconciliation
return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, nil
}
// check dependencies
if len(kustomization.Spec.DependsOn) > 0 {
if err := r.checkDependencies(kustomization); err != nil {
kustomization = kustomizev1.KustomizationNotReady(
kustomization, source.GetArtifact().Revision, meta.DependencyNotReadyReason, err.Error())
if err := r.patchStatus(ctx, req, kustomization.Status); err != nil {
log.Error(err, "unable to update status for dependency not ready")
return ctrl.Result{Requeue: true}, err
}
// we can't rely on exponential backoff because it will prolong the execution too much,
// instead we requeue on a fixed interval.
msg := fmt.Sprintf("Dependencies do not meet ready condition, retrying in %s", r.requeueDependency.String())
log.Info(msg)
r.event(ctx, kustomization, source.GetArtifact().Revision, events.EventSeverityInfo, msg, nil)
r.recordReadiness(ctx, kustomization)
return ctrl.Result{RequeueAfter: r.requeueDependency}, nil
}
log.Info("All dependencies are ready, proceeding with reconciliation")
}
// record reconciliation duration
if r.MetricsRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &kustomization)
if err != nil {
return ctrl.Result{}, err
}
defer r.MetricsRecorder.RecordDuration(*objRef, reconcileStart)
}
// set the reconciliation status to progressing
kustomization = kustomizev1.KustomizationProgressing(kustomization)
if err := r.patchStatus(ctx, req, kustomization.Status); err != nil {
log.Error(err, "unable to update status to progressing")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, kustomization)
// reconcile kustomization by applying the latest revision
reconciledKustomization, reconcileErr := r.reconcile(ctx, *kustomization.DeepCopy(), source)
if err := r.patchStatus(ctx, req, reconciledKustomization.Status); err != nil {
log.Error(err, "unable to update status after reconciliation")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, reconciledKustomization)
// broadcast the reconciliation failure and requeue at the specified retry interval
if reconcileErr != nil {
log.Error(reconcileErr, fmt.Sprintf("Reconciliation failed after %s, next try in %s",
time.Now().Sub(reconcileStart).String(),
kustomization.GetRetryInterval().String()),
"revision",
source.GetArtifact().Revision)
r.event(ctx, reconciledKustomization, source.GetArtifact().Revision, events.EventSeverityError,
reconcileErr.Error(), nil)
return ctrl.Result{RequeueAfter: kustomization.GetRetryInterval()}, nil
}
// broadcast the reconciliation result and requeue at the specified interval
log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
time.Now().Sub(reconcileStart).String(),
kustomization.Spec.Interval.Duration.String()),
"revision",
source.GetArtifact().Revision,
)
r.event(ctx, reconciledKustomization, source.GetArtifact().Revision, events.EventSeverityInfo,
"Update completed", map[string]string{"commit_status": "update"})
return ctrl.Result{RequeueAfter: kustomization.Spec.Interval.Duration}, nil
}
func (r *KustomizationReconciler) reconcile(
ctx context.Context,
kustomization kustomizev1.Kustomization,
source sourcev1.Source) (kustomizev1.Kustomization, error) {
// record the value of the reconciliation request, if any
if v, ok := meta.ReconcileAnnotationValue(kustomization.GetAnnotations()); ok {
kustomization.Status.SetLastHandledReconcileRequest(v)
}
// create tmp dir
tmpDir, err := ioutil.TempDir("", kustomization.Name)
if err != nil {
err = fmt.Errorf("tmp dir error: %w", err)
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
sourcev1.StorageOperationFailedReason,
err.Error(),
), err
}
defer os.RemoveAll(tmpDir)
// download artifact and extract files
err = r.download(source.GetArtifact().URL, tmpDir)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.ArtifactFailedReason,
err.Error(),
), err
}
// check build path exists
dirPath, err := securejoin.SecureJoin(tmpDir, kustomization.Spec.Path)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.ArtifactFailedReason,
err.Error(),
), err
}
if _, err := os.Stat(dirPath); err != nil {
err = fmt.Errorf("kustomization path not found: %w", err)
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.ArtifactFailedReason,
err.Error(),
), err
}
// create any necessary kube-clients for impersonation
impersonation := NewKustomizeImpersonation(kustomization, r.Client, r.StatusPoller, dirPath)
kubeClient, statusPoller, err := impersonation.GetClient(ctx)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
meta.ReconciliationFailedReason,
err.Error(),
), fmt.Errorf("failed to build kube client: %w", err)
}
// generate kustomization.yaml and calculate the manifests checksum
checksum, err := r.generate(ctx, kubeClient, kustomization, dirPath)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.BuildFailedReason,
err.Error(),
), err
}
// build the kustomization and generate the GC snapshot
snapshot, err := r.build(ctx, kustomization, checksum, dirPath)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.BuildFailedReason,
err.Error(),
), err
}
// dry-run apply
err = r.validate(ctx, kustomization, impersonation, dirPath)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.ValidationFailedReason,
err.Error(),
), err
}
// apply
changeSet, err := r.applyWithRetry(ctx, kustomization, impersonation, source.GetArtifact().Revision, dirPath, 5*time.Second)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
meta.ReconciliationFailedReason,
err.Error(),
), err
}
// prune
err = r.prune(ctx, kubeClient, kustomization, checksum)
if err != nil {
return kustomizev1.KustomizationNotReady(
kustomization,
source.GetArtifact().Revision,
kustomizev1.PruneFailedReason,
err.Error(),
), err
}
// health assessment
err = r.checkHealth(ctx, statusPoller, kustomization, source.GetArtifact().Revision, changeSet != "")
if err != nil {
return kustomizev1.KustomizationNotReadySnapshot(
kustomization,
snapshot,
source.GetArtifact().Revision,
kustomizev1.HealthCheckFailedReason,
err.Error(),
), err
}
return kustomizev1.KustomizationReady(
kustomization,
snapshot,
source.GetArtifact().Revision,
meta.ReconciliationSucceededReason,
"Applied revision: "+source.GetArtifact().Revision,
), nil
}
func (r *KustomizationReconciler) checkDependencies(kustomization kustomizev1.Kustomization) error {
for _, d := range kustomization.Spec.DependsOn {
if d.Namespace == "" {
d.Namespace = kustomization.GetNamespace()
}
dName := types.NamespacedName(d)
var k kustomizev1.Kustomization
err := r.Get(context.Background(), dName, &k)
if err != nil {
return fmt.Errorf("unable to get '%s' dependency: %w", dName, err)
}
if len(k.Status.Conditions) == 0 || k.Generation != k.Status.ObservedGeneration {
return fmt.Errorf("dependency '%s' is not ready", dName)
}
if !apimeta.IsStatusConditionTrue(k.Status.Conditions, meta.ReadyCondition) {
return fmt.Errorf("dependency '%s' is not ready", dName)
}
}
return nil
}
func (r *KustomizationReconciler) download(artifactURL string, tmpDir string) error {
if hostname := os.Getenv("SOURCE_CONTROLLER_LOCALHOST"); hostname != "" {
u, err := url.Parse(artifactURL)
if err != nil {
return err
}
u.Host = hostname
artifactURL = u.String()
}
req, err := retryablehttp.NewRequest(http.MethodGet, artifactURL, nil)
if err != nil {
return fmt.Errorf("failed to create a new request: %w", err)
}
resp, err := r.httpClient.Do(req)
if err != nil {
return fmt.Errorf("failed to download artifact, error: %w", err)
}
defer resp.Body.Close()
// check response
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to download artifact from %s, status: %s", artifactURL, resp.Status)
}
// extract
if _, err = untar.Untar(resp.Body, tmpDir); err != nil {
return fmt.Errorf("failed to untar artifact, error: %w", err)
}
return nil
}
func (r *KustomizationReconciler) getSource(ctx context.Context, kustomization kustomizev1.Kustomization) (sourcev1.Source, error) {
var source sourcev1.Source
sourceNamespace := kustomization.GetNamespace()
if kustomization.Spec.SourceRef.Namespace != "" {
sourceNamespace = kustomization.Spec.SourceRef.Namespace
}
namespacedName := types.NamespacedName{
Namespace: sourceNamespace,
Name: kustomization.Spec.SourceRef.Name,
}
switch kustomization.Spec.SourceRef.Kind {
case sourcev1.GitRepositoryKind:
var repository sourcev1.GitRepository
err := r.Client.Get(ctx, namespacedName, &repository)
if err != nil {
if apierrors.IsNotFound(err) {
return source, err
}
return source, fmt.Errorf("unable to get source '%s': %w", namespacedName, err)
}
source = &repository
case sourcev1.BucketKind:
var bucket sourcev1.Bucket
err := r.Client.Get(ctx, namespacedName, &bucket)
if err != nil {
if apierrors.IsNotFound(err) {
return source, err
}
return source, fmt.Errorf("unable to get source '%s': %w", namespacedName, err)
}
source = &bucket
default:
return source, fmt.Errorf("source `%s` kind '%s' not supported",
kustomization.Spec.SourceRef.Name, kustomization.Spec.SourceRef.Kind)
}
return source, nil
}
func (r *KustomizationReconciler) generate(ctx context.Context, kubeClient client.Client, kustomization kustomizev1.Kustomization, dirPath string) (string, error) {
gen := NewGenerator(kustomization, kubeClient)
return gen.WriteFile(ctx, dirPath)
}
func (r *KustomizationReconciler) build(ctx context.Context, kustomization kustomizev1.Kustomization, checksum, dirPath string) (*kustomizev1.Snapshot, error) {
timeout := kustomization.GetTimeout()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
dec, cleanup, err := NewTempDecryptor(r.Client, kustomization)
if err != nil {
return nil, err
}
defer cleanup()
// import OpenPGP keys if any
if err := dec.ImportKeys(ctx); err != nil {
return nil, err
}
fs := filesys.MakeFsOnDisk()
m, err := buildKustomization(fs, dirPath)
if err != nil {
return nil, fmt.Errorf("kustomize build failed: %w", err)
}
for _, res := range m.Resources() {
// check if resources are encrypted and decrypt them before generating the final YAML
if kustomization.Spec.Decryption != nil {
outRes, err := dec.Decrypt(res)
if err != nil {
return nil, fmt.Errorf("decryption failed for '%s': %w", res.GetName(), err)
}
if outRes != nil {
_, err = m.Replace(res)
if err != nil {
return nil, err
}
}
}
// run variable substitutions
if kustomization.Spec.PostBuild != nil {
outRes, err := substituteVariables(ctx, r.Client, kustomization, res)
if err != nil {
return nil, fmt.Errorf("var substitution failed for '%s': %w", res.GetName(), err)
}
if outRes != nil {
_, err = m.Replace(res)
if err != nil {
return nil, err
}
}
}
}
resources, err := m.AsYaml()
if err != nil {
return nil, fmt.Errorf("kustomize build failed: %w", err)
}
manifestsFile := filepath.Join(dirPath, fmt.Sprintf("%s.yaml", kustomization.GetUID()))
if err := fs.WriteFile(manifestsFile, resources); err != nil {
return nil, err
}
return kustomizev1.NewSnapshot(resources, checksum)
}
func (r *KustomizationReconciler) validate(ctx context.Context, kustomization kustomizev1.Kustomization, imp *KustomizeImpersonation, dirPath string) error {
if kustomization.Spec.Validation == "" || kustomization.Spec.Validation == "none" {
return nil
}
log := logr.FromContext(ctx)
timeout := kustomization.GetTimeout() + (time.Second * 1)
applyCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
validation := kustomization.Spec.Validation
if validation == "server" && kustomization.Spec.Force {
// Use client-side validation with force
validation = "client"
log.Info("Server-side validation is configured, falling back to client-side validation since 'force' is enabled")
}
cmd := fmt.Sprintf("cd %s && kubectl apply -f %s.yaml --timeout=%s --dry-run=%s --cache-dir=/tmp --force=%t",
dirPath, kustomization.GetUID(), kustomization.GetTimeout().String(), validation, kustomization.Spec.Force)
if kustomization.Spec.KubeConfig != nil {
kubeConfig, err := imp.WriteKubeConfig(ctx)
if err != nil {
return err
}
cmd = fmt.Sprintf("%s --kubeconfig=%s", cmd, kubeConfig)
} else {
// impersonate SA
if kustomization.Spec.ServiceAccountName != "" {
saToken, err := imp.GetServiceAccountToken(ctx)
if err != nil {
return fmt.Errorf("service account impersonation failed: %w", err)
}
cmd = fmt.Sprintf("%s --token %s", cmd, saToken)
}
}
command := exec.CommandContext(applyCtx, "/bin/sh", "-c", cmd)
output, err := command.CombinedOutput()
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("validation timeout: %w", err)
}
return fmt.Errorf("validation failed: %s", parseApplyError(output))
}
return nil
}
func (r *KustomizationReconciler) apply(ctx context.Context, kustomization kustomizev1.Kustomization, imp *KustomizeImpersonation, dirPath string) (string, error) {
log := logr.FromContext(ctx)
start := time.Now()
timeout := kustomization.GetTimeout() + (time.Second * 1)
applyCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
fieldManager := "kustomize-controller"
cmd := fmt.Sprintf("cd %s && kubectl apply --field-manager=%s -f %s.yaml --timeout=%s --cache-dir=/tmp --force=%t",
dirPath, fieldManager, kustomization.GetUID(), kustomization.Spec.Interval.Duration.String(), kustomization.Spec.Force)
if kustomization.Spec.KubeConfig != nil {
kubeConfig, err := imp.WriteKubeConfig(ctx)
if err != nil {
return "", err
}
cmd = fmt.Sprintf("%s --kubeconfig=%s", cmd, kubeConfig)
} else {
// impersonate SA
if kustomization.Spec.ServiceAccountName != "" {
saToken, err := imp.GetServiceAccountToken(ctx)
if err != nil {
return "", fmt.Errorf("service account impersonation failed: %w", err)
}
cmd = fmt.Sprintf("%s --token %s", cmd, saToken)
}
}
command := exec.CommandContext(applyCtx, "/bin/sh", "-c", cmd)
output, err := command.CombinedOutput()
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return "", fmt.Errorf("apply timeout: %w", err)
}
if string(output) == "" {
return "", fmt.Errorf("apply failed: %w, kubectl process was killed, probably due to OOM", err)
}
applyErr := parseApplyError(output)
if applyErr == "" {
applyErr = "no error output found, this may happen because of a timeout"
}
return "", fmt.Errorf("apply failed: %s", applyErr)
}
resources := parseApplyOutput(output)
log.Info(
fmt.Sprintf("Kustomization applied in %s",
time.Since(start).String()),
"output", resources,
)
changeSet := ""
for obj, action := range resources {
if action != "" && action != "unchanged" {
changeSet += obj + " " + action + "\n"
}
}
return changeSet, nil
}
func (r *KustomizationReconciler) applyWithRetry(ctx context.Context, kustomization kustomizev1.Kustomization, imp *KustomizeImpersonation, revision, dirPath string, delay time.Duration) (string, error) {
log := logr.FromContext(ctx)
changeSet, err := r.apply(ctx, kustomization, imp, dirPath)
if err != nil {
// retry apply due to CRD/CR race
if strings.Contains(err.Error(), "could not find the requested resource") ||
strings.Contains(err.Error(), "no matches for kind") {
log.Info("retrying apply", "error", err.Error())
time.Sleep(delay)
if changeSet, err := r.apply(ctx, kustomization, imp, dirPath); err != nil {
return "", err
} else {
if changeSet != "" {
r.event(ctx, kustomization, revision, events.EventSeverityInfo, changeSet, nil)
}
}
} else {
return "", err
}
} else {
if changeSet != "" && kustomization.Status.LastAppliedRevision != revision {
r.event(ctx, kustomization, revision, events.EventSeverityInfo, changeSet, nil)
}
}
return changeSet, nil
}
func (r *KustomizationReconciler) prune(ctx context.Context, kubeClient client.Client, kustomization kustomizev1.Kustomization, newChecksum string) error {
if !kustomization.Spec.Prune || kustomization.Status.Snapshot == nil {
return nil
}
if kustomization.DeletionTimestamp.IsZero() && kustomization.Status.Snapshot.Checksum == newChecksum {
return nil
}
log := logr.FromContext(ctx)
gc := NewGarbageCollector(kubeClient, *kustomization.Status.Snapshot, newChecksum, logr.FromContext(ctx))
if output, ok := gc.Prune(kustomization.GetTimeout(),
kustomization.GetName(),
kustomization.GetNamespace(),
); !ok {
return fmt.Errorf("garbage collection failed: %s", output)
} else {
if output != "" {
log.Info(fmt.Sprintf("garbage collection completed: %s", output))
r.event(ctx, kustomization, newChecksum, events.EventSeverityInfo, output, nil)
}
}
return nil
}
func (r *KustomizationReconciler) checkHealth(ctx context.Context, statusPoller *polling.StatusPoller, kustomization kustomizev1.Kustomization, revision string, changed bool) error {
if len(kustomization.Spec.HealthChecks) == 0 {
return nil
}
hc := NewHealthCheck(kustomization, statusPoller)
if err := hc.Assess(1 * time.Second); err != nil {
return err
}
healthiness := apimeta.FindStatusCondition(kustomization.Status.Conditions, kustomizev1.HealthyCondition)
healthy := healthiness != nil && healthiness.Status == metav1.ConditionTrue
if !healthy || (kustomization.Status.LastAppliedRevision != revision && changed) {
r.event(ctx, kustomization, revision, events.EventSeverityInfo, "Health check passed", nil)
}
return nil
}
func (r *KustomizationReconciler) reconcileDelete(ctx context.Context, kustomization kustomizev1.Kustomization) (ctrl.Result, error) {
log := logr.FromContext(ctx)
if kustomization.Spec.Prune && !kustomization.Spec.Suspend {
// create any necessary kube-clients
imp := NewKustomizeImpersonation(kustomization, r.Client, r.StatusPoller, "")
client, _, err := imp.GetClient(ctx)
if err != nil {
err = fmt.Errorf("failed to build kube client for Kustomization: %w", err)
log.Error(err, "Unable to prune for finalizer")
return ctrl.Result{}, err
}
if err := r.prune(ctx, client, kustomization, ""); err != nil {
r.event(ctx, kustomization, kustomization.Status.LastAppliedRevision, events.EventSeverityError, "pruning for deleted resource failed", nil)
// Return the error so we retry the failed garbage collection
return ctrl.Result{}, err
}
}
// Record deleted status
r.recordReadiness(ctx, kustomization)
// Remove our finalizer from the list and update it
controllerutil.RemoveFinalizer(&kustomization, kustomizev1.KustomizationFinalizer)
if err := r.Update(ctx, &kustomization); err != nil {
return ctrl.Result{}, err
}
// Stop reconciliation as the object is being deleted
return ctrl.Result{}, nil
}
func (r *KustomizationReconciler) event(ctx context.Context, kustomization kustomizev1.Kustomization, revision, severity, msg string, metadata map[string]string) {
log := logr.FromContext(ctx)
r.EventRecorder.Event(&kustomization, "Normal", severity, msg)
objRef, err := reference.GetReference(r.Scheme, &kustomization)
if err != nil {
log.Error(err, "unable to send event")
return
}
if r.ExternalEventRecorder != nil {
if metadata == nil {
metadata = map[string]string{}
}
if revision != "" {
metadata["revision"] = revision
}
reason := severity
if c := apimeta.FindStatusCondition(kustomization.Status.Conditions, meta.ReadyCondition); c != nil {
reason = c.Reason
}
if err := r.ExternalEventRecorder.Eventf(*objRef, metadata, severity, reason, msg); err != nil {
log.Error(err, "unable to send event")
return
}
}
}
func (r *KustomizationReconciler) recordReadiness(ctx context.Context, kustomization kustomizev1.Kustomization) {
if r.MetricsRecorder == nil {
return
}
log := logr.FromContext(ctx)
objRef, err := reference.GetReference(r.Scheme, &kustomization)
if err != nil {
log.Error(err, "unable to record readiness metric")
return
}
if rc := apimeta.FindStatusCondition(kustomization.Status.Conditions, meta.ReadyCondition); rc != nil {
r.MetricsRecorder.RecordCondition(*objRef, *rc, !kustomization.DeletionTimestamp.IsZero())
} else {
r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
}, !kustomization.DeletionTimestamp.IsZero())
}
}
func (r *KustomizationReconciler) recordSuspension(ctx context.Context, kustomization kustomizev1.Kustomization) {
if r.MetricsRecorder == nil {
return
}
log := logr.FromContext(ctx)
objRef, err := reference.GetReference(r.Scheme, &kustomization)
if err != nil {
log.Error(err, "unable to record suspended metric")
return
}
if !kustomization.DeletionTimestamp.IsZero() {
r.MetricsRecorder.RecordSuspend(*objRef, false)
} else {
r.MetricsRecorder.RecordSuspend(*objRef, kustomization.Spec.Suspend)
}
}
func (r *KustomizationReconciler) patchStatus(ctx context.Context, req ctrl.Request, newStatus kustomizev1.KustomizationStatus) error {
var kustomization kustomizev1.Kustomization
if err := r.Get(ctx, req.NamespacedName, &kustomization); err != nil {
return err
}
patch := client.MergeFrom(kustomization.DeepCopy())
kustomization.Status = newStatus
return r.Status().Patch(ctx, &kustomization, patch)
}
| [
"\"SOURCE_CONTROLLER_LOCALHOST\""
]
| []
| [
"SOURCE_CONTROLLER_LOCALHOST"
]
| [] | ["SOURCE_CONTROLLER_LOCALHOST"] | go | 1 | 0 | |
backend/src/main/java/br/com/estacionamento/main/ApplicationConfiguration.java | package br.com.estacionamento.main;
import javax.sql.DataSource;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
import org.springframework.jdbc.datasource.DriverManagerDataSource;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.transaction.annotation.EnableTransactionManagement;
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(basePackages = "br.com.estacionamento.model", entityManagerFactoryRef = "factoryBean")
@ComponentScan(basePackages = "br.com.estacionamento.controller")
public class ApplicationConfiguration {
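// Illustrative override (assumed values, not part of this project):
//   DATABASE_URL=jdbc:mysql://db-host:3306/estacionamento?user=app&password=secret
// When the variable is unset, the hardcoded local MySQL URL below is used.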
public static String getDatabaseUrl() {
String url = System.getenv("DATABASE_URL");
if (url == null) {
url = "jdbc:mysql://localhost:3306/estacionamento?user=root&password=123456";
}
return url;
}
@Bean
public LocalContainerEntityManagerFactoryBean factoryBean() {
LocalContainerEntityManagerFactoryBean factoryBean = new LocalContainerEntityManagerFactoryBean();
factoryBean.setDataSource(dataSource());
factoryBean.setPackagesToScan("br.com.estacionamento.model");
return factoryBean;
}
@Bean
public DataSource dataSource() {
DriverManagerDataSource driver = new DriverManagerDataSource();
driver.setDriverClassName("com.mysql.jdbc.Driver");
driver.setUrl(getDatabaseUrl());
return driver;
}
}
| [
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | java | 1 | 0 | |
enterprise/internal/insights/insights.go | package insights
import (
"context"
"database/sql"
"log"
"os"
"strconv"
"time"
"github.com/cockroachdb/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise"
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/migration"
"github.com/sourcegraph/sourcegraph/enterprise/internal/insights/resolvers"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/conf/deploy"
"github.com/sourcegraph/sourcegraph/internal/database"
connections "github.com/sourcegraph/sourcegraph/internal/database/connections/live"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/oobmigration"
)
// IsEnabled tells if code insights are enabled or not.
func IsEnabled() bool {
if v, _ := strconv.ParseBool(os.Getenv("DISABLE_CODE_INSIGHTS")); v {
// Code insights can always be disabled. This can be a helpful escape hatch if e.g. there
// are issues with (or connecting to) the codeinsights-db deployment and it is preventing
// the Sourcegraph frontend or repo-updater from starting.
//
// It is also useful in dev environments if you do not wish to spend resources running Code
// Insights.
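//
// For example (illustrative): starting the service with DISABLE_CODE_INSIGHTS=true
// turns the feature off without any other configuration changes.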
return false
}
if deploy.IsDeployTypeSingleDockerContainer(deploy.Type()) {
// Code insights is not supported in single-container Docker demo deployments.
return false
}
return true
}
// Init initializes the given enterpriseServices to include the required resolvers for insights.
func Init(ctx context.Context, postgres database.DB, _ conftypes.UnifiedWatchable, outOfBandMigrationRunner *oobmigration.Runner, enterpriseServices *enterprise.Services, observationContext *observation.Context) error {
if !IsEnabled() {
if deploy.IsDeployTypeSingleDockerContainer(deploy.Type()) {
enterpriseServices.InsightsResolver = resolvers.NewDisabledResolver("backend-run code insights are not available on single-container deployments")
} else {
enterpriseServices.InsightsResolver = resolvers.NewDisabledResolver("code insights has been disabled")
}
return nil
}
timescale, err := InitializeCodeInsightsDB("frontend")
if err != nil {
return err
}
enterpriseServices.InsightsResolver = resolvers.New(timescale, postgres)
insightsMigrator := migration.NewMigrator(timescale, postgres)
// This id (14) was defined arbitrarily in this migration file: 1528395945_settings_migration_out_of_band.up.sql.
if err := outOfBandMigrationRunner.Register(14, insightsMigrator, oobmigration.MigratorOptions{Interval: 10 * time.Second}); err != nil {
log.Fatalf("failed to register settings migration job: %v", err)
}
return nil
}
// InitializeCodeInsightsDB connects to and initializes the Code Insights Timescale DB, running
// database migrations before returning. It is safe to call from multiple services/containers (in
// which case, one's migration will win and the other caller will receive an error and should exit
// and restart until the other finishes.)
func InitializeCodeInsightsDB(app string) (*sql.DB, error) {
dsn := conf.GetServiceConnectionValueAndRestartOnChange(func(serviceConnections conftypes.ServiceConnections) string {
return serviceConnections.CodeInsightsTimescaleDSN
})
db, err := connections.NewCodeInsightsDB(dsn, app, true, &observation.TestContext)
if err != nil {
return nil, errors.Errorf("Failed to connect to codeinsights database: %s", err)
}
return db, nil
}
| [
"\"DISABLE_CODE_INSIGHTS\""
]
| []
| [
"DISABLE_CODE_INSIGHTS"
]
| [] | ["DISABLE_CODE_INSIGHTS"] | go | 1 | 0 | |
news2rss.py | #!/usr/bin/env python3
#
# news2rss.py by lenormf
# A lightweight HTTP server that turns NewsAPI data into RSS feeds
#
import os
import sys
import logging
import inspect
import argparse
import bottle
from bottle import get, abort
from newsapi import NewsApiClient
from newsapi.newsapi_exception import NewsAPIException
from feedgen.feed import FeedGenerator
class NewsAPIPlugin(object):
name = "news_api"
api = 2
def __init__(self, keyword="newsapi", api_key=None):
self.keyword = keyword
self.newsapi = NewsApiClient(api_key=api_key)
try:
self.newsapi._sources_cache = self.newsapi.get_sources()["sources"]
logging.debug("sources cache: %r", self.newsapi._sources_cache)
except NewsAPIException as e:
logging.error("unable to fetch list of sources: %s", e)
raise bottle.PluginError("unable to initialise NewsAPIPlugin")
def setup(self, app):
for other in app.plugins:
if not isinstance(other, NewsAPIPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another '%s' plugin with conflicting settings (non-unique keyword)." % self.name)
def apply(self, callback, context):
conf = context.config.get(NewsAPIPlugin.name) or {}
keyword = conf.get("keyword", self.keyword)
newsapi = conf.get("newsapi", self.newsapi)
if self.keyword not in inspect.signature(callback).parameters:
return callback
def wrapper(*args, **kwargs):
kwargs[keyword] = newsapi
return callback(*args, **kwargs)
return wrapper
def _feed_rss(source_meta, articles):
feed = FeedGenerator()
if "name" not in source_meta:
logging.error("no 'name' entry in the source meta")
abort(401, "an error occurred while generating the feed")
feed.title(source_meta["name"])
if "url" not in source_meta:
logging.error("no 'url' entry in the source meta")
abort(401, "an error occurred while generating the feed")
feed.link(href=source_meta["url"], rel="self")
if "description" not in source_meta:
logging.error("no 'description' entry in the source meta")
abort(401, "an error occurred while generating the feed")
feed.description(source_meta["description"])
if "id" in source_meta:
feed.id(source_meta["id"])
if "category" in source_meta:
feed.category(term=source_meta["category"])
if "language" in source_meta:
feed.language(source_meta["language"])
for article in articles:
entry = feed.add_entry(order="append")
logging.debug("article: %r", article)
if "title" not in article:
logging.error("no 'title' entry in the article")
abort(401, "an error occurred while adding an entry")
entry.title(article["title"])
if "content" not in article:
logging.error("no 'content' entry in the article")
abort(401, "an error occurred while adding an entry")
entry.content(article["content"])
if "description" not in article:
logging.error("no 'description' entry in the article")
abort(401, "an error occurred while adding an entry")
entry.description(article["description"])
if "author" in article:
entry.author(name=article["author"], email="[email protected]")
if "url" in article:
entry.link(href=article["url"], rel="alternate")
if "publishedAt" in article:
entry.pubDate(article["publishedAt"])
try:
return feed.rss_str()
except ValueError as e:
logging.error("invalid data: %s", e)
abort(401, "an error occurred while generating the feed")
@get("/<feed_type>/<source_id>")
def get_feed_sources(feed_type, source_id, newsapi):
source_meta = next((source for source in newsapi._sources_cache if source["id"] == source_id),
None)
if not source_meta:
abort(401, "invalid source identifier")
try:
# Maximum amount of articles returned in a single page: 100
articles = newsapi.get_everything(sources=source_id, page_size=100)
logging.debug("total amount of articles: %d", articles["totalResults"])
except (ValueError, TypeError) as e:
logging.error("invalid request: %s", e)
abort(401, "an error occurred while fetching the articles")
except NewsAPIException as e:
logging.error("couldn't query the API: %s", e)
abort(401, "an error occurred while fetching the articles")
logging.debug("requested feed type: %s", feed_type)
if feed_type == "rss":
return _feed_rss(source_meta, articles["articles"])
else:
abort(401, "invalid feed type")
class CliOptions(argparse.Namespace):
def __init__(self, args):
parser = argparse.ArgumentParser(description="News2RSS - An HTTP server that returns feeds of news articles")
parser.add_argument("-d", "--debug", default=False, action="store_true", help="Display debug messages")
parser.add_argument("-v", "--verbose", default=False, action="store_true", help="Display more messages")
parser.add_argument("-H", "--host", default="localhost", help="Hostname to bind to")
parser.add_argument("-P", "--port", type=int, default=8080, help="Port to listen on")
parser.add_argument("-X", "--api-key", help="News API authentication key")
parser.parse_args(args, self)
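# Illustrative invocation (the API key value is a placeholder):
#   NEWS2RSS_API_KEY=<your-newsapi-key> ./news2rss.py -H 0.0.0.0 -P 8080 -v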
def main(av):
cli_options = CliOptions(av[1:])
logging_level = logging.WARN
if cli_options.debug:
logging_level = logging.DEBUG
elif cli_options.verbose:
logging_level = logging.INFO
logging.basicConfig(level=logging_level,
format="[%(asctime)s][%(levelname)s]: %(message)s")
api_key = cli_options.api_key or os.getenv("NEWS2RSS_API_KEY")
if not api_key:
logging.critical("No API key set")
return 1
bottle.install(NewsAPIPlugin(api_key=api_key))
bottle.run(host=cli_options.host, port=cli_options.port,
debug=cli_options.debug, reloader=cli_options.debug)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| []
| []
| [
"NEWS2RSS_API_KEY"
]
| [] | ["NEWS2RSS_API_KEY"] | python | 1 | 0 | |
vendor/github.com/containers/buildah/util/util.go | package util
import (
"fmt"
"io"
"net/url"
"os"
"path"
"strings"
"syscall"
"github.com/containers/buildah/pkg/cgroups"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/signature"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/docker/distribution/registry/api/errcode"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
minimumTruncatedIDLength = 3
// DefaultTransport is a prefix that we apply to an image name if we
// can't find one in the local Store, in order to generate a source
// reference for the image that we can then copy to the local Store.
DefaultTransport = "docker://"
)
var (
// RegistryDefaultPathPrefix contains a per-registry listing of default prefixes
// to prepend to image names that only contain a single path component.
RegistryDefaultPathPrefix = map[string]string{
"index.docker.io": "library",
"docker.io": "library",
}
)
// ResolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
// correspond to in the set of configured registries, the transport used to
// pull the image, and a boolean which is true iff
// 1) the list of search registries was used, and 2) it was empty.
//
// The returned image names never include a transport: prefix, and if transport != "",
// (transport, image) should be a valid input to alltransports.ParseImageName.
// transport == "" indicates that image that already exists in a local storage,
// and the name is valid for store.Image() / storage.Transport.ParseStoreReference().
//
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
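//
// Illustrative example (assumption, not from the upstream docs), with the configured search
// registries being just ["docker.io"]:
//   ResolveName("alpine", "", sc, store) ->
//     ["localhost/alpine", "docker.io/library/alpine"], "docker://", false, nil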
func ResolveName(name string, firstRegistry string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
if name == "" {
return nil, "", false, nil
}
// Maybe it's a truncated image ID. Don't prepend a registry name, then.
if len(name) >= minimumTruncatedIDLength {
if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need only expand the ID.
return []string{img.ID}, "", false, nil
}
}
// If the image includes a transport's name as a prefix, use it as-is.
if strings.HasPrefix(name, DefaultTransport) {
return []string{strings.TrimPrefix(name, DefaultTransport)}, DefaultTransport, false, nil
}
split := strings.SplitN(name, ":", 2)
if len(split) == 2 {
if trans := transports.Get(split[0]); trans != nil {
return []string{split[1]}, trans.Name(), false, nil
}
}
// If the image name already included a domain component, we're done.
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
return nil, "", false, errors.Wrapf(err, "error parsing image name %q", name)
}
if named.String() == name {
// Parsing produced the same result, so there was a domain name in there to begin with.
return []string{name}, DefaultTransport, false, nil
}
if reference.Domain(named) != "" && RegistryDefaultPathPrefix[reference.Domain(named)] != "" {
// If this domain can cause us to insert something in the middle, check if that happened.
repoPath := reference.Path(named)
domain := reference.Domain(named)
tag := ""
if tagged, ok := named.(reference.Tagged); ok {
tag = ":" + tagged.Tag()
}
digest := ""
if digested, ok := named.(reference.Digested); ok {
digest = "@" + digested.Digest().String()
}
defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/"
if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name {
// Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with.
return []string{name}, DefaultTransport, false, nil
}
}
// Figure out the list of registries.
var registries []string
searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc)
if err != nil {
logrus.Debugf("unable to read configured registries to complete %q: %v", name, err)
searchRegistries = nil
}
for _, registry := range searchRegistries {
reg, err := sysregistriesv2.FindRegistry(sc, registry)
if err != nil {
logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err)
continue
}
if reg == nil || !reg.Blocked {
registries = append(registries, registry)
}
}
searchRegistriesAreEmpty := len(registries) == 0
// Create all of the combinations. Some registries need an additional component added, so
// use our lookaside map to keep track of them. If there are no configured registries, we'll
// return a name using "localhost" as the registry name.
candidates := []string{}
initRegistries := []string{"localhost"}
if firstRegistry != "" && firstRegistry != "localhost" {
initRegistries = append([]string{firstRegistry}, initRegistries...)
}
for _, registry := range append(initRegistries, registries...) {
if registry == "" {
continue
}
middle := ""
if prefix, ok := RegistryDefaultPathPrefix[registry]; ok && !strings.ContainsRune(name, '/') {
middle = prefix
}
candidate := path.Join(registry, middle, name)
candidates = append(candidates, candidate)
}
return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
}
// ExpandNames takes unqualified names, parses them as image names, and returns
// the fully expanded result, including a tag. Names which don't include a registry
// name will be marked for the most-preferred registry (i.e., the first one in our
// configuration).
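// Illustrative example (assumption, not from the upstream docs): with no firstRegistry and no
// configured search registries, ExpandNames([]string{"alpine"}, ...) yields ["localhost/alpine:latest"].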
func ExpandNames(names []string, firstRegistry string, systemContext *types.SystemContext, store storage.Store) ([]string, error) {
expanded := make([]string, 0, len(names))
for _, n := range names {
var name reference.Named
nameList, _, _, err := ResolveName(n, firstRegistry, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
if len(nameList) == 0 {
named, err := reference.ParseNormalizedNamed(n)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
name = named
} else {
named, err := reference.ParseNormalizedNamed(nameList[0])
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", nameList[0])
}
name = named
}
name = reference.TagNameOnly(name)
expanded = append(expanded, name.String())
}
return expanded, nil
}
// FindImage locates the locally-stored image which corresponds to a given name.
func FindImage(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image string) (types.ImageReference, *storage.Image, error) {
var ref types.ImageReference
var img *storage.Image
var err error
names, _, _, err := ResolveName(image, firstRegistry, systemContext, store)
if err != nil {
return nil, nil, errors.Wrapf(err, "error parsing name %q", image)
}
for _, name := range names {
ref, err = is.Transport.ParseStoreReference(store, name)
if err != nil {
logrus.Debugf("error parsing reference to image %q: %v", name, err)
continue
}
img, err = is.Transport.GetStoreImage(store, ref)
if err != nil {
img2, err2 := store.Image(name)
if err2 != nil {
logrus.Debugf("error locating image %q: %v", name, err2)
continue
}
img = img2
}
break
}
if ref == nil || img == nil {
return nil, nil, errors.Wrapf(err, "error locating image with name %q (%v)", image, names)
}
return ref, img, nil
}
// AddImageNames adds the specified names to the specified image.
func AddImageNames(store storage.Store, firstRegistry string, systemContext *types.SystemContext, image *storage.Image, addNames []string) error {
names, err := ExpandNames(addNames, firstRegistry, systemContext, store)
if err != nil {
return err
}
err = store.SetNames(image.ID, append(image.Names, names...))
if err != nil {
return errors.Wrapf(err, "error adding names (%v) to image %q", names, image.ID)
}
return nil
}
// GetFailureCause checks the type of the error "err" and returns a new
// error message that reflects the reason of the failure.
// In case err type is not a familiar one the error "defaultError" is returned.
func GetFailureCause(err, defaultError error) error {
switch nErr := errors.Cause(err).(type) {
case errcode.Errors:
return err
case errcode.Error, *url.Error:
return nErr
default:
return defaultError
}
}
// WriteError writes `lastError` into `w` if not nil and return the next error `err`
func WriteError(w io.Writer, err error, lastError error) error {
if lastError != nil {
fmt.Fprintln(w, lastError)
}
return err
}
// Runtime is the default command to use to run the container.
func Runtime() string {
runtime := os.Getenv("BUILDAH_RUNTIME")
if runtime != "" {
return runtime
}
// Need to switch default until runc supports cgroups v2
if unified, _ := cgroups.IsCgroup2UnifiedMode(); unified {
return "crun"
}
return DefaultRuntime
}
// StringInSlice returns a boolean indicating if the exact value s is present
// in the slice slice.
func StringInSlice(s string, slice []string) bool {
for _, v := range slice {
if v == s {
return true
}
}
return false
}
// GetContainerIDs uses ID mappings to compute the container-level IDs that will
// correspond to a UID/GID pair on the host.
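// Illustrative example (assumed mapping): with uidmap {ContainerID: 0, HostID: 100000, Size: 65536},
// host UID 100000 maps to container UID 0 and host UID 101000 maps to container UID 1000.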
func GetContainerIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
uidMapped := true
for _, m := range uidmap {
uidMapped = false
if uid >= m.HostID && uid < m.HostID+m.Size {
uid = (uid - m.HostID) + m.ContainerID
uidMapped = true
break
}
}
if !uidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
gidMapped = false
if gid >= m.HostID && gid < m.HostID+m.Size {
gid = (gid - m.HostID) + m.ContainerID
gidMapped = true
break
}
}
if !gidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
// GetHostIDs uses ID mappings to compute the host-level IDs that will
// correspond to a UID/GID pair in the container.
func GetHostIDs(uidmap, gidmap []specs.LinuxIDMapping, uid, gid uint32) (uint32, uint32, error) {
uidMapped := true
for _, m := range uidmap {
uidMapped = false
if uid >= m.ContainerID && uid < m.ContainerID+m.Size {
uid = (uid - m.ContainerID) + m.HostID
uidMapped = true
break
}
}
if !uidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map UID %d", uidmap, uid)
}
gidMapped := true
for _, m := range gidmap {
gidMapped = false
if gid >= m.ContainerID && gid < m.ContainerID+m.Size {
gid = (gid - m.ContainerID) + m.HostID
gidMapped = true
break
}
}
if !gidMapped {
return 0, 0, errors.Errorf("container uses ID mappings (%#v), but doesn't map GID %d", gidmap, gid)
}
return uid, gid, nil
}
// GetHostRootIDs uses ID mappings in spec to compute the host-level IDs that will
// correspond to UID/GID 0/0 in the container.
func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) {
if spec.Linux == nil {
return 0, 0, nil
}
return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0)
}
// GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx)
if err != nil {
return nil, err
}
policyContext, err := signature.NewPolicyContext(policy)
if err != nil {
return nil, err
}
return policyContext, nil
}
// logIfNotErrno logs the error message unless err is either nil or one of the
// listed syscall.Errno values. It returns true if it logged an error.
func logIfNotErrno(err error, what string, ignores ...syscall.Errno) (logged bool) {
if err == nil {
return false
}
if errno, isErrno := err.(syscall.Errno); isErrno {
for _, ignore := range ignores {
if errno == ignore {
return false
}
}
}
logrus.Error(what)
return true
}
// LogIfNotRetryable logs "what" if err is set and is not an EINTR or EAGAIN
// syscall.Errno. Returns "true" if we can continue.
func LogIfNotRetryable(err error, what string) (retry bool) {
return !logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN)
}
// LogIfUnexpectedWhileDraining logs "what" if err is set and is not an EINTR
// or EAGAIN or EIO syscall.Errno.
func LogIfUnexpectedWhileDraining(err error, what string) {
logIfNotErrno(err, what, syscall.EINTR, syscall.EAGAIN, syscall.EIO)
}
// TruncateString trims the given string to the provided maximum amount of
// characters and shortens it with `...`.
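// For example (illustrative): TruncateString("kubernetes", 6) returns "kub...".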
func TruncateString(str string, to int) string {
newStr := str
if len(str) > to {
const tr = "..."
if to > len(tr) {
to -= len(tr)
}
newStr = str[0:to] + tr
}
return newStr
}
| [
"\"BUILDAH_RUNTIME\""
]
| []
| [
"BUILDAH_RUNTIME"
]
| [] | ["BUILDAH_RUNTIME"] | go | 1 | 0 | |
src/go/MONIT/annotationManager.go | package main
// File : annotationManager.go
// Author : Rahul Indra <indrarahul2013 AT gmail dot com>
// Created : Tue, 1 Sep 2020 14:08:00 GMT
// Description: CERN MONIT infrastructure Annotation Manager CLI Tool
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
)
// -------VARIABLES-------
// MAX timeStamp // Saturday, May 24, 3000 3:43:26 PM
var maxtstmp int64 = 32516091806
var microSec int64 = 1000000
var milliSec int64 = 1000
// token name
var token string
// text for annotating
var annotation string
// annotation ID for deleting single annotation
var annotationID int
// tags list seperated by comma
var tags string
// Action to be performed. [list, create, delete, deleteall, update] Default: list
var action string
// time range
var trange string
// boolean for generating default config
var generateConfig *bool
// Config filepath
var configFilePath string
// config variable
var configJSON config
// -------VARIABLES-------
// -------STRUCTS---------
// Grafana Dashboard Annotation API Data struct
type annotationData struct {
ID int `json:"id"` // Annotation Id
DashboardID int `json:"dashboardId"` // Dashboard ID
Created int64 `json:"created"` // Timestamp when the annotation created
Updated int64 `json:"updated"` // Timestamp when the annotation last updated
Time int64 `json:"time"` // Start Time of the annotation
TimeEnd int64 `json:"timeEnd"` // End Time of the annotation
Text string `json:"text"` // Text field of the annotation
Tags []string `json:"tags"` // List of tags attached with the annotation
}
type config struct {
GrafanaBaseURL string `json:"grafanaBaseURL"` // Grafana Annotation Base URL
FindDashboardAPI string `json:"findDashboardAPI"` // API endpoint for all dashboards with given tags
ListAnnotationsAPI string `json:"listAnnotationsAPI"` // API endpoint for fetching all annotations
CreateAnnotationAPI string `json:"createAnnotationAPI"` // API endpoint for creating an annotation
UpdateAnnotationAPI string `json:"updateAnnotationAPI"` // API endpoint for updating an annotation
DeleteAnnotationAPI string `json:"deleteAnnotationAPI"` // API endpoint for deleting an annotation
Columns []string `json:"columns"` // column names for annotations info
HTTPTimeout int `json:"httpTimeout"` // http timeout to connect to AM
Verbose int `json:"verbose"` // verbosity level
Token string `json:"token"` // CERN SSO token to use
}
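// An illustrative ~/.annotationManagerConfig.json (the values mirror the defaults set in
// parseConfig below; the token is a placeholder):
//   {
//     "grafanaBaseURL": "https://monit-grafana.cern.ch",
//     "listAnnotationsAPI": "/api/annotations/",
//     "createAnnotationAPI": "/api/annotations/",
//     "updateAnnotationAPI": "/api/annotations/",
//     "deleteAnnotationAPI": "/api/annotations/",
//     "findDashboardAPI": "/api/search",
//     "httpTimeout": 5,
//     "verbose": 0,
//     "token": "<grafana-api-token>"
//   }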
// -------STRUCTS---------
// function making a GET request to the Grafana annotations endpoint for fetching annotations.
func getAnnotations(data *[]annotationData, tags []string) {
var headers [][]string
bearer := fmt.Sprintf("Bearer %s", configJSON.Token)
h := []string{"Authorization", bearer}
headers = append(headers, h)
h = []string{"Accept", "application/json"}
headers = append(headers, h)
// Ex: /api/annotations?from=1598366184000&to=1598366184000&tags=cmsweb&tags=cmsmon-int
v := url.Values{}
if trange != "" {
tms := parseTimes(false) // False because Listing annotations with past time offset
if len(tms) == 2 {
v.Add("from", strings.Trim(strconv.FormatInt(tms[0], 10), " "))
v.Add("to", strings.Trim(strconv.FormatInt(tms[1], 10), " "))
} else if len(tms) == 1 {
v.Add("from", strings.Trim(strconv.FormatInt(tms[0], 10), " "))
v.Add("to", strings.Trim(strconv.FormatInt(time.Now().UTC().Unix()*int64(milliSec), 10), " "))
}
}
for _, tag := range tags {
v.Add("tags", strings.Trim(tag, " "))
}
apiURL := fmt.Sprintf("%s%s?%s", configJSON.GrafanaBaseURL, configJSON.ListAnnotationsAPI, v.Encode())
if configJSON.Verbose > 0 {
log.Println(apiURL)
}
req, err := http.NewRequest("GET", apiURL, nil)
if err != nil {
log.Fatalf("Unable to make request to %s, error: %s", apiURL, err)
}
for _, v := range headers {
if len(v) == 2 {
req.Header.Add(v[0], v[1])
}
}
timeout := time.Duration(configJSON.HTTPTimeout) * time.Second
client := &http.Client{Timeout: timeout}
if configJSON.Verbose > 1 {
log.Println("URL", apiURL)
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
log.Println("Request: ", string(dump))
}
}
resp, err := client.Do(req)
if err != nil {
log.Fatalf("Response Error, error: %v\n", err)
}
defer resp.Body.Close()
byteValue, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatalf("Unable to read JSON Data from Grafana Annotation GET API, error: %v\n", err)
}
err = json.Unmarshal(byteValue, &data)
if err != nil {
if configJSON.Verbose > 0 {
log.Println(string(byteValue))
}
log.Fatalf("Unable to parse JSON Data from Grafana Annotation GET API, error: %v\n", err)
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Println("Response: ", string(dump))
}
}
}
// function making a DELETE request to the Grafana annotations endpoint for deleting an annotation.
func deleteAnnotationHelper(annotationID int) {
apiurl := fmt.Sprintf("%s%s%d", configJSON.GrafanaBaseURL, configJSON.DeleteAnnotationAPI, annotationID)
req, err := http.NewRequest("DELETE", apiurl, nil)
if err != nil {
log.Fatalf("Request Error, error: %v\n", err)
}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", configJSON.Token))
timeout := time.Duration(configJSON.HTTPTimeout) * time.Second
client := &http.Client{Timeout: timeout}
if configJSON.Verbose > 1 {
log.Println("DELETE", apiurl)
} else if configJSON.Verbose > 2 {
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
log.Println("Request: ", string(dump))
}
}
resp, err := client.Do(req)
if err != nil {
log.Fatalf("Response Error, error: %v\n", err)
}
if resp.StatusCode != http.StatusOK {
log.Fatalf("Http Response Code Error, status code: %d", resp.StatusCode)
}
defer resp.Body.Close()
if configJSON.Verbose > 2 {
dump, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Println("Response: ", string(dump))
}
}
}
//
// The following block of code was taken from
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/alert.go#L204
// Helper function for converting time difference in a meaningful manner
func diff(a, b time.Time) (array []int) {
if a.Location() != b.Location() {
b = b.In(a.Location())
}
if a.After(b) {
a, b = b, a
}
y1, M1, d1 := a.Date()
y2, M2, d2 := b.Date()
h1, m1, s1 := a.Clock()
h2, m2, s2 := b.Clock()
var year = int(y2 - y1)
var month = int(M2 - M1)
var day = int(d2 - d1)
var hour = int(h2 - h1)
var min = int(m2 - m1)
var sec = int(s2 - s1)
// Normalize negative values
if sec < 0 {
sec += 60
min--
}
if min < 0 {
min += 60
hour--
}
if hour < 0 {
hour += 24
day--
}
if day < 0 {
// days in month:
t := time.Date(y1, M1, 32, 0, 0, 0, 0, time.UTC)
day += 32 - t.Day()
month--
}
if month < 0 {
month += 12
year--
}
array = append(array, year)
array = append(array, month)
array = append(array, day)
array = append(array, hour)
array = append(array, min)
array = append(array, sec)
return
}
//
// The following block of code was taken from
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/alert.go#L259
// Helper function for time difference between two time.Time objects
func timeDiffHelper(timeList []int) (dif string) {
for ind := range timeList {
if timeList[ind] > 0 {
switch ind {
case 0:
dif += strconv.Itoa(timeList[ind]) + "Y "
break
case 1:
dif += strconv.Itoa(timeList[ind]) + "M "
break
case 2:
dif += strconv.Itoa(timeList[ind]) + "D "
break
case 3:
dif += strconv.Itoa(timeList[ind]) + "h "
break
case 4:
dif += strconv.Itoa(timeList[ind]) + "m "
break
case 5:
dif += strconv.Itoa(timeList[ind]) + "s "
break
default:
break
}
}
}
return
}
//
// The following block of code was taken from
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/alert.go#L291
// Function for time difference between two time.Time objects
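// Illustrative outputs (assumed inputs): a t2 roughly two hours in the past yields "2h AGO",
// a t2 two hours ahead yields "IN 2h ", and with duration == 1 only the bare difference is returned.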
func timeDiff(t1 time.Time, t2 time.Time, duration int) string {
if t1.After(t2) {
timeList := diff(t1, t2)
return timeDiffHelper(timeList) + "AGO"
}
timeList := diff(t2, t1)
if duration == 1 {
return timeDiffHelper(timeList)
}
return "IN " + timeDiffHelper(timeList)
}
//
// The following block of code was taken from (with few changes done)
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/alert.go#L426
// Function for printing annotations in Plain text format
func tabulate(data []annotationData) {
w := new(tabwriter.Writer)
w.Init(os.Stdout, 8, 8, 0, '\t', 0)
defer w.Flush()
fmt.Fprintf(w, "\n ")
for _, each := range configJSON.Columns {
fmt.Fprintf(w, "%s\t", each)
}
fmt.Fprintf(w, "\n")
for _, each := range data {
fmt.Fprintf(w, " %d\t%d\t%s\t%s\t%s",
each.ID,
each.DashboardID,
strings.Split(each.Text, "\n<a")[0],
each.Tags,
timeDiff(time.Now(), time.Unix(0, each.Time*int64(microSec)), 0), // times should be in microseconds of Unix since epoch
)
if each.TimeEnd == maxtstmp {
fmt.Fprintf(w, "\t%s", "Undefined")
fmt.Fprintf(w, "\t%s\n", "Undefined")
} else {
fmt.Fprintf(w, "\t%s", timeDiff(time.Now(), time.Unix(0, each.TimeEnd*int64(microSec)), 0)) // times should be in microseconds of Unix since epoch
fmt.Fprintf(w, "\t%s\n", timeDiff(time.Unix(0, each.Time*int64(microSec)), time.Unix(0, each.TimeEnd*int64(microSec)), 1)) // times should be in microseconds of Unix since epoch
}
}
}
// helper function for listing annotations
func listAnnotation() []annotationData {
var aData []annotationData
tagList := parseTags(tags)
getAnnotations(&aData, tagList)
if len(aData) == 0 {
log.Printf("NO ANNOTATION FOUND\n")
return nil
}
tabulate(aData)
return aData
}
//
// The following block of code was taken from (with few changes done)
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/monit.go#L603
// helper function to find dashboard info
func findDashboard() []map[string]interface{} {
tagsList := parseTags(tags)
var headers [][]string
bearer := fmt.Sprintf("Bearer %s", configJSON.Token)
h := []string{"Authorization", bearer}
headers = append(headers, h)
h = []string{"Accept", "application/json"}
headers = append(headers, h)
// example: /api/search?query=Production%20Overview&starred=true&tag=prod
v := url.Values{}
for _, tag := range tagsList {
v.Add("tag", strings.Trim(tag, " "))
}
rurl := fmt.Sprintf("%s%s?%s", configJSON.GrafanaBaseURL, configJSON.FindDashboardAPI, v.Encode())
if configJSON.Verbose > 0 {
log.Println(rurl)
}
req, err := http.NewRequest("GET", rurl, nil)
if err != nil {
log.Fatalf("Unable to make request to %s, error: %s", rurl, err)
}
for _, v := range headers {
if len(v) == 2 {
req.Header.Add(v[0], v[1])
}
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
log.Println("request: ", string(dump))
}
}
timeout := time.Duration(configJSON.HTTPTimeout) * time.Second
client := &http.Client{Timeout: timeout}
resp, err := client.Do(req)
if err != nil {
log.Fatalf("Unable to get response from %s, error: %s", rurl, err)
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Println("response: ", string(dump))
}
}
defer resp.Body.Close()
var data []map[string]interface{}
// Deserialize the response into a map.
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
log.Fatalf("Error parsing the response body: %s", err)
}
return data
}
//
// The following block of code was taken from (with few changes done)
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/monit.go#L657
// helper function to add annotation
func addAnnotation(data []byte) {
var headers [][]string
bearer := fmt.Sprintf("Bearer %s", configJSON.Token)
h := []string{"Authorization", bearer}
headers = append(headers, h)
h = []string{"Content-Type", "application/json"}
headers = append(headers, h)
rurl := fmt.Sprintf("%s%s", configJSON.GrafanaBaseURL, configJSON.CreateAnnotationAPI)
if configJSON.Verbose > 0 {
log.Println(rurl)
}
req, err := http.NewRequest("POST", rurl, bytes.NewBuffer(data))
if err != nil {
log.Fatalf("Unable to make request to %s, error: %s", rurl, err)
}
for _, v := range headers {
if len(v) == 2 {
req.Header.Add(v[0], v[1])
}
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
log.Println("request: ", string(dump))
}
}
timeout := time.Duration(configJSON.HTTPTimeout) * time.Second
client := &http.Client{Timeout: timeout}
resp, err := client.Do(req)
if err != nil {
log.Fatalf("Unable to get response from %s, error: %s", rurl, err)
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Println("response:", string(dump))
}
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatalf("Unable to read JSON Data from Grafana Annotation POST API, error: %v\n", err)
}
if configJSON.Verbose > 1 {
log.Println("response Status:", resp.Status)
log.Println("response Headers:", resp.Header)
log.Println("response Body:", string(body))
}
}
//
// The following block of code was taken from (with few changes done)
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/monit.go#L813
func createAnnotation() {
if annotation != "" {
if tags != "" {
dashboards := findDashboard()
if trange == "" {
log.Fatalf("Unable to create Annotation. Please provide Time Range!\n")
}
timeRanges := parseTimes(true) // True because we are creating annotation and time ranges should be in future
if configJSON.Verbose > 0 {
log.Println("timeRanges", timeRanges)
}
for _, r := range dashboards {
rec := make(map[string]interface{})
rec["dashboardId"] = r["id"]
rec["time"] = timeRanges[0]
rec["timeEnd"] = timeRanges[1]
rec["tags"] = parseTags(tags)
rec["text"] = annotation
data, err := json.Marshal(rec)
if err != nil {
log.Fatalf("Unable to marshal the data %+v, error %v\n", rec, err)
}
if configJSON.Verbose > 0 {
log.Printf("Add annotation: %+v", rec)
}
addAnnotation(data)
}
log.Printf("Annotation Created Successfully\n")
} else {
log.Fatalf("Can't Create annotation. Please provide tags!\n")
}
} else {
log.Fatalf("Can't Create annotation without any text!\n")
}
}
// helper function for updating an annotation which makes PUT request
func updateAnnotationHelper(annotationID int, data []byte) {
var headers [][]string
bearer := fmt.Sprintf("Bearer %s", configJSON.Token)
h := []string{"Authorization", bearer}
headers = append(headers, h)
h = []string{"Content-Type", "application/json"}
headers = append(headers, h)
h = []string{"Accept", "application/json"}
headers = append(headers, h)
apiurl := fmt.Sprintf("%s%s%d", configJSON.GrafanaBaseURL, configJSON.DeleteAnnotationAPI, annotationID)
if configJSON.Verbose > 0 {
log.Println(apiurl)
}
req, err := http.NewRequest("PUT", apiurl, bytes.NewBuffer(data))
if err != nil {
log.Fatalf("Unable to make request to %s, error: %s", apiurl, err)
}
for _, v := range headers {
if len(v) == 2 {
req.Header.Add(v[0], v[1])
}
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
log.Println("request: ", string(dump))
}
}
timeout := time.Duration(configJSON.HTTPTimeout) * time.Second
client := &http.Client{Timeout: timeout}
resp, err := client.Do(req)
if err != nil {
log.Fatalf("Unable to get response from %s, error: %s", apiurl, err)
}
if configJSON.Verbose > 1 {
dump, err := httputil.DumpResponse(resp, true)
if err == nil {
log.Println("response:", string(dump))
}
}
if resp.StatusCode != http.StatusOK {
log.Fatalf("HTTP Respose error, code: %d", resp.StatusCode)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatalf("Unable to read JSON Data from Grafana Annotation PUT API, error: %v\n", err)
}
if configJSON.Verbose > 1 {
log.Println("response Status:", resp.Status)
log.Println("response Headers:", resp.Header)
log.Println("response Body:", string(body))
}
}
// function which contains the logic of updating an annotation
func updateAnnotation() {
updatedData := make(map[string]interface{})
if trange != "" {
tms := parseTimes(true)
if len(tms) == 2 {
updatedData["time"] = tms[0]
updatedData["timeEnd"] = tms[1]
}
}
if annotation != "" {
updatedData["text"] = annotation
}
if tags != "" {
updatedData["tags"] = parseTags(tags)
}
data, err := json.Marshal(updatedData)
if err != nil {
log.Fatalf("Unable to parse Data for Updation, update failed !, error: %v", err)
}
updateAnnotationHelper(annotationID, data)
log.Printf("Annotation with id:%d has been updated successfully!\n", annotationID)
if configJSON.Verbose > 1 {
log.Printf("Annotation Update Data :%v\n", string(data))
}
}
// function which contains the logic of deleting an annotation
func deleteOneAnnotation() {
deleteAnnotationHelper(annotationID)
log.Printf("Annotation with id: %d has been deleted successfully !\n", annotationID)
}
// function which contains the logic of deleting multiple annotations
func deleteAllAnnotations() {
noOfDeletedAnnotation := 0
aData := listAnnotation()
if aData != nil {
log.Printf("DELETING ALL OF THE ALERTS SHOWN ABOVE!!\n")
for _, each := range aData {
deleteAnnotationHelper(each.ID)
if configJSON.Verbose > 2 {
log.Printf("Annotation Deleted for:\n%+v\n", each)
}
noOfDeletedAnnotation++
}
log.Printf("Successfully Deleted %d annotations!!\n", noOfDeletedAnnotation)
} else {
log.Fatalf("Unable to delete with null data!\n")
}
}
// Function running all logics
func run() {
switch action {
case "list":
listAnnotation()
case "create":
createAnnotation()
case "update":
updateAnnotation()
case "delete":
deleteOneAnnotation()
case "deleteall":
deleteAllAnnotations()
default:
listAnnotation()
}
}
// helper function for parsing Configs
func openConfigFile(configFilePath string) {
jsonFile, e := os.Open(configFilePath)
if e != nil {
if configJSON.Verbose > 0 {
log.Printf("Config File not found at %s, error: %s\n", configFilePath, e)
} else {
log.Printf("Config File Missing at %s. Using Defaults\n", configFilePath)
}
return
}
defer jsonFile.Close()
decoder := json.NewDecoder(jsonFile)
err := decoder.Decode(&configJSON)
if err != nil {
log.Printf("Config JSON File can't be loaded, error: %s", err)
return
} else if configJSON.Verbose > 0 {
log.Printf("Load config from %s\n", configFilePath)
}
}
// function for parsing Configs
func parseConfig(verbose int) {
configFilePath = os.Getenv("CONFIG_PATH") // CONFIG_PATH Environment Variable storing config filepath.
defaultConfigFilePath := os.Getenv("HOME") + "/.annotationManagerConfig.json"
// Defaults in case no config file is provided
configJSON.GrafanaBaseURL = "https://monit-grafana.cern.ch"
configJSON.ListAnnotationsAPI = "/api/annotations/"
configJSON.CreateAnnotationAPI = "/api/annotations/"
configJSON.UpdateAnnotationAPI = "/api/annotations/"
configJSON.DeleteAnnotationAPI = "/api/annotations/"
configJSON.FindDashboardAPI = "/api/search"
configJSON.Columns = []string{"ID", "DASID", "TEXT", "TAGS", "STARTS", "ENDS", "DURATION"}
configJSON.Verbose = verbose
configJSON.HTTPTimeout = 5 // 5 seconds timeout for http
if *generateConfig {
config, err := json.MarshalIndent(configJSON, "", " ")
if err != nil {
log.Fatalf("Default Config Value can't be parsed from configJSON struct, error: %s", err)
}
filePath := defaultConfigFilePath
if len(flag.Args()) > 0 {
filePath = flag.Args()[0]
}
err = ioutil.WriteFile(filePath, config, 0644)
if err != nil {
log.Fatalf("Failed to generate Config File, error: %s", err)
}
log.Printf("A new configuration file %s was generated.\n", filePath)
return
}
if configFilePath != "" {
openConfigFile(configFilePath)
} else if defaultConfigFilePath != "" {
log.Printf("$CONFIG_PATH is not set. Using config file at %s\n", defaultConfigFilePath)
openConfigFile(defaultConfigFilePath)
}
// we we were given verbose from command line we should overwrite its value in config
if verbose > 0 {
configJSON.Verbose = verbose
}
if configJSON.Verbose > 0 {
log.SetFlags(log.LstdFlags | log.Lshortfile)
} else {
log.SetFlags(log.LstdFlags)
}
// if we were given the token we will use it
if token != "" {
configJSON.Token = token
}
if configJSON.Verbose > 1 {
log.Printf("Configuration:\n%+v\n", configJSON)
}
}
//
// The following block of code was taken from
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/monit.go#L550
// helper function to parse comma separated tags string
func parseTags(itags string) []string {
var tags []string
for _, tag := range strings.Split(itags, ",") {
tags = append(tags, strings.Trim(tag, " "))
}
return tags
}
//
// The following block of code was taken from (with few changes done)
// https://github.com/dmwm/CMSMonitoring/blob/master/src/go/MONIT/monit.go#L559
// helper function to parse given time-range separated by '-'
func parseTimes(ifCreatingAnnotation bool) []int64 {
var times []int64
// additional line added in the adopted code for date support.
if strings.Contains(trange, "--") {
tms := strings.Split(trange, "--")
startTime, err := time.Parse(time.RFC3339, tms[0])
if err != nil {
log.Fatalf("Unable to parse time, error: %s", err)
}
endTime, err := time.Parse(time.RFC3339, tms[1])
if err != nil {
log.Fatalf("Unable to parse time, error: %s", err)
}
times = append(times, startTime.Unix()*int64(milliSec))
times = append(times, endTime.Unix()*int64(milliSec)) // times should be in milliseconds of Unix since epoch
return times
}
for _, v := range strings.Split(trange, "-") {
v = strings.Trim(v, " ")
var t int64
if v == "now" {
t = time.Now().UTC().Unix()
} else if len(v) == 10 { // unix since epoch
value, err := strconv.Atoi(v)
if err != nil {
log.Fatalf("Unable to parse given time value: %v\n", v)
}
t = int64(value)
} else {
var value string
var offset int64
if strings.HasSuffix(v, "s") || strings.HasSuffix(v, "sec") || strings.HasSuffix(v, "secs") {
value = strings.Split(v, "s")[0]
offset = 1
} else if strings.HasSuffix(v, "m") || strings.HasSuffix(v, "min") || strings.HasSuffix(v, "mins") {
value = strings.Split(v, "m")[0]
offset = 60
} else if strings.HasSuffix(v, "h") || strings.HasSuffix(v, "hr") || strings.HasSuffix(v, "hrs") {
value = strings.Split(v, "h")[0]
offset = 60 * 60
} else if strings.HasSuffix(v, "d") || strings.HasSuffix(v, "day") || strings.HasSuffix(v, "days") {
value = strings.Split(v, "d")[0]
offset = 60 * 60 * 24
} else {
log.Fatalf("Unable to parse given time value: %v\n", v)
}
if v, e := strconv.Atoi(value); e == nil {
if ifCreatingAnnotation { // When we create annotation we think in future time frame while when we list annotation the offset would be in past.
t = time.Now().UTC().Add(time.Duration((int64(v) * offset)) * time.Second).Unix() // FUTURE OFFSET
} else {
t = time.Now().UTC().Add(time.Duration((int64(v) * offset)) * -1 * time.Second).Unix() // PAST OFFSET
}
}
}
if t == 0 {
log.Fatalf("Unable to parse given time value: %v\n", v)
}
times = append(times, t*milliSec) // times should be in milliseconds of Unix since epoch
}
return times
}
func main() {
flag.StringVar(&annotation, "annotation", "", "Annotation text")
flag.StringVar(&token, "token", "", "Authentication token to use (Optional-can be stored in config file)")
flag.StringVar(&tags, "tags", "", "List of tags seperated by comma")
flag.StringVar(&action, "action", "", "Action to be performed. [list, create, delete, deleteall, update] Default: list")
flag.StringVar(&trange, "trange", "", "Time Range for filtering annotations.")
flag.IntVar(&annotationID, "annotationID", 0, "Annotation ID required in making delete api call for deleting an annotation")
generateConfig = flag.Bool("generateConfig", false, "Flag for generating default config")
var verbose int
flag.IntVar(&verbose, "verbose", 0, "verbosity level, can be overwritten in config (Optional-can be stored in config file)")
flag.Usage = func() {
configPath := os.Getenv("HOME") + "/.annotationManagerConfig.json"
fmt.Println("Usage: annotationManager [options]")
flag.PrintDefaults()
fmt.Println("\nEnvironments:")
fmt.Printf("\tCONFIG_PATH:\t Config to use, default (%s)\n", configPath)
fmt.Println("\nExamples:")
fmt.Println("\tALTHOUGH ACTION SHOULD BE (-action=list) FOR GETTING ALL ANNOTATIONS BUT ACTION FLAG CAN BE DROPPED AS LIST IS DEFAULT ACTION!")
fmt.Println("\n\tGet all annotations with specified tags:")
fmt.Println("\t annotationManager -action=list -tags=das,cmsmon-int")
fmt.Println("\n\tUse (d/day/days) for days, (h/hr/hrs) for hours, (m/min/mins) for minutes and (s/sec/secs) for seconds.")
fmt.Println("\n\tGet all annotations with specified tags on time period range (from last 7days to now)")
fmt.Println("\t annotationManager -action=list -tags=das,cmsmon-int -trange=7d-now")
fmt.Println("\n\tGet all annotations with specified tags on time period range (from last 14days to last 7days)")
fmt.Println("\t annotationManager -action=list -tags=das,cmsmon-int -trange=14d-7d")
		fmt.Println("\n\tGet all annotations with specified tags on time period range (using dates separated by '--')")
fmt.Println("\t annotationManager -action=list -tags=das,cmsmon-int -trange=2020-09-01T19:49:50.206Z--2020-09-01T19:49:50.206Z")
fmt.Println("\n\tDelete all annotations once you find appropriate annotations to delete from the above commands.")
fmt.Println("\t annotationManager -action=deleteall -tags=das,cmsmon-int -trange=7d-now")
fmt.Println("\t annotationManager -action=deleteall -tags=das,cmsmon-int -trange=14d-7d")
fmt.Println("\t annotationManager -action=deleteall -tags=das,cmsmon-int -trange=2020-09-01T19:49:50.206Z--2020-09-01T19:49:50.206Z")
fmt.Println("\n\tDelete an annotation with specific annotation id (/api/annotations/:id)")
fmt.Println("\t annotationManager -action=delete -annotationID=761323")
fmt.Println("\n\tCreate an annotation on dashboards with specific tags with time ranging from now to 2m")
fmt.Println("\t annotationManager -action=create -annotation=\"some message\" -tags=das,cmsweb -trange=now-2m")
fmt.Println("\n\tUpdate an annotation with specific annotation id with new annotation and tags. (/api/annotations/:id)")
fmt.Println("\t(TAGS AND ANNOTATION ARE REQUIRED FLAGS OR OLD DATA WILL BE REPLACED WITH NULL VALUES. If you don't want to change annotation or tag, please send the old values.)")
fmt.Println("\t annotationManager -action=update -annotationID=761323 -annotation=\"some new message\" -tags=newTag1,newTag2")
fmt.Println("\n\tUpdate an annotation with specific annotation id with new start and end Time. (/api/annotations/:id)")
fmt.Println("\t(TAGS AND ANNOTATION ARE REQUIRED FLAGS OR OLD DATA WILL BE REPLACED WITH NULL VALUES. If you don't want to change annotation or tag, please send the old values.)")
fmt.Println("\t annotationManager -action=update -annotationID=761323 -annotation=\"old message\" -tags=oldTag1,oldTag2 -trange=now-5m")
}
flag.Parse()
parseConfig(verbose)
if !*generateConfig {
run()
}
}
| [
"\"CONFIG_PATH\"",
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME",
"CONFIG_PATH"
]
| [] | ["HOME", "CONFIG_PATH"] | go | 2 | 0 | |
httpecho.go | package main
import (
"bufio"
"crypto/tls"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"time"
"github.com/ariary/HTTPCustomHouse/pkg/utils"
)
const usage = `Usage of httpecho: echo server accepting malformed HTTP request
-s, --serve serve continuously (default: wait for 1 request)
  -t, --timeout    timeout to close connection in milliseconds. Needed for closing http request. (default: 200)
-d, --dump dump incoming request to a file (default: only print to stdout)
-p, --port listening on specific port (default: 8888)
-v, --verbose display request with special characters
--tls use TLS encryption for communication
  -h, --help       print this help message and exit
`
var verbose bool
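// Illustrative invocations (not part of the original source):
//	httpecho -p 8080 -v        echo a single request on port 8080, showing \r and \n markers
//	httpecho -s -d req.dump    serve continuously, dumping the received request to req.dump
//	httpecho --tls             listen with TLS using ~/.httpecho/server.crt and server.key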
func main() {
//-s
var serve bool
flag.BoolVar(&serve, "serve", false, "Serve continuously (default: wait for 1 request)")
flag.BoolVar(&serve, "s", false, "Serve continuously (default: wait for 1 request)")
// -t
var timeout int
	flag.IntVar(&timeout, "timeout", 200, "Timeout in milliseconds to close connection. Needed for closing http request. (default: 200)")
	flag.IntVar(&timeout, "t", 200, "Timeout in milliseconds to close connection. Needed for closing http request. (default: 200)")
//-d
var dump string
flag.StringVar(&dump, "dump", "", "Dump incoming request to a file (default: only print to stdout)")
flag.StringVar(&dump, "d", "", "Dump incoming request to a file (default: only print to stdout)")
//-p
var port string
flag.StringVar(&port, "port", "8888", "Listening on specific port (default: 8888)")
flag.StringVar(&port, "p", "8888", "Listening on specific port (default: 8888)")
//--tls
var encrypted bool
flag.BoolVar(&encrypted, "tls", false, "Use TLS encryption for communication")
//-v,--verbose
flag.BoolVar(&verbose, "verbose", false, "Display request with special characters")
flag.BoolVar(&verbose, "v", false, "Display request with special characters")
flag.Usage = func() { fmt.Print(usage) }
flag.Parse()
log.SetFlags(log.Lshortfile)
port = ":" + port
var ln net.Listener
var err error
if encrypted {
home := os.Getenv("HOME")
		// Assign to the outer err instead of shadowing it with :=, so the error
		// check after this if/else block also sees TLS listen failures.
		var cer tls.Certificate
		cer, err = tls.LoadX509KeyPair(home+"/.httpecho/server.crt", home+"/.httpecho/server.key")
if err != nil {
log.Println(err)
return
}
config := &tls.Config{Certificates: []tls.Certificate{cer}}
ln, err = tls.Listen("tcp", port, config)
} else {
ln, err = net.Listen("tcp", port)
}
if err != nil {
log.Println(err)
return
}
defer ln.Close()
if serve {
for {
conn, err := ln.Accept()
			if err != nil {
				log.Println(err)
				continue
			}
			conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Millisecond)) //close http request (timeout is in milliseconds)
//fmt.Println("-----------------")
go handleConnection(conn, dump, timeout)
}
} else { //only 1 time
conn, err := ln.Accept()
//conn.SetDeadline(time.Now().Add(time.Duration(timeout) * time.Millisecond)) //close http request
		if err != nil {
			log.Println(err)
			return
		}
handleConnection(conn, dump, timeout)
}
}
func handleConnection(conn net.Conn, dump string, timeout int) {
defer conn.Close()
//HTTP1.1 OK!
n, err := conn.Write([]byte("HTTP/1.1 200 OK\n\n"))
if err != nil {
log.Println(n, err)
return
}
writeFile := false
var request string
if dump != "" {
writeFile = true
f, err := os.Create(dump)
if err != nil {
log.Fatal(err)
}
defer f.Close()
}
r := bufio.NewReader(conn)
go func() { //handle packet without '\n' ending character
time.Sleep(time.Duration(timeout) * time.Millisecond)
residue, err := r.Peek(r.Buffered())
if err != nil && err != io.EOF {
log.Println(err)
}
n, err := conn.Write(residue)
if err != nil && err != io.EOF {
log.Println(n, err)
return
}
conn.Close()
}()
for {
msg, err := r.ReadString('\n')
//print log
if verbose {
msgDebug := strings.ReplaceAll(string(msg), "\r", utils.Green("\\r"))
msgDebug = strings.ReplaceAll(string(msgDebug), "\n", utils.Green("\\n\n"))
fmt.Print(msgDebug)
} else {
fmt.Print(msg)
}
//write to file
if writeFile {
request += msg
}
//handle read error
if err != nil && err != io.EOF {
if !strings.Contains(err.Error(), "timeout") && !strings.Contains(err.Error(), "closed network connection") { //avoid timeout error
log.Println(err)
}
if writeFile { //Write request received in file
f, err := os.Create(dump)
if err != nil {
log.Fatal(err)
}
defer f.Close()
_, err2 := f.WriteString(request)
if err2 != nil {
log.Fatal(err2)
} else {
fmt.Println("dump request in:", dump)
}
}
return
}
n, err := conn.Write([]byte(msg))
if err != nil {
log.Println(n, err)
return
}
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
test/Configure/config-h.py | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Verify creation of a config.h file from a Configure context.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import re
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_exact)
lib = test.Configure_lib
LIB = "LIB" + lib.upper()
test.write('SConstruct', """\
env = Environment()
import os
env.AppendENVPath('PATH', os.environ['PATH'])
conf = Configure(env, config_h = 'config.h')
r1 = conf.CheckFunc('printf')
r2 = conf.CheckFunc('noFunctionCall')
r3 = conf.CheckFunc('memmove')
r4 = conf.CheckType('int')
r5 = conf.CheckType('noType')
r6 = conf.CheckCHeader('stdio.h', '<>')
r7 = conf.CheckCHeader('hopefullynoc-header.h')
r8 = conf.CheckCXXHeader('vector', '<>')
r9 = conf.CheckCXXHeader('hopefullynocxx-header.h')
env = conf.Finish()
conf = Configure(env, config_h = 'config.h')
r10 = conf.CheckLib('%(lib)s', 'sin')
r11 = conf.CheckLib('hopefullynolib', 'sin')
r12 = conf.CheckLibWithHeader('%(lib)s', 'math.h', 'c')
r13 = conf.CheckLibWithHeader('%(lib)s', 'hopefullynoheader2.h', 'c')
r14 = conf.CheckLibWithHeader('hopefullynolib2', 'math.h', 'c')
env = conf.Finish()
""" % locals())
expected_read_str = """\
Checking for C function printf()... yes
Checking for C function noFunctionCall()... no
Checking for C function memmove()... yes
Checking for C type int... yes
Checking for C type noType... no
Checking for C header file stdio.h... yes
Checking for C header file hopefullynoc-header.h... no
Checking for C++ header file vector... yes
Checking for C++ header file hopefullynocxx-header.h... no
Checking for sin() in C library %(lib)s... yes
Checking for sin() in C library hopefullynolib... no
Checking for C library %(lib)s... yes
Checking for C library %(lib)s... no
Checking for C library hopefullynolib2... no
""" % locals()
expected_build_str = """\
scons: Configure: creating config.h
"""
expected_stdout = test.wrap_stdout(build_str=expected_build_str,
read_str=expected_read_str)
expected_config_h = ("""\
#ifndef CONFIG_H_SEEN
#define CONFIG_H_SEEN
/* Define to 1 if the system has the function `printf'. */
#define HAVE_PRINTF 1
/* Define to 1 if the system has the function `noFunctionCall'. */
/* #undef HAVE_NOFUNCTIONCALL */
/* Define to 1 if the system has the function `memmove'. */
#define HAVE_MEMMOVE 1
/* Define to 1 if the system has the type `int'. */
#define HAVE_INT 1
/* Define to 1 if the system has the type `noType'. */
/* #undef HAVE_NOTYPE */
/* Define to 1 if you have the <stdio.h> header file. */
#define HAVE_STDIO_H 1
/* Define to 1 if you have the <hopefullynoc-header.h> header file. */
/* #undef HAVE_HOPEFULLYNOC_HEADER_H */
/* Define to 1 if you have the <vector> header file. */
#define HAVE_VECTOR 1
/* Define to 1 if you have the <hopefullynocxx-header.h> header file. */
/* #undef HAVE_HOPEFULLYNOCXX_HEADER_H */
/* Define to 1 if you have the `%(lib)s' library. */
#define HAVE_%(LIB)s 1
/* Define to 1 if you have the `hopefullynolib' library. */
/* #undef HAVE_LIBHOPEFULLYNOLIB */
/* Define to 1 if you have the `%(lib)s' library. */
#define HAVE_%(LIB)s 1
/* Define to 1 if you have the `%(lib)s' library. */
/* #undef HAVE_%(LIB)s */
/* Define to 1 if you have the `hopefullynolib2' library. */
/* #undef HAVE_LIBHOPEFULLYNOLIB2 */
#endif /* CONFIG_H_SEEN */
""" % locals())
test.run(stdout=expected_stdout)
config_h = test.read(test.workpath('config.h'), mode='r')
if expected_config_h != config_h:
print("Unexpected config.h")
print("Expected: ")
print("---------------------------------------------------------")
print(repr(expected_config_h))
print("---------------------------------------------------------")
print("Found: ")
print("---------------------------------------------------------")
print(repr(config_h))
print("---------------------------------------------------------")
print("Stdio: ")
print("---------------------------------------------------------")
print(test.stdout())
print("---------------------------------------------------------")
test.fail_test()
expected_read_str = re.sub(r'\b((yes)|(no))\b',
r'(cached) \1',
expected_read_str)
expected_build_str = "scons: `.' is up to date.\n"
expected_stdout = test.wrap_stdout(build_str=expected_build_str,
read_str=expected_read_str)
#expected_stdout = expected_stdout.replace("\n", os.linesep)
test.run(stdout=expected_stdout)
config_h = test.read(test.workpath('config.h'),mode='r')
if expected_config_h != config_h:
print("Unexpected config.h")
print("Expected: ")
print("---------------------------------------------------------")
print(repr(expected_config_h))
print("---------------------------------------------------------")
print("Found: ")
print("---------------------------------------------------------")
print(repr(config_h))
print("---------------------------------------------------------")
print("Stdio: ")
print("---------------------------------------------------------")
print(test.stdout())
print("---------------------------------------------------------")
test.fail_test()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
ci.py | #!/usr/bin/env python
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import gzip
import json
import logging
import os
import re
import shutil
import signal
import six
import subprocess
import sys
import time
import yaml
parser = argparse.ArgumentParser(description='CI command')
parser.add_argument('-p', '--pipeline', dest='pipeline', default=None, required=True,
help='Select the pipeline [metrics|logs]')
parser.add_argument('-nv', '--non-voting', dest='non_voting', action='store_true',
help='Set the check as non-voting')
parser.add_argument('-pl', '--print-logs', dest='printlogs', action='store_true',
help='Print containers logs')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Increment verbosity')
parser.add_argument('--CI_BRANCH', dest='ci_branch', default=None, required=False,
help='')
parser.add_argument('--CI_EVENT_TYPE', dest='ci_event_type', default=None, required=False,
help='')
parser.add_argument('--CI_COMMIT_RANGE', dest='ci_commit_range', default=None, required=False,
help='')
args = parser.parse_args()
pipeline = args.pipeline
non_voting = args.non_voting
printlogs = args.printlogs
verbose = args.verbose
ci_branch = args.ci_branch if args.ci_branch else os.environ.get('CI_BRANCH', None)
ci_event_type = args.ci_event_type if args.ci_event_type else os.environ.get('CI_EVENT_TYPE', None)
ci_commit_range = args.ci_commit_range if args.ci_commit_range else os.environ.get('CI_COMMIT_RANGE', None)
logging.basicConfig(format = '%(asctime)s %(levelname)5.5s %(message)s')
LOG=logging.getLogger(__name__)
verbose = args.verbose
LOG.setLevel(logging.DEBUG) if verbose else LOG.setLevel(logging.INFO)
LOG.debug(args)
#TAG_REGEX = re.compile(r'^!(\w+)(?:\s+([\w-]+))?$')
TAG_REGEX = re.compile(r'^!(build|push|readme)(?:\s([\w-]+))$')
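# For example (illustrative), a commit message line "!build monasca-api-python"
# is captured as the tag ('build', 'monasca-api-python').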
METRIC_PIPELINE_MARKER = 'metrics'
LOG_PIPELINE_MARKER = 'logs'
TEMPEST_TIMEOUT = 20 # minutes
BUILD_TIMEOUT = 20 # minutes
INITJOBS_ATTEMPS = 5
METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES = {
'monasca-agent-forwarder': 'agent-forwarder',
'zookeeper': 'zookeeper',
'influxdb': 'influxdb',
'kafka': 'kafka',
'kafka-init': 'kafka-init',
'monasca-thresh': 'thresh',
'monasca-persister-python': 'monasca-persister',
'mysql-init': 'mysql-init',
'monasca-api-python': 'monasca',
'influxdb-init': 'influxdb-init',
'monasca-agent-collector': 'agent-collector',
'grafana': 'grafana',
'monasca-notification': 'monasca-notification',
'grafana-init': 'grafana-init',
'monasca-statsd': 'monasca-statsd'
}
LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES = {
'monasca-log-metrics': 'log-metrics',
'monasca-log-persister': 'log-persister',
'monasca-log-transformer': 'log-transformer',
'elasticsearch': 'elasticsearch',
'elasticsearch-curator': 'elasticsearch-curator',
'elasticsearch-init': 'elasticsearch-init',
'kafka-init': 'kafka-log-init',
'kibana': 'kibana',
'monasca-log-api': 'log-api',
'monasca-log-agent': 'log-agent',
'logspout': 'logspout',
}
METRIC_PIPELINE_INIT_JOBS = ('influxdb-init', 'kafka-init', 'mysql-init', 'grafana-init')
LOG_PIPELINE_INIT_JOBS = ('elasticsearch-init', 'kafka-log-init')
INIT_JOBS = {
METRIC_PIPELINE_MARKER: METRIC_PIPELINE_INIT_JOBS,
LOG_PIPELINE_MARKER: LOG_PIPELINE_INIT_JOBS
}
METRIC_PIPELINE_SERVICES = METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES.values()
"""Explicit list of services for docker compose
to launch for metrics pipeline"""
LOG_PIPELINE_SERVICES = LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES.values()
"""Explicit list of services for docker compose
to launch for logs pipeline"""
PIPELINE_TO_YAML_COMPOSE = {
METRIC_PIPELINE_MARKER: 'docker-compose-metric.yml',
LOG_PIPELINE_MARKER: 'docker-compose-log.yml'
}
CI_COMPOSE_FILE = 'ci-compose.yml'
LOG_DIR = 'monasca-logs/' + \
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
BUILD_LOG_DIR = LOG_DIR + '/build/'
RUN_LOG_DIR = LOG_DIR + '/run/'
LOG_DIRS = [LOG_DIR, BUILD_LOG_DIR, RUN_LOG_DIR]
class SubprocessException(Exception):
pass
class FileReadException(Exception):
pass
class FileWriteException(Exception):
pass
class InitJobFailedException(Exception):
pass
class TempestTestFailedException(Exception):
pass
class SmokeTestFailedException(Exception):
pass
class BuildFailedException(Exception):
pass
def print_logs():
for log_dir in LOG_DIRS:
for file_name in os.listdir(log_dir):
file_path = log_dir + file_name
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
log_contents = f.read()
LOG.info("#" * 100)
LOG.info("###### Container Logs from {0}".format(file_name))
LOG.info("#" * 100)
LOG.info(log_contents)
def set_log_dir():
try:
LOG.debug('Working directory: {0}'.format(os.getcwd()))
if not os.path.exists(LOG_DIR):
LOG.debug('Creating LOG_DIR: {0}'.format(LOG_DIR))
os.makedirs(LOG_DIR)
if not os.path.exists(BUILD_LOG_DIR):
LOG.debug('Creating BUILD_LOG_DIR: {0}'.format(BUILD_LOG_DIR))
os.makedirs(BUILD_LOG_DIR)
if not os.path.exists(RUN_LOG_DIR):
LOG.debug('Creating RUN_LOG_DIR: {0}'.format(RUN_LOG_DIR))
os.makedirs(RUN_LOG_DIR)
except Exception as e:
LOG.error('Unexpected error {0}'.format(e))
def get_changed_files():
if not ci_commit_range:
return []
LOG.debug('Execute: git diff --name-only {0}'.format(ci_commit_range))
p = subprocess.Popen([
'git', 'diff', '--name-only', ci_commit_range
], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if six.PY3:
stdout = stdout.decode('utf-8')
if p.returncode != 0:
raise SubprocessException('git returned non-zero exit code')
return [line.strip() for line in stdout.splitlines()]
def get_message_tags():
if not ci_commit_range:
return []
LOG.debug('Execute: git log --pretty=%B -1 {0}'.format(ci_commit_range))
p = subprocess.Popen([
'git', 'log', '--pretty=%B', '-1', ci_commit_range
], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
if six.PY3:
stdout = stdout.decode('utf-8')
if p.returncode != 0:
raise SubprocessException('git returned non-zero exit code')
tags = []
for line in stdout.splitlines():
line = line.strip()
m = TAG_REGEX.match(line)
if m:
tags.append(m.groups())
return tags
def get_dirty_modules(dirty_files):
dirty = set()
for f in dirty_files:
if os.path.sep in f:
mod, _ = f.split(os.path.sep, 1)
if not os.path.exists(os.path.join(mod, 'Dockerfile')):
continue
if not os.path.exists(os.path.join(mod, 'build.yml')):
continue
dirty.add(mod)
# if len(dirty) > 5:
    #     LOG.error('Max number of changed modules exceeded.',
# 'Please break up the patch set until a maximum of 5 modules are changed.')
# sys.exit(1)
return list(dirty)
def get_dirty_for_module(files, module=None):
ret = []
for f in files:
if os.path.sep in f:
mod, rel_path = f.split(os.path.sep, 1)
if mod == module:
ret.append(rel_path)
else:
# top-level file, no module
if module is None:
ret.append(f)
return ret
def run_build(modules):
log_dir = BUILD_LOG_DIR
build_args = ['dbuild', '-sd', '--build-log-dir', log_dir, 'build', 'all', '+', ':ci-cd'] + modules
LOG.debug('Executing build command: {0}\n'.format(' '.join(build_args)))
p = subprocess.Popen(build_args, stdout=subprocess.PIPE, universal_newlines=True)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
start_time = datetime.datetime.now()
while True:
output = p.stdout.readline()
print(" " + output.strip())
return_code = p.poll()
if return_code is not None:
LOG.debug('Return code: {0}'.format(return_code))
if return_code != 0:
LOG.error('BUILD FAILED !!!')
raise BuildFailedException('Build failed')
if return_code == 0:
LOG.info('Build succeeded')
# Process has finished, read rest of the output
for output in p.stdout.readlines():
LOG.debug(output.strip())
break
end_time = start_time + datetime.timedelta(minutes=BUILD_TIMEOUT)
if datetime.datetime.now() >= end_time:
LOG.error('BUILD TIMEOUT AFTER {0} MIN !!!'.format(BUILD_TIMEOUT))
p.kill()
raise BuildFailedException('Build timeout')
def run_push(modules, pipeline):
if ci_branch != 'master':
LOG.warn('Push images to Docker Hub is only allowed from master branch')
return
if pipeline == 'logs':
LOG.info('Images are already pushed by metrics-pipeline, skipping!')
return
username = os.environ.get('DOCKER_HUB_USERNAME', None)
password = os.environ.get('DOCKER_HUB_PASSWORD', None)
if not password:
LOG.info('Not DOCKER_HUB_PASSWORD, skipping!')
LOG.info('Not pushing: {0}'.format(modules))
return
if username and password:
LOG.info('Logging into docker registry...')
login = subprocess.Popen([
'docker', 'login',
'-u', username,
'--password-stdin'
], stdin=subprocess.PIPE)
        login.communicate(password.encode('utf-8') if six.PY3 else password)
if login.returncode != 0:
LOG.error('Docker registry login failed, cannot push!')
sys.exit(1)
log_dir = BUILD_LOG_DIR
push_args = ['dbuild', '-sd', '--build-log-dir', log_dir, 'build', 'push', 'all'] + modules
LOG.debug('Executing push command: {0}\n'.format(' '.join(push_args)))
p = subprocess.Popen(push_args)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
if p.wait() != 0:
LOG.error('PUSH FAILED !!!')
sys.exit(p.returncode)
def run_readme(modules):
if ci_branch != 'master':
LOG.warn('Update readme to Docker Hub is only allowed from master branch')
return
log_dir = BUILD_LOG_DIR
readme_args = ['dbuild', '-sd', '--build-log-dir', log_dir, 'readme'] + modules
LOG.debug('Executing readme command: {0}\n'.format(' '.join(readme_args)))
p = subprocess.Popen(readme_args)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
if p.wait() != 0:
LOG.error('README FAILED !!!')
sys.exit(p.returncode)
def update_docker_compose(modules, pipeline):
compose_dict = load_yml(PIPELINE_TO_YAML_COMPOSE['metrics'])
services_to_changes = METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES.copy()
if pipeline == 'logs':
LOG.info('\'logs\' pipeline is enabled, including in CI run')
log_compose = load_yml(PIPELINE_TO_YAML_COMPOSE['logs'])
compose_dict['services'].update(log_compose['services'])
services_to_changes.update(
LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES.copy()
)
if modules:
compose_services = compose_dict['services']
for module in modules:
# Not all modules are included in docker compose
if module not in services_to_changes:
continue
service_name = services_to_changes[module]
services_to_update = service_name.split(',')
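            # e.g. an image such as "monasca/agent-forwarder:master" (name shown
            # for illustration) is retagged below to "monasca/agent-forwarder:ci-cd".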
for service in services_to_update:
image = compose_services[service]['image']
image = image.split(':')[0]
image += ":ci-cd"
compose_services[service]['image'] = image
# Update compose version
compose_dict['version'] = '2'
LOG.debug("Displaying {0}\n\n{1}".format(CI_COMPOSE_FILE, yaml.dump(compose_dict, default_flow_style=False)))
try:
with open(CI_COMPOSE_FILE, 'w') as docker_compose:
yaml.dump(compose_dict, docker_compose, default_flow_style=False)
except:
raise FileWriteException(
'Error writing CI dictionary to {0}'.format(CI_COMPOSE_FILE)
)
def load_yml(yml_path):
try:
with open(yml_path) as compose_file:
compose_dict = yaml.safe_load(compose_file)
return compose_dict
except:
raise FileReadException('Failed to read {0}'.format(yml_path))
def handle_pull_request(files, modules, tags, pipeline):
modules_to_build = modules[:]
for tag, arg in tags:
if tag in ('build', 'push'):
if arg is None:
# arg-less doesn't make sense for PRs since any changes to a
# module already result in a rebuild
continue
modules_to_build.append(arg)
# note(kornicameister) check if module belong to the pipeline
# if not, there's no point of building that as it will be build
# for the given pipeline
pipeline_modules = pick_modules_for_pipeline(modules_to_build, pipeline)
if pipeline_modules:
run_build(pipeline_modules)
else:
LOG.info('No modules to build.')
update_docker_compose(pipeline_modules, pipeline)
run_docker_keystone()
run_docker_compose(pipeline)
wait_for_init_jobs(pipeline)
LOG.info('Waiting for containers to be ready 1 min...')
time.sleep(60)
output_docker_ps()
cool_test_mapper = {
'smoke': {
METRIC_PIPELINE_MARKER: run_smoke_tests_metrics,
LOG_PIPELINE_MARKER: lambda : LOG.info('No smoke tests for logs')
},
'tempest': {
METRIC_PIPELINE_MARKER: run_tempest_tests_metrics,
LOG_PIPELINE_MARKER: lambda : LOG.info('No tempest tests for logs')
}
}
cool_test_mapper['smoke'][pipeline]()
cool_test_mapper['tempest'][pipeline]()
def pick_modules_for_pipeline(modules, pipeline):
if not modules:
return []
modules_for_pipeline = {
LOG_PIPELINE_MARKER: LOGS_PIPELINE_MODULE_TO_COMPOSE_SERVICES.keys(),
METRIC_PIPELINE_MARKER: METRIC_PIPELINE_MODULE_TO_COMPOSE_SERVICES.keys()
}
pipeline_modules = modules_for_pipeline[pipeline]
# some of the modules are not used in pipelines, but should be
# taken into consideration during the build
other_modules = [
'storm'
]
LOG.info('Modules to build: {0}'.format(modules))
LOG.info('Modules to pull: {0}'.format(pipeline_modules))
# iterate over copy of all modules that are planned for the build
# if one of them does not belong to active pipeline
# remove from current run
for m in modules[::]:
if m not in pipeline_modules:
if m in other_modules:
                LOG.info('Module {0} is not part of either pipeline, but it will be built anyway'.format(m))
continue
LOG.info('Module {0} does not belong to {1}, skipping'.format(m, pipeline))
modules.remove(m)
return modules
def get_current_init_status(docker_id):
init_status = ['docker', 'inspect', '-f', '{{ .State.ExitCode }}:{{ .State.Status }}', docker_id]
p = subprocess.Popen(init_status, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
output, err = p.communicate()
if six.PY3:
output = output.decode('utf-8')
if p.wait() != 0:
LOG.info('getting current status failed')
return False
status_output = output.rstrip()
exit_code, status = status_output.split(":", 1)
LOG.debug('Status from init-container {0}, exit_code {1}, status {2}'.format(docker_id, exit_code, status))
return exit_code == "0" and status == "exited"
def output_docker_logs():
LOG.info("Saving container logs at {0}".format(LOG_DIR))
docker_names = ['docker', 'ps', '-a', '--format', '"{{.Names}}"']
LOG.debug('Executing: {0}'.format(' '.join(docker_names)))
p = subprocess.Popen(docker_names, stdout=subprocess.PIPE)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
output, err = p.communicate()
if six.PY3:
output = output.decode('utf-8')
names = output.replace('"', '').split('\n')
for name in names:
if not name:
continue
docker_logs = ['docker', 'logs', '-t', name]
log_name = RUN_LOG_DIR + 'docker_log_' + name + '.log'
LOG.debug('Executing: {0}'.format(' '.join(docker_logs)))
with open(log_name, 'w') as out:
p = subprocess.Popen(docker_logs, stdout=out,
stderr=subprocess.STDOUT)
signal.signal(signal.SIGINT, kill)
if p.wait() != 0:
LOG.error('Error running docker log for {0}'.format(name))
def addtab(s):
white = " " * 2
return white + white.join(s.splitlines(1))
def output_docker_ps():
docker_ps = ['docker', 'ps', '-a']
LOG.debug('Executing: {0}'.format(' '.join(docker_ps)))
p = subprocess.Popen(docker_ps, stdout=subprocess.PIPE)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
output, err = p.communicate()
if six.PY3:
output = output.decode('utf-8')
LOG.info("Displaying all docker containers\n" + addtab(output))
def output_compose_details(pipeline):
if pipeline == 'metrics':
services = METRIC_PIPELINE_SERVICES
else:
services = LOG_PIPELINE_SERVICES
if six.PY3:
services = list(services)
LOG.info('All services that are about to start: {0}'.format(', '.join(services)))
def get_docker_id(init_job):
docker_id = ['docker-compose',
'-f', CI_COMPOSE_FILE,
'ps',
'-q', init_job]
p = subprocess.Popen(docker_id, stdout=subprocess.PIPE)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
output, err = p.communicate()
if six.PY3:
output = output.decode('utf-8')
if p.wait() != 0:
LOG.error('error getting docker id')
return ""
return output.rstrip()
def wait_for_init_jobs(pipeline):
LOG.info('Waiting 20 sec for init jobs to finish...')
init_status_dict = {job: False for job in INIT_JOBS[pipeline]}
docker_id_dict = {job: "" for job in INIT_JOBS[pipeline]}
amount_succeeded = 0
for attempt in range(INITJOBS_ATTEMPS):
time.sleep(20)
amount_succeeded = 0
for init_job, status in init_status_dict.items():
if docker_id_dict[init_job] == "":
docker_id_dict[init_job] = get_docker_id(init_job)
if status:
amount_succeeded += 1
else:
updated_status = get_current_init_status(docker_id_dict[init_job])
init_status_dict[init_job] = updated_status
if updated_status:
amount_succeeded += 1
if amount_succeeded == len(docker_id_dict):
LOG.info("All init-jobs finished successfully !!!")
break
else:
LOG.info("Not all init jobs have finished yet, waiting another 20 sec. " +
"Try " + str(attempt + 1) + " of {0}...".format(INITJOBS_ATTEMPS))
if amount_succeeded != len(docker_id_dict):
LOG.error("INIT-JOBS FAILED !!!")
raise InitJobFailedException("Not all init-containers finished with exit code 0")
def handle_push(files, modules, tags, pipeline):
modules_to_push = []
modules_to_readme = []
force_push = False
force_readme = False
for tag, arg in tags:
if tag in ('build', 'push'):
if arg is None:
force_push = True
else:
modules_to_push.append(arg)
elif tag == 'readme':
if arg is None:
force_readme = True
else:
modules_to_readme.append(arg)
for module in modules:
dirty = get_dirty_for_module(files, module)
if force_push or 'build.yml' in dirty:
modules_to_push.append(module)
if force_readme or 'README.md' in dirty:
modules_to_readme.append(module)
if modules_to_push:
run_push(modules_to_push, pipeline)
else:
LOG.info('No modules to push.')
if modules_to_readme:
run_readme(modules_to_readme)
else:
LOG.info('No READMEs to update.')
def run_docker_keystone():
LOG.info('Running docker compose for Keystone')
username = os.environ.get('DOCKER_HUB_USERNAME', None)
password = os.environ.get('DOCKER_HUB_PASSWORD', None)
if username and password:
LOG.info('Logging into docker registry...')
login = subprocess.Popen([
'docker', 'login',
'-u', username,
'--password-stdin'
], stdin=subprocess.PIPE)
        login.communicate(password.encode('utf-8') if six.PY3 else password)
if login.returncode != 0:
LOG.error('Docker registry login failed!')
sys.exit(1)
docker_compose_dev_command = ['docker-compose',
'-f', 'docker-compose-dev.yml',
'up', '-d']
LOG.debug('Executing: {0}'.format(' '.join(docker_compose_dev_command)))
with open(RUN_LOG_DIR + 'docker_compose_dev.log', 'w') as out:
p = subprocess.Popen(docker_compose_dev_command, stdout=out)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
if p.wait() != 0:
LOG.error('DOCKER COMPOSE FAILED !!!')
sys.exit(p.returncode)
# print out running images for debugging purposes
LOG.info('docker compose dev succeeded')
output_docker_ps()
def run_docker_compose(pipeline):
LOG.info('Running docker compose')
output_compose_details(pipeline)
username = os.environ.get('DOCKER_HUB_USERNAME', None)
password = os.environ.get('DOCKER_HUB_PASSWORD', None)
if username and password:
LOG.info('Logging into docker registry...')
login = subprocess.Popen([
'docker', 'login',
'-u', username,
'--password-stdin'
], stdin=subprocess.PIPE)
        login.communicate(password.encode('utf-8') if six.PY3 else password)
if login.returncode != 0:
LOG.error('Docker registry login failed!')
sys.exit(1)
if pipeline == 'metrics':
services = METRIC_PIPELINE_SERVICES
else:
services = LOG_PIPELINE_SERVICES
if six.PY3:
services = list(services)
docker_compose_command = ['docker-compose',
'-f', CI_COMPOSE_FILE,
'up', '-d'] + services
LOG.debug('Executing: {0}'.format(' '.join(docker_compose_command)))
with open(RUN_LOG_DIR + 'docker_compose.log', 'w') as out:
p = subprocess.Popen(docker_compose_command, stdout=out)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
if p.wait() != 0:
LOG.error('DOCKER COMPOSE FAILED !!!')
sys.exit(p.returncode)
# print out running images for debugging purposes
LOG.info('docker compose succeeded')
output_docker_ps()
def run_smoke_tests_metrics():
LOG.info('Running Smoke-tests')
#TODO: branch as variable... use TRAVIS_PULL_REQUEST_BRANCH ?
smoke_tests_run = ['docker', 'run',
'-e', 'OS_AUTH_URL=http://keystone:35357/v3',
'-e', 'MONASCA_URL=http://monasca:8070',
'-e', 'METRIC_NAME_TO_CHECK=monasca.thread_count',
'--net', 'monasca-docker_default',
'-p', '0.0.0.0:8080:8080',
'--name', 'monasca-docker-smoke',
'fest/smoke-tests:pike-latest']
LOG.debug('Executing: {0}'.format(' '.join(smoke_tests_run)))
p = subprocess.Popen(smoke_tests_run)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
if p.wait() != 0:
LOG.error('SMOKE-TEST FAILED !!!')
raise SmokeTestFailedException("Smoke Tests Failed")
def run_tempest_tests_metrics():
LOG.info('Running Tempest-tests')
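    # NOTE: tempest tests are currently disabled; the early return below makes
    # the rest of this function intentionally unreachable.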
return
tempest_tests_run = ['docker', 'run',
'-e', 'KEYSTONE_IDENTITY_URI=http://keystone:35357',
'-e', 'OS_AUTH_URL=http://keystone:35357/v3',
'-e', 'MONASCA_WAIT_FOR_API=true',
'-e', 'STAY_ALIVE_ON_FAILURE=false',
'--net', 'monasca-docker_default',
'--name', 'monasca-docker-tempest',
'chaconpiza/tempest-tests:test']
LOG.debug('Executing: {0}'.format(' '.join(tempest_tests_run)))
p = subprocess.Popen(tempest_tests_run, stdout=subprocess.PIPE, universal_newlines=True)
def kill(signal, frame):
p.kill()
LOG.warn('Finished by Ctrl-c!')
sys.exit(2)
signal.signal(signal.SIGINT, kill)
start_time = datetime.datetime.now()
while True:
output = p.stdout.readline()
LOG.info(output.strip())
return_code = p.poll()
if return_code is not None:
LOG.debug('RETURN CODE: {0}'.format(return_code))
if return_code != 0:
LOG.error('TEMPEST-TEST FAILED !!!')
raise TempestTestFailedException("Tempest Tests finished but some tests failed")
if return_code == 0:
LOG.info('Tempest-tests succeeded')
# Process has finished, read rest of the output
for output in p.stdout.readlines():
LOG.debug(output.strip())
break
end_time = start_time + datetime.timedelta(minutes=TEMPEST_TIMEOUT)
if datetime.datetime.now() >= end_time:
LOG.error('TEMPEST-TEST TIMEOUT AFTER {0} MIN !!!'.format(TEMPEST_TIMEOUT))
p.kill()
raise TempestTestFailedException("Tempest Tests failed by timeout")
def handle_other(files, modules, tags, pipeline):
LOG.error('Unsupported event type: {0}, nothing to do.'.format(ci_event_type))
exit(2)
def print_env():
env_vars_used = ['pipeline={0}'.format(pipeline),
'non_voting={0}'.format(non_voting),
'printlogs={0}'.format(printlogs),
'verbose={0}'.format(verbose),
'CI_BRANCH="{0}"'.format(ci_branch),
'CI_EVENT_TYPE="{0}"'.format(ci_event_type),
'CI_COMMIT_RANGE="{0}"'.format(ci_commit_range)
]
LOG.info('Variables used in CI:\n {0}'.format('\n '.join(env_vars_used)))
def main():
try:
        LOG.info("DOCKER_HUB_USERNAME: {0}".format(os.environ.get('DOCKER_HUB_USERNAME', None)))
        LOG.info("DOCKER_HUB_PASSWORD is set: {0}".format(os.environ.get('DOCKER_HUB_PASSWORD') is not None))
print_env()
if not pipeline or pipeline not in ('logs', 'metrics'):
LOG.error('UNKNOWN PIPELINE: {0} !!! Choose (metrics|logs)'.format(pipeline))
exit(2)
set_log_dir()
files = get_changed_files()
LOG.info('Changed files: {0}'.format(files))
modules = get_dirty_modules(files)
LOG.info('Dirty modules: {0}'.format(modules))
tags = get_message_tags()
LOG.info('Message tags: {0}'.format(tags))
if tags:
LOG.debug('Tags detected:')
for tag in tags:
                LOG.debug('  {0}'.format(tag))
else:
LOG.info('No tags detected.')
func = {
'cron': handle_pull_request,
'pull_request': handle_pull_request,
'push': handle_push
}.get(ci_event_type, handle_other)
func(files, modules, tags, pipeline)
except (FileReadException, FileWriteException, SubprocessException) as ex:
LOG.error("FAILED !!! RCA: {0}".format(ex))
exit(1)
except (InitJobFailedException, SmokeTestFailedException,
TempestTestFailedException) as ex:
if non_voting:
LOG.warn('{0} is non voting, skipping failure'.format(pipeline))
else:
LOG.error("FAILED !!! RCA: {0}".format(ex))
exit(1)
except Exception as ex:
LOG.error("UNKNOWN EXCEPTION !!! RCA: {0}".format(ex))
exit(1)
finally:
output_docker_ps()
output_docker_logs()
if printlogs:
print_logs()
if __name__ == '__main__':
main()
| []
| []
| [
"DOCKER_HUB_USERNAME",
"CI_BRANCH",
"DOCKER_HUB_PASSWORD",
"CI_COMMIT_RANGE",
"CI_EVENT_TYPE"
]
| [] | ["DOCKER_HUB_USERNAME", "CI_BRANCH", "DOCKER_HUB_PASSWORD", "CI_COMMIT_RANGE", "CI_EVENT_TYPE"] | python | 5 | 0 | |
appengine_module/gae_ts_mon/test/instrument_webapp2_test.py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import mock
import webapp2
from .test_support import test_case
from google.appengine.api.runtime import runtime
from infra_libs.ts_mon import config
from infra_libs.ts_mon import instrument_webapp2
from infra_libs.ts_mon.common import http_metrics
from infra_libs.ts_mon.common import interface
from infra_libs.ts_mon.common import targets
class InstrumentWebapp2Test(test_case.TestCase):
def setUp(self):
super(InstrumentWebapp2Test, self).setUp()
config.reset_for_unittest()
self.next_time = 42.0
self.time_increment = 3.0
def fake_time(self):
ret = self.next_time
self.next_time += self.time_increment
return ret
def test_instrument_webapp2_invoked_multiple_times(self):
class Handler(webapp2.RequestHandler):
def get(self):
self.response.write('success!')
app = webapp2.WSGIApplication([('/', Handler)])
self.assertFalse(instrument_webapp2._is_instrumented(app))
instrument_webapp2.instrument(app, time_fn=self.fake_time)
self.assertTrue(instrument_webapp2._is_instrumented(app))
instrument_webapp2.instrument(app, time_fn=self.fake_time)
self.assertTrue(instrument_webapp2._is_instrumented(app))
# trigger a test page handler and check if the value of the HTTP metric
# didn't increase twice.
app.get_response('/')
fields = {'name': '^/$', 'status': 200, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_success(self):
class Handler(webapp2.RequestHandler):
def get(self):
self.response.write('success!')
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app, time_fn=self.fake_time)
app.get_response('/')
fields = {'name': '^/$', 'status': 200, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
self.assertLessEqual(3000, http_metrics.server_durations.get(fields).sum)
self.assertEqual(
len('success!'),
http_metrics.server_response_bytes.get(fields).sum)
def test_abort(self):
class Handler(webapp2.RequestHandler):
def get(self):
self.abort(417)
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/')
fields = {'name': '^/$', 'status': 417, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_set_status(self):
class Handler(webapp2.RequestHandler):
def get(self):
self.response.set_status(418)
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/')
fields = {'name': '^/$', 'status': 418, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_exception(self):
class Handler(webapp2.RequestHandler):
def get(self):
raise ValueError
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/')
fields = {'name': '^/$', 'status': 500, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_http_exception(self):
class Handler(webapp2.RequestHandler):
def get(self):
raise webapp2.exc.HTTPExpectationFailed()
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/')
fields = {'name': '^/$', 'status': 417, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_return_response(self):
class Handler(webapp2.RequestHandler):
def get(self):
ret = webapp2.Response()
ret.set_status(418)
return ret
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/')
fields = {'name': '^/$', 'status': 418, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_robot(self):
class Handler(webapp2.RequestHandler):
def get(self):
ret = webapp2.Response()
ret.set_status(200)
return ret
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/', user_agent='GoogleBot')
fields = {'name': '^/$', 'status': 200, 'is_robot': True}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_missing_response_content_length(self):
class Handler(webapp2.RequestHandler):
def get(self):
del self.response.headers['content-length']
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/')
fields = {'name': '^/$', 'status': 200, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
self.assertIsNone(http_metrics.server_response_bytes.get(fields))
def test_not_found(self):
app = webapp2.WSGIApplication([])
instrument_webapp2.instrument(app)
app.get_response('/notfound')
fields = {'name': '', 'status': 404, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
def test_post(self):
class Handler(webapp2.RequestHandler):
def post(self):
pass
app = webapp2.WSGIApplication([('/', Handler)])
instrument_webapp2.instrument(app)
app.get_response('/', POST='foo')
fields = {'name': '^/$', 'status': 200, 'is_robot': False}
self.assertEqual(1, http_metrics.server_response_status.get(fields))
self.assertEqual(
len('foo'),
http_metrics.server_request_bytes.get(fields).sum)
class TaskNumAssignerHandlerTest(test_case.TestCase):
def setUp(self):
super(TaskNumAssignerHandlerTest, self).setUp()
config.reset_for_unittest()
target = targets.TaskTarget('test_service', 'test_job', 'test_region',
'test_host')
self.mock_state = interface.State(target=target)
# Workaround the fact that 'system' module is not mocked.
class _memory_usage(object):
def current(self):
return 10.0
env = os.environ.copy()
env['SERVER_SOFTWARE'] = 'PRODUCTION'
self.mock(runtime, 'memory_usage', _memory_usage)
self.mock(os, 'environ', env)
self.app = instrument_webapp2.create_app()
instrument_webapp2.instrument(self.app)
def tearDown(self):
mock.patch.stopall()
super(TaskNumAssignerHandlerTest, self).tearDown()
def test_success(self):
response = self.app.get_response(
'/internal/cron/ts_mon/send', headers=[('X-Appengine-Cron', 'true')])
self.assertEqual(response.status_int, 200)
def test_unauthorized(self):
response = self.app.get_response('/internal/cron/ts_mon/send')
self.assertEqual(response.status_int, 403)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
youtube_dl/YoutubeDL.py | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random
from string import ascii_letters
from .compat import (
compat_basestring,
compat_cookiejar,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DownloadError,
encode_compat_str,
encodeFilename,
error_to_compat_str,
expand_path,
ExtractorError,
format_bytes,
formatSeconds,
GeoRestrictedError,
int_or_none,
ISO3166Utils,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
orderedSet,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
register_socks_protocols,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
str_or_none,
subtitles_filename,
UnavailableVideoError,
url_basename,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieJar,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
from .extractor.openload import PhantomJSwrapper
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegPostProcessor,
get_postprocessor,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), it
    has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL process the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forceid: Force printing ID.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code. See options.py for more information.
outtmpl: Template for output names.
outtmpl_na_placeholder: Placeholder for unavailable meta fields.
restrictfilenames: Do not allow "&" and spaces in file names
ignoreerrors: Do not stop on download errors.
force_generic_extractor: Force downloader to use the generic extractor
nooverwrites: Prevent overwriting files.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
write_all_thumbnails: Write all thumbnail formats to files
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
cookiefile: File name where cookies should be read from and dumped to.
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites.
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
                       support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
youtube_dl/postprocessor/__init__.py for a list.
as well as any further keyword arguments for the
postprocessor.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
merge_output_format: Extension to use when merging formats.
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
youtube-dl servers for debugging.
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header
geo_bypass_ip_block:
IP range in CIDR notation that will be used similarly to
geo_bypass_country
The following options determine which downloader is picked:
external_downloader: Executable of the external downloader to call.
None or unset for standard (built-in) downloader.
hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
if True, otherwise use ffmpeg/avconv if False, otherwise
use downloader suggested by extractor if None.
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see youtube_dl/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle,
xattr_set_filesize, external_downloader_args, hls_use_mpegts,
http_chunk_size.
The following options are used by the post processors:
prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
otherwise prefer ffmpeg.
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
to the binary or its containing directory.
postprocessor_args: A list of additional command-line arguments for the
postprocessor.
The following options are used by the Youtube extractor:
youtube_include_dash_manifest: If True (default), DASH manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about DASH.
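    A minimal usage sketch (illustrative; the URL and option values are
    placeholders, not defaults):
        ydl_opts = {'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])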
"""
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'upload_year', 'upload_month', 'upload_day',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_playlist_level = 0
_playlist_urls = set()
_screen_file = None
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
self._ies = []
self._ies_instances = {}
self._pps = []
self._progress_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = {
# Default parameters
'nocheckcertificate': False,
}
self.params.update(params)
self.cache = Cache(self)
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning(
'%s is deprecated. Use %s instead.' % (option, suggestion))
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
if params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
if width is None:
width_args = []
else:
width_args = ['-w', str(width)]
sp_kwargs = dict(
stdin=subprocess.PIPE,
stdout=slave,
stderr=self._err_file)
try:
self._output_process = subprocess.Popen(
['bidiv'] + width_args, **sp_kwargs
)
except OSError:
self._output_process = subprocess.Popen(
['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if (sys.platform != 'win32'
and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not params.get('restrictfilenames', False)):
# Unicode filesystem API will throw errors (#1474, #13027)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
if isinstance(params.get('outtmpl'), bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
self._setup_opener()
if auto_init:
self.print_debug_header()
self.add_default_info_extractors()
for pp_def_raw in self.params.get('postprocessors', []):
pp_class = get_postprocessor(pp_def_raw['key'])
pp_def = dict(pp_def_raw)
del pp_def['key']
pp = pp_class(self, **compat_kwargs(pp_def))
self.add_post_processor(pp)
for ph in self.params.get('progress_hooks', []):
self.add_progress_hook(ph)
register_socks_protocols()
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
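        # e.g. 'youtube-dl -wNyEUrxzFU' (hypothetical id) would be parsed as
        # options; the hint below suggests 'youtube-dl -- -wNyEUrxzFU' instead.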
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['youtube-dl']
+ [a for i, a in enumerate(argv) if i not in idxs]
+ ['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
if not isinstance(ie, type):
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key, it will try to get one from
the _ies list, if there's no instance it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
def add_progress_hook(self, ph):
"""Add the progress hook (currently only for the file downloader)"""
self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode."""
return self.to_stdout(message, skip_eol, check_quiet=True)
def _write_string(self, s, out=None):
write_string(s, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, check_quiet=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not check_quiet or not self.params.get('quiet', False):
message = self._bidi_workaround(message)
terminator = ['\n', ''][skip_eol]
output = message + terminator
self._write_string(output, self._screen_file)
def to_stderr(self, message):
"""Print message to stderr."""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
message = self._bidi_workaround(message)
output = message + '\n'
self._write_string(output, self._err_file)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
if compat_os_name == 'nt':
if ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate', False):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if self.params.get('simulate', False):
return
if compat_os_name != 'nt' and 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
download errors or not, this method may throw an exception or
not when errors are found, after printing the message.
tb, if given, is additional traceback information.
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
self.to_stderr(tb)
if not self.params.get('ignoreerrors', False):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def report_warning(self, message):
'''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def prepare_filename(self, info_dict):
"""Generate the output filename."""
try:
template_dict = dict(info_dict)
template_dict['epoch'] = int(time.time())
autonumber_size = self.params.get('autonumber_size')
if autonumber_size is None:
autonumber_size = 5
template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
elif template_dict.get('height'):
template_dict['resolution'] = '%sp' % template_dict['height']
elif template_dict.get('width'):
template_dict['resolution'] = '%dx?' % template_dict['width']
sanitize = lambda k, v: sanitize_filename(
compat_str(v),
restricted=self.params.get('restrictfilenames'),
is_id=(k == 'id' or k.endswith('_id')))
template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
for k, v in template_dict.items()
if v is not None and not isinstance(v, (list, tuple, dict)))
template_dict = collections.defaultdict(lambda: self.params.get('outtmpl_na_placeholder', 'NA'), template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
# For fields playlist_index and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(template_dict['n_entries'])),
'autonumber': autonumber_size,
}
FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
if mobj:
outtmpl = re.sub(
FIELD_SIZE_COMPAT_RE,
r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
outtmpl)
# Missing numeric fields used together with integer presentation types
# in format specification will break the argument substitution since
# string NA placeholder is returned for missing fields. We will patch
# output template for missing fields to meet string presentation type.
for numeric_field in self._NUMERIC_FIELDS:
if numeric_field not in template_dict:
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
FORMAT_RE = r'''(?x)
(?<!%)
%
\({0}\) # mapping key
(?:[#0\-+ ]+)? # conversion flags (optional)
(?:\d+)? # minimum field width (optional)
(?:\.\d+)? # precision (optional)
[hlL]? # length modifier (optional)
[diouxXeEfFgGcrs%] # conversion type
'''
outtmpl = re.sub(
FORMAT_RE.format(numeric_field),
r'%({0})s'.format(numeric_field), outtmpl)
            # expand_path translates '%%' into '%' and '$$' into '$', which is
            # not what we want here since we need to keep '%%' intact for the
            # template dict substitution step. Work around this with a
            # boundary-like separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
# title "Hello $PATH", we don't want `$PATH` to be expanded.
filename = expand_path(outtmpl).replace(sep, '') % template_dict
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
filename = encodeFilename(filename, True).decode(preferredencoding())
return sanitize_path(filename)
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
def _match_entry(self, info_dict, incomplete):
""" Returns None iff the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
if self.in_download_archive(info_dict):
return '%s has already been recorded in archive' % video_title
if not incomplete:
match_filter = self.params.get('match_filter')
if match_filter is not None:
ret = match_filter(info_dict)
if ret is not None:
return ret
return None
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
process=True, force_generic_extractor=False):
'''
Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
extra_info is a dict containing the extra values to add to each result
'''
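        # e.g. extract_info(url, download=False) is the common way to fetch
        # metadata only (an info dict or playlist result) without downloading.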
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = [self.get_info_extractor(ie_key)]
else:
ies = self._ies
for ie in ies:
if not ie.suitable(url):
continue
ie = self.get_info_extractor(ie.ie_key())
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
return self.__extract_info(url, ie, download, extra_info, process)
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
def __handle_extraction_exceptions(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
self.report_error(msg)
except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback())
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
else:
raise
return wrapper
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
return
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
def add_default_extra_info(self, ie_result, ie, url):
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'webpage_url': url,
'webpage_url_basename': url_basename(url),
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info={}):
"""
        Take the result of the ie (which may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download'.
Returns the resolved ie_result.
"""
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
or extract_flat is True):
self.__forced_printings(
ie_result, self.prepare_filename(ie_result),
incomplete=True)
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
return self.process_video_result(ie_result, download=download)
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(ie_result['url'],
download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
# extract_info may return None when ignoreerrors is enabled and
# extraction failed with an error, don't crash and return early
# in this case
if not info:
return info
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result.update(force_properties)
# Extracted info may not be a video result (i.e.
# info.get('_type', 'video') != video) but rather an url or
# url_transparent. In such cases outer metadata (from ie_result)
# should be propagated to inner one (info). For this to happen
# _type of info should be overridden with url_transparent. This
# fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
if new_result.get('_type') == 'url':
new_result['_type'] = 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type in ('playlist', 'multi_video'):
# Protect from infinite recursion due to recursively nested playlists
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
webpage_url = ie_result['webpage_url']
if webpage_url in self._playlist_urls:
self.to_screen(
'[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
return
self._playlist_level += 1
self._playlist_urls.add(webpage_url)
try:
return self.__process_playlist(ie_result, download)
finally:
self._playlist_level -= 1
if not self._playlist_level:
self._playlist_urls.clear()
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(
r,
{
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
)
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
def __process_playlist(self, ie_result, download):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
playlist_results = []
playliststart = self.params.get('playliststart', 1) - 1
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
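            # playlist_items is a comma-separated list of indices and ranges,
            # e.g. '1-3,7' selects entries 1, 2, 3 and 7.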
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
ie_entries = ie_result['entries']
def make_playlistitems_entries(list_ie_entries):
num_entries = len(list_ie_entries)
return [
list_ie_entries[i - 1] for i in playlistitems
if -num_entries <= i - 1 < num_entries]
def report_download(num_entries):
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, num_entries))
if isinstance(ie_entries, list):
n_all_entries = len(ie_entries)
if playlistitems:
entries = make_playlistitems_entries(ie_entries)
else:
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
entries = []
for item in playlistitems:
entries.extend(ie_entries.getslice(
item - 1, item
))
else:
entries = ie_entries.getslice(
playliststart, playlistend)
n_entries = len(entries)
report_download(n_entries)
else: # iterable
if playlistitems:
entries = make_playlistitems_entries(list(itertools.islice(
ie_entries, 0, max(playlistitems))))
else:
entries = list(itertools.islice(
ie_entries, playliststart, playlistend))
n_entries = len(entries)
report_download(n_entries)
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
reason = self._match_entry(entry, incomplete=True)
if reason is not None:
self.to_screen('[download] ' + reason)
continue
entry_result = self.__process_iterable_entry(entry, download, extra)
# TODO: skip failed (empty) entries?
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
@__handle_extraction_exceptions
def __process_iterable_entry(self, entry, download, extra_info):
return self.process_ie_result(
entry, download=download, extra_info=extra_info)
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
$
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id|language)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
str_op = STR_OPERATORS[m.group('op')]
if m.group('negation'):
op = lambda attr, value: not str_op(attr, value)
else:
op = str_op
if not m:
raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
def _default_format_spec(self, info_dict, download=True):
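        # Returns 'bestvideo+bestaudio/best' when merging is possible, and the
        # reversed preference ('best/bestvideo+bestaudio') when e.g. streaming
        # to stdout, downloading a live stream, or ffmpeg/avconv is missing.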
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
def prefer_best():
if self.params.get('simulate', False):
return False
if not download:
return False
if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
return True
if info_dict.get('is_live'):
return True
if not can_merge():
return True
return False
req_format_list = ['bestvideo+bestaudio', 'best']
if prefer_best():
req_format_list.reverse()
return '/'.join(req_format_list)
def build_format_selector(self, format_spec):
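        # format_spec is the small selection language used by --format, e.g.
        # 'best', 'bestvideo+bestaudio/best', 'bestvideo[height<=720]+bestaudio'
        # or '(mp4,webm)[filesize<100M]' (illustrative examples): '/' picks the
        # first alternative that is available, ',' selects several formats,
        # '+' merges a video-only and an audio-only format, and '[...]' applies
        # the filters built by _build_format_filter above.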
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
if inside_merge:
raise syntax_error('Unexpected "+"', start)
video_selector = current_selector
audio_selector = _parse_format_selection(tokens, inside_merge=True)
if not video_selector or not audio_selector:
raise syntax_error('"+" must be between two format selectors', start)
current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _build_selector_function(selector):
if isinstance(selector, list):
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
for format in f(ctx):
yield format
return selector_function
elif selector.type == GROUP:
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST:
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == SINGLE:
format_spec = selector.selector
def selector_function(ctx):
formats = list(ctx['formats'])
if not formats:
return
if format_spec == 'all':
for f in formats:
yield f
elif format_spec in ['best', 'worst', None]:
format_idx = 0 if format_spec == 'worst' else -1
audiovideo_formats = [
f for f in formats
if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
if audiovideo_formats:
yield audiovideo_formats[format_idx]
# for extractors with incomplete formats (audio only (soundcloud)
# or video only (imgur)) we will fallback to best/worst
# {video,audio}-only format
elif ctx['incomplete_formats']:
yield formats[format_idx]
elif format_spec == 'bestaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[-1]
elif format_spec == 'worstaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[0]
elif format_spec == 'bestvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[-1]
elif format_spec == 'worstvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
filter_f = lambda f: f['format_id'] == format_spec
matches = list(filter(filter_f, formats))
if matches:
yield matches[-1]
elif selector.type == MERGE:
def _merge(formats_info):
format_1, format_2 = [f['format_id'] for f in formats_info]
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
# Formats must be opposite (video+audio)
if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
self.report_error(
'Both formats %s and %s are video-only, you must specify "-f video+audio"'
% (format_1, format_2))
return
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
else self.params['merge_output_format'])
return {
'requested_formats': formats_info,
'format': '%s+%s' % (formats_info[0].get('format'),
formats_info[1].get('format')),
'format_id': '%s+%s' % (formats_info[0].get('format_id'),
formats_info[1].get('format_id')),
'width': formats_info[0].get('width'),
'height': formats_info[0].get('height'),
'resolution': formats_info[0].get('resolution'),
'fps': formats_info[0].get('fps'),
'vcodec': formats_info[0].get('vcodec'),
'vbr': formats_info[0].get('vbr'),
'stretched_ratio': formats_info[0].get('stretched_ratio'),
'acodec': formats_info[1].get('acodec'),
'abr': formats_info[1].get('abr'),
'ext': output_ext,
}
video_selector, audio_selector = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(
video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
yield _merge(pair)
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = copy.deepcopy(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def process_video_result(self, info_dict, download=True):
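        # Sanitizes the extracted info_dict (ids, thumbnails, subtitles,
        # formats), applies the requested format selection and, when
        # download=True, hands each selected format to process_info().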
assert info_dict.get('_type', 'video') == 'video'
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result')
if 'title' not in info_dict:
raise ExtractorError('Missing "title" field in extractor result')
def report_force_conversion(field, field_not, conversion):
self.report_warning(
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
% (field, field_not, conversion))
def sanitize_string_field(info, string_field):
field = info.get(string_field)
if field is None or isinstance(field, compat_str):
return
report_force_conversion(string_field, 'a string', 'string')
info[string_field] = compat_str(field)
def sanitize_numeric_fields(info):
for numeric_field in self._NUMERIC_FIELDS:
field = info.get(numeric_field)
if field is None or isinstance(field, compat_numeric_types):
continue
report_force_conversion(numeric_field, 'numeric', 'int')
info[numeric_field] = int_or_none(field)
sanitize_string_field(info_dict, 'id')
sanitize_numeric_fields(info_dict)
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if thumbnails:
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '', t.get('url')))
for i, t in enumerate(thumbnails):
t['url'] = sanitize_url(t['url'])
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
if t.get('id') is None:
t['id'] = '%d' % i
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
return
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = upload_date.strftime('%Y%m%d')
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)
if cc:
for _, subtitle in cc.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
automatic_captions = info_dict.get('automatic_captions')
subtitles = info_dict.get('subtitles')
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(
info_dict['id'], automatic_captions, 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles, automatic_captions)
# We now pick which formats have to be downloaded
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
if not formats:
raise ExtractorError('No video formats found!')
def is_wellformed(f):
url = f.get('url')
if not url:
self.report_warning(
'"url" field is missing or empty - skipping format, '
'there is an error in extractor')
return False
if isinstance(url, bytes):
sanitize_string_field(f, 'url')
return True
# Filter out malformed formats for better extraction robustness
formats = list(filter(is_wellformed, formats))
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
sanitize_string_field(format, 'format_id')
sanitize_numeric_fields(format)
format['url'] = sanitize_url(format['url'])
if not format.get('format_id'):
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
for format_id, ambiguous_formats in formats_dict.items():
if len(ambiguous_formats) > 1:
for i, format in enumerate(ambiguous_formats):
format['format_id'] = '%s-%d' % (format_id, i)
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
)
# Automatically determine file extension if missing
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Automatically determine protocol if missing (useful for format
# selection purposes)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
# TODO Central sorting goes here
if formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them
# otherwise we end up with a circular reference, the first (and unique)
# element in the 'formats' field in info_dict is info_dict itself,
# which can't be exported to json
info_dict['formats'] = formats
if self.params.get('listformats'):
self.list_formats(info_dict)
return
req_format = self.params.get('format')
if req_format is None:
req_format = self._default_format_spec(info_dict, download=download)
if self.params.get('verbose'):
self._write_string('[debug] Default format spec: %s\n' % req_format)
format_selector = self.build_format_selector(req_format)
        # During format selection we may need to have access to the original
        # format set in order to calculate some metrics or do some processing.
# For now we need to be able to guess whether original formats provided
# by extractor are incomplete or not (i.e. whether extractor provides only
# video-only or audio-only formats) for proper formats selection for
# extractors with such incomplete formats (see
# https://github.com/ytdl-org/youtube-dl/pull/5556).
# Since formats may be filtered during format selection and may not match
# the original formats the results may be incorrect. Thus original formats
# or pre-calculated metrics should be passed to format selection routines
# as well.
# We will pass a context object containing all necessary additional data
# instead of just formats.
# This fixes incorrect format selection issue (see
# https://github.com/ytdl-org/youtube-dl/issues/10083).
incomplete_formats = (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
# all formats are audio-only
or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
ctx = {
'formats': formats,
'incomplete_formats': incomplete_formats,
}
formats_to_download = list(format_selector(ctx))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
if download:
if len(formats_to_download) > 1:
self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
for format in formats_to_download:
new_info = dict(info_dict)
new_info.update(format)
self.process_info(new_info)
# We update the info dict with the best quality format (backwards compatibility)
info_dict.update(formats_to_download[-1])
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs = {}
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if (not self.params.get('writesubtitles') and not
self.params.get('writeautomaticsub') or not
available_subs):
return None
if self.params.get('allsubtitles', False):
requested_langs = available_subs.keys()
else:
if self.params.get('subtitleslangs', False):
requested_langs = self.params.get('subtitleslangs')
elif 'en' in available_subs:
requested_langs = ['en']
else:
requested_langs = [list(available_subs.keys())[0]]
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
def __forced_printings(self, info_dict, filename, incomplete):
def print_mandatory(field):
if (self.params.get('force%s' % field, False)
and (not incomplete or info_dict.get(field) is not None)):
self.to_stdout(info_dict[field])
def print_optional(field):
if (self.params.get('force%s' % field, False)
and info_dict.get(field) is not None):
self.to_stdout(info_dict[field])
print_mandatory('title')
print_mandatory('id')
if self.params.get('forceurl', False) and not incomplete:
if info_dict.get('requested_formats') is not None:
for f in info_dict['requested_formats']:
self.to_stdout(f['url'] + f.get('play_path', ''))
else:
# For RTMP URLs, also include the playpath
self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
print_optional('thumbnail')
print_optional('description')
if self.params.get('forcefilename', False) and filename is not None:
self.to_stdout(filename)
if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
print_mandatory('format')
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(info_dict))
def process_info(self, info_dict):
"""Process a single resolved IE result."""
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
# TODO: backward compatibility, to be removed
info_dict['fulltitle'] = info_dict['title']
if 'format' not in info_dict:
info_dict['format'] = info_dict['ext']
reason = self._match_entry(info_dict, incomplete=False)
if reason is not None:
self.to_screen('[download] ' + reason)
return
self._num_downloads += 1
info_dict['_filename'] = filename = self.prepare_filename(info_dict)
# Forced printings
self.__forced_printings(info_dict, filename, incomplete=False)
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
return
if filename is None:
return
def ensure_dir_exists(path):
try:
dn = os.path.dirname(path)
if dn and not os.path.exists(dn):
os.makedirs(dn)
return True
except (OSError, IOError) as err:
if isinstance(err, OSError) and err.errno == errno.EEXIST:
return True
self.report_error('unable to create directory ' + error_to_compat_str(err))
return False
if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
return
if self.params.get('writedescription', False):
descfn = replace_extension(filename, 'description', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Video description is already present')
elif info_dict.get('description') is None:
self.report_warning('There\'s no description to write.')
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
return
if self.params.get('writeannotations', False):
annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
elif not info_dict.get('annotations'):
self.report_warning('There are no annotations to write.')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
subtitles_are_requested = any([self.params.get('writesubtitles', False),
self.params.get('writeautomaticsub')])
if subtitles_are_requested and info_dict.get('requested_subtitles'):
            # Subtitle download errors are already managed as troubles in the relevant IE,
            # so this will silently go on when used with an IE that does not support subtitles
subtitles = info_dict['requested_subtitles']
ie = self.get_info_extractor(info_dict['extractor_key'])
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
if sub_info.get('data') is not None:
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
else:
try:
sub_data = ie._request_webpage(
sub_info['url'], info_dict['id'], note=False).read()
with io.open(encodeFilename(sub_filename), 'wb') as subfile:
subfile.write(sub_data)
except (ExtractorError, IOError, OSError, ValueError) as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err)))
continue
if self.params.get('writeinfojson', False):
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Video description metadata is already present')
else:
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
try:
write_json_file(self.filter_requested_info(info_dict), infofn)
except (OSError, IOError):
self.report_error('Cannot write metadata to JSON file ' + infofn)
return
self._write_thumbnails(info_dict, filename)
if not self.params.get('skip_download', False):
try:
def dl(name, info):
fd = get_suitable_downloader(info, self.params)(self, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
if self.params.get('verbose'):
self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
return fd.download(name, info)
if info_dict.get('requested_formats') is not None:
downloaded = []
success = True
merger = FFmpegMergerPP(self)
if not merger.available:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.')
else:
postprocessors = [merger]
def compatible_formats(formats):
video, audio = formats
# Check extension
video_ext, audio_ext = video.get('ext'), audio.get('ext')
if video_ext and audio_ext:
COMPATIBLE_EXTS = (
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
                                ('webm',)  # trailing comma: keep this a tuple, not a plain string
)
for exts in COMPATIBLE_EXTS:
if video_ext in exts and audio_ext in exts:
return True
# TODO: Check acodec/vcodec
return False
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext == info_dict['ext']
else filename)
requested_formats = info_dict['requested_formats']
if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv.')
# Ensure filename always has a correct extension for successful merge
filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
if os.path.exists(encodeFilename(filename)):
self.to_screen(
'[download] %s has already been downloaded and '
'merged' % filename)
else:
for f in requested_formats:
new_info = dict(info_dict)
new_info.update(f)
fname = prepend_extension(
self.prepare_filename(new_info),
'f%s' % f['format_id'], new_info['ext'])
if not ensure_dir_exists(fname):
return
downloaded.append(fname)
partial_success = dl(fname, new_info)
success = success and partial_success
info_dict['__postprocessors'] = postprocessors
info_dict['__files_to_merge'] = downloaded
else:
# Just a single file
success = dl(filename, info_dict)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and filename != '-':
# Fixup content
fixup_policy = self.params.get('fixup')
if fixup_policy is None:
fixup_policy = 'detect_or_warn'
INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
stretched_ratio = info_dict.get('stretched_ratio')
if stretched_ratio is not None and stretched_ratio != 1:
if fixup_policy == 'warn':
self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
info_dict['id'], stretched_ratio))
elif fixup_policy == 'detect_or_warn':
stretched_pp = FFmpegFixupStretchedPP(self)
if stretched_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(stretched_pp)
else:
self.report_warning(
'%s: Non-uniform pixel ratio (%s). %s'
% (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('requested_formats') is None
and info_dict.get('container') == 'm4a_dash'):
if fixup_policy == 'warn':
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container.'
% info_dict['id'])
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM4aPP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('protocol') == 'm3u8_native'
or info_dict.get('protocol') == 'm3u8'
and self.params.get('hls_prefer_native')):
if fixup_policy == 'warn':
self.report_warning('%s: malformed AAC bitstream detected.' % (
info_dict['id']))
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM3u8PP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: malformed AAC bitstream detected. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
self.report_error('postprocessing: %s' % str(err))
return
self.record_download_archive(info_dict)
def download(self, url_list):
"""Download a given list of URLs."""
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
if (len(url_list) > 1
and outtmpl != '-'
and '%' not in outtmpl
and self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
try:
# It also downloads the videos
res = self.extract_info(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))
return self._download_retcode
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
# FileInput doesn't have a read method, so we can't call json.load
info = self.filter_requested_info(json.loads('\n'.join(f)))
try:
self.process_ie_result(info, download=True)
except DownloadError:
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def filter_requested_info(info_dict):
return dict(
(k, v) for k, v in info_dict.items()
if k not in ['requested_formats', 'requested_subtitles'])
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
pps_chain = []
if ie_info.get('__postprocessors') is not None:
pps_chain.extend(ie_info['__postprocessors'])
pps_chain.extend(self._pps)
for pp in pps_chain:
files_to_delete = []
try:
files_to_delete, info = pp.run(info)
except PostProcessingError as e:
self.report_error(e.msg)
if files_to_delete and not self.params.get('keepvideo', False):
for old_filename in files_to_delete:
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded original file')
def _make_archive_id(self, info_dict):
video_id = info_dict.get('id')
if not video_id:
return
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
if extractor is None:
url = str_or_none(info_dict.get('url'))
if not url:
return
# Try to find matching extractor for the URL and take its ie_key
for ie in self._ies:
if ie.suitable(url):
extractor = ie.ie_key()
break
else:
return
return extractor.lower() + ' ' + video_id
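# Illustrative note (not part of the original source): _make_archive_id joins the
# lower-cased extractor key and the video id with a single space, so an entry for
# the Youtube extractor and id 'abc123' is recorded as 'youtube abc123', which is
# the exact line format that in_download_archive matches against below.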
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if not vid_id:
return False # Incomplete video information
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
if line.strip() == vid_id:
return True
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('height') is not None:
if format.get('width') is not None:
res = '%sx%s' % (format['width'], format['height'])
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
res = '%dx?' % format['width']
else:
res = default
return res
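# Illustrative examples (not part of the original source): a format with
# width=1280 and height=720 resolves to '1280x720', a height-only format to
# '720p', a width-only format to '1280x?', and a format whose vcodec is 'none'
# to 'audio only'.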
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None
and fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])
table = [
[f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
for f in formats
if f.get('preference') is None or f['preference'] >= -1000]
if len(formats) > 1:
table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
header_line = ['format code', 'extension', 'resolution', 'note']
self.to_screen(
'[info] Available formats for %s:\n%s' %
(info_dict['id'], render_table(header_line, table)))
def list_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if not thumbnails:
self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
return
self.to_screen(
'[info] Thumbnails for %s:' % info_dict['id'])
self.to_screen(render_table(
['ID', 'width', 'height', 'URL'],
[[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
if not subtitles:
self.to_screen('%s has no %s' % (video_id, name))
return
self.to_screen(
'Available %s for %s:' % (name, video_id))
self.to_screen(render_table(
['Language', 'formats'],
[[lang, ', '.join(f['ext'] for f in reversed(formats))]
for lang, formats in subtitles.items()]))
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
if type('') is not compat_str:
# Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
if _LAZY_LOADER:
self._write_string('[debug] Lazy loading extractors enabled' + '\n')
try:
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
out, err = sp.communicate()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
self._write_string('[debug] Git HEAD: ' + out + '\n')
except Exception:
try:
sys.exc_clear()
except Exception:
pass
def python_implementation():
impl_name = platform.python_implementation()
if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
return impl_name
self._write_string('[debug] Python version %s (%s) - %s\n' % (
platform.python_version(), python_implementation(),
platform_name()))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
exe_versions['phantomjs'] = PhantomJSwrapper._version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
self._write_string('[debug] Public IP address: %s\n' % ipaddr)
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
if opts_cookiefile is None:
self.cookiejar = compat_cookiejar.CookieJar()
else:
opts_cookiefile = expand_path(opts_cookiefile)
self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load(ignore_discard=True, ignore_expires=True)
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
redirect_handler = YoutubeDLRedirectHandler()
data_handler = compat_urllib_request_DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/ytdl-org/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_thumbnails(self, info_dict, filename):
if self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails')
if thumbnails:
thumbnails = [thumbnails[-1]]
elif self.params.get('write_all_thumbnails', False):
thumbnails = info_dict.get('thumbnails')
else:
return
if not thumbnails:
# No thumbnails present, so return immediately
return
for t in thumbnails:
thumb_ext = determine_ext(t['url'], 'jpg')
suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
t['filename'] = thumb_filename = replace_extension(filename + suffix, thumb_ext, info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
self.to_screen('[%s] %s: Thumbnail %sis already present' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
else:
self.to_screen('[%s] %s: Downloading thumbnail %s...' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
(t['url'], error_to_compat_str(err)))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/helpers/ssh_command.go | // Copyright 2017-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helpers
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"time"
"github.com/kevinburke/ssh_config"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
// SSHCommand stores the data associated with executing a command.
// TODO: this is poorly named in that it's not related only to commands
// run over SSH - rename this.
type SSHCommand struct {
// TODO: path is not a clear name - rename to something more clear.
Path string
Env []string
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
// SSHClient stores the information needed to SSH into a remote location for
// running tests.
type SSHClient struct {
Config *ssh.ClientConfig // ssh client configuration information.
Host string // IP/host of the target virtual server
Port int // Port to connect to the target server
client *ssh.Client // Client implements a traditional SSH client that supports shells,
// subprocesses, TCP port/streamlocal forwarding and tunneled dialing.
}
// GetHostPort returns the host port representation of the ssh client
func (cli *SSHClient) GetHostPort() string {
return net.JoinHostPort(cli.Host, strconv.Itoa(cli.Port))
}
// SSHConfig contains metadata for an SSH session.
type SSHConfig struct {
target string
host string
user string
port int
identityFile string
}
// SSHConfigs maps the name of a host (VM) to its corresponding SSHConfiguration
type SSHConfigs map[string]*SSHConfig
// GetSSHClient initializes an SSHClient based on the provided SSHConfig
func (cfg *SSHConfig) GetSSHClient() *SSHClient {
sshConfig := &ssh.ClientConfig{
User: cfg.user,
Auth: []ssh.AuthMethod{
cfg.GetSSHAgent(),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: 15 * time.Second,
}
return &SSHClient{
Config: sshConfig,
Host: cfg.host,
Port: cfg.port,
}
}
func (client *SSHClient) String() string {
return fmt.Sprintf("host: %s, port: %d, user: %s", client.Host, client.Port, client.Config.User)
}
func (cfg *SSHConfig) String() string {
return fmt.Sprintf("target: %s, host: %s, port %d, user, %s, identityFile: %s", cfg.target, cfg.host, cfg.port, cfg.user, cfg.identityFile)
}
// GetSSHAgent returns the ssh.AuthMethod corresponding to SSHConfig cfg.
func (cfg *SSHConfig) GetSSHAgent() ssh.AuthMethod {
key, err := ioutil.ReadFile(cfg.identityFile)
if err != nil {
log.Fatalf("unable to retrieve ssh-key on target '%s': %s", cfg.target, err)
}
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
log.Fatalf("unable to parse private key on target '%s': %s", cfg.target, err)
}
return ssh.PublicKeys(signer)
}
// ImportSSHconfig imports the SSH configuration from the provided raw config
// bytes. Returns an error if the SSH configuration could not be decoded.
func ImportSSHconfig(config []byte) (SSHConfigs, error) {
result := make(SSHConfigs)
cfg, err := ssh_config.Decode(bytes.NewBuffer(config))
if err != nil {
return nil, err
}
for _, host := range cfg.Hosts {
key := host.Patterns[0].String()
if key == "*" {
continue
}
port, _ := cfg.Get(key, "Port")
hostConfig := SSHConfig{target: key}
hostConfig.host, _ = cfg.Get(key, "Hostname")
hostConfig.identityFile, _ = cfg.Get(key, "identityFile")
hostConfig.user, _ = cfg.Get(key, "User")
hostConfig.port, _ = strconv.Atoi(port)
result[key] = &hostConfig
}
return result, nil
}
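// Illustrative example (not part of the original file): given a decoded config
// containing
//
//	Host k8s1
//	    Hostname 127.0.0.1
//	    Port 2222
//	    User vagrant
//	    identityFile /home/user/.ssh/insecure_private_key
//
// ImportSSHconfig would return a map with key "k8s1" whose SSHConfig holds that
// hostname, port, user and identity file; the wildcard "*" entry is skipped.
// The host name and path above are made-up values for illustration only.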
// copyWait runs an instance of io.Copy() in a goroutine, and returns a channel
// to receive the error result.
func copyWait(dst io.Writer, src io.Reader) chan error {
c := make(chan error, 1)
go func() {
_, err := io.Copy(dst, src)
c <- err
}()
return c
}
// runCommand runs the specified command on the provided SSH session, and
// gathers both the stderr and stdout output into the writers provided by
// cmd. Returns whether the command was run and an optional error.
// Returns nil when the command completes successfully and all stderr,
// stdout output has been written. Returns an error otherwise.
func runCommand(session *ssh.Session, cmd *SSHCommand) (bool, error) {
stderr, err := session.StderrPipe()
if err != nil {
return false, fmt.Errorf("Unable to setup stderr for session: %v", err)
}
errChan := copyWait(cmd.Stderr, stderr)
stdout, err := session.StdoutPipe()
if err != nil {
return false, fmt.Errorf("Unable to setup stdout for session: %v", err)
}
outChan := copyWait(cmd.Stdout, stdout)
if err = session.Run(cmd.Path); err != nil {
return false, err
}
if err = <-errChan; err != nil {
return true, err
}
if err = <-outChan; err != nil {
return true, err
}
return true, nil
}
// RunCommand runs a SSHCommand using SSHClient client. The returned error is
// nil if the command runs, has no problems copying stdin, stdout, and stderr,
// and exits with a zero exit status.
func (client *SSHClient) RunCommand(cmd *SSHCommand) error {
session, err := client.newSession()
if err != nil {
return err
}
defer session.Close()
_, err = runCommand(session, cmd)
return err
}
// RunCommandInBackground runs an SSH command in a similar way to
// RunCommandContext, but with a context which allows the command to be
// cancelled at any time. When cancel is called, the error of the command is
// returned instead of the context error.
func (client *SSHClient) RunCommandInBackground(ctx context.Context, cmd *SSHCommand) error {
if ctx == nil {
panic("nil context provided to RunCommandInBackground()")
}
session, err := client.newSession()
if err != nil {
return err
}
defer session.Close()
modes := ssh.TerminalModes{
ssh.ECHO: 1, // enable echoing
ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
}
session.RequestPty("xterm-256color", 80, 80, modes)
stdin, err := session.StdinPipe()
if err != nil {
log.Errorf("Could not get stdin: %s", err)
}
go func() {
select {
case <-ctx.Done():
_, err := stdin.Write([]byte{3})
if err != nil {
log.Errorf("write ^C error: %s", err)
}
err = session.Wait()
if err != nil {
log.Errorf("wait error: %s", err)
}
if err = session.Signal(ssh.SIGHUP); err != nil {
log.Errorf("failed to kill command: %s", err)
}
if err = session.Close(); err != nil {
log.Errorf("failed to close session: %s", err)
}
}
}()
_, err = runCommand(session, cmd)
return err
}
// RunCommandContext runs an SSH command in a similar way to RunCommand but with
// a context. If context is canceled it will return the error of that given
// context.
func (client *SSHClient) RunCommandContext(ctx context.Context, cmd *SSHCommand) error {
if ctx == nil {
panic("nil context provided to RunCommandContext()")
}
var (
session *ssh.Session
sessionErrChan = make(chan error, 1)
)
go func() {
var sessionErr error
// This may block depending on the state of the setup the tests are being
// run against. As a result, these goroutines may leak, but the logic
// below will fail and propagate to the rest of the CI framework, which
// will error out anyway. It's better to leak in really bad cases since
// the CI will fail anyway. Unfortunately, the golang SSH library does
// not provide a way to propagate context through to creating sessions.
// Note that this is a closure on the session variable!
session, sessionErr = client.newSession()
if sessionErr != nil {
log.Infof("error creating session: %s", sessionErr)
sessionErrChan <- sessionErr
return
}
_, runErr := runCommand(session, cmd)
sessionErrChan <- runErr
if closeErr := session.Close(); closeErr != nil {
log.WithError(closeErr).Error("failed to close session")
}
}()
select {
case asyncErr := <-sessionErrChan:
return asyncErr
case <-ctx.Done():
if session != nil {
log.Warning("sending SIGHUP to session due to canceled context")
if err := session.Signal(ssh.SIGHUP); err != nil {
log.Errorf("failed to kill command when context is canceled: %s", err)
}
} else {
log.Error("timeout reached; no session was able to be created")
}
return ctx.Err()
}
}
func (client *SSHClient) newSession() (*ssh.Session, error) {
var connection *ssh.Client
var err error
if client.client != nil {
connection = client.client
} else {
connection, err = ssh.Dial(
"tcp",
fmt.Sprintf("%s:%d", client.Host, client.Port),
client.Config)
if err != nil {
return nil, fmt.Errorf("failed to dial: %s", err)
}
client.client = connection
}
session, err := connection.NewSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %s", err)
}
return session, nil
}
// SSHAgent returns the ssh.AuthMethod using the public keys. Returns nil if
// a connection to SSH_AUTH_SOCK does not succeed.
func SSHAgent() ssh.AuthMethod {
if sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
return ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)
}
return nil
}
// GetSSHClient initializes an SSHClient for the specified host/port/user
// combination.
func GetSSHClient(host string, port int, user string) *SSHClient {
sshConfig := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{
SSHAgent(),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: 15 * time.Second,
}
return &SSHClient{
Config: sshConfig,
Host: host,
Port: port,
}
}
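// Hypothetical usage sketch (not part of the original file); the host, port and
// user below are made-up values, and it assumes a reachable SSH endpoint plus a
// running ssh-agent:
//
//	client := GetSSHClient("192.168.33.11", 22, "vagrant")
//	var stdout, stderr bytes.Buffer
//	cmd := &SSHCommand{
//		Path:   "uname -a",
//		Stdin:  os.Stdin,
//		Stdout: &stdout,
//		Stderr: &stderr,
//	}
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	if err := client.RunCommandContext(ctx, cmd); err != nil {
//		log.Errorf("remote command failed: %s", err)
//	}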
| [
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK"
]
| [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
src/core/main.go | // Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/gob"
"fmt"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"github.com/astaxie/beego"
_ "github.com/astaxie/beego/session/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/models"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
quota "github.com/goharbor/harbor/src/core/api/quota"
_ "github.com/goharbor/harbor/src/core/api/quota/chart"
_ "github.com/goharbor/harbor/src/core/api/quota/registry"
_ "github.com/goharbor/harbor/src/core/auth/authproxy"
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
_ "github.com/goharbor/harbor/src/core/auth/oidc"
_ "github.com/goharbor/harbor/src/core/auth/uaa"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
"github.com/goharbor/harbor/src/core/middlewares"
_ "github.com/goharbor/harbor/src/core/notifier/topic"
"github.com/goharbor/harbor/src/core/service/token"
"github.com/goharbor/harbor/src/pkg/notification"
"github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
"github.com/goharbor/harbor/src/pkg/scan/event"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/goharbor/harbor/src/pkg/version"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/server"
)
const (
adminUserID = 1
)
func updateInitPassword(userID int, password string) error {
queryUser := models.User{UserID: userID}
user, err := dao.GetUser(queryUser)
if err != nil {
return fmt.Errorf("Failed to get user, userID: %d %v", userID, err)
}
if user == nil {
return fmt.Errorf("user id: %d does not exist", userID)
}
if user.Salt == "" {
salt := utils.GenerateRandomString()
user.Salt = salt
user.Password = password
err = dao.ChangeUserPassword(*user)
if err != nil {
return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err)
}
log.Infof("User id: %d updated its encrypted password successfully.", userID)
} else {
log.Infof("User id: %d already has its encrypted password.", userID)
}
return nil
}
// Quota migration
func quotaSync() error {
projects, err := dao.GetProjects(nil)
if err != nil {
log.Errorf("list project error, %v", err)
return err
}
var pids []string
for _, project := range projects {
pids = append(pids, strconv.FormatInt(project.ProjectID, 10))
}
usages, err := dao.ListQuotaUsages(&models.QuotaUsageQuery{Reference: "project", ReferenceIDs: pids})
if err != nil {
log.Errorf("list quota usage error, %v", err)
return err
}
// The condition handles these two cases:
// 1. len(projects) > 1 && len(usages) == 1: existing projects have no usage records, as the 'library' usage always exists in the DB.
// 2. Migration failed during the phase of inserting usages into the DB, and only part of them were inserted successfully.
if len(projects) != len(usages) {
log.Info("Start to sync quota data .....")
if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
log.Errorf("Fail to sync quota data, %v", err)
return err
}
log.Info("Success to sync quota data .....")
return nil
}
// Only one project exists and it has no usage recorded yet
zero := common_quota.ResourceList{
common_quota.ResourceCount: 0,
common_quota.ResourceStorage: 0,
}
if len(projects) == 1 && len(usages) == 1 {
totalRepo, err := dao.GetTotalOfRepositories()
if err != nil {
log.Error(err)
return err
}
if totalRepo == 0 {
return nil
}
refID, err := strconv.ParseInt(usages[0].ReferenceID, 10, 64)
if err != nil {
log.Error(err)
return err
}
usedRes, err := types.NewResourceList(usages[0].Used)
if err != nil {
log.Error(err)
return err
}
if types.Equals(usedRes, zero) && refID == projects[0].ProjectID {
log.Info("Start to sync quota data .....")
if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
log.Errorf("Fail to sync quota data, %v", err)
return err
}
log.Info("Success to sync quota data .....")
}
}
return nil
}
func gracefulShutdown(closing, done chan struct{}) {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
log.Infof("capture system signal %s, to close \"closing\" channel", <-signals)
close(closing)
select {
case <-done:
log.Infof("Goroutines exited normally")
case <-time.After(time.Second * 3):
log.Infof("Timeout waiting goroutines to exit")
}
os.Exit(0)
}
func main() {
beego.BConfig.WebConfig.Session.SessionOn = true
beego.BConfig.WebConfig.Session.SessionName = config.SessionCookieName
redisURL := os.Getenv("_REDIS_URL")
if len(redisURL) > 0 {
gob.Register(models.User{})
beego.BConfig.WebConfig.Session.SessionProvider = "redis"
beego.BConfig.WebConfig.Session.SessionProviderConfig = redisURL
}
beego.AddTemplateExt("htm")
log.Info("initializing configurations...")
config.Init()
log.Info("configurations initialization completed")
token.InitCreators()
database, err := config.Database()
if err != nil {
log.Fatalf("failed to get database configuration: %v", err)
}
if err := dao.InitAndUpgradeDatabase(database); err != nil {
log.Fatalf("failed to initialize database: %v", err)
}
if err := config.Load(); err != nil {
log.Fatalf("failed to load config: %v", err)
}
// init the jobservice client
job.Init()
// init the scheduler
scheduler.Init()
password, err := config.InitialAdminPassword()
if err != nil {
log.Fatalf("failed to get admin's initial password: %v", err)
}
if err := updateInitPassword(adminUserID, password); err != nil {
log.Error(err)
}
// Init API handler
if err := api.Init(); err != nil {
log.Fatalf("Failed to initialize API handlers with error: %s", err.Error())
}
registerScanners()
closing := make(chan struct{})
done := make(chan struct{})
go gracefulShutdown(closing, done)
if err := replication.Init(closing, done); err != nil {
log.Fatalf("failed to init for replication: %v", err)
}
log.Info("initializing notification...")
notification.Init()
// Initialize the event handlers for handling artifact cascade deletion
event.Init()
filter.Init()
beego.InsertFilter("/api/*", beego.BeforeStatic, filter.SessionCheck)
beego.InsertFilter("/*", beego.BeforeRouter, filter.SecurityFilter)
beego.InsertFilter("/*", beego.BeforeRouter, filter.ReadonlyFilter)
server.RegisterRoutes()
syncRegistry := os.Getenv("SYNC_REGISTRY")
sync, err := strconv.ParseBool(syncRegistry)
if err != nil {
log.Errorf("Failed to parse SYNC_REGISTRY: %v", err)
// if parsing fails, default to false
sync = false
}
if sync {
if err := api.SyncRegistry(config.GlobalProjectMgr); err != nil {
log.Error(err)
}
} else {
log.Infof("Because SYNC_REGISTRY set false , no need to sync registry \n")
}
log.Info("Init proxy")
if err := middlewares.Init(); err != nil {
log.Fatalf("init proxy error, %v", err)
}
syncQuota := os.Getenv("SYNC_QUOTA")
doSyncQuota, err := strconv.ParseBool(syncQuota)
if err != nil {
log.Errorf("Failed to parse SYNC_QUOTA: %v", err)
doSyncQuota = true
}
if doSyncQuota {
if err := quotaSync(); err != nil {
log.Fatalf("quota migration error, %v", err)
}
} else {
log.Infof("Because SYNC_QUOTA set false , no need to sync quota \n")
}
log.Infof("Version: %s, Git commit: %s", version.ReleaseVersion, version.GitCommit)
beego.RunWithMiddleWares("", middlewares.MiddleWares()...)
}
func registerScanners() {
wantedScanners := make([]scanner.Registration, 0)
uninstallURLs := make([]string, 0)
if config.WithTrivy() {
log.Info("Registering Trivy scanner")
wantedScanners = append(wantedScanners, scanner.Registration{
Name: "Trivy",
Description: "The Trivy scanner adapter",
URL: config.TrivyAdapterURL(),
UseInternalAddr: true,
Immutable: true,
})
} else {
log.Info("Removing Trivy scanner")
uninstallURLs = append(uninstallURLs, config.TrivyAdapterURL())
}
if config.WithClair() {
clairDB, err := config.ClairDB()
if err != nil {
log.Fatalf("failed to load clair database information: %v", err)
}
if err := dao.InitClairDB(clairDB); err != nil {
log.Fatalf("failed to initialize clair database: %v", err)
}
log.Info("Registering Clair scanner")
wantedScanners = append(wantedScanners, scanner.Registration{
Name: "Clair",
Description: "The Clair scanner adapter",
URL: config.ClairAdapterEndpoint(),
UseInternalAddr: true,
Immutable: true,
})
} else {
log.Info("Removing Clair scanner")
uninstallURLs = append(uninstallURLs, config.ClairAdapterEndpoint())
}
if err := scan.EnsureScanners(wantedScanners); err != nil {
log.Fatalf("failed to register scanners: %v", err)
}
if defaultScannerURL := getDefaultScannerURL(); defaultScannerURL != "" {
log.Infof("Setting %s as default scanner", defaultScannerURL)
if err := scan.EnsureDefaultScanner(defaultScannerURL); err != nil {
log.Fatalf("failed to set default scanner: %v", err)
}
}
if err := scan.RemoveImmutableScanners(uninstallURLs); err != nil {
log.Warningf("failed to remove scanners: %v", err)
}
}
func getDefaultScannerURL() string {
if config.WithTrivy() {
return config.TrivyAdapterURL()
}
if config.WithClair() {
return config.ClairAdapterEndpoint()
}
return ""
}
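// Illustrative note (not part of the original file): when both Trivy and Clair
// are enabled, getDefaultScannerURL prefers Trivy, so EnsureDefaultScanner
// receives the Trivy adapter URL; a scanner that is disabled in the config ends
// up on uninstallURLs and is removed via RemoveImmutableScanners.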
| [
"\"_REDIS_URL\"",
"\"SYNC_REGISTRY\"",
"\"SYNC_QUOTA\""
]
| []
| [
"SYNC_QUOTA",
"SYNC_REGISTRY",
"_REDIS_URL"
]
| [] | ["SYNC_QUOTA", "SYNC_REGISTRY", "_REDIS_URL"] | go | 3 | 0 | |
microservices/transportation/handlers/handlers.go | package handlers
import (
"context"
"encoding/csv"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"github.com/Risath18/xplored-transportation/models"
"github.com/gofiber/fiber/v2"
_ "github.com/lib/pq"
"googlemaps.github.io/maps"
)
//Import All Codes
var Codes map[string]models.AirportCodes
// helper function to initalize airport codes
func InitAirportCodes() {
Codes = ImportAirportCodes()
}
/*
* Helper Method to Import codes from CSV
*/
func ImportAirportCodes() map[string]models.AirportCodes {
// err := godotenv.Load("../../.env")
// if err != nil {
// log.Fatal("Error loading .env file")
// }
f, err := os.Open(os.Getenv("TRANSPORTATION_AIRPORT_CODE_CSV"))
if err != nil {
log.Fatal(err)
}
// remember to close the file at the end of the program
defer f.Close()
// read csv values using csv.Reader
csvReader := csv.NewReader(f)
data, err := csvReader.ReadAll()
if err != nil {
log.Fatal(err)
}
// convert records to array of structs
codesMap := make(map[string]models.AirportCodes)
for i, line := range data {
if i > 0 { // omit header line
var rec models.AirportCodes
var key string
for j, field := range line {
if j == 0 {
key = field
rec.Name = field
} else if j == 1 {
rec.Municipality = field
} else if j == 2 {
rec.Code = field
} else if j == 3 {
rec.Coordinates = field
}
codesMap[key] = rec
}
}
}
return codesMap
}
/*
* Dynamic Query to get Airport Codes based on various data
*/
func GetAirportCode(c *fiber.Ctx) error {
request := new(models.CodeRequest)
err := c.QueryParser(request)
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Body Error", "cause": "Couldn't process body of request"})
}
if request.Name == "" {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Missing Paramater"})
}
//Look for Names
tempMap := make(map[string]models.AirportCodes)
for k := range Codes {
if strings.HasPrefix(strings.ToLower(k), strings.ToLower(request.Name)) {
tempMap[k] = Codes[k]
}
}
//Look for City
for k := range Codes {
if strings.HasPrefix(strings.ToLower(Codes[k].Municipality), strings.ToLower(request.Name)) {
tempMap[k] = Codes[k]
}
}
//Look for Code
for k := range Codes {
if strings.HasPrefix(strings.ToLower(Codes[k].Code), strings.ToLower(request.Name)) {
tempMap[k] = Codes[k]
}
}
// If fewer than 5 prefix matches were found, also look for substring matches.
if len(tempMap) < 5 {
for k := range Codes {
if strings.Contains(strings.ToLower(k), strings.ToLower(request.Name)) {
if _, ok := tempMap[k]; !ok {
tempMap[k] = Codes[k]
}
}
}
}
if len(tempMap) == 0 {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Invalid data"})
}
return c.Status(200).JSON(fiber.Map{"status": "Success", "data": tempMap})
}
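// Illustrative behaviour (not part of the original file): assuming the query
// key for CodeRequest.Name is "name", a request with name=los first collects
// entries whose airport name, municipality or code starts with "los"
// (case-insensitively); if fewer than five prefix matches are found, substring
// matches are added before the combined map is returned.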
/*
* Short Distance travel with Google API
*/
func GetTransShort(c *fiber.Ctx) error {
request := new(models.ShortRequest)
err := c.QueryParser(request)
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Body Error", "cause": "Couldn't process body of request"})
}
if request.Origin == "" || request.Destination == "" {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Missing Paramater"})
}
client, err := maps.NewClient(maps.WithAPIKey(os.Getenv("TRANSPORTATION_GOOGLE_API_KEY")))
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Failed to connect with API"})
}
r := &maps.DirectionsRequest{
Origin: request.Origin,
Destination: request.Destination,
DepartureTime: request.DepartureTime,
ArrivalTime: request.ArrivalTime,
Mode: request.Mode,
}
result, waypoints, err := client.Directions(context.Background(), r)
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": err})
}
return c.Status(200).JSON(fiber.Map{"status": err, "data": result, "waypoint": waypoints})
}
/*
* Flight information with Flights API
*/
func GetTransLong(c *fiber.Ctx) error {
request := new(models.LongRequest)
err := c.QueryParser(request)
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Body Error", "cause": "Couldn't process body of request"})
}
if request.Origin == "" || request.Destination == "" || request.DepartureDate == "" || request.Adults == "" {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Missing Paramater"})
}
url := os.Getenv("TRANSPORTATION_FLIGHT_API_URI") + "?"
if request.Origin != "" {
url = url + "&origin=" + request.Origin
}
if request.Destination != "" {
url = url + "&destination=" + request.Destination
}
if request.DepartureDate != "" {
url = url + "&departureDate=" + request.DepartureDate
}
if request.Adults != "" {
url = url + "&adults=" + request.Adults
}
if request.Currency != "" {
url = url + "&currency=" + request.Currency
}
if request.ReturnDate != "" {
url = url + "&returnDate=" + request.ReturnDate
}
req, _ := http.NewRequest("GET", url, nil)
req.Header.Add("X-RapidAPI-Host", os.Getenv("TRANSPORTATION_FLIGHT_API_HOST"))
req.Header.Add("X-RapidAPI-Key", os.Getenv("TRANSPORTATION_FLIGHT_API_KEY"))
res, err := http.DefaultClient.Do(req)
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Body Error", "cause": "Couldn't make request to api"})
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
//JSONIFY
var jsonMap map[string]interface{}
json.Unmarshal([]byte(string(body)), &jsonMap)
return c.Status(200).JSON(fiber.Map{"status": err, "data": jsonMap})
}
/*
* All short-distance travel modes with Google API
*/
func GetAllModes(c *fiber.Ctx) error {
request := new(models.AllShortRequest)
err := c.QueryParser(request)
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Body Error", "cause": "Couldn't process body of request"})
}
if request.Origin == "" || request.Destination == "" {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Missing Paramater"})
}
client, err := maps.NewClient(maps.WithAPIKey(os.Getenv("TRANSPORTATION_GOOGLE_API_KEY")))
if err != nil {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": "Failed to connect with API"})
}
var modes [4]maps.Mode
modes[0] = "driving"
modes[1] = "bicycling"
modes[2] = "transit"
modes[3] = "walking"
var results [4][]maps.Route
var waypoints [4][]maps.GeocodedWaypoint
//Driving
r := &maps.DirectionsRequest{
Origin: request.Origin,
Destination: request.Destination,
DepartureTime: request.DepartureTime,
ArrivalTime: request.ArrivalTime,
Mode: modes[0],
}
tempResultOne, tempWaypointOne, errOne := client.Directions(context.Background(), r)
if errOne == nil {
results[0] = tempResultOne
waypoints[0] = tempWaypointOne
}
//Bicycling
r = &maps.DirectionsRequest{
Origin: request.Origin,
Destination: request.Destination,
DepartureTime: request.DepartureTime,
ArrivalTime: request.ArrivalTime,
Mode: modes[1],
}
tempResultTwo, tempWaypointTwo, errTwo := client.Directions(context.Background(), r)
if errTwo == nil {
results[1] = tempResultTwo
waypoints[1] = tempWaypointTwo
}
//transit
r = &maps.DirectionsRequest{
Origin: request.Origin,
Destination: request.Destination,
DepartureTime: request.DepartureTime,
ArrivalTime: request.ArrivalTime,
Mode: modes[2],
}
tempResultThree, tempWaypointThree, errThree := client.Directions(context.Background(), r)
if errThree == nil {
results[2] = tempResultThree
waypoints[2] = tempWaypointThree
}
//walking
r = &maps.DirectionsRequest{
Origin: request.Origin,
Destination: request.Destination,
DepartureTime: request.DepartureTime,
ArrivalTime: request.ArrivalTime,
Mode: modes[3],
}
tempResultFour, tempWaypointFour, errFour := client.Directions(context.Background(), r)
if errFour == nil {
results[3] = tempResultFour
waypoints[3] = tempWaypointFour
}
// Note: results is a fixed-size [4] array, so len(results) is always 4 and this guard never triggers.
if len(results) == 0 {
return c.Status(400).JSON(fiber.Map{"status": "fail", "type": err})
}
return c.Status(200).JSON(fiber.Map{"status": err, "data": results, "waypoint": waypoints})
}
| [
"\"TRANSPORTATION_AIRPORT_CODE_CSV\"",
"\"TRANSPORTATION_GOOGLE_API_KEY\"",
"\"TRANSPORTATION_FLIGHT_API_URI\"",
"\"TRANSPORTATION_FLIGHT_API_HOST\"",
"\"TRANSPORTATION_FLIGHT_API_KEY\"",
"\"TRANSPORTATION_GOOGLE_API_KEY\""
]
| []
| [
"TRANSPORTATION_FLIGHT_API_URI",
"TRANSPORTATION_GOOGLE_API_KEY",
"TRANSPORTATION_AIRPORT_CODE_CSV",
"TRANSPORTATION_FLIGHT_API_KEY",
"TRANSPORTATION_FLIGHT_API_HOST"
]
| [] | ["TRANSPORTATION_FLIGHT_API_URI", "TRANSPORTATION_GOOGLE_API_KEY", "TRANSPORTATION_AIRPORT_CODE_CSV", "TRANSPORTATION_FLIGHT_API_KEY", "TRANSPORTATION_FLIGHT_API_HOST"] | go | 5 | 0 | |
cmd/server/shared/shared.go | // Package shared provides the entrypoint to Sourcegraph's single docker
// image. It has functionality to setup the shared environment variables, as
// well as create the Procfile for goreman to run.
package shared
import (
"encoding/json"
"flag"
"log"
"os"
"path/filepath"
"strings"
"github.com/joho/godotenv"
"github.com/sourcegraph/sourcegraph/cmd/server/internal/goreman"
)
// FrontendInternalHost is the value of SRC_FRONTEND_INTERNAL.
const FrontendInternalHost = "127.0.0.1:3090"
// defaultEnv is environment variables that will be set if not already set.
var defaultEnv = map[string]string{
// Sourcegraph services running in this container
"SRC_GIT_SERVERS": "127.0.0.1:3178",
"SEARCHER_URL": "http://127.0.0.1:3181",
"REPO_UPDATER_URL": "http://127.0.0.1:3182",
"QUERY_RUNNER_URL": "http://127.0.0.1:3183",
"SRC_SYNTECT_SERVER": "http://127.0.0.1:9238",
"SYMBOLS_URL": "http://127.0.0.1:3184",
"REPLACER_URL": "http://127.0.0.1:3185",
"LSIF_SERVER_URL": "http://127.0.0.1:3186",
"SRC_HTTP_ADDR": ":8080",
"SRC_HTTPS_ADDR": ":8443",
"SRC_FRONTEND_INTERNAL": FrontendInternalHost,
"GITHUB_BASE_URL": "http://127.0.0.1:3180", // points to github-proxy
"GRAFANA_SERVER_URL": "http://127.0.0.1:3370",
// Limit our cache size to 100GB, same as prod. We should probably update
// searcher/symbols to ensure this value isn't larger than the volume for
// CACHE_DIR.
"SEARCHER_CACHE_SIZE_MB": "50000",
"REPLACER_CACHE_SIZE_MB": "50000",
"SYMBOLS_CACHE_SIZE_MB": "50000",
// Used to differentiate between deployments on dev, Docker, and Kubernetes.
"DEPLOY_TYPE": "docker-container",
// enables the debug proxy (/-/debug)
"SRC_PROF_HTTP": "",
"LOGO": "t",
"SRC_LOG_LEVEL": "warn",
// TODO other bits
// * DEBUG LOG_REQUESTS https://github.com/sourcegraph/sourcegraph/issues/8458
}
// Set verbosity based on simple interpretation of env var to avoid external dependencies (such as
// on github.com/sourcegraph/sourcegraph/internal/env).
var verbose = os.Getenv("SRC_LOG_LEVEL") == "dbug" || os.Getenv("SRC_LOG_LEVEL") == "info"
// Main is the main server command function which is shared between Sourcegraph
// server's open-source and enterprise variant.
func Main() {
flag.Parse()
log.SetFlags(0)
// Ensure CONFIG_DIR and DATA_DIR
// Load $CONFIG_DIR/env before we set any defaults
{
configDir := SetDefaultEnv("CONFIG_DIR", "/etc/sourcegraph")
err := os.MkdirAll(configDir, 0755)
if err != nil {
log.Fatalf("failed to ensure CONFIG_DIR exists: %s", err)
}
err = godotenv.Load(filepath.Join(configDir, "env"))
if err != nil && !os.IsNotExist(err) {
log.Fatalf("failed to load %s: %s", filepath.Join(configDir, "env"), err)
}
}
// Next persistence
{
SetDefaultEnv("SRC_REPOS_DIR", filepath.Join(DataDir, "repos"))
SetDefaultEnv("LSIF_STORAGE_ROOT", filepath.Join(DataDir, "lsif-storage"))
SetDefaultEnv("CACHE_DIR", filepath.Join(DataDir, "cache"))
}
// Special case some convenience environment variables
if redis, ok := os.LookupEnv("REDIS"); ok {
SetDefaultEnv("REDIS_ENDPOINT", redis)
}
data, err := json.MarshalIndent(SrcProfServices, "", " ")
if err != nil {
log.Println("Failed to marshal default SRC_PROF_SERVICES")
} else {
SetDefaultEnv("SRC_PROF_SERVICES", string(data))
}
for k, v := range defaultEnv {
SetDefaultEnv(k, v)
}
// Now we put things in the right place on the FS
if err := copySSH(); err != nil {
// TODO There are likely several cases where we don't need SSH
// working, we shouldn't prevent setup in those cases. The main one
// that comes to mind is an ORIGIN_MAP which creates https clone URLs.
log.Println("Failed to setup SSH authorization:", err)
log.Fatal("SSH authorization required for cloning from your codehost. Please see README.")
}
if err := copyNetrc(); err != nil {
log.Fatal("Failed to copy netrc:", err)
}
// TODO validate known_hosts contains all code hosts in config.
nginx, err := nginxProcFile()
if err != nil {
log.Fatal("Failed to setup nginx:", err)
}
procfile := []string{
nginx,
`frontend: env CONFIGURATION_MODE=server frontend`,
`gitserver: gitserver`,
`query-runner: query-runner`,
`symbols: symbols`,
`lsif-server: node /lsif/out/server.js`,
`lsif-worker: node /lsif/out/worker.js`,
`management-console: management-console`,
`searcher: searcher`,
`github-proxy: github-proxy`,
`repo-updater: repo-updater`,
`syntect_server: sh -c 'env QUIET=true ROCKET_ENV=production ROCKET_PORT=9238 ROCKET_LIMITS='"'"'{json=10485760}'"'"' ROCKET_SECRET_KEY='"'"'SeerutKeyIsI7releuantAndknvsuZPluaseIgnorYA='"'"' ROCKET_KEEP_ALIVE=0 ROCKET_ADDRESS='"'"'"127.0.0.1"'"'"' syntect_server | grep -v "Rocket has launched" | grep -v "Warning: environment is"' | grep -v 'Configured for production'`,
`prometheus: prometheus --config.file=/sg_config_prometheus/prometheus.yml --storage.tsdb.path=/var/opt/sourcegraph/prometheus --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles >> /var/opt/sourcegraph/prometheus.log 2>&1`,
`grafana: /usr/share/grafana/bin/grafana-server -config /sg_config_grafana/grafana-single-container.ini -homepath /usr/share/grafana >> /var/opt/sourcegraph/grafana.log 2>&1`,
}
procfile = append(procfile, ProcfileAdditions...)
redisStoreLine, err := maybeRedisStoreProcFile()
if err != nil {
log.Fatal(err)
}
if redisStoreLine != "" {
procfile = append(procfile, redisStoreLine)
}
redisCacheLine, err := maybeRedisCacheProcFile()
if err != nil {
log.Fatal(err)
}
if redisCacheLine != "" {
procfile = append(procfile, redisCacheLine)
}
if line, err := maybePostgresProcFile(); err != nil {
log.Fatal(err)
} else if line != "" {
procfile = append(procfile, line)
}
procfile = append(procfile, maybeZoektProcFile()...)
const goremanAddr = "127.0.0.1:5005"
if err := os.Setenv("GOREMAN_RPC_ADDR", goremanAddr); err != nil {
log.Fatal(err)
}
err = goreman.Start(goremanAddr, []byte(strings.Join(procfile, "\n")))
if err != nil {
log.Fatal(err)
}
}
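// SetDefaultEnv is defined elsewhere in this package; a minimal sketch of the
// behaviour Main relies on (set the variable only if it is not already present
// and return the effective value) could look like this:
//
//	func SetDefaultEnv(k, v string) string {
//		if s, ok := os.LookupEnv(k); ok {
//			return s
//		}
//		if err := os.Setenv(k, v); err != nil {
//			log.Fatal(err)
//		}
//		return v
//	}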
| [
"\"SRC_LOG_LEVEL\"",
"\"SRC_LOG_LEVEL\""
]
| []
| [
"SRC_LOG_LEVEL"
]
| [] | ["SRC_LOG_LEVEL"] | go | 1 | 0 | |
cli/pcluster/commands.py | # Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# FIXME
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
from __future__ import absolute_import, print_function
import datetime
import json
import logging
import os
import random
import shlex
import string
import subprocess as sub
import sys
import tarfile
import time
from builtins import str
from shutil import rmtree
from tempfile import mkdtemp, mkstemp
import boto3
import pkg_resources
from botocore.exceptions import ClientError
from tabulate import tabulate
import pcluster.utils as utils
from pcluster.config.pcluster_config import PclusterConfig
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve # pylint: disable=no-name-in-module
LOGGER = logging.getLogger(__name__)
def _create_bucket_with_batch_resources(stack_name, resources_dir, region):
random_string = "".join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16))
# bucket names must be at least 3 and no more than 63 characters long
s3_bucket_name = "-".join([stack_name.lower()[: 63 - len(random_string) - 1], random_string])
try:
utils.create_s3_bucket(bucket_name=s3_bucket_name, region=region)
utils.upload_resources_artifacts(bucket_name=s3_bucket_name, root=resources_dir)
except boto3.client("s3").exceptions.BucketAlreadyExists:
LOGGER.error("Bucket %s already exists. Please retry cluster creation.", s3_bucket_name)
raise
except Exception:
utils.delete_s3_bucket(bucket_name=s3_bucket_name)
raise
return s3_bucket_name
def version():
return utils.get_installed_version()
def _check_for_updates(pcluster_config):
"""Check for updates."""
update_check = pcluster_config.get_section("global").get_param_value("update_check")
if update_check:
utils.check_if_latest_version()
def create(args): # noqa: C901 FIXME!!!
LOGGER.info("Beginning cluster creation for cluster: %s", args.cluster_name)
LOGGER.debug("Building cluster config based on args %s", str(args))
# Build the config based on args
pcluster_config = PclusterConfig(
config_file=args.config_file, cluster_label=args.cluster_template, fail_on_file_absence=True
)
pcluster_config.validate()
# get CFN parameters, template url and tags from config
cluster_section = pcluster_config.get_section("cluster")
cfn_params = pcluster_config.to_cfn()
_check_for_updates(pcluster_config)
batch_temporary_bucket = None
try:
cfn_client = boto3.client("cloudformation")
stack_name = utils.get_stack_name(args.cluster_name)
# If scheduler is awsbatch create bucket with resources
if cluster_section.get_param_value("scheduler") == "awsbatch":
batch_resources = pkg_resources.resource_filename(__name__, "resources/batch")
batch_temporary_bucket = _create_bucket_with_batch_resources(
stack_name=stack_name, resources_dir=batch_resources, region=pcluster_config.region
)
cfn_params["ResourcesS3Bucket"] = batch_temporary_bucket
LOGGER.info("Creating stack named: %s", stack_name)
LOGGER.debug(cfn_params)
# determine the CloudFormation Template URL to use
template_url = _evaluate_pcluster_template_url(pcluster_config, preferred_template_url=args.template_url)
# merge tags from configuration, command-line and internal ones
tags = _evaluate_tags(pcluster_config, preferred_tags=args.tags)
# append extra parameters from command-line
if args.extra_parameters:
LOGGER.debug("Adding extra parameters to the CFN parameters")
cfn_params.update(dict(args.extra_parameters))
# prepare input parameters for stack creation and create the stack
LOGGER.debug(cfn_params)
params = [{"ParameterKey": key, "ParameterValue": value} for key, value in cfn_params.items()]
stack = cfn_client.create_stack(
StackName=stack_name,
TemplateURL=template_url,
Parameters=params,
Capabilities=["CAPABILITY_IAM"],
DisableRollback=args.norollback,
Tags=tags,
)
LOGGER.debug("StackId: %s", stack.get("StackId"))
if not args.nowait:
utils.verify_stack_creation(stack_name, cfn_client)
LOGGER.info("")
result_stack = utils.get_stack(stack_name, cfn_client)
_print_stack_outputs(result_stack)
else:
stack_status = utils.get_stack(stack_name, cfn_client).get("StackStatus")
LOGGER.info("Status: %s", stack_status)
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.stdout.flush()
if batch_temporary_bucket:
utils.delete_s3_bucket(bucket_name=batch_temporary_bucket)
sys.exit(1)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
except KeyError as e:
LOGGER.critical("ERROR: KeyError - reason:")
LOGGER.critical(e)
if batch_temporary_bucket:
utils.delete_s3_bucket(bucket_name=batch_temporary_bucket)
sys.exit(1)
except Exception as e:
LOGGER.critical(e)
if batch_temporary_bucket:
utils.delete_s3_bucket(bucket_name=batch_temporary_bucket)
sys.exit(1)
def _evaluate_pcluster_template_url(pcluster_config, preferred_template_url=None):
"""
Determine the CloudFormation Template URL to use.
Order is 1) preferred_template_url 2) Config file 3) default for version + region.
:param pcluster_config: PclusterConfig, it can contain the template_url
:param preferred_template_url: preferred template url to use, if not None
:return: the evaluated template url
"""
configured_template_url = pcluster_config.get_section("cluster").get_param_value("template_url")
return preferred_template_url or configured_template_url or _get_default_template_url(pcluster_config.region)
def _evaluate_tags(pcluster_config, preferred_tags=None):
"""
Merge given tags to the ones defined in the configuration file and convert them into the Key/Value format.
:param pcluster_config: PclusterConfig, it can contain tags
:param preferred_tags: tags that must take the precedence before the configured ones
:return: a merge of the tags + version tag
"""
tags = {}
configured_tags = pcluster_config.get_section("cluster").get_param_value("tags")
if configured_tags:
tags.update(configured_tags)
if preferred_tags:
# add tags from command line parameter, by overriding configured ones
tags.update(preferred_tags)
# add pcluster version
tags["Version"] = utils.get_installed_version()
# convert to CFN tags
return [{"Key": tag, "Value": tags[tag]} for tag in tags]
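# Illustrative example (not part of the original module): with configured tags
# {"team": "hpc"} and command-line tags {"team": "dev", "owner": "alice"},
# _evaluate_tags returns [{"Key": "team", "Value": "dev"},
# {"Key": "owner", "Value": "alice"}, {"Key": "Version", "Value": <installed version>}],
# since command-line tags override configured ones and the version tag is always appended.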
def _print_stack_outputs(stack):
"""
Print a limited set of the CloudFormation Stack outputs.
:param stack: the stack dictionary
"""
whitelisted_outputs = [
"ClusterUser",
"MasterPrivateIP",
"MasterPublicIP",
"BatchComputeEnvironmentArn",
"BatchJobQueueArn",
"BatchJobDefinitionArn",
"BatchJobDefinitionMnpArn",
"BatchUserRole",
]
if _is_ganglia_enabled(stack.get("Parameters")):
whitelisted_outputs.extend(["GangliaPrivateURL", "GangliaPublicURL"])
for output in stack.get("Outputs", []):
output_key = output.get("OutputKey")
if output_key in whitelisted_outputs:
LOGGER.info("%s: %s", output_key, output.get("OutputValue"))
def _is_ganglia_enabled(parameters):
is_ganglia_enabled = False
try:
cfn_extra_json = utils.get_cfn_param(parameters, "ExtraJson")
is_ganglia_enabled = json.loads(cfn_extra_json).get("cfncluster").get("ganglia_enabled") == "yes"
except Exception:
pass
return is_ganglia_enabled
def update(args): # noqa: C901 FIXME!!!
LOGGER.info("Updating: %s", args.cluster_name)
stack_name = utils.get_stack_name(args.cluster_name)
pcluster_config = PclusterConfig(
config_file=args.config_file, cluster_label=args.cluster_template, fail_on_file_absence=True
)
pcluster_config.validate()
cfn_params = pcluster_config.to_cfn()
cluster_section = pcluster_config.get_section("cluster")
cfn = boto3.client("cloudformation")
if cluster_section.get_param_value("scheduler") != "awsbatch":
if not args.reset_desired:
asg_name = _get_asg_name(stack_name)
desired_capacity = (
boto3.client("autoscaling")
.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
.get("AutoScalingGroups")[0]
.get("DesiredCapacity")
)
cfn_params["DesiredSize"] = str(desired_capacity)
else:
if args.reset_desired:
LOGGER.info("reset_desired flag does not work with awsbatch scheduler")
params = utils.get_stack(stack_name, cfn).get("Parameters")
for parameter in params:
if parameter.get("ParameterKey") == "ResourcesS3Bucket":
cfn_params["ResourcesS3Bucket"] = parameter.get("ParameterValue")
try:
LOGGER.debug(cfn_params)
if args.extra_parameters:
LOGGER.debug("Adding extra parameters to the CFN parameters")
cfn_params.update(dict(args.extra_parameters))
cfn_params = [{"ParameterKey": key, "ParameterValue": value} for key, value in cfn_params.items()]
LOGGER.info("Calling update_stack")
cfn.update_stack(
StackName=stack_name, UsePreviousTemplate=True, Parameters=cfn_params, Capabilities=["CAPABILITY_IAM"]
)
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
if not args.nowait:
while stack_status == "UPDATE_IN_PROGRESS":
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")[0]
resource_status = (
"Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus"))
).ljust(80)
sys.stdout.write("\r%s" % resource_status)
sys.stdout.flush()
time.sleep(5)
else:
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
LOGGER.info("Status: %s", stack_status)
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.exit(1)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
def start(args):
"""Restore ASG limits or awsbatch CE to min/max/desired."""
stack_name = utils.get_stack_name(args.cluster_name)
pcluster_config = PclusterConfig(config_file=args.config_file, cluster_name=args.cluster_name)
cluster_section = pcluster_config.get_section("cluster")
if cluster_section.get_param_value("scheduler") == "awsbatch":
LOGGER.info("Enabling AWS Batch compute environment : %s", args.cluster_name)
max_vcpus = cluster_section.get_param_value("max_vcpus")
desired_vcpus = cluster_section.get_param_value("desired_vcpus")
min_vcpus = cluster_section.get_param_value("min_vcpus")
ce_name = _get_batch_ce(stack_name)
_start_batch_ce(ce_name=ce_name, min_vcpus=min_vcpus, desired_vcpus=desired_vcpus, max_vcpus=max_vcpus)
else:
LOGGER.info("Starting compute fleet : %s", args.cluster_name)
max_queue_size = cluster_section.get_param_value("max_queue_size")
min_desired_size = (
cluster_section.get_param_value("initial_queue_size")
if cluster_section.get_param_value("maintain_initial_size")
else 0
)
asg_name = _get_asg_name(stack_name)
_set_asg_limits(asg_name=asg_name, min=min_desired_size, max=max_queue_size, desired=min_desired_size)
def stop(args):
"""Set ASG limits or awsbatch ce to min/max/desired = 0/0/0."""
stack_name = utils.get_stack_name(args.cluster_name)
pcluster_config = PclusterConfig(config_file=args.config_file, cluster_name=args.cluster_name)
cluster_section = pcluster_config.get_section("cluster")
if cluster_section.get_param_value("scheduler") == "awsbatch":
LOGGER.info("Disabling AWS Batch compute environment : %s", args.cluster_name)
ce_name = _get_batch_ce(stack_name)
_stop_batch_ce(ce_name=ce_name)
else:
LOGGER.info("Stopping compute fleet : %s", args.cluster_name)
asg_name = _get_asg_name(stack_name)
_set_asg_limits(asg_name=asg_name, min=0, max=0, desired=0)
def _get_batch_ce(stack_name):
"""
Get name of the AWS Batch Compute Environment.
:param stack_name: name of the master stack
:return: ce_name or exit if not found
"""
cfn = boto3.client("cloudformation")
try:
outputs = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("Outputs")
return utils.get_stack_output_value(outputs, "BatchComputeEnvironmentArn")
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.exit(1)
def _get_pcluster_version_from_stack(stack):
"""
Get the version of the stack if tagged.
:param stack: stack object
:return: version or empty string
"""
return next((tag.get("Value") for tag in stack.get("Tags") if tag.get("Key") == "Version"), "")
def _colorize(stack_status, args):
"""
Color the output, COMPLETE = green, FAILED = red, IN_PROGRESS = yellow.
:param stack_status: stack status
:param args: args
:return: colorized status string
"""
if not args.color:
return stack_status
end = "0m"
status_to_color = {"COMPLETE": "0;32m", "FAILED": "0;31m", "IN_PROGRESS": "10;33m"}
    for status_label in status_to_color:
        if status_label in stack_status:
            return "\033[%s%s\033[%s" % (status_to_color[status_label], stack_status, end)
    return stack_status
def list_stacks(args):
# Parse configuration file to read the AWS section
PclusterConfig.init_aws(config_file=args.config_file)
try:
result = []
for stack in utils.paginate_boto3(boto3.client("cloudformation").describe_stacks):
if stack.get("ParentId") is None and stack.get("StackName").startswith(utils.PCLUSTER_STACK_PREFIX):
pcluster_version = _get_pcluster_version_from_stack(stack)
result.append(
[
stack.get("StackName")[len(utils.PCLUSTER_STACK_PREFIX) :], # noqa: E203
_colorize(stack.get("StackStatus"), args),
pcluster_version,
]
)
LOGGER.info(tabulate(result, tablefmt="plain"))
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.exit(1)
except KeyboardInterrupt:
LOGGER.info("Exiting...")
sys.exit(0)
def _poll_master_server_state(stack_name):
ec2 = boto3.client("ec2")
try:
master_id = utils.get_master_server_id(stack_name)
instance = ec2.describe_instance_status(InstanceIds=[master_id]).get("InstanceStatuses")[0]
state = instance.get("InstanceState").get("Name")
sys.stdout.write("\rMasterServer: %s" % state.upper())
sys.stdout.flush()
while state not in ["running", "stopped", "terminated", "shutting-down"]:
time.sleep(5)
state = (
ec2.describe_instance_status(InstanceIds=[master_id])
.get("InstanceStatuses")[0]
.get("InstanceState")
.get("Name")
)
master_status = "\r\033[KMasterServer: %s" % state.upper()
sys.stdout.write(master_status)
sys.stdout.flush()
if state in ["terminated", "shutting-down"]:
LOGGER.info("State: %s is irrecoverable. Cluster needs to be re-created.", state)
sys.exit(1)
master_status = "\rMasterServer: %s\n" % state.upper()
sys.stdout.write(master_status)
sys.stdout.flush()
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.stdout.flush()
sys.exit(1)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
return state
def _get_ec2_instances(stack):
try:
resources = boto3.client("cloudformation").describe_stack_resources(StackName=stack).get("StackResources")
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.stdout.flush()
sys.exit(1)
temp_instances = [r for r in resources if r.get("ResourceType") == "AWS::EC2::Instance"]
stack_instances = []
for instance in temp_instances:
stack_instances.append([instance.get("LogicalResourceId"), instance.get("PhysicalResourceId")])
return stack_instances
def _get_asg_name(stack_name):
try:
resources = boto3.client("cloudformation").describe_stack_resources(StackName=stack_name).get("StackResources")
return [r for r in resources if r.get("LogicalResourceId") == "ComputeFleet"][0].get("PhysicalResourceId")
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.stdout.flush()
sys.exit(1)
except IndexError:
LOGGER.critical("Stack %s does not have a ComputeFleet", stack_name)
sys.exit(1)
def _set_asg_limits(asg_name, min, max, desired):
asg = boto3.client("autoscaling")
asg.update_auto_scaling_group(
AutoScalingGroupName=asg_name, MinSize=int(min), MaxSize=int(max), DesiredCapacity=int(desired)
)
def _get_asg_instances(stack):
asg = boto3.client("autoscaling")
asg_name = _get_asg_name(stack)
asg = asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]).get("AutoScalingGroups")[0]
name = [tag.get("Value") for tag in asg.get("Tags") if tag.get("Key") == "aws:cloudformation:logical-id"][0]
temp_instances = []
for instance in asg.get("Instances"):
temp_instances.append([name, instance.get("InstanceId")])
return temp_instances
def _start_batch_ce(ce_name, min_vcpus, desired_vcpus, max_vcpus):
try:
boto3.client("batch").update_compute_environment(
computeEnvironment=ce_name,
state="ENABLED",
computeResources={
"minvCpus": int(min_vcpus),
"maxvCpus": int(max_vcpus),
"desiredvCpus": int(desired_vcpus),
},
)
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.exit(1)
def _stop_batch_ce(ce_name):
boto3.client("batch").update_compute_environment(computeEnvironment=ce_name, state="DISABLED")
def instances(args):
stack_name = utils.get_stack_name(args.cluster_name)
pcluster_config = PclusterConfig(config_file=args.config_file, cluster_name=args.cluster_name)
cluster_section = pcluster_config.get_section("cluster")
instances = []
instances.extend(_get_ec2_instances(stack_name))
if cluster_section.get_param_value("scheduler") != "awsbatch":
instances.extend(_get_asg_instances(stack_name))
for instance in instances:
LOGGER.info("%s %s", instance[0], instance[1])
if cluster_section.get_param_value("scheduler") == "awsbatch":
LOGGER.info("Run 'awsbhosts --cluster %s' to list the compute instances", args.cluster_name)
def ssh(args, extra_args): # noqa: C901 FIXME!!!
"""
    Execute an SSH command on the master instance, using the [aliases] section of the config if present.
:param args: pcluster CLI args
:param extra_args: pcluster CLI extra_args
"""
    pcluster_config = PclusterConfig(fail_on_error=False)  # FIXME it always searches for the default config file
if args.command in pcluster_config.get_section("aliases").params:
ssh_command = pcluster_config.get_section("aliases").get_param_value(args.command)
else:
ssh_command = "ssh {CFN_USER}@{MASTER_IP} {ARGS}"
try:
master_ip, username = utils.get_master_ip_and_username(args.cluster_name)
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
# build command
cmd = ssh_command.format(
CFN_USER=username, MASTER_IP=master_ip, ARGS=" ".join(cmd_quote(str(arg)) for arg in extra_args)
)
# run command
log_message = "SSH command: {0}".format(cmd)
if not args.dryrun:
LOGGER.debug(log_message)
os.system(cmd)
else:
LOGGER.info(log_message)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
def status(args): # noqa: C901 FIXME!!!
stack_name = utils.get_stack_name(args.cluster_name)
# Parse configuration file to read the AWS section
PclusterConfig.init_aws(config_file=args.config_file)
cfn = boto3.client("cloudformation")
try:
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
sys.stdout.write("\rStatus: %s" % stack_status)
sys.stdout.flush()
if not args.nowait:
while stack_status not in [
"CREATE_COMPLETE",
"UPDATE_COMPLETE",
"UPDATE_ROLLBACK_COMPLETE",
"ROLLBACK_COMPLETE",
"CREATE_FAILED",
"DELETE_FAILED",
]:
time.sleep(5)
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")[0]
resource_status = (
"Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus"))
).ljust(80)
sys.stdout.write("\r%s" % resource_status)
sys.stdout.flush()
sys.stdout.write("\rStatus: %s\n" % stack_status)
sys.stdout.flush()
if stack_status in ["CREATE_COMPLETE", "UPDATE_COMPLETE"]:
state = _poll_master_server_state(stack_name)
if state == "running":
stack = utils.get_stack(stack_name, cfn)
_print_stack_outputs(stack)
elif stack_status in ["ROLLBACK_COMPLETE", "CREATE_FAILED", "DELETE_FAILED", "UPDATE_ROLLBACK_COMPLETE"]:
events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")
for event in events:
if event.get("ResourceStatus") in ["CREATE_FAILED", "DELETE_FAILED", "UPDATE_FAILED"]:
LOGGER.info(
"%s %s %s %s %s",
event.get("Timestamp"),
event.get("ResourceStatus"),
event.get("ResourceType"),
event.get("LogicalResourceId"),
event.get("ResourceStatusReason"),
)
else:
sys.stdout.write("\n")
sys.stdout.flush()
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.stdout.flush()
sys.exit(1)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
def delete(args):
saw_update = False
LOGGER.info("Deleting: %s", args.cluster_name)
stack_name = utils.get_stack_name(args.cluster_name)
# Parse configuration file to read the AWS section
PclusterConfig.init_aws(config_file=args.config_file)
cfn = boto3.client("cloudformation")
try:
# delete_stack does not raise an exception if stack does not exist
# Use describe_stacks to explicitly check if the stack exists
cfn.describe_stacks(StackName=stack_name)
cfn.delete_stack(StackName=stack_name)
saw_update = True
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
sys.stdout.write("\rStatus: %s" % stack_status)
sys.stdout.flush()
LOGGER.debug("Status: %s", stack_status)
if not args.nowait:
while stack_status == "DELETE_IN_PROGRESS":
time.sleep(5)
stack_status = utils.get_stack(stack_name, cfn).get("StackStatus")
events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")[0]
resource_status = (
"Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus"))
).ljust(80)
sys.stdout.write("\r%s" % resource_status)
sys.stdout.flush()
sys.stdout.write("\rStatus: %s\n" % stack_status)
sys.stdout.flush()
LOGGER.debug("Status: %s", stack_status)
else:
sys.stdout.write("\n")
sys.stdout.flush()
if stack_status == "DELETE_FAILED":
LOGGER.info("Cluster did not delete successfully. Run 'pcluster delete %s' again", args.cluster_name)
except ClientError as e:
if e.response.get("Error").get("Message").endswith("does not exist"):
if saw_update:
LOGGER.info("\nCluster deleted successfully.")
sys.exit(0)
LOGGER.critical(e.response.get("Error").get("Message"))
sys.stdout.flush()
sys.exit(1)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
def _get_cookbook_url(region, template_url, args, tmpdir):
if args.custom_ami_cookbook is not None:
return args.custom_ami_cookbook
cookbook_version = _get_cookbook_version(template_url, tmpdir)
s3_suffix = ".cn" if region.startswith("cn") else ""
return "https://s3.%s.amazonaws.com%s/%s-aws-parallelcluster/cookbooks/%s.tgz" % (
region,
s3_suffix,
region,
cookbook_version,
)
def _get_cookbook_version(template_url, tmpdir):
tmp_template_file = os.path.join(tmpdir, "aws-parallelcluster-template.json")
try:
LOGGER.info("Template: %s", template_url)
urlretrieve(url=template_url, filename=tmp_template_file)
with open(tmp_template_file) as cfn_file:
cfn_data = json.load(cfn_file)
return cfn_data.get("Mappings").get("PackagesVersions").get("default").get("cookbook")
except IOError as e:
LOGGER.error("Unable to download template at URL %s", template_url)
LOGGER.critical("Error: %s", str(e))
sys.exit(1)
except (ValueError, AttributeError) as e:
LOGGER.error("Unable to parse template at URL %s", template_url)
LOGGER.critical("Error: %s", str(e))
sys.exit(1)
def _get_cookbook_dir(region, template_url, args, tmpdir):
cookbook_url = ""
try:
tmp_cookbook_archive = os.path.join(tmpdir, "aws-parallelcluster-cookbook.tgz")
cookbook_url = _get_cookbook_url(region, template_url, args, tmpdir)
LOGGER.info("Cookbook: %s", cookbook_url)
urlretrieve(url=cookbook_url, filename=tmp_cookbook_archive)
tar = tarfile.open(tmp_cookbook_archive)
cookbook_archive_root = tar.firstmember.path
tar.extractall(path=tmpdir)
tar.close()
return os.path.join(tmpdir, cookbook_archive_root)
except (IOError, tarfile.ReadError) as e:
LOGGER.error("Unable to download cookbook at URL %s", cookbook_url)
LOGGER.critical("Error: %s", str(e))
sys.exit(1)
def _dispose_packer_instance(results):
time.sleep(2)
try:
ec2_client = boto3.client("ec2")
instance = ec2_client.describe_instance_status(
InstanceIds=[results["PACKER_INSTANCE_ID"]], IncludeAllInstances=True
).get("InstanceStatuses")[0]
instance_state = instance.get("InstanceState").get("Name")
if instance_state in ["running", "pending", "stopping", "stopped"]:
LOGGER.info("Terminating Instance %s created by Packer", results["PACKER_INSTANCE_ID"])
ec2_client.terminate_instances(InstanceIds=[results["PACKER_INSTANCE_ID"]])
except ClientError as e:
LOGGER.critical(e.response.get("Error").get("Message"))
sys.exit(1)
def _run_packer(packer_command, packer_env):
erase_line = "\x1b[2K"
_command = shlex.split(packer_command)
results = {}
_, path_log = mkstemp(prefix="packer.log." + datetime.datetime.now().strftime("%Y%m%d-%H%M%S" + "."), text=True)
LOGGER.info("Packer log: %s", path_log)
try:
dev_null = open(os.devnull, "rb")
packer_env.update(os.environ.copy())
process = sub.Popen(
_command, env=packer_env, stdout=sub.PIPE, stderr=sub.STDOUT, stdin=dev_null, universal_newlines=True
)
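        # Stream packer's output line by line: mirror each line into the log file
        # and scrape well-known markers ("packer build", "Instance ID:", "AMI:",
        # "Prevalidating AMI Name:") into the results dict.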
with open(path_log, "w") as packer_log:
while process.poll() is None:
output_line = process.stdout.readline().strip()
packer_log.write("\n%s" % output_line)
packer_log.flush()
sys.stdout.write(erase_line)
sys.stdout.write("\rPacker status: %s" % output_line[:90] + (output_line[90:] and ".."))
sys.stdout.flush()
if output_line.find("packer build") > 0:
results["PACKER_COMMAND"] = output_line
if output_line.find("Instance ID:") > 0:
results["PACKER_INSTANCE_ID"] = output_line.rsplit(":", 1)[1].strip(" \n\t")
sys.stdout.write(erase_line)
sys.stdout.write("\rPacker Instance ID: %s\n" % results["PACKER_INSTANCE_ID"])
sys.stdout.flush()
if output_line.find("AMI:") > 0:
results["PACKER_CREATED_AMI"] = output_line.rsplit(":", 1)[1].strip(" \n\t")
if output_line.find("Prevalidating AMI Name:") > 0:
results["PACKER_CREATED_AMI_NAME"] = output_line.rsplit(":", 1)[1].strip(" \n\t")
sys.stdout.write("\texit code %s\n" % process.returncode)
sys.stdout.flush()
return results
except sub.CalledProcessError:
sys.stdout.flush()
LOGGER.error("Failed to run %s\n", _command)
sys.exit(1)
except (IOError, OSError):
sys.stdout.flush()
LOGGER.error("Failed to run %s\nCommand not found", packer_command)
sys.exit(1)
except KeyboardInterrupt:
sys.stdout.flush()
LOGGER.info("\nExiting...")
sys.exit(0)
finally:
dev_null.close()
if results.get("PACKER_INSTANCE_ID"):
_dispose_packer_instance(results)
def _print_create_ami_results(results):
if results.get("PACKER_CREATED_AMI"):
LOGGER.info(
"\nCustom AMI %s created with name %s", results["PACKER_CREATED_AMI"], results["PACKER_CREATED_AMI_NAME"]
)
print(
"\nTo use it, add the following variable to the AWS ParallelCluster config file, "
"under the [cluster ...] section"
)
print("custom_ami = %s" % results["PACKER_CREATED_AMI"])
else:
LOGGER.info("\nNo custom AMI created")
def create_ami(args):
LOGGER.info("Building AWS ParallelCluster AMI. This could take a while...")
LOGGER.debug("Building AMI based on args %s", str(args))
results = {}
instance_type = args.instance_type
try:
        # FIXME it doesn't work if there is no default section
pcluster_config = PclusterConfig(config_file=args.config_file, fail_on_file_absence=True)
vpc_section = pcluster_config.get_section("vpc")
vpc_id = args.vpc_id if args.vpc_id else vpc_section.get_param_value("vpc_id")
subnet_id = args.subnet_id if args.subnet_id else vpc_section.get_param_value("master_subnet_id")
packer_env = {
"CUSTOM_AMI_ID": args.base_ami_id,
"AWS_FLAVOR_ID": instance_type,
"AMI_NAME_PREFIX": args.custom_ami_name_prefix,
"AWS_VPC_ID": vpc_id,
"AWS_SUBNET_ID": subnet_id,
"ASSOCIATE_PUBLIC_IP": "true" if args.associate_public_ip else "false",
}
aws_section = pcluster_config.get_section("aws")
aws_region = aws_section.get_param_value("aws_region_name")
if aws_section and aws_section.get_param_value("aws_access_key_id"):
packer_env["AWS_ACCESS_KEY_ID"] = aws_section.get_param_value("aws_access_key_id")
if aws_section and aws_section.get_param_value("aws_secret_access_key"):
packer_env["AWS_SECRET_ACCESS_KEY"] = aws_section.get_param_value("aws_secret_access_key")
LOGGER.info("Base AMI ID: %s", args.base_ami_id)
LOGGER.info("Base AMI OS: %s", args.base_ami_os)
LOGGER.info("Instance Type: %s", instance_type)
LOGGER.info("Region: %s", aws_region)
LOGGER.info("VPC ID: %s", vpc_id)
LOGGER.info("Subnet ID: %s", subnet_id)
template_url = _evaluate_pcluster_template_url(pcluster_config)
tmp_dir = mkdtemp()
cookbook_dir = _get_cookbook_dir(aws_region, template_url, args, tmp_dir)
packer_command = (
cookbook_dir
+ "/amis/build_ami.sh --os "
+ args.base_ami_os
+ " --partition region"
+ " --region "
+ aws_region
+ " --custom"
)
results = _run_packer(packer_command, packer_env)
except KeyboardInterrupt:
LOGGER.info("\nExiting...")
sys.exit(0)
finally:
_print_create_ami_results(results)
if "tmp_dir" in locals() and tmp_dir:
rmtree(tmp_dir)
def _get_default_template_url(region):
return (
"https://s3.{REGION}.amazonaws.com{SUFFIX}/{REGION}-aws-parallelcluster/templates/"
"aws-parallelcluster-{VERSION}.cfn.json".format(
REGION=region, SUFFIX=".cn" if region.startswith("cn") else "", VERSION=utils.get_installed_version()
)
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
9_Dynamic/4.py | # https://www.hackerrank.com/challenges/decibinary-numbers/problem
# 7/8 Test Cases Passed
#!/bin/python3
import math
import os
import random
import re
import sys
import bisect
from collections import defaultdict, Counter
#
# Complete the 'decibinaryNumbers' function below.
#
# The function is expected to return a LONG_INTEGER.
# The function accepts LONG_INTEGER x as parameter.
#
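# Added commentary (not part of the original submission), sketching the approach:
#   - COUNT[n][m] counts the decibinary numbers of value n that use at most m digits.
#   - cum_sums[n] is the running total of COUNT[0..n], i.e. how many decibinary
#     numbers have a value <= n.
#   - min_dig[m] is the largest value representable with m decibinary digits
#     (all 9s), used to find the minimum digit count needed for a value.
# A query x is answered by extending these tables lazily, locating the target
# value n and digit budget m with binary search, then rebuilding the x-th
# decibinary number digit by digit in reconstruct().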
class Decibinary:
def __init__(self):
self.COUNT = [[1, 1]]
self.cum_sums = [1]
self.min_dig = [0]
for p in range(20): self.min_dig.append(9 * 2 ** p + self.min_dig[-1])
def decibinaryNumbers(self, x):
if x > self.cum_sums[-1]:
n = len(self.COUNT)
num=x
while self.cum_sums[-1] < num:
min_digits = bisect.bisect_left(self.min_dig, n)
max_digits = math.floor(math.log(n, 2)) + 1
self.COUNT.append([0] * (max_digits + 1))
for m in range(min_digits, max_digits + 1):
self.COUNT[n][m] = self.COUNT[n][m - 1]
for d in range(1, 10):
remainder = n - d * 2 ** (m - 1)
if remainder >= 0:
self.COUNT[n][m] += self.COUNT[remainder][min(m - 1, len(self.COUNT[remainder]) - 1)]
else:
break
self.cum_sums.append(self.cum_sums[-1] + self.COUNT[-1][-1])
n += 1
if x == 1: return 0
n = bisect.bisect_left(self.cum_sums, x)
n_rem = x - self.cum_sums[n - 1]
m = bisect.bisect_left(self.COUNT[n], n_rem)
m_rem = n_rem - self.COUNT[n][m - 1]
return int(self.reconstruct(n, m, m_rem))
def reconstruct(self, n, m, rem, partial=0):
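        # Added commentary: pick the most significant digit k (a leading zero is
        # only allowed once a higher digit has been placed, hence range(not partial, 10)),
        # count how many numbers each choice covers, and recurse on the remaining
        # value once the running count reaches `rem`.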
if m == 1: return partial + n
skipped = 0
for k in range(not partial, 10):
dig_val = k * 2 ** (m - 1)
smaller = n - dig_val
s_m = min(len(self.COUNT[smaller]) - 1, m - 1)
skipped += self.COUNT[smaller][s_m]
if skipped >= rem:
partial += k * 10 ** (m-1) #math.pow(10,(m - 1))
new_rem = rem - (skipped - self.COUNT[smaller][s_m])
return self.reconstruct(smaller, s_m, new_rem, partial)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
db = Decibinary()
for q_itr in range(q):
x = int(input().strip())
result = db.decibinaryNumbers(x)
fptr.write(str(result) + '\n')
fptr.close() | []
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
src/src/create_tf_record.py | # -*-coding: utf-8 -*-
"""
@Project: create_tfrecord
@File : create_tfrecord.py
@Author : panjq
@E-mail : [email protected]
@Date : 2018-07-27 17:19:54
@desc : 将图片数据保存为单个tfrecord文件
"""
##########################################################################
import tensorflow as tf
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import random
from PIL import Image
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
train_path = './train_new/img'
test_path = './test_new/img'
class_names = set(os.listdir(test_path))
classes = sorted(class_names, key=str.lower)
print(classes)
##########################################################################
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# Generate a bytes (string) Feature
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# Generate a float-list Feature
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def get_example_nums(tf_records_filenames):
'''
    Count the number of examples (images) in a tfrecord file
    :param tf_records_filenames: path of the tfrecord file
:return:
'''
nums= 0
for record in tf.python_io.tf_record_iterator(tf_records_filenames):
nums += 1
return nums
def show_image(title,image):
'''
    Display an image
    :param title: figure title
    :param image: image data
:return:
'''
# plt.figure("show_image")
# print(image.dtype)
plt.imshow(image)
    plt.axis('on')  # set to 'off' to hide the axes
    plt.title(title)  # figure title
plt.show()
# def load_labels_file(filename,labels_num=1,shuffle=False):
# '''
# 载图txt文件,文件中每行为一个图片信息,且以空格隔开:图像路径 标签1 标签2,如:test_image/1.jpg 0 2
# :param filename:
# :param labels_num :labels个数
# :param shuffle :是否打乱顺序
# :return:images type->list
# :return:labels type->list
# '''
# images=[]
# labels=[]
# with open(filename) as f:
# lines_list=f.readlines()
# if shuffle:
# random.shuffle(lines_list)
#
# for lines in lines_list:
# line=lines.rstrip().split(' ')
# label=[]
# for i in range(labels_num):
# label.append(int(line[i+1]))
# images.append(line[0])
# labels.append(label)
# return images,labels
def load_labels_file(filename,num=1,shuffle=False):
'''
    Build the image path list and label list by walking the class sub-directories,
    i.e. <filename>/<class_name>/<image_name>; the class index is used as the label
    :param filename: dataset root directory
    :param num: number of labels per image (kept for compatibility, unused here)
    :param shuffle: whether to shuffle the order
:return:images type->list
:return:labels type->list
'''
images=[]
labels=[]
# with open(filename) as f:
# lines_list=f.readlines()
# if shuffle:
# random.shuffle(lines_list)
#
# for lines in lines_list:
# line=lines.rstrip().split(' ')
# label=[]
# for i in range(labels_num):
# label.append(int(line[i+1]))
# images.append(line[0])
# labels.append(label)
# return images,labels
for index,name in enumerate(classes):
# print(index,name)
class_path = filename+'/'+name+'/'
# print(class_path)
for img_name in os.listdir(class_path):
img_path = class_path+img_name
# print(img_path)
images.append(img_path)
labels.append(index)
# img = Image.open(img_path)
# img = img.resize((224,224))
# img_raw = img.tobytes()
# with open(train_label,'a') as f:
# f.write(str(index)+'\n')
    if shuffle:
        # shuffle images and labels with the same seed so they stay aligned
        randnum = random.randint(0, 100)
        random.seed(randnum)
        random.shuffle(images)
        random.seed(randnum)
        random.shuffle(labels)
return images,labels
def read_image(filename, resize_height, resize_width,normalization=False):
'''
    Read image data; by default returns uint8 values in [0, 255]
    :param filename:
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0.0, 1.0]
    :return: the image data
'''
bgr_image = cv2.imread(filename)
    if len(bgr_image.shape) == 2:  # convert grayscale images to 3 channels
        print("Warning:gray image", filename)
        bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
# show_image(filename,rgb_image)
# rgb_image=Image.open(filename)
if resize_height>0 and resize_width>0:
rgb_image=cv2.resize(rgb_image,(resize_width,resize_height))
rgb_image=np.asanyarray(rgb_image)
if normalization:
        # note: do not write this as rgb_image = rgb_image/255
rgb_image=rgb_image/255.0
# show_image("src resize image",image)
return rgb_image
def get_batch_images(images,labels,batch_size,labels_nums,one_hot=False,shuffle=False,num_threads=64):
'''
    :param images: images
    :param labels: labels
    :param batch_size:
    :param labels_nums: number of label classes
    :param one_hot: whether to convert labels to one-hot form
    :param shuffle: whether to shuffle; usually shuffle=True for training and shuffle=False for validation
    :return: the batched images and labels
'''
min_after_dequeue = 200
    capacity = min_after_dequeue + 3 * batch_size  # capacity must be greater than min_after_dequeue
if shuffle:
images_batch, labels_batch = tf.train.shuffle_batch([images,labels],
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads)
else:
images_batch, labels_batch = tf.train.batch([images,labels],
batch_size=batch_size,
capacity=capacity,
num_threads=num_threads)
if one_hot:
labels_batch = tf.one_hot(labels_batch, labels_nums, 1, 0)
return images_batch,labels_batch
def read_records(filename,resize_height, resize_width,type=None):
'''
    Parse a tfrecord file. The stored image data is RGB, uint8, [0, 255]; when used as
    training data it usually needs to be normalized to [0, 1].
    :param filename:
    :param resize_height:
    :param resize_width:
    :param type: the return type of the image data
        None: convert uint8-[0,255] to float32-[0,255] (default)
        normalization: normalize to float32-[0,1]
        centralization: normalize to float32-[0,1], then subtract the mean (0.5) to center the data
:return:
'''
    # create a file queue; the number of reads is not limited
    filename_queue = tf.train.string_input_producer([filename])
    # create a reader from file queue
    reader = tf.TFRecordReader()
    # the reader reads one serialized example from the file queue
    _, serialized_example = reader.read(filename_queue)
    # get feature from serialized example
    # parse the serialized example
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64)
}
)
    tf_image = tf.decode_raw(features['image_raw'], tf.uint8)  # get the raw image data
tf_height = features['height']
tf_width = features['width']
tf_depth = features['depth']
tf_label = tf.cast(features['label'], tf.int32)
    # PS: the reshape size must match the image shape used when saving, otherwise this fails
    # tf_image = tf.reshape(tf_image, [-1])  # flatten to a row vector
    tf_image = tf.reshape(tf_image, [resize_height, resize_width, 3])  # set the image dimensions
    # resize_images can only be applied after the shape is restored: input uint8 -> output float32
    # tf_image = tf.image.resize_images(tf_image, [224, 224])
    # images are stored as uint8, but TensorFlow training data must be tf.float32
if type is None:
tf_image = tf.cast(tf_image, tf.float32)
    elif type == 'normalization':  # [1] to normalize, use:
        # the conversion below only normalizes [0, 255] when the input data is uint8
        # tf_image = tf.image.convert_image_dtype(tf_image, tf.float32)
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255.0)  # normalization
    elif type == 'centralization':
        # to normalize and also center the data (assuming a mean of 0.5), use:
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255) - 0.5  # centralization
    # only the image and label are returned here
# return tf_image, tf_height,tf_width,tf_depth,tf_label
return tf_image,tf_label
def create_records(image_dir, output_record_dir, resize_height, resize_width,shuffle,log=5):
'''
    Save the raw image data, label, height, width, etc. into a tfrecord file.
    Note: images are read as uint8 and stored as a tf BytesList string; convert the type as needed when parsing.
    :param image_dir: directory of the raw images (one sub-directory per class)
    :param output_record_dir: path of the output tfrecord file
    :param resize_height:
    :param resize_width:
        PS: when resize_height or resize_width is 0, no resize is performed
    :param shuffle: whether to shuffle the order
    :param log: interval between progress log messages
'''
    # load the image list; only a single label per image is used
images_list, labels_list=load_labels_file(image_dir,1,shuffle)
writer = tf.python_io.TFRecordWriter(output_record_dir)
for i, [image_name, labels] in enumerate(zip(images_list, labels_list)):
image_path=image_name
# print(image_path)
# print(labels)
if not os.path.exists(image_path):
print('Err:no image',image_path)
continue
image = read_image(image_path, resize_height, resize_width)
image_raw = image.tostring()
if i%log==0 or i==len(images_list)-1:
print('------------processing:%d-th------------' % (i))
print('current image_path=%s' % (image_path),'shape:{}'.format(image.shape),'labels:{}'.format(labels))
        # only one label is saved here; for multiple labels add more entries like "'label': _int64_feature(label)"
label=labels
example = tf.train.Example(features=tf.train.Features(feature={
'image_raw': _bytes_feature(image_raw),
'height': _int64_feature(image.shape[0]),
'width': _int64_feature(image.shape[1]),
'depth': _int64_feature(image.shape[2]),
            'label': _int64_feature(label)  # key must match the 'label' feature parsed in read_records
}))
writer.write(example.SerializeToString())
writer.close()
def disp_records(record_file,resize_height, resize_width,show_nums=4):
'''
    Parse a tfrecord file and display show_nums images; mainly used to verify that the record file was generated correctly
    :param tfrecord_file: path of the record file
:return:
'''
    # read the record file
    tf_image, tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
    # display the first 4 images
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(show_nums):
            image, label = sess.run([tf_image, tf_label])  # fetch the image and label from the session
            # image = tf_image.eval()
            # the image parsed from the record is a flat vector; reshape it before displaying
# image = image.reshape([height,width,depth])
#print('shape:{},tpye:{},labels:{}'.format(image.shape,image.dtype,label))
# pilimg = Image.fromarray(np.asarray(image_eval_reshape))
# pilimg.show()
show_image("image:%d"%(label),image)
coord.request_stop()
coord.join(threads)
def batch_test(record_file,resize_height, resize_width):
'''
    :param record_file: path of the record file
    :param resize_height:
    :param resize_width:
    :return:
    :PS: image_batch and label_batch are typically used as the network inputs
'''
    # read the record file
tf_image,tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
image_batch, label_batch= get_batch_images(tf_image,tf_label,batch_size=4,labels_nums=5,one_hot=False,shuffle=False)
init = tf.global_variables_initializer()
    with tf.Session() as sess:  # start a session
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(4):
            # fetch images and labels from the session
            images, labels = sess.run([image_batch, label_batch])
            # only display the first image of each batch
            show_image("image", images[0, :, :, :])
            print('shape:{},type:{},labels:{}'.format(images.shape, images.dtype, labels))
        # stop all threads
coord.request_stop()
coord.join(threads)
# if __name__ == '__main__':
# # 参数设置
#
# resize_height = 224 # 指定存储图片高度
# resize_width = 224 # 指定存储图片宽度
# shuffle=True
# log=5
# # 产生train.record文件
# image_dir='dataset/train'
# train_labels = 'dataset/train.txt' # 图片路径
# train_record_output = 'dataset/record/train.tfrecords'
# create_records(image_dir,train_labels, train_record_output, resize_height, resize_width,shuffle,log)
# train_nums=get_example_nums(train_record_output)
# print("save train example nums={}".format(train_nums))
#
# # 产生val.record文件
# image_dir='dataset/val'
# val_labels = 'dataset/val.txt' # 图片路径
# val_record_output = 'dataset/record/val.tfrecords'
# create_records(image_dir,val_labels, val_record_output, resize_height, resize_width,shuffle,log)
# val_nums=get_example_nums(val_record_output)
# print("save val example nums={}".format(val_nums))
#
# # 测试显示函数
# # disp_records(train_record_output,resize_height, resize_width)
# batch_test(train_record_output,resize_height, resize_width)
if __name__ == '__main__':
    # parameter settings
    resize_height = 224  # height of the stored images
    resize_width = 224  # width of the stored images
shuffle=True
log=5
    # generate the train tfrecord file
image_dir='./train_new/img'
# train_labels = './onsets/train.txt' # 图片路径
train_record_output = 'train.tfrecord'
create_records(image_dir, train_record_output, resize_height, resize_width,shuffle,log)
train_nums=get_example_nums(train_record_output)
print("save train example nums={}".format(train_nums))
    # generate the val tfrecord file
image_dir='./test_new/img'
# val_labels = './onsets/val.txt' # 图片路径
val_record_output = 'val.tfrecord'
create_records(image_dir, val_record_output, resize_height, resize_width,shuffle,log)
val_nums=get_example_nums(val_record_output)
print("save val example nums={}".format(val_nums))
    # test the display functions
# disp_records(train_record_output,resize_height, resize_width)
# batch_test(train_record_output,resize_height, resize_width) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
teuthology/__init__.py | import os
# Tell gevent not to patch os.waitpid() since it is susceptible to race
# conditions. See:
# http://www.gevent.org/gevent.monkey.html#gevent.monkey.patch_os
os.environ['GEVENT_NOWAITPID'] = 'true'
# Use manhole to give us a way to debug hung processes
# https://pypi.python.org/pypi/manhole
import manhole
manhole.install(
verbose=False,
# Listen for SIGUSR1
oneshot_on="USR1"
)
from gevent import monkey
monkey.patch_all(
dns=False,
# Don't patch subprocess to avoid http://tracker.ceph.com/issues/14990
subprocess=False,
)
import sys
from gevent.hub import Hub
# Don't write pyc files
sys.dont_write_bytecode = True
from .orchestra import monkey
monkey.patch_all()
import logging
import subprocess
__version__ = '1.0.0'
# do our best, but if it fails, continue with above
try:
__version__ += '-' + subprocess.check_output(
'git rev-parse --short HEAD'.split(),
cwd=os.path.dirname(os.path.realpath(__file__))
).strip()
except Exception as e:
# before logging; should be unusual
print >>sys.stderr, 'Can\'t get version from git rev-parse', e
# If we are running inside a virtualenv, ensure we have its 'bin' directory in
# our PATH. This doesn't happen automatically if scripts are called without
# first activating the virtualenv.
exec_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
if os.path.split(exec_dir)[-1] == 'bin' and exec_dir not in os.environ['PATH']:
os.environ['PATH'] = ':'.join((exec_dir, os.environ['PATH']))
# We don't need to see log entries for each connection opened
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARN)
# if requests doesn't bundle it, shut it up anyway
logging.getLogger('urllib3.connectionpool').setLevel(
logging.WARN)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s')
log = logging.getLogger(__name__)
log.debug('teuthology version: %s', __version__)
def setup_log_file(log_path):
root_logger = logging.getLogger()
handlers = root_logger.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler) and \
handler.stream.name == log_path:
log.debug("Already logging to %s; not adding new handler",
log_path)
return
formatter = logging.Formatter(
fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
datefmt='%Y-%m-%dT%H:%M:%S')
handler = logging.FileHandler(filename=log_path)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
root_logger.info('teuthology version: %s', __version__)
def install_except_hook():
"""
Install an exception hook that first logs any uncaught exception, then
raises it.
"""
def log_exception(exc_type, exc_value, exc_traceback):
if not issubclass(exc_type, KeyboardInterrupt):
log.critical("Uncaught exception", exc_info=(exc_type, exc_value,
exc_traceback))
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = log_exception
def patch_gevent_hub_error_handler():
Hub._origin_handle_error = Hub.handle_error
def custom_handle_error(self, context, type, value, tb):
if not issubclass(type, Hub.SYSTEM_ERROR + Hub.NOT_ERROR):
log.error("Uncaught exception (Hub)", exc_info=(type, value, tb))
self._origin_handle_error(context, type, value, tb)
Hub.handle_error = custom_handle_error
patch_gevent_hub_error_handler()
| []
| []
| [
"GEVENT_NOWAITPID",
"PATH"
]
| [] | ["GEVENT_NOWAITPID", "PATH"] | python | 2 | 0 | |
cmd/dashboard/main.go | /*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"net/http"
"os"
"time"
routeclient "github.com/openshift/client-go/route/clientset/versioned"
"github.com/tektoncd/dashboard/pkg/controllers"
"github.com/tektoncd/dashboard/pkg/endpoints"
logging "github.com/tektoncd/dashboard/pkg/logging"
"github.com/tektoncd/dashboard/pkg/router"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
resourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned"
k8sclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"knative.dev/pkg/signals"
)
// Stores config env
type config struct {
kubeConfigPath string
// Should conform with http.Server.Addr field
port string
installNamespace string
}
func main() {
// Initialize config
dashboardConfig := config{
kubeConfigPath: os.Getenv("KUBECONFIG"),
port: ":8080",
installNamespace: os.Getenv("INSTALLED_NAMESPACE"),
}
portNumber := os.Getenv("PORT")
if portNumber != "" {
dashboardConfig.port = ":" + portNumber
logging.Log.Infof("Port number from config: %s", portNumber)
}
var cfg *rest.Config
var err error
if len(dashboardConfig.kubeConfigPath) != 0 {
cfg, err = clientcmd.BuildConfigFromFlags("", dashboardConfig.kubeConfigPath)
if err != nil {
logging.Log.Errorf("Error building kubeconfig from %s: %s", dashboardConfig.kubeConfigPath, err.Error())
}
} else {
if cfg, err = rest.InClusterConfig(); err != nil {
logging.Log.Errorf("Error building kubeconfig: %s", err.Error())
}
}
pipelineClient, err := clientset.NewForConfig(cfg)
if err != nil {
logging.Log.Errorf("Error building pipeline clientset: %s", err.Error())
}
pipelineResourceClient, err := resourceclientset.NewForConfig(cfg)
if err != nil {
logging.Log.Errorf("Error building pipelineresource clientset: %s", err.Error())
}
k8sClient, err := k8sclientset.NewForConfig(cfg)
if err != nil {
logging.Log.Errorf("Error building k8s clientset: %s", err.Error())
}
routeClient, err := routeclient.NewForConfig(cfg)
if err != nil {
logging.Log.Errorf("Error building route clientset: %s", err.Error())
}
resource := endpoints.Resource{
PipelineClient: pipelineClient,
PipelineResourceClient: pipelineResourceClient,
K8sClient: k8sClient,
RouteClient: routeClient,
}
ctx := signals.NewContext()
routerHandler := router.Register(resource)
logging.Log.Info("Creating controllers")
resyncDur := time.Second * 30
controllers.StartTektonControllers(resource.PipelineClient, resource.PipelineResourceClient, resyncDur, ctx.Done())
controllers.StartKubeControllers(resource.K8sClient, resyncDur, dashboardConfig.installNamespace, routerHandler, ctx.Done())
logging.Log.Infof("Creating server and entering wait loop")
server := &http.Server{Addr: dashboardConfig.port, Handler: routerHandler}
errCh := make(chan error, 1)
defer close(errCh)
go func() {
// Don't forward ErrServerClosed as that indicates we're already shutting down.
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
errCh <- fmt.Errorf("dashboard server failed: %w", err)
}
}()
select {
case err := <-errCh:
logging.Log.Fatal(err)
case <-ctx.Done():
if err := server.Shutdown(context.Background()); err != nil {
logging.Log.Fatal(err)
}
}
}
| [
"\"KUBECONFIG\"",
"\"INSTALLED_NAMESPACE\"",
"\"PORT\""
]
| []
| [
"INSTALLED_NAMESPACE",
"PORT",
"KUBECONFIG"
]
| [] | ["INSTALLED_NAMESPACE", "PORT", "KUBECONFIG"] | go | 3 | 0 | |
gitinfo/utils.py | from __future__ import annotations
from anytree import NodeMixin
from datetime import datetime, timezone
from dotenv import load_dotenv
from os import environ
from os.path import join, dirname
from typing import Tuple, List, Any, Dict, Optional
import re
import requests
from rich.box import Box
__all__ = [
"get_data", "get_token", "get_url_info", "human_size", "humanize_time",
"populate_tree", "ROUNDED_BORDER", "run_query", "set_token", "sort_entries"
]
ROUNDED_BORDER: Box = Box(
"""\
╭──╮
│ │
│ │
│ │
│ │
│ │
│ │
╰──╯
"""
)
def get_token() -> str:
"""
Retrieves the Github Personal Access Token from .env file
"""
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
return environ.get("GITSORT_TOKEN")
def set_token(token: str) -> None:
"""
Set your Github personal access token in order to access
private repositories and extend the usage of the GraphQL API.
"""
import os
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
gitsort_token = os.environ.get("GITSORT_TOKEN")
if not gitsort_token:
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token set!")
else:
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
while inp not in ["y", "n"]:
print("Invalid answer")
inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
if inp == "y":
with open(dotenv_path, "w") as f:
f.write(f"GITSORT_TOKEN={token}")
print("Github Token updated!")
def run_query(
query: str,
token: str,
variables: dict | None = None,
headers: dict | None = None
) -> Tuple[dict, str]:
"""
Runs a Github GraphQL query and returns the result
:param query: str
GraphQL query
:param token: str
The users Github Personal Access Token
:param variables: dict
GraphQL Variables
:param headers: dict
Request headers
:return: tuple
The response and rate limit
"""
if not headers:
headers = {"Authorization": f"Bearer {token}"}
request = requests.post(
'https://api.github.com/graphql',
json={'query': query, 'variables': variables},
headers=headers
)
if request.status_code == 200:
return request.json(), request.headers["X-RateLimit-Remaining"]
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
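# Illustrative usage (added example, not part of the original module; the query
# string here is arbitrary and the token is assumed to come from get_token()):
#
#   data, rate_limit = run_query("query { viewer { login } }", get_token())
#   print(data["data"]["viewer"]["login"], rate_limit)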
def get_data(
query: str,
token: str,
query_variables: Dict[str, str]
) -> Tuple[bool, Any, str]:
"""
Get data from query
:param query: str
Graphql Query
:param token: str
Github Personal Access Token
:param query_variables: dict
Variables used in query
:return: tuple
        returns a tuple of three items:
        0. bool: True if the query failed (item 1 then holds the error message), else False
1. Any: Data returned from query
2. str: Rate limit
"""
data, rate_limit = run_query(query, token, query_variables)
if list(data.keys())[0] == "errors":
return True, data["errors"][0]["message"], rate_limit
try:
return False, data["data"]["repository"], rate_limit
except TypeError:
return True, "Query failed. Make sure path and branch is valid.", rate_limit
def get_url_info(url: str) -> Tuple[str, str] | List[str]:
"""
Retrieves owner and repository from a string
:param url: str
Either some form of Github Url or path such as `user/repo/whatever`
:return: tuple | list
Tuple containing owner and repo
"""
is_link = re.compile(r"^(git(hub)?|https?)")
is_git_path = re.compile(r"^[a-zA-Z0-9\-_.]+/[a-zA-Z0-9\-_.]+")
git_url_regex = re.compile(r"^(https|git)?(://|@)?([^/:]+)[/:](?P<owner>[^/:]+)/(?P<name>.+)(.git)?$")
is_git_repo = re.compile(r"((.git)|/)$")
if is_link.match(url):
if is_git_path.match(url):
return url.split("/")[:2]
match = git_url_regex.match(url)
if not match:
raise Exception("Invalid path")
name = match.group("name").split("/")[0]
name = is_git_repo.sub("", name)
owner = match.group("owner")
return owner, name
else:
if url.count("/") > 0:
return url.split("/")[:2]
raise Exception("Link/path must contain both user and repo")
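# Illustrative examples (added, not part of the original module):
#   get_url_info("https://github.com/user/repo.git")  ->  ("user", "repo")
#   get_url_info("user/repo/extra/path")              ->  ["user", "repo"]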
def humanize_time(time_str: str) -> str:
"""
Convert datetime into a more human-friendly format
:param time_str: str
Time string in the ISO 8601 format
:return: str
Human friendly format: <number> <time_period> ago
"""
if not time_str:
return "null"
now = datetime.now()
date = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
date = date.replace(tzinfo=timezone.utc)
diff = int(now.timestamp() - date.timestamp())
times = [
1, 60, 3600, 86400, 604800, 2629746, 31556925
]
times_str = [
"Second", "Minute", "Hour", "Day", "Week", "Month", "Year"
]
temp = [diff // t for t in times][::-1]
    for i, t in enumerate(temp):
        if t != 0:
            return f"{t} {times_str[6-i]}{'' if t == 1 else 's'} ago"
    return "Just now"  # fallback so the function never returns None for sub-second differences
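# Illustrative example (added): humanize_time("2020-01-01T00:00:00Z") returns a
# string such as "2 Years ago", depending on the current date.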
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
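# Illustrative examples (added): human_size(500) -> "500 bytes", human_size(2048) -> "2.0 KB".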
class FileEntry(NodeMixin):
def __init__(
self,
name: str,
size: str | int = None,
parent=None,
children=None
) -> None:
super(FileEntry, self).__init__()
if size != None:
self.name = f"{name} ([green]{human_size(size)}[/])"
else:
self.name = f"[blue]{name}/[/]"
self.parent = parent
if children:
self.children = children
class FileEntryRoot(NodeMixin):
def __init__(self, name: str, parent=None, children=None):
super(FileEntryRoot, self).__init__()
self.name = name
self.parent = parent
if children:
self.children = children
def populate_tree(
root_name: str,
data: list,
collapse_blobs: bool = False
) -> "anytree.Node":
"""
Populate the tree
:param root_name: str
Name of root node
    :param data: list
Data
:param collapse_blobs: bool
Collapse files or not
:return: anytree.node
"""
root = FileEntryRoot(root_name)
def edges(tree: FileEntry | FileEntryRoot, parent=None):
collapsed_count = 0
collapsed_size = 0
for entry in tree:
if entry["type"] == "blob":
if collapse_blobs:
collapsed_size += entry["object"]["byteSize"]
collapsed_count += 1
else:
_ = FileEntry(entry["name"], entry["object"]["byteSize"], parent=parent)
else:
node = FileEntry(entry["name"], parent=parent)
if entry["object"]:
edges(entry["object"]["entries"], parent=node)
if collapse_blobs:
_ = FileEntry(f"[orange1]{collapsed_count}[/] Files", collapsed_size, parent=parent)
edges(data, root)
return root
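# Added note: Reversor flips comparison results so that sorted() can order one
# key in descending order (here: entry type, so trees come before blobs) while
# the other key (name) stays ascending.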
class Reversor:
def __init__(self, obj: Any) -> None:
self.obj = obj
def __eq__(self, other: Any) -> bool:
return other.obj == self.obj
def __lt__(self, other: Any) -> bool:
return other.obj < self.obj
def sort_entries(entries: List[Any]) -> List[Any]:
"""
Recursively sort the data first based on type
then alphabetically
:param entries: list
Entries
:return: list
Entries but sorted
"""
entries = sorted(
entries, key=lambda x: (
Reversor(x["type"]), # First sort by type (reversed)
x["name"].lower() # Then sort by alphabetical
)
)
for entry in entries:
if entry["type"] == "tree" and entry["object"]:
entry["object"]["entries"] = sort_entries(entry["object"]["entries"])
return entries
| []
| []
| [
"GITSORT_TOKEN"
]
| [] | ["GITSORT_TOKEN"] | python | 1 | 0 | |
heat/tests/functional/test_AutoScalingMultiAZSample.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
from nose.plugins.attrib import attr
import unittest
import os
from time import sleep
@attr(speed='slow')
@attr(tag=['func', 'autoscaling', 'AutoScalingMultiAZSample.template'])
class AutoScalingMultiAZSampleFunctionalTest(unittest.TestCase):
def setUp(self):
template = 'AutoScalingMultiAZSample.template'
stack_paramstr = ';'.join(['InstanceType=m1.small',
'DBUsername=dbuser',
'DBPassword=' + os.environ['OS_PASSWORD']])
self.stack = util.Stack(self, template, 'F17', 'x86_64', 'cfntools',
stack_paramstr)
self.WebServerGroup0 = util.Instance(self, 'WebServerGroup-0')
def tearDown(self):
        self.stack.cleanup()
def test_instance(self):
self.stack.create()
self.WebServerGroup0.wait_for_boot()
self.WebServerGroup0.check_cfntools()
self.WebServerGroup0.wait_for_provisioning()
# TODO: verify the code below tests the template properly
# TODO(sdake) use a util exists function for nonexistent instances (needs dev)
# Trigger the load balancer by taking up memory
self.WebServerGroup0.exec_command('memhog -r100000 1500m')
# Give the load balancer 2 minutes to react
sleep(2 * 60)
self.WebServerGroup1 = util.Instance(self, 'WebServerGroup-1')
# Verify the second instance gets launched
self.assertTrue(self.WebServerGroup1.exists())
self.WebServerGroup1.wait_for_boot()
self.WebServerGroup1.check_cfntools()
self.WebServerGroup1.wait_for_provisioning()
# ensure wordpress was installed by checking for expected
# configuration file over ssh
self.assertTrue(self.WebServerGroup0.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected on WSG0"
# ensure wordpress was installed by checking for expected
# configuration file over ssh
self.assertTrue(self.WebServerGroup1.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected on WSG1"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("URL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
| []
| []
| [
"OS_PASSWORD"
]
| [] | ["OS_PASSWORD"] | python | 1 | 0 | |
magefile.go | // +build mage
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
)
// can be overwritten by GOEXE
var goExe = "go"
// can be overwritten by PY3EXE
var py3Exe = "python3"
// can be overwritten by CLIEXE
var cliExe = "cartridge"
var goPackageName = "github.com/tarantool/cartridge-cli/cli"
var packagePath = "./cli"
var generateModePath = filepath.Join(packagePath, "create", "codegen", "generate_mode.go")
var generatedFilesPath = fmt.Sprintf("./%s", filepath.Join(packagePath, "create", "codegen", "static"))
var generatedFSFile = "cartridge_vfsdata_gen.go"
var generatedModeFile = "cartridge_filemodes_gen.go"
var completionPath = "./completion"
var tmpPath = "./tmp"
var sdkDirName = "tarantool-enterprise"
var sdkDirPath = filepath.Join(tmpPath, sdkDirName)
func getBuildEnv() map[string]string {
var err error
var curDir string
var gitTag string
var gitCommit string
if curDir, err = os.Getwd(); err != nil {
fmt.Printf("Failed to get current directory: %s\n", err)
}
if _, err := exec.LookPath("git"); err == nil {
gitTag, _ = sh.Output("git", "describe", "--tags")
gitCommit, _ = sh.Output("git", "rev-parse", "--short", "HEAD")
}
versionLabel := os.Getenv("VERSION_LABEL")
return map[string]string{
"PACKAGE": goPackageName,
"GIT_TAG": gitTag,
"GIT_COMMIT": gitCommit,
"VERSION_LABEL": versionLabel,
"PWD": curDir,
}
}
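// ldflags use -X to stamp version metadata (git tag/commit, version label) into
// the binary at link time; the ${...} placeholders are expanded from the env map
// returned by getBuildEnv when the command runs via sh.RunWith.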
var ldflags = []string{
"-s", "-w",
"-X ${PACKAGE}/version.gitTag=${GIT_TAG}",
"-X ${PACKAGE}/version.gitCommit=${GIT_COMMIT}",
"-X ${PACKAGE}/version.versionLabel=${VERSION_LABEL}",
}
var ldflagsStr = strings.Join(ldflags, " ")
var asmflags = "all=-trimpath=${PWD}"
var gcflags = "all=-trimpath=${PWD}"
func init() {
var err error
if specifiedGoExe := os.Getenv("GOEXE"); specifiedGoExe != "" {
goExe = specifiedGoExe
}
if specifiedCliExe := os.Getenv("CLIEXE"); specifiedCliExe != "" {
cliExe = specifiedCliExe
} else {
if cliExe, err = filepath.Abs(cliExe); err != nil {
panic(err)
}
}
// We want to use Go 1.11 modules even if the source lives inside GOPATH.
// The default is "auto".
os.Setenv("GO111MODULE", "on")
}
// Run go vet and flake8
func Lint() error {
fmt.Println("Generating Go code...")
mg.Deps(GenerateGoCode)
fmt.Println("Running go vet...")
if err := sh.RunV(goExe, "vet", packagePath); err != nil {
return err
}
fmt.Println("Running flake8...")
if err := sh.RunV(py3Exe, "-m", "flake8"); err != nil {
return err
}
fmt.Println("Running luacheck for test projects files...")
if err := sh.RunV(".rocks/bin/luacheck", "test/files"); err != nil {
return err
}
return nil
}
// Run unit tests
func Unit() error {
fmt.Println("Running unit tests...")
mg.Deps(GenerateGoCode)
if mg.Verbose() {
return sh.RunV(goExe, "test", "-v", "./cli/...")
} else {
return sh.RunV(goExe, "test", "./cli/...")
}
}
// Run integration tests
func Integration() error {
fmt.Println("Running integration tests...")
return sh.RunV(py3Exe, "-m", "pytest", "test/integration")
}
// Run examples tests
func TestExamples() error {
fmt.Println("Running examples tests...")
return sh.RunV(py3Exe, "-m", "pytest", "test/examples")
}
// Run e2e tests
func E2e() error {
fmt.Println("Running e2e tests...")
return sh.RunV(py3Exe, "-m", "pytest", "test/e2e")
}
// Run all tests
func Test() {
mg.SerialDeps(Lint, Unit, Integration, TestExamples, E2e)
}
// Build cartridge-cli executable
func Build() error {
var err error
fmt.Println("Building...")
mg.Deps(GenerateGoCode)
err = sh.RunWith(
getBuildEnv(), goExe, "build",
"-o", cliExe,
"-ldflags", ldflagsStr,
"-asmflags", asmflags,
"-gcflags", gcflags,
packagePath,
)
if err != nil {
return fmt.Errorf("Failed to build cartridge-cli executable: %s", err)
}
return nil
}
// Generate Go code that statically implements filesystem
// and map with modes for that filesystem.
func GenerateGoCode() error {
err := sh.RunWith(
getBuildEnv(), goExe,
"generate", "-tags=dev",
generatedFilesPath,
)
if err != nil {
return err
}
err = sh.RunWith(
getBuildEnv(), goExe,
"run", generateModePath,
)
if err != nil {
return err
}
return nil
}
// Generate completion scripts for bash and zsh
func GenCompletion() error {
if err := Build(); err != nil {
return err
}
fmt.Println("Generate autocompletion...")
if err := sh.Run(cliExe, "gen", "completion"); err != nil {
return fmt.Errorf("Failed to generate autocompletion scripts: %s", err)
}
return nil
}
// Download Tarantool Enterprise to tmp/tarantool-enterprise dir
func Sdk() error {
if _, err := os.Stat(sdkDirPath); os.IsNotExist(err) {
if err := downloadSdk(); err != nil {
return err
}
} else if err != nil {
return fmt.Errorf("Failed to check if SDK exists: %s", err)
} else {
fmt.Printf("Found Tarantool Enterprise SDK: %s\n", sdkDirPath)
}
fmt.Printf("Run `source %s/env.sh` to activate Tarantool Enterprise\n", sdkDirPath)
return nil
}
// Clean up after yourself
func Clean() {
fmt.Println("Cleaning...")
os.Remove(filepath.Join(generatedFilesPath, generatedFSFile))
os.Remove(filepath.Join(generatedFilesPath, generatedModeFile))
os.RemoveAll(cliExe)
os.RemoveAll(completionPath)
}
func downloadSdk() error {
bundleVersion := os.Getenv("BUNDLE_VERSION")
if bundleVersion == "" {
return fmt.Errorf("Please, specify BUNDLE_VERSION")
}
downloadToken := os.Getenv("DOWNLOAD_TOKEN")
if downloadToken == "" {
return fmt.Errorf("Please, specify DOWNLOAD_TOKEN")
}
archivedSDKName := fmt.Sprintf("tarantool-enterprise-bundle-%s.tar.gz", bundleVersion)
sdkDownloadUrl := fmt.Sprintf(
"https://tarantool:%[email protected]/enterprise/%s",
downloadToken,
archivedSDKName,
)
fmt.Printf("Download Tarantool Enterprise SDK %s...\n", bundleVersion)
archivedSDKPath := filepath.Join(tmpPath, archivedSDKName)
if err := downloadFile(sdkDownloadUrl, archivedSDKPath); err != nil {
return fmt.Errorf("Failed to download archived SDK: %s", err)
}
defer os.RemoveAll(archivedSDKPath)
fmt.Println("Unarchive Tarantool Enterprise SDK...")
if err := sh.RunV("tar", "-xzf", archivedSDKPath, "-C", tmpPath); err != nil {
		return fmt.Errorf("Failed to unarchive SDK: %s", err)
}
return nil
}
| [
"\"VERSION_LABEL\"",
"\"GOEXE\"",
"\"CLIEXE\"",
"\"BUNDLE_VERSION\"",
"\"DOWNLOAD_TOKEN\""
]
| []
| [
"DOWNLOAD_TOKEN",
"VERSION_LABEL",
"GOEXE",
"BUNDLE_VERSION",
"CLIEXE"
]
| [] | ["DOWNLOAD_TOKEN", "VERSION_LABEL", "GOEXE", "BUNDLE_VERSION", "CLIEXE"] | go | 5 | 0 | |
python/ray/serve/backend_worker.py | import asyncio
import logging
import traceback
import inspect
from collections.abc import Iterable
from itertools import groupby
from typing import Union, List, Any, Callable, Type
import time
import starlette.responses
from starlette.requests import Request
import ray
from ray.actor import ActorHandle
from ray._private.async_compat import sync_to_async
from ray.serve.batching import _BatchQueue
from ray.serve.utils import (ASGIHTTPSender, parse_request_item, _get_logger,
chain_future, unpack_future, import_attr)
from ray.serve.exceptions import RayServeException
from ray.util import metrics
from ray.serve.config import BackendConfig
from ray.serve.long_poll import LongPollClient, LongPollNamespace
from ray.serve.router import Query, RequestMetadata
from ray.serve.constants import (
BACKEND_RECONFIGURE_METHOD,
DEFAULT_LATENCY_BUCKET_MS,
)
from ray.exceptions import RayTaskError
logger = _get_logger()
def create_backend_replica(backend_def: Union[Callable, Type[Callable], str]):
"""Creates a replica class wrapping the provided function or class.
    This approach is chosen over inheritance to avoid conflicts between the
    user-provided class and the RayServeReplica class.
"""
backend_def = backend_def
# TODO(architkulkarni): Add type hints after upgrading cloudpickle
class RayServeWrappedReplica(object):
def __init__(self, backend_tag, replica_tag, init_args,
backend_config: BackendConfig, controller_name: str):
if isinstance(backend_def, str):
backend = import_attr(backend_def)
else:
backend = backend_def
if inspect.isfunction(backend):
is_function = True
elif inspect.isclass(backend):
is_function = False
else:
assert False, ("backend_def must be function, class, or "
"corresponding import path.")
# Set the controller name so that serve.connect() in the user's
# backend code will connect to the instance that this backend is
# running in.
ray.serve.api._set_internal_replica_context(
backend_tag, replica_tag, controller_name)
if is_function:
_callable = backend
else:
_callable = backend(*init_args)
assert controller_name, "Must provide a valid controller_name"
controller_handle = ray.get_actor(controller_name)
self.backend = RayServeReplica(_callable, backend_config,
is_function, controller_handle)
@ray.method(num_returns=2)
async def handle_request(
self,
request_metadata: RequestMetadata,
*request_args,
**request_kwargs,
):
# Directly receive input because it might contain an ObjectRef.
query = Query(request_args, request_kwargs, request_metadata)
return await self.backend.handle_request(query)
def ready(self):
pass
async def drain_pending_queries(self):
return await self.backend.drain_pending_queries()
if isinstance(backend_def, str):
RayServeWrappedReplica.__name__ = "RayServeReplica_{}".format(
backend_def)
else:
RayServeWrappedReplica.__name__ = "RayServeReplica_{}".format(
backend_def.__name__)
return RayServeWrappedReplica
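# Illustrative sketch (not part of this module): the wrapped replica class is
# normally instantiated as a Ray actor by the Serve controller, roughly:
#
#   ReplicaCls = create_backend_replica(MyBackend)        # MyBackend is assumed
#   actor = ray.remote(ReplicaCls).remote(
#       "my_backend", "my_backend#1", tuple(),            # tags/args are assumed
#       BackendConfig(), "SERVE_CONTROLLER_NAME")
#
# The actor's handle_request() is then driven by the Serve router.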
def wrap_to_ray_error(function_name: str,
exception: Exception) -> RayTaskError:
"""Utility method to wrap exceptions in user code."""
try:
# Raise and catch so we can access traceback.format_exc()
raise exception
except Exception as e:
traceback_str = ray._private.utils.format_error_message(
traceback.format_exc())
return ray.exceptions.RayTaskError(function_name, traceback_str, e)
class RayServeReplica:
"""Handles requests with the provided callable."""
def __init__(self, _callable: Callable, backend_config: BackendConfig,
is_function: bool, controller_handle: ActorHandle) -> None:
self.backend_tag = ray.serve.api.get_replica_context().backend_tag
self.replica_tag = ray.serve.api.get_replica_context().replica_tag
self.callable = _callable
self.is_function = is_function
self.config = backend_config
self.batch_queue = _BatchQueue(self.config.max_batch_size or 1,
self.config.batch_wait_timeout)
self.reconfigure(self.config.user_config)
self.num_ongoing_requests = 0
self.request_counter = metrics.Counter(
"serve_backend_request_counter",
description=("The number of queries that have been "
"processed in this replica."),
tag_keys=("backend", ))
self.request_counter.set_default_tags({"backend": self.backend_tag})
self.loop = asyncio.get_event_loop()
self.long_poll_client = LongPollClient(
controller_handle,
{
(LongPollNamespace.BACKEND_CONFIGS, self.backend_tag): self.
_update_backend_configs,
},
call_in_event_loop=self.loop,
)
self.error_counter = metrics.Counter(
"serve_backend_error_counter",
description=("The number of exceptions that have "
"occurred in the backend."),
tag_keys=("backend", ))
self.error_counter.set_default_tags({"backend": self.backend_tag})
self.restart_counter = metrics.Counter(
"serve_backend_replica_starts",
description=("The number of times this replica "
"has been restarted due to failure."),
tag_keys=("backend", "replica"))
self.restart_counter.set_default_tags({
"backend": self.backend_tag,
"replica": self.replica_tag
})
self.queuing_latency_tracker = metrics.Histogram(
"serve_backend_queuing_latency_ms",
description=("The latency for queries in the replica's queue "
"waiting to be processed or batched."),
boundaries=DEFAULT_LATENCY_BUCKET_MS,
tag_keys=("backend", "replica"))
self.queuing_latency_tracker.set_default_tags({
"backend": self.backend_tag,
"replica": self.replica_tag
})
self.processing_latency_tracker = metrics.Histogram(
"serve_backend_processing_latency_ms",
description="The latency for queries to be processed.",
boundaries=DEFAULT_LATENCY_BUCKET_MS,
tag_keys=("backend", "replica", "batch_size"))
self.processing_latency_tracker.set_default_tags({
"backend": self.backend_tag,
"replica": self.replica_tag
})
self.num_queued_items = metrics.Gauge(
"serve_replica_queued_queries",
description=("The current number of queries queued in "
"the backend replicas."),
tag_keys=("backend", "replica"))
self.num_queued_items.set_default_tags({
"backend": self.backend_tag,
"replica": self.replica_tag
})
self.num_processing_items = metrics.Gauge(
"serve_replica_processing_queries",
description="The current number of queries being processed.",
tag_keys=("backend", "replica"))
self.num_processing_items.set_default_tags({
"backend": self.backend_tag,
"replica": self.replica_tag
})
self.restart_counter.inc()
ray_logger = logging.getLogger("ray")
for handler in ray_logger.handlers:
handler.setFormatter(
logging.Formatter(
handler.formatter._fmt +
f" component=serve backend={self.backend_tag} "
f"replica={self.replica_tag}"))
asyncio.get_event_loop().create_task(self.main_loop())
def get_runner_method(self, request_item: Query) -> Callable:
method_name = request_item.metadata.call_method
if not hasattr(self.callable, method_name):
raise RayServeException("Backend doesn't have method {} "
"which is specified in the request. "
"The available methods are {}".format(
method_name, dir(self.callable)))
if self.is_function:
return self.callable
return getattr(self.callable, method_name)
async def ensure_serializable_response(self, response: Any) -> Any:
if isinstance(response, starlette.responses.StreamingResponse):
async def mock_receive():
# This is called in a tight loop in response() just to check
# for an http disconnect. So rather than return immediately
# we should suspend execution to avoid wasting CPU cycles.
never_set_event = asyncio.Event()
await never_set_event.wait()
sender = ASGIHTTPSender()
await response(scope=None, receive=mock_receive, send=sender)
return sender.build_starlette_response()
return response
async def invoke_single(self, request_item: Query) -> Any:
logger.debug("Replica {} started executing request {}".format(
self.replica_tag, request_item.metadata.request_id))
arg = parse_request_item(request_item)
start = time.time()
try:
# TODO(simon): Split this section out when invoke_batch is removed.
if self.config.internal_metadata.is_asgi_app:
request: Request = arg
scope = request.scope
root_path = self.config.internal_metadata.path_prefix
# The incoming scope["path"] contains prefixed path and it
# won't be stripped by FastAPI.
request.scope["path"] = scope["path"].replace(root_path, "", 1)
# root_path is used such that the reverse look up and
# redirection works.
request.scope["root_path"] = root_path
sender = ASGIHTTPSender()
await self.callable._serve_asgi_app(
request.scope,
request._receive,
sender,
)
result = sender.build_starlette_response()
else:
method_to_call = sync_to_async(
self.get_runner_method(request_item))
result = await method_to_call(arg)
result = await self.ensure_serializable_response(result)
self.request_counter.inc()
except Exception as e:
import os
if "RAY_PDB" in os.environ:
ray.util.pdb.post_mortem()
result = wrap_to_ray_error(method_to_call.__name__, e)
self.error_counter.inc()
latency_ms = (time.time() - start) * 1000
self.processing_latency_tracker.observe(
latency_ms, tags={"batch_size": "1"})
return result
async def invoke_batch(self, request_item_list: List[Query]) -> List[Any]:
args = []
call_methods = set()
batch_size = len(request_item_list)
# Construct the batch of requests
for item in request_item_list:
logger.debug("Replica {} started executing request {}".format(
self.replica_tag, item.metadata.request_id))
args.append(parse_request_item(item))
call_methods.add(self.get_runner_method(item))
timing_start = time.time()
try:
if len(call_methods) != 1:
raise RayServeException(
f"Queries contain mixed calling methods: {call_methods}. "
"Please only send the same type of requests in batching "
"mode.")
self.request_counter.inc(batch_size)
call_method = sync_to_async(call_methods.pop())
result_list = await call_method(args)
if not isinstance(result_list, Iterable) or isinstance(
result_list, (dict, set)):
error_message = ("RayServe expects an ordered iterable object "
"but the replica returned a {}".format(
type(result_list)))
raise RayServeException(error_message)
# Normalize the result into a list type. This operation is fast
# in Python because it doesn't copy anything.
result_list = list(result_list)
if (len(result_list) != batch_size):
error_message = ("Worker doesn't preserve batch size. The "
"input has length {} but the returned list "
"has length {}. Please return a list of "
"results with length equal to the batch size"
".".format(batch_size, len(result_list)))
raise RayServeException(error_message)
for i, result in enumerate(result_list):
result_list[i] = (await
self.ensure_serializable_response(result))
except Exception as e:
wrapped_exception = wrap_to_ray_error(call_method.__name__, e)
self.error_counter.inc()
result_list = [wrapped_exception for _ in range(batch_size)]
latency_ms = (time.time() - timing_start) * 1000
self.processing_latency_tracker.observe(
latency_ms, tags={"batch_size": str(batch_size)})
return result_list
async def main_loop(self) -> None:
while True:
            # NOTE(simon): There's an issue when the user updates batch size and
            # batch wait timeout during execution: these values will not be
            # updated until after the current iteration.
batch = await self.batch_queue.wait_for_batch()
# Record metrics
self.num_queued_items.set(self.batch_queue.qsize())
self.num_processing_items.set(self.num_ongoing_requests -
self.batch_queue.qsize())
for query in batch:
queuing_time = (time.time() - query.tick_enter_replica) * 1000
self.queuing_latency_tracker.observe(queuing_time)
all_evaluated_futures = []
if not self.config.internal_metadata.accepts_batches:
query = batch[0]
evaluated = asyncio.ensure_future(self.invoke_single(query))
all_evaluated_futures = [evaluated]
chain_future(evaluated, query.async_future)
else:
get_call_method = (
lambda query: query.metadata.call_method # noqa: E731
)
sorted_batch = sorted(batch, key=get_call_method)
for _, group in groupby(sorted_batch, key=get_call_method):
group = list(group)
evaluated = asyncio.ensure_future(self.invoke_batch(group))
all_evaluated_futures.append(evaluated)
result_futures = [q.async_future for q in group]
chain_future(
unpack_future(evaluated, len(group)), result_futures)
if self.config.internal_metadata.is_blocking:
# We use asyncio.wait here so if the result is exception,
# it will not be raised.
await asyncio.wait(all_evaluated_futures)
def reconfigure(self, user_config) -> None:
if user_config:
if self.is_function:
raise ValueError(
"backend_def must be a class to use user_config")
elif not hasattr(self.callable, BACKEND_RECONFIGURE_METHOD):
raise RayServeException("user_config specified but backend " +
self.backend_tag + " missing " +
BACKEND_RECONFIGURE_METHOD + " method")
reconfigure_method = getattr(self.callable,
BACKEND_RECONFIGURE_METHOD)
reconfigure_method(user_config)
def _update_backend_configs(self, new_config: BackendConfig) -> None:
self.config = new_config
self.batch_queue.set_config(self.config.max_batch_size or 1,
self.config.batch_wait_timeout)
self.reconfigure(self.config.user_config)
async def handle_request(self, request: Query) -> asyncio.Future:
request.tick_enter_replica = time.time()
logger.debug("Replica {} received request {}".format(
self.replica_tag, request.metadata.request_id))
request.async_future = asyncio.get_event_loop().create_future()
self.num_ongoing_requests += 1
self.batch_queue.put(request)
result = await request.async_future
request_time_ms = (time.time() - request.tick_enter_replica) * 1000
logger.debug("Replica {} finished request {} in {:.2f}ms".format(
self.replica_tag, request.metadata.request_id, request_time_ms))
self.num_ongoing_requests -= 1
# Returns a small object for router to track request status.
return b"", result
async def drain_pending_queries(self):
"""Perform graceful shutdown.
Trigger a graceful shutdown protocol that will wait for all the queued
tasks to be completed and return to the controller.
"""
sleep_time = self.config.experimental_graceful_shutdown_wait_loop_s
while True:
# Sleep first because we want to make sure all the routers receive
# the notification to remove this replica first.
await asyncio.sleep(sleep_time)
num_queries_waiting = self.batch_queue.qsize()
if (num_queries_waiting == 0) and (self.num_ongoing_requests == 0):
break
else:
logger.info(
f"Waiting for an additional {sleep_time}s "
f"to shutdown replica {self.replica_tag} because "
f"num_queries_waiting {num_queries_waiting} and "
f"num_ongoing_requests {self.num_ongoing_requests}")
ray.actor.exit_actor()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
internal/nhctl/utils/util.go | package utils
import (
"crypto/sha1"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"nocalhost/pkg/nhctl/tools"
"os"
"os/user"
"path/filepath"
"reflect"
"runtime"
"strconv"
)
func GetHomePath() string {
if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
if u, err := user.Lookup(sudoUser); err == nil {
return u.HomeDir
}
} else {
u, err := user.Current()
if err == nil {
return u.HomeDir
}
}
return ""
}
func IsSudoUser() bool {
if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
return true
}
return false
}
func Sha1ToString(str string) string {
hash := sha1.New()
hash.Write([]byte(str))
return fmt.Sprintf("%x", hash.Sum(nil))
}
func GetNhctlBinName() string {
if runtime.GOOS == "windows" {
return "nhctl.exe"
}
return "nhctl"
}
func CopyFile(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(out, in)
if err != nil {
return
}
err = out.Sync()
if err != nil {
return
}
si, err := os.Stat(src)
if err != nil {
return
}
err = os.Chmod(dst, si.Mode())
if err != nil {
return
}
return
}
// CopyDir recursively copies a directory tree, attempting to preserve permissions.
// Source directory must exist.
// Symlinks are ignored and skipped.
func CopyDir(src string, dst string) (err error) {
src = filepath.Clean(src)
dst = filepath.Clean(dst)
si, err := os.Stat(src)
if err != nil {
return err
}
if !si.IsDir() {
return fmt.Errorf("source is not a directory")
}
_, err = os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
return
}
//if err == nil {
// return fmt.Errorf("destination already exists")
//}
err = os.MkdirAll(dst, si.Mode())
if err != nil {
return
}
entries, err := ioutil.ReadDir(src)
if err != nil {
return
}
for _, entry := range entries {
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())
if entry.IsDir() {
err = CopyDir(srcPath, dstPath)
if err != nil {
return
}
} else {
// Skip symlinks.
if entry.Mode()&os.ModeSymlink != 0 {
continue
}
err = CopyFile(srcPath, dstPath)
if err != nil {
return
}
}
}
return
}
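// Usage sketch (the paths are assumptions for illustration):
//
//	if err := CopyDir("/tmp/project-src", "/tmp/project-copy"); err != nil {
//		// handle the error
//	}
//
// Symlinked entries inside the source tree are skipped, as documented above.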
func CheckKubectlVersion(compareMinor int) error {
commonParams := []string{"version", "-o", "json"}
jsonBody, err := tools.ExecCommand(nil, false, "kubectl", commonParams...)
if err != nil {
return err
}
var result map[string]interface{}
err = json.Unmarshal([]byte(jsonBody), &result)
if err != nil {
return err
}
targetResult := reflect.ValueOf(result["clientVersion"])
target := targetResult.Interface().(map[string]interface{})
minor, err := strconv.Atoi(target["minor"].(string))
if err != nil {
return err
}
if compareMinor > minor {
return errors.New(fmt.Sprintf("kubectl version required %d+", compareMinor))
}
return nil
}
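// Usage sketch: requiring a kubectl client of at least minor version 14
// (the threshold is an assumption for illustration):
//
//	if err := CheckKubectlVersion(14); err != nil {
//		// kubectl is too old or could not be queried
//	}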
| [
"\"SUDO_USER\"",
"\"SUDO_USER\""
]
| []
| [
"SUDO_USER"
]
| [] | ["SUDO_USER"] | go | 1 | 0 | |
src/sentry/utils/pytest.py | from __future__ import absolute_import
import mock
import os
import pytest
import signal
import urllib
from datetime import datetime
from django.conf import settings
from selenium import webdriver
def pytest_configure(config):
# HACK: Only needed for testing!
os.environ.setdefault('_SENTRY_SKIP_CONFIGURATION', '1')
os.environ.setdefault('RECAPTCHA_TESTING', 'True')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sentry.conf.server')
settings.SOUTH_TESTS_MIGRATE = os.environ.get('SENTRY_SOUTH_TESTS_MIGRATE', '1') == '1'
if not settings.configured:
# only configure the db if its not already done
test_db = os.environ.get('DB', 'postgres')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
# mysql requires running full migration all the time
settings.SOUTH_TESTS_MIGRATE = True
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'sentry.db.postgres',
'USER': 'postgres',
'NAME': 'sentry',
})
# postgres requires running full migration all the time
# since it has to install stored functions which come from
# an actual migration.
settings.SOUTH_TESTS_MIGRATE = True
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
settings.TEMPLATE_DEBUG = True
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
# Need a predictable key for tests that involve checking signatures
settings.SENTRY_PUBLIC = False
if not settings.SENTRY_CACHE:
settings.SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
settings.SENTRY_CACHE_OPTIONS = {}
# This speeds up the tests considerably, pbkdf2 is by design, slow.
settings.PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# Replace real sudo middleware with our mock sudo middleware
# to assert that the user is always in sudo mode
middleware = list(settings.MIDDLEWARE_CLASSES)
sudo = middleware.index('sentry.middleware.sudo.SudoMiddleware')
middleware[sudo] = 'sentry.testutils.middleware.SudoMiddleware'
settings.MIDDLEWARE_CLASSES = tuple(middleware)
# enable draft features
settings.SENTRY_OPTIONS['mail.enable-replies'] = True
settings.SENTRY_ALLOW_ORIGIN = '*'
settings.SENTRY_TSDB = 'sentry.tsdb.inmemory.InMemoryTSDB'
settings.SENTRY_TSDB_OPTIONS = {}
settings.RECAPTCHA_PUBLIC_KEY = 'a' * 40
settings.RECAPTCHA_PRIVATE_KEY = 'b' * 40
settings.BROKER_BACKEND = 'memory'
settings.BROKER_URL = None
settings.CELERY_ALWAYS_EAGER = False
settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
settings.DISABLE_RAVEN = True
settings.CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
if not hasattr(settings, 'SENTRY_OPTIONS'):
settings.SENTRY_OPTIONS = {}
settings.SENTRY_OPTIONS.update({
'redis.clusters': {
'default': {
'hosts': {
0: {
'db': 9,
},
},
},
},
'mail.backend': 'django.core.mail.backends.locmem.EmailBackend',
'system.url-prefix': 'http://testserver',
})
# django mail uses socket.getfqdn which doesn't play nice if our
# networking isn't stable
patcher = mock.patch('socket.getfqdn', return_value='localhost')
patcher.start()
from sentry.runner.initializer import (
bootstrap_options, initialize_receivers, fix_south, bind_cache_to_option_store)
bootstrap_options(settings)
fix_south(settings)
bind_cache_to_option_store()
initialize_receivers()
from sentry.utils.redis import clusters
with clusters.get('default').all() as client:
client.flushdb()
# force celery registration
from sentry.celery import app # NOQA
# disable DISALLOWED_IPS
from sentry import http
http.DISALLOWED_IPS = set()
def pytest_runtest_teardown(item):
from sentry.app import tsdb
tsdb.flush()
from sentry.utils.redis import clusters
with clusters.get('default').all() as client:
client.flushdb()
from celery.task.control import discard_all
discard_all()
# TODO(dcramer): ideally we could bundle up more of the browser logic here
# rather than splitting it between the fixtures and AcceptanceTestCase
@pytest.fixture(scope='session')
def percy(request, browser):
import percy
# Initialize Percy.
loader = percy.ResourceLoader(
root_dir=settings.STATIC_ROOT,
base_url=urllib.quote(settings.STATIC_URL),
webdriver=browser,
)
percy_config = percy.Config(default_widths=settings.PERCY_DEFAULT_TESTING_WIDTHS)
percy = percy.Runner(loader=loader, config=percy_config)
percy.initialize_build()
request.addfinalizer(percy.finalize_build)
return percy
@pytest.fixture(scope='session')
def browser(request):
# Initialize Selenium.
# NOTE: this relies on the phantomjs binary packaged from npm to be in the right
# location in node_modules.
phantomjs_path = os.path.join(
settings.NODE_MODULES_ROOT,
'phantomjs-prebuilt',
'bin',
'phantomjs',
)
browser = webdriver.PhantomJS(executable_path=phantomjs_path)
def fin():
# Teardown Selenium.
browser.close()
# TODO: remove this when fixed in: https://github.com/seleniumhq/selenium/issues/767
browser.service.process.send_signal(signal.SIGTERM)
browser.quit()
request.addfinalizer(fin)
return browser
@pytest.fixture(scope='class')
def screenshots_path_class(request, browser):
date = datetime.utcnow()
# AcceptanceTestCase.snapshot saves local screenshots here
path = os.path.normpath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'tmp', 'selenium-screenshots', date.strftime('%s'))
)
print('Screenshots will be stored in {}'.format(path))
os.makedirs(path)
request.cls.screenshots_path = path
@pytest.fixture(scope='class')
def browser_class(request, browser):
request.cls.browser = browser
@pytest.fixture(scope='class')
def percy_class(request, percy):
request.cls.percy = percy
@pytest.fixture(scope='function')
def reset_browser_session(request):
if not hasattr(request, 'browser'):
return
    browser = request.browser
    browser.delete_all_cookies()
    browser.get('about:blank')
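# Illustrative sketch (the class name and test body are assumptions, not part
# of this module): an acceptance test typically pulls in the class-scoped
# fixtures via pytest.mark.usefixtures, e.g.
#
#   @pytest.mark.usefixtures('browser_class', 'percy_class',
#                            'screenshots_path_class')
#   class OrganizationAcceptanceTest(AcceptanceTestCase):
#       def test_renders(self):
#           self.browser.get('about:blank')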
| []
| []
| [
"DB",
"SENTRY_SOUTH_TESTS_MIGRATE"
]
| [] | ["DB", "SENTRY_SOUTH_TESTS_MIGRATE"] | python | 2 | 0 | |
x-pack/elastic-agent/pkg/agent/cmd/run.go | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
package cmd
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/spf13/cobra"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/cli"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger"
)
func newRunCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStreams) *cobra.Command {
return &cobra.Command{
Use: "run",
Short: "Start the elastic-agent.",
Run: func(_ *cobra.Command, _ []string) {
if err := run(flags, streams); err != nil {
fmt.Fprintf(streams.Err, "%v\n", err)
os.Exit(1)
}
},
}
}
func run(flags *globalFlags, streams *cli.IOStreams) error {
pathConfigFile := flags.Config()
config, err := config.LoadYAML(pathConfigFile)
if err != nil {
return errors.New(err,
fmt.Sprintf("could not read configuration file %s", pathConfigFile),
errors.TypeFilesystem,
errors.M(errors.MetaKeyPath, pathConfigFile))
}
logger, err := logger.NewFromConfig("", config)
if err != nil {
return err
}
locker := application.NewAppLocker(paths.Data())
if err := locker.TryLock(); err != nil {
return err
}
defer locker.Unlock()
app, err := application.New(logger, pathConfigFile)
if err != nil {
return err
}
if err := app.Start(); err != nil {
return err
}
// listen for kill signal
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGKILL, syscall.SIGTERM, syscall.SIGQUIT)
<-signals
return app.Stop()
}
| []
| []
| []
| [] | [] | go | null | null | null |
lakalici/wsgi.py | """
WSGI config for lakalici project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lakalici.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
common/src/main/java/com/turn/ttorrent/common/TorrentCreator.java | package com.turn.ttorrent.common;
import com.turn.ttorrent.Constants;
import com.turn.ttorrent.bcodec.BEValue;
import com.turn.ttorrent.bcodec.BEncoder;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import java.io.*;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import static com.turn.ttorrent.common.TorrentMetadataKeys.*;
public class TorrentCreator {
private final static Logger logger = TorrentLoggerFactory.getLogger();
/**
* Torrent file piece length (in bytes), we use 512 kB.
*/
public static final int DEFAULT_PIECE_LENGTH = 512 * 1024;
private static final int HASHING_TIMEOUT_SEC = 15;
public static int HASHING_THREADS_COUNT = Runtime.getRuntime().availableProcessors();
private static final ExecutorService HASHING_EXECUTOR = Executors.newFixedThreadPool(HASHING_THREADS_COUNT, new ThreadFactory() {
@Override
public Thread newThread(@NotNull final Runnable r) {
final Thread thread = new Thread(r);
thread.setDaemon(true);
return thread;
}
});
/**
* Create a {@link TorrentMetadata} object for a file.
*
* <p>
* Hash the given file to create the {@link TorrentMetadata} object representing
* the Torrent meta info about this file, needed for announcing and/or
* sharing said file.
* </p>
*
* @param source The file to use in the torrent.
* @param announce The announce URI that will be used for this torrent.
* @param createdBy The creator's name, or any string identifying the
* torrent's creator.
*/
public static TorrentMetadata create(File source, URI announce, String createdBy)
throws InterruptedException, IOException {
return create(source, null, announce, createdBy);
}
/**
* Create a {@link TorrentMetadata} object for a set of files.
*
* <p>
* Hash the given files to create the multi-file {@link TorrentMetadata} object
* representing the Torrent meta-info about them, needed for announcing
* and/or sharing these files. Since we created the torrent, we're
* considering we'll be a full initial seeder for it.
* </p>
*
* @param parent The parent directory or location of the torrent files,
* also used as the torrent's name.
* @param files The files to add into this torrent.
* @param announce The announce URI that will be used for this torrent.
* @param createdBy The creator's name, or any string identifying the
* torrent's creator.
*/
public static TorrentMetadata create(File parent, List<File> files, URI announce,
String createdBy) throws InterruptedException, IOException {
return create(parent, files, announce, null, createdBy);
}
/**
* Create a {@link TorrentMetadata} object for a file.
*
* <p>
* Hash the given file to create the {@link TorrentMetadata} object representing
* the Torrent metainfo about this file, needed for announcing and/or
* sharing said file.
* </p>
*
* @param source The file to use in the torrent.
* @param announceList The announce URIs organized as tiers that will
* be used for this torrent
* @param createdBy The creator's name, or any string identifying the
* torrent's creator.
*/
public static TorrentMetadata create(File source, List<List<URI>> announceList,
String createdBy) throws InterruptedException, IOException {
return create(source, null, null, announceList, createdBy);
}
/**
* Create a {@link TorrentMetadata} object for a set of files.
*
* <p>
* Hash the given files to create the multi-file {@link TorrentMetadata} object
* representing the Torrent meta-info about them, needed for announcing
* and/or sharing these files. Since we created the torrent, we're
* considering we'll be a full initial seeder for it.
* </p>
*
* @param source The parent directory or location of the torrent files,
* also used as the torrent's name.
* @param files The files to add into this torrent.
* @param announceList The announce URIs organized as tiers that will
* be used for this torrent
* @param createdBy The creator's name, or any string identifying the
* torrent's creator.
*/
public static TorrentMetadata create(File source, List<File> files,
List<List<URI>> announceList, String createdBy)
throws InterruptedException, IOException {
return create(source, files, null, announceList, createdBy);
}
/**
* Helper method to create a {@link TorrentMetadata} object for a set of files.
*
* <p>
* Hash the given files to create the multi-file {@link TorrentMetadata} object
* representing the Torrent meta-info about them, needed for announcing
* and/or sharing these files. Since we created the torrent, we're
* considering we'll be a full initial seeder for it.
* </p>
*
* @param parent The parent directory or location of the torrent files,
* also used as the torrent's name.
* @param files The files to add into this torrent.
* @param announce The announce URI that will be used for this torrent.
* @param announceList The announce URIs organized as tiers that will
* be used for this torrent
* @param createdBy The creator's name, or any string identifying the
* torrent's creator.
*/
public static TorrentMetadata create(File parent, List<File> files, URI announce, List<List<URI>> announceList, String createdBy)
throws InterruptedException, IOException {
return create(parent, files, announce, announceList, createdBy, DEFAULT_PIECE_LENGTH);
}
public static TorrentMetadata create(File parent, List<File> files, URI announce,
List<List<URI>> announceList, String createdBy, final int pieceSize)
throws InterruptedException, IOException {
return create(parent, files, announce, announceList, createdBy, System.currentTimeMillis() / 1000, pieceSize);
}
//for tests
/*package local*/
static TorrentMetadata create(File parent, List<File> files, URI announce,
List<List<URI>> announceList, String createdBy, long creationTimeSecs, final int pieceSize)
throws InterruptedException, IOException {
Map<String, BEValue> torrent = new HashMap<String, BEValue>();
if (announce != null) {
torrent.put(ANNOUNCE, new BEValue(announce.toString()));
}
if (announceList != null) {
List<BEValue> tiers = new LinkedList<BEValue>();
for (List<URI> trackers : announceList) {
List<BEValue> tierInfo = new LinkedList<BEValue>();
for (URI trackerURI : trackers) {
tierInfo.add(new BEValue(trackerURI.toString()));
}
tiers.add(new BEValue(tierInfo));
}
torrent.put(ANNOUNCE_LIST, new BEValue(tiers));
}
torrent.put(CREATION_DATE_SEC, new BEValue(creationTimeSecs));
torrent.put(CREATED_BY, new BEValue(createdBy));
Map<String, BEValue> info = new TreeMap<String, BEValue>();
info.put(NAME, new BEValue(parent.getName()));
info.put(PIECE_LENGTH, new BEValue(pieceSize));
if (files == null || files.isEmpty()) {
info.put(FILE_LENGTH, new BEValue(parent.length()));
info.put(PIECES, new BEValue(hashFile(parent, pieceSize),
Constants.BYTE_ENCODING));
} else {
List<BEValue> fileInfo = new LinkedList<BEValue>();
for (File file : files) {
Map<String, BEValue> fileMap = new HashMap<String, BEValue>();
fileMap.put(FILE_LENGTH, new BEValue(file.length()));
LinkedList<BEValue> filePath = new LinkedList<BEValue>();
while (file != null) {
if (file.equals(parent)) {
break;
}
filePath.addFirst(new BEValue(file.getName()));
file = file.getParentFile();
}
fileMap.put(FILE_PATH, new BEValue(filePath));
fileInfo.add(new BEValue(fileMap));
}
info.put(FILES, new BEValue(fileInfo));
info.put(PIECES, new BEValue(hashFiles(files, pieceSize),
Constants.BYTE_ENCODING));
}
torrent.put(INFO_TABLE, new BEValue(info));
ByteArrayOutputStream baos = new ByteArrayOutputStream();
BEncoder.bencode(new BEValue(torrent), baos);
return new TorrentParser().parse(baos.toByteArray());
}
/**
* Return the concatenation of the SHA-1 hashes of a file's pieces.
*
* <p>
* Hashes the given file piece by piece using the default Torrent piece
* length (see {@link #DEFAULT_PIECE_LENGTH}) and returns the concatenation of
* these hashes, as a string.
* </p>
*
* <p>
* This is used for creating Torrent meta-info structures from a file.
* </p>
*
* @param file The file to hash.
*/
private static String hashFile(final File file, final int pieceSize)
throws InterruptedException, IOException {
return hashFiles(Collections.singletonList(file), pieceSize);
}
private static String hashFiles(final List<File> files, final int pieceSize)
throws InterruptedException, IOException {
if (files.size() == 0) {
return "";
}
List<Future<String>> results = new LinkedList<Future<String>>();
long length = 0L;
final ByteBuffer buffer = ByteBuffer.allocate(pieceSize);
final AtomicInteger threadIdx = new AtomicInteger(0);
final String firstFileName = files.get(0).getName();
StringBuilder hashes = new StringBuilder();
long start = System.nanoTime();
for (File file : files) {
logger.debug("Analyzing local data for {} with {} threads...",
file.getName(), HASHING_THREADS_COUNT);
length += file.length();
FileInputStream fis = new FileInputStream(file);
FileChannel channel = fis.getChannel();
try {
while (channel.read(buffer) > 0) {
if (buffer.remaining() == 0) {
buffer.clear();
final ByteBuffer data = prepareDataFromBuffer(buffer);
results.add(HASHING_EXECUTOR.submit(new Callable<String>() {
@Override
public String call() throws Exception {
Thread.currentThread().setName(String.format("%s hasher #%d", firstFileName, threadIdx.incrementAndGet()));
return new CallableChunkHasher(data).call();
}
}));
}
if (results.size() >= HASHING_THREADS_COUNT) {
// process hashers, otherwise they will spend too much memory
waitForHashesToCalculate(results, hashes);
results.clear();
}
}
} finally {
channel.close();
fis.close();
}
}
// Hash the last bit, if any
if (buffer.position() > 0) {
buffer.limit(buffer.position());
buffer.position(0);
final ByteBuffer data = prepareDataFromBuffer(buffer);
results.add(HASHING_EXECUTOR.submit(new CallableChunkHasher(data)));
}
// here we have only a few hashes to wait for calculation
waitForHashesToCalculate(results, hashes);
long elapsed = System.nanoTime() - start;
int expectedPieces = (int) (Math.ceil(
(double) length / pieceSize));
logger.debug("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.",
new Object[]{
files.size(),
length,
results.size(),
expectedPieces,
String.format("%.1f", elapsed / 1e6),
});
return hashes.toString();
}
private static ByteBuffer prepareDataFromBuffer(ByteBuffer buffer) {
final ByteBuffer data = ByteBuffer.allocate(buffer.remaining());
buffer.mark();
data.put(buffer);
data.clear();
buffer.reset();
return data;
}
private static void waitForHashesToCalculate(List<Future<String>> results, StringBuilder hashes) throws InterruptedException, IOException {
try {
for (Future<String> chunk : results) {
hashes.append(chunk.get(HASHING_TIMEOUT_SEC, TimeUnit.SECONDS));
}
} catch (ExecutionException ee) {
throw new IOException("Error while hashing the torrent data!", ee);
} catch (TimeoutException e) {
throw new RuntimeException(String.format("very slow hashing: took more than %d seconds to calculate several pieces. Cancelling", HASHING_TIMEOUT_SEC));
}
}
/**
* Sets max number of threads to use when hash for file is calculated.
*
* @param hashingThreadsCount number of concurrent threads for file hash calculation
*/
public static void setHashingThreadsCount(int hashingThreadsCount) {
HASHING_THREADS_COUNT = hashingThreadsCount;
}
/**
* A {@link Callable} to hash a data chunk.
*
* @author mpetazzoni
*/
private static class CallableChunkHasher implements Callable<String> {
private final ByteBuffer data;
CallableChunkHasher(final ByteBuffer data) {
this.data = data;
}
@Override
public String call() throws UnsupportedEncodingException {
byte[] sha1Hash = TorrentUtils.calculateSha1Hash(this.data.array());
return new String(sha1Hash, Constants.BYTE_ENCODING);
}
}
static {
String threads = System.getenv("TTORRENT_HASHING_THREADS");
if (threads != null) {
try {
int count = Integer.parseInt(threads);
if (count > 0) {
TorrentCreator.HASHING_THREADS_COUNT = count;
}
} catch (NumberFormatException nfe) {
// Pass
}
}
}
}
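// Illustrative usage sketch (the file path and tracker URL are assumptions):
//
//   TorrentMetadata metadata = TorrentCreator.create(
//       new File("/data/ubuntu.iso"),
//       URI.create("http://tracker.example.com/announce"),
//       "example-creator");
//
// Callers must handle the declared InterruptedException and IOException.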
| [
"\"TTORRENT_HASHING_THREADS\""
]
| []
| [
"TTORRENT_HASHING_THREADS"
]
| [] | ["TTORRENT_HASHING_THREADS"] | java | 1 | 0 | |
pkgs/conda-manager-0.3.1-py27_0/lib/python2.7/site-packages/conda_manager/api/conda_api.py | # -*- coding: utf-8 -*-
"""
Updated `conda-api` to include additional methods, queued worker processes
calling `QProcess` instead of `subprocess.Popen`.
"""
# Standard library imports
from os.path import basename, isdir, join
from collections import deque
import json
import os
import platform
import re
import sys
import yaml
# Third party imports
from qtpy.QtCore import QByteArray, QObject, QProcess, QTimer, Signal
# Local imports
from conda_manager.utils.findpip import PIP_LIST_SCRIPT
from conda_manager.utils.logs import logger
__version__ = '1.3.0'
# --- Errors
# -----------------------------------------------------------------------------
class PipError(Exception):
"""General pip error."""
pass
class CondaError(Exception):
"""General Conda error."""
pass
class CondaProcessWorker(CondaError):
"""General Conda error."""
pass
class CondaEnvExistsError(CondaError):
"""Conda environment already exists."""
pass
# --- Helpers
# -----------------------------------------------------------------------------
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
DEBUG = False
def to_text_string(obj, encoding=None):
"""Convert `obj` to (unicode) text string."""
if PY2:
# Python 2
if encoding is None:
return unicode(obj)
else:
return unicode(obj, encoding)
else:
# Python 3
if encoding is None:
return str(obj)
elif isinstance(obj, str):
# In case this function is not used properly, this could happen
return obj
else:
return str(obj, encoding)
def handle_qbytearray(obj, encoding):
"""
Qt/Python3 compatibility helper.
"""
if isinstance(obj, QByteArray):
obj = obj.data()
return to_text_string(obj, encoding=encoding)
class ProcessWorker(QObject):
"""
"""
sig_finished = Signal(object, object, object)
sig_partial = Signal(object, object, object)
def __init__(self, cmd_list, parse=False, pip=False, callback=None,
extra_kwargs={}):
super(ProcessWorker, self).__init__()
self._result = None
self._cmd_list = cmd_list
self._parse = parse
self._pip = pip
self._conda = not pip
self._callback = callback
self._fired = False
self._communicate_first = False
self._partial_stdout = None
self._extra_kwargs = extra_kwargs
self._timer = QTimer()
self._process = QProcess()
self._timer.setInterval(50)
self._timer.timeout.connect(self._communicate)
self._process.finished.connect(self._communicate)
self._process.readyReadStandardOutput.connect(self._partial)
def _partial(self):
raw_stdout = self._process.readAllStandardOutput()
stdout = handle_qbytearray(raw_stdout, _CondaAPI.UTF8)
json_stdout = stdout.replace('\n\x00', '')
try:
json_stdout = json.loads(json_stdout)
except Exception:
json_stdout = stdout
if self._partial_stdout is None:
self._partial_stdout = stdout
else:
self._partial_stdout += stdout
self.sig_partial.emit(self, json_stdout, None)
def _communicate(self):
"""
"""
if not self._communicate_first:
if self._process.state() == QProcess.NotRunning:
self.communicate()
elif self._fired:
self._timer.stop()
def communicate(self):
"""
"""
self._communicate_first = True
self._process.waitForFinished()
if self._partial_stdout is None:
raw_stdout = self._process.readAllStandardOutput()
stdout = handle_qbytearray(raw_stdout, _CondaAPI.UTF8)
else:
stdout = self._partial_stdout
raw_stderr = self._process.readAllStandardError()
stderr = handle_qbytearray(raw_stderr, _CondaAPI.UTF8)
result = [stdout.encode(_CondaAPI.UTF8), stderr.encode(_CondaAPI.UTF8)]
# FIXME: Why does anaconda client print to stderr???
if PY2:
stderr = stderr.decode()
if 'using anaconda cloud api site' not in stderr.lower():
if stderr.strip() and self._conda:
raise Exception('{0}:\n'
'STDERR:\n{1}\nEND'
''.format(' '.join(self._cmd_list),
stderr))
# elif stderr.strip() and self._pip:
# raise PipError(self._cmd_list)
else:
result[-1] = ''
if self._parse and stdout:
try:
result = json.loads(stdout), result[-1]
except ValueError as error:
result = stdout, error
if 'error' in result[0]:
error = '{0}: {1}'.format(" ".join(self._cmd_list),
result[0]['error'])
result = result[0], error
if self._callback:
result = self._callback(result[0], result[-1],
**self._extra_kwargs), result[-1]
self._result = result
self.sig_finished.emit(self, result[0], result[-1])
if result[-1]:
logger.error(str(('error', result[-1])))
self._fired = True
return result
def close(self):
"""
"""
self._process.close()
def is_finished(self):
"""
"""
return self._process.state() == QProcess.NotRunning and self._fired
def start(self):
"""
"""
logger.debug(str(' '.join(self._cmd_list)))
if not self._fired:
            self._partial_stdout = None
self._process.start(self._cmd_list[0], self._cmd_list[1:])
self._timer.start()
else:
raise CondaProcessWorker('A Conda ProcessWorker can only run once '
'per method call.')
# --- API
# -----------------------------------------------------------------------------
class _CondaAPI(QObject):
"""
"""
ROOT_PREFIX = None
ENCODING = 'ascii'
UTF8 = 'utf-8'
DEFAULT_CHANNELS = ['https://repo.continuum.io/pkgs/pro',
'https://repo.continuum.io/pkgs/free']
def __init__(self, parent=None):
super(_CondaAPI, self).__init__()
self._parent = parent
self._queue = deque()
self._timer = QTimer()
self._current_worker = None
self._workers = []
self._timer.setInterval(1000)
self._timer.timeout.connect(self._clean)
self.set_root_prefix()
def _clean(self):
"""
Periodically check for inactive workers and remove their references.
"""
if self._workers:
for w in self._workers:
if w.is_finished():
self._workers.remove(w)
else:
self._current_worker = None
self._timer.stop()
def _start(self):
"""
"""
if len(self._queue) == 1:
self._current_worker = self._queue.popleft()
self._workers.append(self._current_worker)
self._current_worker.start()
self._timer.start()
def is_active(self):
"""
Check if a worker is still active.
"""
return len(self._workers) == 0
def terminate_all_processes(self):
"""
Kill all working processes.
"""
for worker in self._workers:
worker.close()
# --- Conda api
# -------------------------------------------------------------------------
def _call_conda(self, extra_args, abspath=True, parse=False,
callback=None):
"""
Call conda with the list of extra arguments, and return the worker.
The result can be force by calling worker.communicate(), which returns
the tuple (stdout, stderr).
"""
if abspath:
if sys.platform == 'win32':
python = join(self.ROOT_PREFIX, 'python.exe')
conda = join(self.ROOT_PREFIX, 'Scripts',
'conda-script.py')
else:
python = join(self.ROOT_PREFIX, 'bin/python')
conda = join(self.ROOT_PREFIX, 'bin/conda')
cmd_list = [python, conda]
else:
# Just use whatever conda is on the path
cmd_list = ['conda']
cmd_list.extend(extra_args)
process_worker = ProcessWorker(cmd_list, parse=parse,
callback=callback)
process_worker.sig_finished.connect(self._start)
self._queue.append(process_worker)
self._start()
return process_worker
def _call_and_parse(self, extra_args, abspath=True, callback=None):
"""
"""
return self._call_conda(extra_args, abspath=abspath, parse=True,
callback=callback)
def _setup_install_commands_from_kwargs(self, kwargs, keys=tuple()):
cmd_list = []
if kwargs.get('override_channels', False) and 'channel' not in kwargs:
raise TypeError('conda search: override_channels requires channel')
if 'env' in kwargs:
cmd_list.extend(['--name', kwargs.pop('env')])
if 'prefix' in kwargs:
cmd_list.extend(['--prefix', kwargs.pop('prefix')])
if 'channel' in kwargs:
channel = kwargs.pop('channel')
if isinstance(channel, str):
cmd_list.extend(['--channel', channel])
else:
cmd_list.append('--channel')
cmd_list.extend(channel)
for key in keys:
if key in kwargs and kwargs[key]:
cmd_list.append('--' + key.replace('_', '-'))
return cmd_list
def set_root_prefix(self, prefix=None):
"""
Set the prefix to the root environment (default is /opt/anaconda).
This function should only be called once (right after importing
conda_api).
"""
if prefix:
self.ROOT_PREFIX = prefix
else:
# Find some conda instance, and then use info to get 'root_prefix'
worker = self._call_and_parse(['info', '--json'], abspath=False)
info = worker.communicate()[0]
self.ROOT_PREFIX = info['root_prefix']
def get_conda_version(self):
"""
Return the version of conda being used (invoked) as a string.
"""
return self._call_conda(['--version'],
callback=self._get_conda_version)
def _get_conda_version(self, stdout, stderr):
# argparse outputs version to stderr in Python < 3.4.
# http://bugs.python.org/issue18920
pat = re.compile(r'conda:?\s+(\d+\.\d\S+|unknown)')
m = pat.match(stderr.decode().strip())
if m is None:
m = pat.match(stdout.decode().strip())
if m is None:
raise Exception('output did not match: {0}'.format(stderr))
return m.group(1)
def get_envs(self):
"""
        Return all of the (named) environments (this does not include the root
        environment), as a list of absolute paths to their prefixes.
"""
logger.debug('')
# return self._call_and_parse(['info', '--json'],
# callback=lambda o, e: o['envs'])
envs = os.listdir(os.sep.join([self.ROOT_PREFIX, 'envs']))
envs = [os.sep.join([self.ROOT_PREFIX, 'envs', i]) for i in envs]
valid_envs = [e for e in envs if os.path.isdir(e) and
self.environment_exists(prefix=e)]
return valid_envs
def get_prefix_envname(self, name):
"""
Given the name of an environment return its full prefix path, or None
if it cannot be found.
"""
prefix = None
if name == 'root':
prefix = self.ROOT_PREFIX
# envs, error = self.get_envs().communicate()
envs = self.get_envs()
for p in envs:
if basename(p) == name:
prefix = p
return prefix
def linked(self, prefix):
"""
Return the (set of canonical names) of linked packages in `prefix`.
"""
logger.debug(str(prefix))
if not isdir(prefix):
raise Exception('no such directory: {0}'.format(prefix))
meta_dir = join(prefix, 'conda-meta')
if not isdir(meta_dir):
# We might have nothing in linked (and no conda-meta directory)
return set()
return set(fn[:-5] for fn in os.listdir(meta_dir)
if fn.endswith('.json'))
def split_canonical_name(self, cname):
"""
Split a canonical package name into (name, version, build) strings.
"""
return tuple(cname.rsplit('-', 2))
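    # Illustrative example of the rsplit above (the package name is an assumption):
    #   split_canonical_name('numpy-1.10.4-py27_0') -> ('numpy', '1.10.4', 'py27_0')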
def info(self, abspath=True):
"""
Return a dictionary with configuration information.
No guarantee is made about which keys exist. Therefore this function
should only be used for testing and debugging.
"""
logger.debug(str(''))
return self._call_and_parse(['info', '--json'], abspath=abspath)
def package_info(self, package, abspath=True):
"""
Return a dictionary with package information.
"""
return self._call_and_parse(['info', package, '--json'],
abspath=abspath)
def search(self, regex=None, spec=None, **kwargs):
"""
Search for packages.
"""
cmd_list = ['search', '--json']
if regex and spec:
raise TypeError('conda search: only one of regex or spec allowed')
if regex:
cmd_list.append(regex)
if spec:
cmd_list.extend(['--spec', spec])
if 'platform' in kwargs:
cmd_list.extend(['--platform', kwargs.pop('platform')])
cmd_list.extend(
self._setup_install_commands_from_kwargs(
kwargs,
('canonical', 'unknown', 'use_index_cache', 'outdated',
'override_channels')))
return self._call_and_parse(cmd_list,
abspath=kwargs.get('abspath', True))
def create(self, name=None, prefix=None, pkgs=None, channels=None):
"""
Create an environment either by name or path with a specified set of
packages.
"""
logger.debug(str((prefix, pkgs, channels)))
# TODO: Fix temporal hack
if not pkgs or not isinstance(pkgs, (list, tuple, str)):
raise TypeError('must specify a list of one or more packages to '
'install into new environment')
cmd_list = ['create', '--yes', '--quiet', '--json', '--mkdir']
if name:
ref = name
search = [os.path.join(d, name) for d in
self.info().communicate()[0]['envs_dirs']]
cmd_list.extend(['--name', name])
elif prefix:
ref = prefix
search = [prefix]
cmd_list.extend(['--prefix', prefix])
else:
raise TypeError('must specify either an environment name or a '
'path for new environment')
if any(os.path.exists(prefix) for prefix in search):
raise CondaEnvExistsError('Conda environment {0} already '
'exists'.format(ref))
# TODO: Fix temporal hack
if isinstance(pkgs, (list, tuple)):
cmd_list.extend(pkgs)
elif isinstance(pkgs, str):
cmd_list.extend(['--file', pkgs])
# TODO: Check if correct
if channels:
cmd_list.extend(['--override-channels'])
for channel in channels:
cmd_list.extend(['--channel'])
cmd_list.extend([channel])
return self._call_and_parse(cmd_list)
def parse_token_channel(self, channel, token):
"""
        Adapt a channel to include the authentication token of the logged-in
        user.
        Default channels are ignored.
"""
if token and channel not in self.DEFAULT_CHANNELS:
url_parts = channel.split('/')
start = url_parts[:-1]
middle = 't/{0}'.format(token)
end = url_parts[-1]
token_channel = '{0}/{1}/{2}'.format('/'.join(start), middle, end)
return token_channel
else:
return channel
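    # Illustrative example (channel and token are assumptions): a channel such
    # as 'https://conda.anaconda.org/some-user' with token 'tk-123' becomes
    # 'https://conda.anaconda.org/t/tk-123/some-user'; default channels are
    # returned unchanged.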
def install(self, name=None, prefix=None, pkgs=None, dep=True,
channels=None, token=None):
"""
Install packages into an environment either by name or path with a
specified set of packages.
If token is specified, the channels different from the defaults will
get the token appended.
"""
logger.debug(str((prefix, pkgs, channels)))
# TODO: Fix temporal hack
if not pkgs or not isinstance(pkgs, (list, tuple, str)):
raise TypeError('must specify a list of one or more packages to '
'install into existing environment')
cmd_list = ['install', '--yes', '--json', '--force-pscheck']
if name:
cmd_list.extend(['--name', name])
elif prefix:
cmd_list.extend(['--prefix', prefix])
else:
# Just install into the current environment, whatever that is
pass
# TODO: Check if correct
if channels:
cmd_list.extend(['--override-channels'])
for channel in channels:
cmd_list.extend(['--channel'])
channel = self.parse_token_channel(channel, token)
cmd_list.extend([channel])
# TODO: Fix temporal hack
if isinstance(pkgs, (list, tuple)):
cmd_list.extend(pkgs)
elif isinstance(pkgs, str):
cmd_list.extend(['--file', pkgs])
if not dep:
cmd_list.extend(['--no-deps'])
return self._call_and_parse(cmd_list)
def update(self, *pkgs, **kwargs):
"""
Update package(s) (in an environment) by name.
"""
cmd_list = ['update', '--json', '--quiet', '--yes']
if not pkgs and not kwargs.get('all'):
raise TypeError("Must specify at least one package to update, or "
"all=True.")
cmd_list.extend(
self._setup_install_commands_from_kwargs(
kwargs,
('dry_run', 'no_deps', 'override_channels',
'no_pin', 'force', 'all', 'use_index_cache', 'use_local',
'alt_hint')))
cmd_list.extend(pkgs)
return self._call_and_parse(cmd_list, abspath=kwargs.get('abspath',
True))
def remove(self, name=None, prefix=None, pkgs=None, all_=False):
"""
Remove a package (from an environment) by name.
Returns {
success: bool, (this is always true),
(other information)
}
"""
logger.debug(str((prefix, pkgs)))
cmd_list = ['remove', '--json', '--quiet', '--yes']
if not pkgs and not all_:
raise TypeError("Must specify at least one package to remove, or "
"all=True.")
if name:
cmd_list.extend(['--name', name])
elif prefix:
cmd_list.extend(['--prefix', prefix])
else:
raise TypeError('must specify either an environment name or a '
'path for package removal')
if all_:
cmd_list.extend(['--all'])
else:
cmd_list.extend(pkgs)
return self._call_and_parse(cmd_list)
def remove_environment(self, name=None, path=None, **kwargs):
"""
Remove an environment entirely.
See ``remove``.
"""
        return self.remove(name=name, prefix=path, all_=True, **kwargs)
def clone_environment(self, clone, name=None, prefix=None, **kwargs):
"""
Clone the environment `clone` into `name` or `prefix`.
"""
cmd_list = ['create', '--json', '--quiet']
if (name and prefix) or not (name or prefix):
raise TypeError("conda clone_environment: exactly one of `name` "
"or `path` required")
if name:
cmd_list.extend(['--name', name])
if prefix:
cmd_list.extend(['--prefix', prefix])
cmd_list.extend(['--clone', clone])
cmd_list.extend(
self._setup_install_commands_from_kwargs(
kwargs,
('dry_run', 'unknown', 'use_index_cache', 'use_local',
'no_pin', 'force', 'all', 'channel', 'override_channels',
'no_default_packages')))
return self._call_and_parse(cmd_list, abspath=kwargs.get('abspath',
True))
# FIXME:
    def process(self, name=None, prefix=None, cmd=None, args=None):
"""
Create a Popen process for cmd using the specified args but in the
conda environment specified by name or prefix.
The returned object will need to be invoked with p.communicate() or
similar.
"""
if bool(name) == bool(prefix):
raise TypeError('exactly one of name or prefix must be specified')
if not cmd:
raise TypeError('cmd to execute must be specified')
if not args:
args = []
if name:
prefix = self.get_prefix_envname(name)
conda_env = dict(os.environ)
sep = os.pathsep
if sys.platform == 'win32':
conda_env['PATH'] = join(prefix,
'Scripts') + sep + conda_env['PATH']
else:
# Unix
conda_env['PATH'] = join(prefix, 'bin') + sep + conda_env['PATH']
conda_env['PATH'] = prefix + os.pathsep + conda_env['PATH']
cmd_list = [cmd]
cmd_list.extend(args)
# = self.subprocess.process(cmd_list, env=conda_env, stdin=stdin,
# stdout=stdout, stderr=stderr)
def _setup_config_from_kwargs(self, kwargs):
cmd_list = ['--json', '--force']
if 'file' in kwargs:
cmd_list.extend(['--file', kwargs['file']])
if 'system' in kwargs:
cmd_list.append('--system')
return cmd_list
def config_path(self, **kwargs):
"""
Get the path to the config file.
"""
cmd_list = ['config', '--get']
cmd_list.extend(self._setup_config_from_kwargs(kwargs))
return self._call_and_parse(cmd_list,
abspath=kwargs.get('abspath', True),
callback=lambda o, e: o['rc_path'])
def config_get(self, *keys, **kwargs):
"""
Get the values of configuration keys.
Returns a dictionary of values. Note, the key may not be in the
dictionary if the key wasn't set in the configuration file.
"""
cmd_list = ['config', '--get']
cmd_list.extend(keys)
cmd_list.extend(self._setup_config_from_kwargs(kwargs))
return self._call_and_parse(cmd_list,
abspath=kwargs.get('abspath', True),
callback=lambda o, e: o['get'])
def config_set(self, key, value, **kwargs):
"""
Set a key to a (bool) value.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--set', key, str(value)]
cmd_list.extend(self._setup_config_from_kwargs(kwargs))
return self._call_and_parse(
cmd_list,
abspath=kwargs.get('abspath', True),
callback=lambda o, e: o.get('warnings', []))
def config_add(self, key, value, **kwargs):
"""
Add a value to a key.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--add', key, value]
cmd_list.extend(self._setup_config_from_kwargs(kwargs))
return self._call_and_parse(
cmd_list,
abspath=kwargs.get('abspath', True),
callback=lambda o, e: o.get('warnings', []))
def config_remove(self, key, value, **kwargs):
"""
Remove a value from a key.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--remove', key, value]
cmd_list.extend(self._setup_config_from_kwargs(kwargs))
return self._call_and_parse(
cmd_list,
abspath=kwargs.get('abspath', True),
callback=lambda o, e: o.get('warnings', []))
def config_delete(self, key, **kwargs):
"""
Remove a key entirely.
Returns a list of warnings Conda may have emitted.
"""
cmd_list = ['config', '--remove-key', key]
cmd_list.extend(self._setup_config_from_kwargs(kwargs))
return self._call_and_parse(
cmd_list,
abspath=kwargs.get('abspath', True),
callback=lambda o, e: o.get('warnings', []))
def run(self, command, abspath=True):
"""
Launch the specified app by name or full package name.
Returns a dictionary containing the key "fn", whose value is the full
package (ending in ``.tar.bz2``) of the app.
"""
cmd_list = ['run', '--json', command]
return self._call_and_parse(cmd_list, abspath=abspath)
# --- Additional methods
# -----------------------------------------------------------------------------
def dependencies(self, name=None, prefix=None, pkgs=None, channels=None,
dep=True):
"""
        Get dependency list for packages to be installed into an environment
defined either by 'name' or 'prefix'.
"""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to '
'install into existing environment')
cmd_list = ['install', '--dry-run', '--json', '--force-pscheck']
if not dep:
cmd_list.extend(['--no-deps'])
if name:
cmd_list.extend(['--name', name])
elif prefix:
cmd_list.extend(['--prefix', prefix])
else:
pass
cmd_list.extend(pkgs)
# TODO: Check if correct
if channels:
cmd_list.extend(['--override-channels'])
for channel in channels:
cmd_list.extend(['--channel'])
cmd_list.extend([channel])
return self._call_and_parse(cmd_list)
def environment_exists(self, name=None, prefix=None, abspath=True):
"""
Check if an environment exists by 'name' or by 'prefix'. If query is
by 'name' only the default conda environments directory is searched.
"""
logger.debug(str((name, prefix)))
if name and prefix:
raise TypeError("Exactly one of 'name' or 'prefix' is required.")
if name:
prefix = self.get_prefix_envname(name)
if prefix is None:
prefix = self.ROOT_PREFIX
return os.path.isdir(os.path.join(prefix, 'conda-meta'))
def clear_lock(self, abspath=True):
"""
Clean any conda lock in the system.
"""
cmd_list = ['clean', '--lock', '--json']
return self._call_and_parse(cmd_list, abspath=abspath)
def package_version(self, prefix=None, name=None, pkg=None):
"""
"""
package_versions = {}
if name and prefix:
raise TypeError("Exactly one of 'name' or 'prefix' is required.")
if name:
prefix = self.get_prefix_envname(name)
if self.environment_exists(prefix=prefix):
for package in self.linked(prefix):
if pkg in package:
n, v, b = self.split_canonical_name(package)
package_versions[n] = v
return package_versions.get(pkg, None)
def get_platform(self):
"""
Get platform of current system (system and bitness).
"""
_sys_map = {'linux2': 'linux', 'linux': 'linux',
'darwin': 'osx', 'win32': 'win', 'openbsd5': 'openbsd'}
non_x86_linux_machines = {'armv6l', 'armv7l', 'ppc64le'}
sys_platform = _sys_map.get(sys.platform, 'unknown')
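        # tuple.__itemsize__ is the size of a C pointer in bytes, so this
        # yields 32 or 64 depending on the interpreter's bitness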
bits = 8 * tuple.__itemsize__
if (sys_platform == 'linux' and
platform.machine() in non_x86_linux_machines):
arch_name = platform.machine()
subdir = 'linux-{0}'.format(arch_name)
else:
arch_name = {64: 'x86_64', 32: 'x86'}[bits]
subdir = '{0}-{1}'.format(sys_platform, bits)
return subdir
def get_condarc_channels(self):
"""
Returns all the channel urls defined in .condarc using the defined
`channel_alias`.
If no condarc file is found, use the default channels.
"""
# First get the location of condarc file and parse it to get
# the channel alias and the channels.
default_channel_alias = 'https://conda.anaconda.org'
default_urls = ['https://repo.continuum.io/pkgs/free',
'https://repo.continuum.io/pkgs/pro']
condarc_path = os.path.abspath(os.path.expanduser('~/.condarc'))
channels = default_urls[:]
if not os.path.isfile(condarc_path):
condarc = None
channel_alias = default_channel_alias
else:
with open(condarc_path, 'r') as f:
data = f.read()
                condarc = yaml.safe_load(data)
channels += condarc.get('channels', [])
channel_alias = condarc.get('channel_alias',
default_channel_alias)
if channel_alias[-1] == '/':
template = "{0}{1}"
else:
template = "{0}/{1}"
if 'defaults' in channels:
channels.remove('defaults')
channel_urls = []
for channel in channels:
if not channel.startswith('http'):
channel_url = template.format(channel_alias, channel)
else:
channel_url = channel
channel_urls.append(channel_url)
return channel_urls
# --- Pip commands
# -------------------------------------------------------------------------
def _call_pip(self, name=None, prefix=None, extra_args=None,
callback=None):
""" """
cmd_list = self._pip_cmd(name=name, prefix=prefix)
cmd_list.extend(extra_args)
process_worker = ProcessWorker(cmd_list, pip=True, callback=callback)
process_worker.sig_finished.connect(self._start)
self._queue.append(process_worker)
self._start()
return process_worker
def _pip_cmd(self, name=None, prefix=None):
"""
Get pip location based on environment `name` or `prefix`.
"""
if (name and prefix) or not (name or prefix):
raise TypeError("conda pip: exactly one of 'name' ""or 'prefix' "
"required.")
if name and self.environment_exists(name=name):
prefix = self.get_prefix_envname(name)
if sys.platform == 'win32':
python = join(prefix, 'python.exe') # FIXME:
pip = join(prefix, 'pip.exe') # FIXME:
else:
python = join(prefix, 'bin/python')
pip = join(prefix, 'bin/pip')
cmd_list = [python, pip]
return cmd_list
def pip_list(self, name=None, prefix=None, abspath=True):
"""
Get list of pip installed packages.
"""
if (name and prefix) or not (name or prefix):
raise TypeError("conda pip: exactly one of 'name' ""or 'prefix' "
"required.")
if name:
prefix = self.get_prefix_envname(name)
pip_command = os.sep.join([prefix, 'bin', 'python'])
cmd_list = [pip_command, PIP_LIST_SCRIPT]
process_worker = ProcessWorker(cmd_list, pip=True, parse=True,
callback=self._pip_list,
extra_kwargs={'prefix': prefix})
process_worker.sig_finished.connect(self._start)
self._queue.append(process_worker)
self._start()
return process_worker
# if name:
# cmd_list = ['list', '--name', name]
# if prefix:
# cmd_list = ['list', '--prefix', prefix]
# return self._call_conda(cmd_list, abspath=abspath,
# callback=self._pip_list)
def _pip_list(self, stdout, stderr, prefix=None):
"""
"""
result = stdout # A dict
linked = self.linked(prefix)
pip_only = []
linked_names = [self.split_canonical_name(l)[0] for l in linked]
for pkg in result:
name = self.split_canonical_name(pkg)[0]
if name not in linked_names:
pip_only.append(pkg)
# FIXME: NEED A MORE ROBUST WAY!
# if '<pip>' in line and '#' not in line:
# temp = line.split()[:-1] + ['pip']
# temp = '-'.join(temp)
# if '-(' in temp:
# start = temp.find('-(')
# end = temp.find(')')
# substring = temp[start:end+1]
# temp = temp.replace(substring, '')
# result.append(temp)
return pip_only
def pip_remove(self, name=None, prefix=None, pkgs=None):
"""
Remove a pip package in given environment by `name` or `prefix`.
"""
logger.debug(str((prefix, pkgs)))
        if isinstance(pkgs, (list, tuple)):
pkg = ' '.join(pkgs)
else:
pkg = pkgs
extra_args = ['uninstall', '--yes', pkg]
return self._call_pip(name=name, prefix=prefix, extra_args=extra_args)
def pip_search(self, search_string=None):
"""
Search for pip installable python packages in PyPI matching
`search_string`.
"""
extra_args = ['search', search_string]
return self._call_pip(name='root', extra_args=extra_args,
callback=self._pip_search)
# if stderr:
# raise PipError(stderr)
# You are using pip version 7.1.2, however version 8.0.2 is available.
# You should consider upgrading via the 'pip install --upgrade pip'
# command.
def _pip_search(self, stdout, stderr):
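        """
        Parse `pip search` output lines of the form 'name - description'
        into a {name: description} dict.
        """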
result = {}
lines = to_text_string(stdout).split('\n')
while '' in lines:
lines.remove('')
for line in lines:
if ' - ' in line:
parts = line.split(' - ')
name = parts[0].strip()
description = parts[1].strip()
result[name] = description
return result
CONDA_API = None
def CondaAPI():
global CONDA_API
if CONDA_API is None:
CONDA_API = _CondaAPI()
return CONDA_API
COUNTER = 0
def ready_print(worker, output, error):
global COUNTER
COUNTER += 1
print(COUNTER, output, error)
def test():
"""
"""
from conda_manager.utils.qthelpers import qapplication
app = qapplication()
conda_api = CondaAPI()
# print(conda_api.get_condarc_channels())
# worker = conda_api.info()
## worker.sig_finished.connect(ready_print)
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.info()
# worker = conda_api.pip_search('spyder')
# worker.sig_finished.connect(ready_print)
worker = conda_api.pip_list(name='py3')
worker.sig_finished.connect(ready_print)
# print(conda_api.package_version(name='root', pkg='spyder'))
app.exec_()
if __name__ == '__main__':
test()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/e2e/app_management_test.go | package e2e
import (
"context"
"fmt"
"math/rand"
"os"
"path"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/argoproj/gitops-engine/pkg/diff"
"github.com/argoproj/gitops-engine/pkg/health"
. "github.com/argoproj/gitops-engine/pkg/sync/common"
. "github.com/argoproj/gitops-engine/pkg/utils/errors"
"github.com/argoproj/gitops-engine/pkg/utils/io"
"github.com/argoproj/gitops-engine/pkg/utils/kube"
"github.com/argoproj/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
networkingv1beta "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/pointer"
"github.com/argoproj/argo-cd/common"
applicationpkg "github.com/argoproj/argo-cd/pkg/apiclient/application"
repositorypkg "github.com/argoproj/argo-cd/pkg/apiclient/repository"
. "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
. "github.com/argoproj/argo-cd/test/e2e/fixture"
. "github.com/argoproj/argo-cd/test/e2e/fixture/app"
. "github.com/argoproj/argo-cd/util/argo"
"github.com/argoproj/argo-cd/util/settings"
)
const (
guestbookPath = "guestbook"
guestbookPathLocal = "./testdata/guestbook_local"
globalWithNoNameSpace = "global-with-no-namesapce"
guestbookWithNamespace = "guestbook-with-namespace"
)
func TestSyncToUnsignedCommit(t *testing.T) {
Given(t).
Project("gpg").
Path(guestbookPath).
When().
IgnoreErrors().
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationError)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(HealthIs(health.HealthStatusMissing))
}
func TestSyncToSignedCommitWithoutKnownKey(t *testing.T) {
Given(t).
Project("gpg").
Path(guestbookPath).
When().
AddSignedFile("test.yaml", "null").
IgnoreErrors().
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationError)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(HealthIs(health.HealthStatusMissing))
}
func TestSyncToSignedCommitKeyWithKnownKey(t *testing.T) {
Given(t).
Project("gpg").
Path(guestbookPath).
GPGPublicKeyAdded().
Sleep(2).
When().
AddSignedFile("test.yaml", "null").
IgnoreErrors().
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy))
}
func TestAppCreation(t *testing.T) {
ctx := Given(t)
ctx.
Path(guestbookPath).
When().
Create().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
assert.Equal(t, Name(), app.Name)
assert.Equal(t, RepoURL(RepoURLTypeFile), app.Spec.Source.RepoURL)
assert.Equal(t, guestbookPath, app.Spec.Source.Path)
assert.Equal(t, DeploymentNamespace(), app.Spec.Destination.Namespace)
assert.Equal(t, common.KubernetesInternalAPIServerAddr, app.Spec.Destination.Server)
}).
Expect(Event(EventReasonResourceCreated, "create")).
And(func(_ *Application) {
// app should be listed
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.Contains(t, output, Name())
}).
When().
// ensure that create is idempotent
Create().
Then().
Given().
Revision("master").
When().
// ensure that update replaces spec and merge labels and annotations
And(func() {
FailOnErr(AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Patch(
ctx.GetName(), types.MergePatchType, []byte(`{"metadata": {"labels": { "test": "label" }, "annotations": { "test": "annotation" }}}`)))
}).
Create("--upsert").
Then().
And(func(app *Application) {
assert.Equal(t, "label", app.Labels["test"])
assert.Equal(t, "annotation", app.Annotations["test"])
assert.Equal(t, "master", app.Spec.Source.TargetRevision)
})
}
// demonstrate that we cannot use a standard sync when an immutable field is changed, we must use "force"
func TestImmutableChange(t *testing.T) {
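	// derive two candidate ClusterIPs from the kube-dns service IP so they
	// fall inside the cluster's service CIDR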
text := FailOnErr(Run(".", "kubectl", "get", "service", "-n", "kube-system", "kube-dns", "-o", "jsonpath={.spec.clusterIP}")).(string)
parts := strings.Split(text, ".")
n := rand.Intn(254)
ip1 := fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n)
ip2 := fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n+1)
Given(t).
Path("service").
When().
Create().
PatchFile("service.yaml", fmt.Sprintf(`[{"op": "add", "path": "/spec/clusterIP", "value": "%s"}]`, ip1)).
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
When().
PatchFile("service.yaml", fmt.Sprintf(`[{"op": "add", "path": "/spec/clusterIP", "value": "%s"}]`, ip2)).
IgnoreErrors().
Sync().
DoNotIgnoreErrors().
Then().
Expect(OperationPhaseIs(OperationFailed)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(ResourceResultNumbering(1)).
Expect(ResourceResultIs(ResourceResult{
Kind: "Service",
Version: "v1",
Namespace: DeploymentNamespace(),
Name: "my-service",
SyncPhase: "Sync",
Status: "SyncFailed",
HookPhase: "Failed",
Message: fmt.Sprintf(`Service "my-service" is invalid: spec.clusterIP: Invalid value: "%s": field is immutable`, ip2),
})).
		// now we can do this with a force
Given().
Force().
When().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy))
}
func TestInvalidAppProject(t *testing.T) {
Given(t).
Path(guestbookPath).
Project("does-not-exist").
When().
IgnoreErrors().
Create().
Then().
Expect(Error("", "application references project does-not-exist which does not exist"))
}
func TestAppDeletion(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
When().
Delete(true).
Then().
Expect(DoesNotExist()).
Expect(Event(EventReasonResourceDeleted, "delete"))
output, err := RunCli("app", "list")
assert.NoError(t, err)
assert.NotContains(t, output, Name())
}
func TestAppLabels(t *testing.T) {
Given(t).
Path("config-map").
When().
Create("-l", "foo=bar").
Then().
And(func(app *Application) {
assert.Contains(t, FailOnErr(RunCli("app", "list")), Name())
assert.Contains(t, FailOnErr(RunCli("app", "list", "-l", "foo=bar")), Name())
assert.NotContains(t, FailOnErr(RunCli("app", "list", "-l", "foo=rubbish")), Name())
}).
Given().
		// clearing the name and using a non-matching label selector means nothing will sync
Name("").
When().
IgnoreErrors().
Sync("-l", "foo=rubbish").
DoNotIgnoreErrors().
Then().
Expect(Error("", "no apps match selector foo=rubbish")).
// check we can update the app and it is then sync'd
Given().
When().
Sync("-l", "foo=bar")
}
func TestTrackAppStateAndSyncApp(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(HealthIs(health.HealthStatusHealthy)).
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui OutOfSync Missing", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui OutOfSync Missing", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("Service %s guestbook-ui Synced ", DeploymentNamespace()))).
Expect(Success(fmt.Sprintf("apps Deployment %s guestbook-ui Synced", DeploymentNamespace()))).
Expect(Event(EventReasonResourceUpdated, "sync")).
And(func(app *Application) {
assert.NotNil(t, app.Status.OperationState.SyncResult)
})
}
func TestAppRollbackSuccessful(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.NotEmpty(t, app.Status.Sync.Revision)
}).
And(func(app *Application) {
appWithHistory := app.DeepCopy()
appWithHistory.Status.History = []RevisionHistory{{
ID: 1,
Revision: app.Status.Sync.Revision,
DeployedAt: metav1.Time{Time: metav1.Now().UTC().Add(-1 * time.Minute)},
Source: app.Spec.Source,
}, {
ID: 2,
Revision: "cdb",
DeployedAt: metav1.Time{Time: metav1.Now().UTC().Add(-2 * time.Minute)},
Source: app.Spec.Source,
}}
patch, _, err := diff.CreateTwoWayMergePatch(app, appWithHistory, &Application{})
assert.NoError(t, err)
app, err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Patch(app.Name, types.MergePatchType, patch)
assert.NoError(t, err)
// sync app and make sure it reaches InSync state
_, err = RunCli("app", "rollback", app.Name, "1")
assert.NoError(t, err)
}).
Expect(Event(EventReasonOperationStarted, "rollback")).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Equal(t, SyncStatusCodeSynced, app.Status.Sync.Status)
assert.NotNil(t, app.Status.OperationState.SyncResult)
assert.Equal(t, 2, len(app.Status.OperationState.SyncResult.Resources))
assert.Equal(t, OperationSucceeded, app.Status.OperationState.Phase)
assert.Equal(t, 3, len(app.Status.History))
})
}
func TestComparisonFailsIfClusterNotAdded(t *testing.T) {
Given(t).
Path(guestbookPath).
DestServer("https://not-registered-cluster/api").
When().
IgnoreErrors().
Create().
Then().
Expect(DoesNotExist())
}
func TestCannotSetInvalidPath(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
IgnoreErrors().
AppSet("--path", "garbage").
Then().
Expect(Error("", "app path does not exist"))
}
func TestManipulateApplicationResources(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
manifests, err := RunCli("app", "manifests", app.Name, "--source", "live")
assert.NoError(t, err)
resources, err := kube.SplitYAML(manifests)
assert.NoError(t, err)
index := -1
for i := range resources {
if resources[i].GetKind() == kube.DeploymentKind {
index = i
break
}
}
assert.True(t, index > -1)
deployment := resources[index]
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
_, err = client.DeleteResource(context.Background(), &applicationpkg.ApplicationResourceDeleteRequest{
Name: &app.Name,
Group: deployment.GroupVersionKind().Group,
Kind: deployment.GroupVersionKind().Kind,
Version: deployment.GroupVersionKind().Version,
Namespace: deployment.GetNamespace(),
ResourceName: deployment.GetName(),
})
assert.NoError(t, err)
}).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync))
}
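// assetSecretDataHidden recursively asserts that secret values in the manifest
// (including the last-applied-configuration annotation) are masked.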
func assetSecretDataHidden(t *testing.T, manifest string) {
secret, err := UnmarshalToUnstructured(manifest)
assert.NoError(t, err)
_, hasStringData, err := unstructured.NestedMap(secret.Object, "stringData")
assert.NoError(t, err)
assert.False(t, hasStringData)
secretData, hasData, err := unstructured.NestedMap(secret.Object, "data")
assert.NoError(t, err)
assert.True(t, hasData)
for _, v := range secretData {
assert.Regexp(t, regexp.MustCompile(`[*]*`), v)
}
var lastAppliedConfigAnnotation string
annotations := secret.GetAnnotations()
if annotations != nil {
lastAppliedConfigAnnotation = annotations[v1.LastAppliedConfigAnnotation]
}
if lastAppliedConfigAnnotation != "" {
assetSecretDataHidden(t, lastAppliedConfigAnnotation)
}
}
func TestAppWithSecrets(t *testing.T) {
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
Given(t).
Path("secrets").
When().
Create().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res := FailOnErr(client.GetResource(context.Background(), &applicationpkg.ApplicationResourceRequest{
Namespace: app.Spec.Destination.Namespace,
Kind: kube.SecretKind,
Group: "",
Name: &app.Name,
Version: "v1",
ResourceName: "test-secret",
})).(*applicationpkg.ApplicationResourceResponse)
assetSecretDataHidden(t, res.Manifest)
manifests, err := client.GetManifests(context.Background(), &applicationpkg.ApplicationManifestQuery{Name: &app.Name})
errors.CheckError(err)
for _, manifest := range manifests.Manifests {
assetSecretDataHidden(t, manifest)
}
diffOutput := FailOnErr(RunCli("app", "diff", app.Name)).(string)
assert.Empty(t, diffOutput)
// patch secret and make sure app is out of sync and diff detects the change
FailOnErr(KubeClientset.CoreV1().Secrets(DeploymentNamespace()).Patch(
"test-secret", types.JSONPatchType, []byte(`[
{"op": "remove", "path": "/data/username"},
{"op": "add", "path": "/stringData", "value": {"password": "foo"}}
]`)))
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name)
assert.Error(t, err)
assert.Contains(t, diffOutput, "username: ++++++++")
assert.Contains(t, diffOutput, "password: ++++++++++++")
// local diff should ignore secrets
diffOutput = FailOnErr(RunCli("app", "diff", app.Name, "--local", "testdata/secrets")).(string)
assert.Empty(t, diffOutput)
// ignore missing field and make sure diff shows no difference
app.Spec.IgnoreDifferences = []ResourceIgnoreDifferences{{
Kind: kube.SecretKind, JSONPointers: []string{"/data"},
}}
FailOnErr(client.UpdateSpec(context.Background(), &applicationpkg.ApplicationUpdateSpecRequest{Name: &app.Name, Spec: app.Spec}))
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
diffOutput := FailOnErr(RunCli("app", "diff", app.Name)).(string)
assert.Empty(t, diffOutput)
})
}
func TestResourceDiffing(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
// Patch deployment
_, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Patch(
"guestbook-ui", types.JSONPatchType, []byte(`[{ "op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "test" }]`))
assert.NoError(t, err)
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
assert.Error(t, err)
assert.Contains(t, diffOutput, fmt.Sprintf("===== apps/Deployment %s/guestbook-ui ======", DeploymentNamespace()))
}).
Given().
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {
IgnoreDifferences: OverrideIgnoreDiff{JSONPointers: []string{"/spec/template/spec/containers/0/image"}},
}}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name, "--local", "testdata/guestbook")
assert.NoError(t, err)
assert.Empty(t, diffOutput)
})
}
func TestCRDs(t *testing.T) {
testEdgeCasesApplicationResources(t, "crd-creation", health.HealthStatusHealthy)
}
func TestKnownTypesInCRDDiffing(t *testing.T) {
dummiesGVR := schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "dummies"}
Given(t).
Path("crd-creation").
When().Create().Sync().Then().
Expect(OperationPhaseIs(OperationSucceeded)).Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
And(func() {
dummyResIf := DynamicClientset.Resource(dummiesGVR).Namespace(DeploymentNamespace())
patchData := []byte(`{"spec":{"requests": {"cpu": "2"}}}`)
FailOnErr(dummyResIf.Patch("dummy-crd-instance", types.MergePatchType, patchData, metav1.PatchOptions{}))
}).Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
When().
And(func() {
SetResourceOverrides(map[string]ResourceOverride{
"argoproj.io/Dummy": {
KnownTypeFields: []KnownTypeField{{
Field: "spec.requests",
Type: "core/v1/ResourceList",
}},
},
})
}).
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestDuplicatedResources(t *testing.T) {
testEdgeCasesApplicationResources(t, "duplicated-resources", health.HealthStatusHealthy)
}
func TestConfigMap(t *testing.T) {
testEdgeCasesApplicationResources(t, "config-map", health.HealthStatusHealthy, "my-map Synced configmap/my-map created")
}
func TestFailedConversion(t *testing.T) {
defer func() {
FailOnErr(Run("", "kubectl", "delete", "apiservice", "v1beta1.metrics.k8s.io"))
}()
testEdgeCasesApplicationResources(t, "failed-conversion", health.HealthStatusProgressing)
}
func testEdgeCasesApplicationResources(t *testing.T, appPath string, statusCode health.HealthStatusCode, message ...string) {
expect := Given(t).
Path(appPath).
When().
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced))
for i := range message {
expect = expect.Expect(Success(message[i]))
}
expect.
Expect(HealthIs(statusCode)).
And(func(app *Application) {
diffOutput, err := RunCli("app", "diff", app.Name, "--local", path.Join("testdata", appPath))
assert.Empty(t, diffOutput)
assert.NoError(t, err)
})
}
func TestKsonnetApp(t *testing.T) {
Given(t).
Path("ksonnet").
Env("prod").
// Null out dest server to verify that destination is inferred from ksonnet app
Parameter("guestbook-ui=image=gcr.io/heptio-images/ks-guestbook-demo:0.1").
DestServer("").
When().
Create().
Sync().
Then().
And(func(app *Application) {
closer, client, err := ArgoCDClientset.NewRepoClient()
assert.NoError(t, err)
defer io.Close(closer)
details, err := client.GetAppDetails(context.Background(), &repositorypkg.RepoAppDetailsQuery{
Source: &app.Spec.Source,
})
assert.NoError(t, err)
serviceType := ""
for _, param := range details.Ksonnet.Parameters {
if param.Name == "type" && param.Component == "guestbook-ui" {
serviceType = param.Value
}
}
assert.Equal(t, serviceType, "LoadBalancer")
})
}
const actionsConfig = `discovery.lua: return { sample = {} }
definitions:
- name: sample
action.lua: |
obj.metadata.labels.sample = 'test'
return obj`
func TestResourceAction(t *testing.T) {
Given(t).
Path(guestbookPath).
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {Actions: actionsConfig}}).
When().
Create().
Sync().
Then().
And(func(app *Application) {
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
actions, err := client.ListResourceActions(context.Background(), &applicationpkg.ApplicationResourceRequest{
Name: &app.Name,
Group: "apps",
Kind: "Deployment",
Version: "v1",
Namespace: DeploymentNamespace(),
ResourceName: "guestbook-ui",
})
assert.NoError(t, err)
assert.Equal(t, []ResourceAction{{Name: "sample", Disabled: false}}, actions.Actions)
_, err = client.RunResourceAction(context.Background(), &applicationpkg.ResourceActionRunRequest{Name: &app.Name,
Group: "apps",
Kind: "Deployment",
Version: "v1",
Namespace: DeploymentNamespace(),
ResourceName: "guestbook-ui",
Action: "sample",
})
assert.NoError(t, err)
deployment, err := KubeClientset.AppsV1().Deployments(DeploymentNamespace()).Get("guestbook-ui", metav1.GetOptions{})
assert.NoError(t, err)
assert.Equal(t, "test", deployment.Labels["sample"])
})
}
func TestSyncResourceByLabel(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
And(func(app *Application) {
_, _ = RunCli("app", "sync", app.Name, "--label", fmt.Sprintf("app.kubernetes.io/instance=%s", app.Name))
}).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
_, err := RunCli("app", "sync", app.Name, "--label", "this-label=does-not-exist")
assert.Error(t, err)
assert.Contains(t, err.Error(), "level=fatal")
})
}
func TestLocalManifestSync(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
And(func(app *Application) {
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 80")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.2")
}).
Given().
LocalPath(guestbookPathLocal).
When().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 81")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.3")
}).
Given().
LocalPath("").
When().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
res, _ := RunCli("app", "manifests", app.Name)
assert.Contains(t, res, "containerPort: 80")
assert.Contains(t, res, "image: gcr.io/heptio-images/ks-guestbook-demo:0.2")
})
}
func TestLocalSync(t *testing.T) {
Given(t).
// we've got to use Helm as this uses kubeVersion
Path("helm").
When().
Create().
Then().
And(func(app *Application) {
FailOnErr(RunCli("app", "sync", app.Name, "--local", "testdata/helm"))
})
}
func TestNoLocalSyncWithAutosyncEnabled(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
And(func(app *Application) {
_, err := RunCli("app", "set", app.Name, "--sync-policy", "automated")
assert.NoError(t, err)
_, err = RunCli("app", "sync", app.Name, "--local", guestbookPathLocal)
assert.Error(t, err)
})
}
func TestLocalSyncDryRunWithAutosyncEnabled(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
Create().
Sync().
Then().
And(func(app *Application) {
_, err := RunCli("app", "set", app.Name, "--sync-policy", "automated")
assert.NoError(t, err)
appBefore := app.DeepCopy()
_, err = RunCli("app", "sync", app.Name, "--dry-run", "--local", guestbookPathLocal)
assert.NoError(t, err)
appAfter := app.DeepCopy()
assert.True(t, reflect.DeepEqual(appBefore, appAfter))
})
}
func TestSyncAsync(t *testing.T) {
Given(t).
Path(guestbookPath).
Async(true).
When().
Create().
Sync().
Then().
Expect(Success("")).
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestPermissions(t *testing.T) {
EnsureCleanState(t)
appName := Name()
_, err := RunCli("proj", "create", "test")
assert.NoError(t, err)
// make sure app cannot be created without permissions in project
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.Error(t, err)
sourceError := fmt.Sprintf("application repo %s is not permitted in project 'test'", RepoURL(RepoURLTypeFile))
destinationError := fmt.Sprintf("application destination {%s %s} is not permitted in project 'test'", common.KubernetesInternalAPIServerAddr, DeploymentNamespace())
assert.Contains(t, err.Error(), sourceError)
assert.Contains(t, err.Error(), destinationError)
proj, err := AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Get("test", metav1.GetOptions{})
assert.NoError(t, err)
proj.Spec.Destinations = []ApplicationDestination{{Server: "*", Namespace: "*"}}
proj.Spec.SourceRepos = []string{"*"}
proj, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(proj)
assert.NoError(t, err)
// make sure controller report permissions issues in conditions
_, err = RunCli("app", "create", appName, "--repo", RepoURL(RepoURLTypeFile),
"--path", guestbookPath, "--project", "test", "--dest-server", common.KubernetesInternalAPIServerAddr, "--dest-namespace", DeploymentNamespace())
assert.NoError(t, err)
defer func() {
err = AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Delete(appName, &metav1.DeleteOptions{})
assert.NoError(t, err)
}()
proj.Spec.Destinations = []ApplicationDestination{}
proj.Spec.SourceRepos = []string{}
_, err = AppClientset.ArgoprojV1alpha1().AppProjects(ArgoCDNamespace).Update(proj)
assert.NoError(t, err)
time.Sleep(1 * time.Second)
closer, client, err := ArgoCDClientset.NewApplicationClient()
assert.NoError(t, err)
defer io.Close(closer)
refresh := string(RefreshTypeNormal)
app, err := client.Get(context.Background(), &applicationpkg.ApplicationQuery{Name: &appName, Refresh: &refresh})
assert.NoError(t, err)
destinationErrorExist := false
sourceErrorExist := false
for i := range app.Status.Conditions {
if strings.Contains(app.Status.Conditions[i].Message, destinationError) {
destinationErrorExist = true
}
if strings.Contains(app.Status.Conditions[i].Message, sourceError) {
sourceErrorExist = true
}
}
assert.True(t, destinationErrorExist)
assert.True(t, sourceErrorExist)
}
// make sure that if we deleted a resource from the app, it is not pruned if annotated with Prune=false
func TestSyncOptionPruneFalse(t *testing.T) {
Given(t).
Path("two-nice-pods").
When().
PatchFile("pod-1.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Prune=false"}}]`).
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
DeleteFile("pod-1.yaml").
Refresh(RefreshTypeHard).
IgnoreErrors().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
Expect(ResourceSyncStatusIs("Pod", "pod-1", SyncStatusCodeOutOfSync))
}
// make sure that if we have an invalid manifest, we can still add it if we disable validation; we then get a server error rather than a client error
func TestSyncOptionValidateFalse(t *testing.T) {
// k3s does not validate at all, so this test does not work
if os.Getenv("ARGOCD_E2E_K3S") == "true" {
t.SkipNow()
}
Given(t).
Path("crd-validation").
When().
Create().
Then().
Expect(Success("")).
When().
IgnoreErrors().
Sync().
Then().
// client error
Expect(Error("error validating data", "")).
When().
PatchFile("deployment.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/sync-options": "Validate=false"}}]`).
Sync().
Then().
// server error
Expect(Error("Error from server", ""))
}
// make sure that, if we have a resource that needs pruning, but we're ignoring it, the app is in-sync
func TestCompareOptionIgnoreExtraneous(t *testing.T) {
Given(t).
Prune(false).
Path("two-nice-pods").
When().
PatchFile("pod-1.yaml", `[{"op": "add", "path": "/metadata/annotations", "value": {"argocd.argoproj.io/compare-options": "IgnoreExtraneous"}}]`).
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
When().
DeleteFile("pod-1.yaml").
Refresh(RefreshTypeHard).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Len(t, app.Status.Resources, 2)
statusByName := map[string]SyncStatusCode{}
for _, r := range app.Status.Resources {
statusByName[r.Name] = r.Status
}
assert.Equal(t, SyncStatusCodeOutOfSync, statusByName["pod-1"])
assert.Equal(t, SyncStatusCodeSynced, statusByName["pod-2"])
}).
When().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced))
}
func TestSelfManagedApps(t *testing.T) {
Given(t).
Path("self-managed-app").
When().
PatchFile("resources.yaml", fmt.Sprintf(`[{"op": "replace", "path": "/spec/source/repoURL", "value": "%s"}]`, RepoURL(RepoURLTypeFile))).
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(a *Application) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
defer cancel()
reconciledCount := 0
var lastReconciledAt *metav1.Time
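			// count how many times ReconciledAt changes during the 3s watch window;
			// a self-managed app must not keep reconciling itself in a loop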
for event := range ArgoCDClientset.WatchApplicationWithRetry(ctx, a.Name, a.ResourceVersion) {
reconciledAt := event.Application.Status.ReconciledAt
if reconciledAt == nil {
reconciledAt = &metav1.Time{}
}
if lastReconciledAt != nil && !lastReconciledAt.Equal(reconciledAt) {
reconciledCount = reconciledCount + 1
}
lastReconciledAt = reconciledAt
}
assert.True(t, reconciledCount < 3, "Application was reconciled too many times")
})
}
func TestExcludedResource(t *testing.T) {
Given(t).
ResourceOverrides(map[string]ResourceOverride{"apps/Deployment": {Actions: actionsConfig}}).
Path(guestbookPath).
ResourceFilter(settings.ResourcesFilter{
ResourceExclusions: []settings.FilteredResource{{Kinds: []string{kube.DeploymentKind}}},
}).
When().
Create().
Sync().
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionExcludedResourceWarning, "Resource apps/Deployment guestbook-ui is excluded in the settings"))
}
func TestRevisionHistoryLimit(t *testing.T) {
Given(t).
Path("config-map").
When().
Create().
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Len(t, app.Status.History, 1)
}).
When().
AppSet("--revision-history-limit", "1").
Sync().
Then().
Expect(OperationPhaseIs(OperationSucceeded)).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.Len(t, app.Status.History, 1)
})
}
func TestOrphanedResource(t *testing.T) {
Given(t).
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true)},
}).
Path(guestbookPath).
When().
Create().
Sync().
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
When().
And(func() {
FailOnErr(KubeClientset.CoreV1().ConfigMaps(DeploymentNamespace()).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "orphaned-configmap",
},
}))
}).
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionOrphanedResourceWarning, "Application has 1 orphaned resources")).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true), Ignore: []OrphanedResourceKey{{Group: "Test", Kind: "ConfigMap"}}},
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(Condition(ApplicationConditionOrphanedResourceWarning, "Application has 1 orphaned resources")).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true), Ignore: []OrphanedResourceKey{{Kind: "ConfigMap"}}},
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: &OrphanedResourcesMonitorSettings{Warn: pointer.BoolPtr(true), Ignore: []OrphanedResourceKey{{Kind: "ConfigMap", Name: "orphaned-configmap"}}},
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions()).
Given().
ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: "*", Server: "*"}},
OrphanedResources: nil,
}).
When().
Refresh(RefreshTypeNormal).
Then().
Expect(SyncStatusIs(SyncStatusCodeSynced)).
Expect(NoConditions())
}
func TestNotPermittedResources(t *testing.T) {
ctx := Given(t)
ingress := &networkingv1beta.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "sample-ingress",
Labels: map[string]string{
common.LabelKeyAppInstance: ctx.GetName(),
},
},
Spec: networkingv1beta.IngressSpec{
Rules: []networkingv1beta.IngressRule{{
IngressRuleValue: networkingv1beta.IngressRuleValue{
HTTP: &networkingv1beta.HTTPIngressRuleValue{
Paths: []networkingv1beta.HTTPIngressPath{{
Path: "/",
Backend: networkingv1beta.IngressBackend{
ServiceName: "guestbook-ui",
ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
},
}},
},
},
}},
},
}
defer func() {
log.Infof("Ingress 'sample-ingress' deleted from %s", ArgoCDNamespace)
CheckError(KubeClientset.NetworkingV1beta1().Ingresses(ArgoCDNamespace).Delete("sample-ingress", &metav1.DeleteOptions{}))
}()
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "guestbook-ui",
Labels: map[string]string{
common.LabelKeyAppInstance: ctx.GetName(),
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 80},
}},
Selector: map[string]string{
"app": "guestbook-ui",
},
},
}
ctx.ProjectSpec(AppProjectSpec{
SourceRepos: []string{"*"},
Destinations: []ApplicationDestination{{Namespace: DeploymentNamespace(), Server: "*"}},
NamespaceResourceBlacklist: []metav1.GroupKind{
{Group: "", Kind: "Service"},
}}).
And(func() {
FailOnErr(KubeClientset.NetworkingV1beta1().Ingresses(ArgoCDNamespace).Create(ingress))
FailOnErr(KubeClientset.CoreV1().Services(DeploymentNamespace()).Create(svc))
}).
Path(guestbookPath).
When().
Create().
Then().
Expect(SyncStatusIs(SyncStatusCodeOutOfSync)).
And(func(app *Application) {
statusByKind := make(map[string]ResourceStatus)
for _, res := range app.Status.Resources {
statusByKind[res.Kind] = res
}
_, hasIngress := statusByKind[kube.IngressKind]
			assert.False(t, hasIngress, "Ingress is a prohibited resource and should not even be visible to the user")
			serviceStatus := statusByKind[kube.ServiceKind]
			assert.Equal(t, serviceStatus.Status, SyncStatusCodeUnknown, "Service is a prohibited managed resource, so its status should be Unknown")
deploymentStatus := statusByKind[kube.DeploymentKind]
assert.Equal(t, deploymentStatus.Status, SyncStatusCodeOutOfSync)
}).
When().
Delete(true).
Then().
Expect(DoesNotExist())
// Make sure prohibited resources are not deleted during application deletion
FailOnErr(KubeClientset.NetworkingV1beta1().Ingresses(ArgoCDNamespace).Get("sample-ingress", metav1.GetOptions{}))
FailOnErr(KubeClientset.CoreV1().Services(DeploymentNamespace()).Get("guestbook-ui", metav1.GetOptions{}))
}
func TestSyncWithInfos(t *testing.T) {
expectedInfo := make([]*Info, 2)
expectedInfo[0] = &Info{Name: "name1", Value: "val1"}
expectedInfo[1] = &Info{Name: "name2", Value: "val2"}
Given(t).
Path(guestbookPath).
When().
Create().
Then().
And(func(app *Application) {
_, err := RunCli("app", "sync", app.Name,
"--info", fmt.Sprintf("%s=%s", expectedInfo[0].Name, expectedInfo[0].Value),
"--info", fmt.Sprintf("%s=%s", expectedInfo[1].Name, expectedInfo[1].Value))
assert.NoError(t, err)
}).
Expect(SyncStatusIs(SyncStatusCodeSynced)).
And(func(app *Application) {
assert.ElementsMatch(t, app.Status.OperationState.Operation.Info, expectedInfo)
})
}
//Given: argocd app create does not provide --dest-namespace
// Manifest contains resource console which does not require namespace
//Expect: no app.Status.Conditions
func TestCreateAppWithNoNameSpaceForGlobalResource(t *testing.T) {
Given(t).
Path(globalWithNoNameSpace).
When().
CreateWithNoNameSpace().
Then().
And(func(app *Application) {
time.Sleep(500 * time.Millisecond)
app, err := AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Get(app.Name, metav1.GetOptions{})
assert.NoError(t, err)
assert.Len(t, app.Status.Conditions, 0)
})
}
//Given: argocd app create does not provide --dest-namespace
// Manifest contains resource deployment, and service which requires namespace
// Deployment and service do not have namespace in manifest
//Expect: app.Status.Conditions for deployment and service which do not have namespace in manifest
func TestCreateAppWithNoNameSpaceWhenRequired(t *testing.T) {
Given(t).
Path(guestbookPath).
When().
CreateWithNoNameSpace().
Then().
And(func(app *Application) {
var updatedApp *Application
for i := 0; i < 3; i++ {
obj, err := AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Get(app.Name, metav1.GetOptions{})
assert.NoError(t, err)
updatedApp = obj
if len(updatedApp.Status.Conditions) > 0 {
break
}
time.Sleep(500 * time.Millisecond)
}
assert.Len(t, updatedApp.Status.Conditions, 2)
assert.Equal(t, updatedApp.Status.Conditions[0].Type, ApplicationConditionInvalidSpecError)
assert.Equal(t, updatedApp.Status.Conditions[1].Type, ApplicationConditionInvalidSpecError)
})
}
//Given: argocd app create does not provide --dest-namespace
// Manifest contains resource deployment, and service which requires namespace
//       Some deployments and services have a namespace in the manifest
//       Some deployments and services do not have a namespace in the manifest
//Expect: app.Status.Conditions for the deployments and services that do not have a namespace in the manifest
func TestCreateAppWithNoNameSpaceWhenRequired2(t *testing.T) {
Given(t).
Path(guestbookWithNamespace).
When().
CreateWithNoNameSpace().
Then().
And(func(app *Application) {
var updatedApp *Application
for i := 0; i < 3; i++ {
obj, err := AppClientset.ArgoprojV1alpha1().Applications(ArgoCDNamespace).Get(app.Name, metav1.GetOptions{})
assert.NoError(t, err)
updatedApp = obj
if len(updatedApp.Status.Conditions) > 0 {
break
}
time.Sleep(500 * time.Millisecond)
}
assert.Len(t, updatedApp.Status.Conditions, 2)
assert.Equal(t, updatedApp.Status.Conditions[0].Type, ApplicationConditionInvalidSpecError)
assert.Equal(t, updatedApp.Status.Conditions[1].Type, ApplicationConditionInvalidSpecError)
})
}
| [
"\"ARGOCD_E2E_K3S\""
]
| []
| [
"ARGOCD_E2E_K3S"
]
| [] | ["ARGOCD_E2E_K3S"] | go | 1 | 0 | |
es4x/src/main/java/io/reactiverse/es4x/jul/ANSIFormatter.java | /*
* Copyright 2018 Red Hat, Inc.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Apache License v2.0 which accompanies this distribution.
*
* The Eclipse Public License is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* The Apache License v2.0 is available at
* http://www.opensource.org/licenses/apache2.0.php
*
* You may elect to redistribute this code under either of these licenses.
*/
package io.reactiverse.es4x.jul;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.logging.*;
import static java.util.logging.Level.*;
public class ANSIFormatter extends Formatter {
// are ANSI colors allowed?
private static final boolean colors;
static {
if (Boolean.getBoolean("noTTY")) {
// in this case rely on the system property to DISABLE the colors.
colors = false;
} else {
String term = System.getenv("TERM");
if (term != null) {
term = term.toLowerCase();
colors =
// this is where the most common config will be on unices
term.equals("xterm-color")
// however as there are lots of terminal emulators, it seems
// safer to look up for the suffix "-256color" as it covers:
// vte, linux, tmux, screen, putty, rxvt, nsterm, ...
|| term.endsWith("-256color");
} else {
        // there's no env variable (we're running either embedded (no shell)
        // or on an OS that doesn't set the TERM variable (Windows maybe))
colors = false;
}
}
}
@Override
public synchronized String format(LogRecord record) {
Throwable thrown = record.getThrown();
String message = record.getMessage();
String thrownMessage = null;
String thrownTrace = null;
if (thrown != null) {
// collect the trace back to a string
try (StringWriter sw = new StringWriter()) {
PrintWriter pw = new PrintWriter(sw);
// print the thrown to String
thrown.printStackTrace(pw);
String sStackTrace = sw.toString(); // stack trace as a string
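        // split the first line (the exception message) from the indented stack frames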
int idx = sStackTrace.indexOf("\n\tat");
if (idx != -1) {
thrownMessage = sStackTrace.substring(0, idx);
thrownTrace = sStackTrace.substring(idx);
} else {
thrownTrace = sStackTrace;
}
} catch (IOException e) {
// ignore...
}
}
StringBuilder sb = new StringBuilder();
if (colors) {
sb.append(prefix(record.getLevel()));
}
sb.append(message);
if (thrownMessage != null) {
sb.append(" caused by ");
sb.append(thrownMessage);
}
if (colors) {
sb.append(suffix(record.getLevel()));
}
if (thrownTrace != null) {
sb.append(thrownTrace);
} else {
sb.append(System.lineSeparator());
}
return sb.toString();
}
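  // ANSI SGR prefixes per level: 1=bold, 31=red, 33=yellow, 34=blue, 32=green,
  // 94=bright blue; suffix() resets the attributes with 0.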
private static String prefix(Level l) {
if (SEVERE.equals(l)) {
return "\u001B[1m\u001B[31m";
}
if (WARNING.equals(l)) {
return "\u001B[1m\u001B[33m";
}
if (INFO.equals(l)) {
return "";
}
if (CONFIG.equals(l)) {
return "\u001B[1m\u001B[34m";
}
if (FINE.equals(l)) {
return "\u001B[1m\u001B[32m";
}
if (FINER.equals(l)) {
return "\u001B[1m\u001B[94m";
}
if (FINEST.equals(l)) {
return "\u001B[94m";
}
return "[" + l.getName().toUpperCase() + "] ";
}
private static String suffix(Level l) {
if (SEVERE.equals(l)) {
return "\u001B[0m";
}
if (WARNING.equals(l)) {
return "\u001B[0m";
}
if (INFO.equals(l)) {
return "";
}
if (CONFIG.equals(l)) {
return "\u001B[0m";
}
if (FINE.equals(l)) {
return "\u001B[0m";
}
if (FINER.equals(l)) {
return "\u001B[0m";
}
if (FINEST.equals(l)) {
return "\u001B[0m";
}
return "";
}
}
| [
"\"TERM\""
]
| []
| [
"TERM"
]
| [] | ["TERM"] | java | 1 | 0 | |
Backend/AndroidBackend/views.py | import json
from django.http import HttpResponse
import urllib2
# Create your views here.
def list_of_exchange_rates(request, iso_code):
data = {}
try:
jsonRequest = urllib2.urlopen("http://api.fixer.io/latest?base=" + iso_code)
content = jsonRequest.read()
tmpData = json.loads(content.decode("utf8"))
data['date'] = tmpData['date']
data['base'] = {'currency': RateConverter.get_rate_name(tmpData['base']), 'country': RateConverter.get_rate_country_name(tmpData['base'])}
mmr = tmpData['rates']
rate_recs = []
for key, value in mmr.iteritems():
rate_recs.append({'currency': RateConverter.get_rate_name(key), 'country': RateConverter.get_rate_country_name(key), 'rate': value})
data['rates'] = rate_recs
except Exception as detail:
data = {"date" : "2015-04-02",
"base" : {
"currency" : "Dollar",
"country" : "United States"
},
"rates" : [{
"currency" : "Rupiah",
"rate" : 13001.44,
"country" : "Indonesia"
}, {
"currency" : "Lev",
"rate" : 1.8059,
"country" : "Bulgaria"
}, {
"currency" : "Shekel",
"rate" : 3.9485,
"country" : "Israel"
}, {
"currency" : "Pound",
"rate" : 0.6755,
"country" : "United Kingdom"
}, {
"currency" : "Krone",
"rate" : 6.8982,
"country" : "Denmark"
}, {
"currency" : "Dollar",
"rate" : 1.2644,
"country" : "Canada"
}, {
"currency" : "Yen",
"rate" : 119.66,
"country" : "Japan"
}, {
"currency" : "Forint",
"rate" : 276.62,
"country" : "Hungary"
}, {
"currency" : "New Leu",
"rate" : 4.0783,
"country" : "Romania"
}, {
"currency" : "Ringgit",
"rate" : 3.6602,
"country" : "Malaysia"
}, {
"currency" : "Krona",
"rate" : 8.6283,
"country" : "Sweden"
}, {
"currency" : "Dollar",
"rate" : 1.3591,
"country" : "Singapore"
}, {
"currency" : "Dollar",
"rate" : 7.7524,
"country" : "Hong Kong"
}, {
"currency" : "Dollar",
"rate" : 1.3244,
"country" : "Australia"
}, {
"currency" : "Franc",
"rate" : 0.9603,
"country" : "Switzerland"
}, {
"currency" : "Won",
"rate" : 1092.62,
"country" : "Korea (South)"
}, {
"currency" : "Yuan Renminbi",
"rate" : 6.1962,
"country" : "China"
}, {
"currency" : "Lira",
"rate" : 2.5988,
"country" : "Turkey"
}, {
"currency" : "Kuna",
"rate" : 7.0455,
"country" : "Croatia"
}, {
"currency" : "Dollar",
"rate" : 1.3388,
"country" : "New Zealand"
}, {
"currency" : "Baht",
"rate" : 32.49,
"country" : "Thailand"
}, {
"currency" : "Euro",
"rate" : 0.9234,
"country" : "Euro Member"
}, {
"currency" : "Krone",
"rate" : 7.9876,
"country" : "Norway"
}, {
"currency" : "Ruble",
"rate" : 57.09,
"country" : "Russia"
}, {
"currency" : "Rupee",
"rate" : 62.182,
"country" : "India"
}, {
"currency" : "Peso",
"rate" : 15.112,
"country" : "Mexico"
}, {
"currency" : "Koruna",
"rate" : 25.457,
"country" : "Czech Republic"
}, {
"currency" : "Real",
"rate" : 3.1678,
"country" : "Brazil"
}, {
"currency" : "Zloty",
"rate" : 3.7532,
"country" : "Poland"
}, {
"currency" : "Peso",
"rate" : 44.406,
"country" : "Philippines"
}, {
"currency" : "Rand",
"rate" : 11.999,
"country" : "South Africa"
}
],
"error": "Something went wrong, You got a predefined exchange rates"}
return HttpResponse(json.dumps(data), content_type="application/json")
class CurrencyRate():
currency = ''
country = ''
def __init__(self, currency, country):
self.currency = currency
self.country = country
class RateConverter():
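    # Static ISO 4217 code -> (currency, country) lookup used to enrich API responses.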
rates = {
'IDR': CurrencyRate('Rupiah','Indonesia'),
'BGN': CurrencyRate('Lev','Bulgaria'),
'ILS': CurrencyRate('Shekel','Israel'),
'GBP': CurrencyRate('Pound','United Kingdom'),
'DKK': CurrencyRate('Krone','Denmark'),
'CAD': CurrencyRate('Dollar','Canada'),
'JPY': CurrencyRate('Yen','Japan'),
'HUF': CurrencyRate('Forint','Hungary'),
'RON': CurrencyRate('New Leu','Romania'),
'MYR': CurrencyRate('Ringgit','Malaysia'),
'SEK': CurrencyRate('Krona','Sweden'),
'SGD': CurrencyRate('Dollar','Singapore'),
'HKD': CurrencyRate('Dollar','Hong Kong'),
'AUD': CurrencyRate('Dollar','Australia'),
'CHF': CurrencyRate('Franc','Switzerland'),
'KRW': CurrencyRate('Won','Korea (South)'),
'CNY': CurrencyRate('Yuan Renminbi','China'),
'TRY': CurrencyRate('Lira','Turkey'),
'HRK': CurrencyRate('Kuna','Croatia'),
'NZD': CurrencyRate('Dollar','New Zealand'),
'THB': CurrencyRate('Baht','Thailand'),
'EUR': CurrencyRate('Euro','Euro Member'),
'NOK': CurrencyRate('Krone','Norway'),
'RUB': CurrencyRate('Ruble','Russia'),
'INR': CurrencyRate('Rupee','India'),
'MXN': CurrencyRate('Peso','Mexico'),
'CZK': CurrencyRate('Koruna','Czech Republic'),
'BRL': CurrencyRate('Real','Brazil'),
'PLN': CurrencyRate('Zloty','Poland'),
'PHP': CurrencyRate('Peso','Philippines'),
'ZAR': CurrencyRate('Rand','South Africa'),
'USD': CurrencyRate('Dollar','United States')
};
@staticmethod
def get_rate_name(rate):
return RateConverter.rates.get(rate, CurrencyRate('','')).currency
@staticmethod
def get_rate_country_name(rate):
return RateConverter.rates.get(rate, CurrencyRate('','')).country | []
| []
| []
| [] | [] | python | null | null | null |
tsuru_autoscale/datasource/tests.py | from django.test import TestCase
from django.core.urlresolvers import reverse
from tsuru_autoscale.datasource.forms import DataSourceForm
from tsuru_autoscale.datasource import client
import httpretty
import mock
import os
class RemoveTestCase(TestCase):
@mock.patch("tsuru_autoscale.datasource.client.list")
@mock.patch("tsuru_autoscale.datasource.client.remove")
def test_remove(self, remove_mock, list_mock):
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-remove", args=["name"]))
response = self.client.delete(url)
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-list"))
self.assertRedirects(response, url)
remove_mock.assert_called_with("name", "bla")
class NewTestCase(TestCase):
def test_new(self):
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-new"))
response = self.client.get(url)
self.assertTemplateUsed(response, "datasource/new.html")
self.assertIsInstance(response.context['form'], DataSourceForm)
self.assertFalse(response.context['form'].is_bound)
def test_new_invalid_post(self):
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-new"))
response = self.client.post(url, {})
self.assertFalse(response.context['form'].is_valid())
@mock.patch("tsuru_autoscale.datasource.client.list")
@mock.patch("tsuru_autoscale.datasource.client.new")
def test_new_post(self, new_mock, list_mock):
data = {
'url': u'someurl',
'body': u'',
'headers': u'',
'name': u'name',
'method': u'GET',
}
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-new"))
response = self.client.post(url, data)
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-list"))
self.assertRedirects(response, url)
new_mock.assert_called_with(data, "bla")
class DataSourceListTest(TestCase):
@mock.patch("tsuru_autoscale.datasource.client.list")
def test_list(self, list_mock):
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-list"))
response = self.client.get(url)
self.assertTemplateUsed(response, "datasource/list.html")
self.assertIn('list', response.context)
list_mock.assert_called_with("bla")
class GetTest(TestCase):
@mock.patch("tsuru_autoscale.datasource.client.get")
def test_get(self, get_mock):
result_mock = mock.Mock()
result_mock.json.return_value = {"Name": "ble"}
get_mock.return_value = result_mock
url = "{}?TSURU_TOKEN=bla".format(reverse("datasource-get", args=["ble"]))
response = self.client.get(url)
self.assertTemplateUsed(response, "datasource/get.html")
self.assertIn('item', response.context)
get_mock.assert_called_with("ble", "bla")
class DataSourceFormTestCase(TestCase):
def test_required_fields(self):
fields = {
"url": True,
"method": True,
"name": True,
"body": False,
"headers": False,
}
form = DataSourceForm()
for field, required in fields.items():
self.assertEqual(form.fields[field].required, required)
class ClientTestCase(TestCase):
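    # httpretty intercepts outgoing HTTP requests, so these tests exercise the
    # client module against AUTOSCALE_HOST without a real autoscale service.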
def setUp(self):
httpretty.enable()
def tearDown(self):
httpretty.disable()
httpretty.reset()
def test_new(self):
os.environ["AUTOSCALE_HOST"] = "http://autoscalehost.com"
httpretty.register_uri(
httpretty.POST,
"http://autoscalehost.com/datasource",
)
client.new({}, "token")
def test_list(self):
os.environ["AUTOSCALE_HOST"] = "http://autoscalehost.com"
httpretty.register_uri(
httpretty.GET,
"http://autoscalehost.com/datasource",
)
client.list("token")
self.assertDictEqual(httpretty.last_request().querystring, {"public": ["true"]})
def test_remove(self):
os.environ["AUTOSCALE_HOST"] = "http://autoscalehost.com"
httpretty.register_uri(
httpretty.DELETE,
"http://autoscalehost.com/datasource/name",
)
client.remove("name", "token")
def test_get(self):
os.environ["AUTOSCALE_HOST"] = "http://autoscalehost.com"
httpretty.register_uri(
httpretty.GET,
"http://autoscalehost.com/datasource/name",
"result",
)
result = client.get("name", "token")
self.assertEqual(result.text, "result")
| [] | [] | ["AUTOSCALE_HOST"] | [] | ["AUTOSCALE_HOST"] | python | 1 | 0 | |
code_examples/Java/skeleton_syncservice/src/main/java/no/sintef/skeleton_syncservice/SyncExample.java | // Important: To use this as a template for your own service, please follow the
// instructions given in the comments. The instructions are assuming you do this
// from within the Netbeans IDE.
// Change the package name to something fitting to your application
// The easiest way to do this is to do navigate in the project pane (to the left)
// syncServiceSkeleton -> Source Packages. Here you should see the current package name.
// Right click on it and choose refactor->rename.
// Also change "no.sintef" appropriately
package no.sintef.skeleton_syncservice;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.jws.WebService;
import javax.jws.WebMethod;
import javax.jws.WebParam;
import javax.xml.ws.Holder;
// Change the serviceName and the Java class name. The service name will be part
// of the endpoint url to your service
@WebService(serviceName = "SyncExample")
public class SyncExample {
// The namespace should match the package name in the first non-commented
// line of this file.
// If package name is a.b.c, the namespace should be "http://c.b.a/" (case sensitive)
// WFM will have an easier time recognizing your web service if this is fulfilled
private final String namespace = "http://skeleton_syncservice.sintef.no/";
// Also rename your Java project:
// Right click on the project in the left pane, choose "Properties" and rename
// group id, artifact id and name.
// Container configuration is accessed via environment variables
private final static String config_value = System.getenv("MY_CONFIG_VALUE");
// Name your service here, and add your required input and output parameters.
// Note that output parameters should be Holder objects.
@WebMethod(operationName = "basicIO")
public void basicIO(
@WebParam(name = "serviceID",
targetNamespace = namespace,
mode = WebParam.Mode.IN) String serviceID,
@WebParam(name = "sessionToken",
targetNamespace = namespace,
mode = WebParam.Mode.IN) String sessionToken,
@WebParam(name = "input1",
targetNamespace = namespace,
mode = WebParam.Mode.IN) String input1,
@WebParam(name = "input2",
targetNamespace = namespace,
mode = WebParam.Mode.IN) String input2,
@WebParam(name = "myOut",
targetNamespace = namespace,
mode = WebParam.Mode.OUT) Holder<String> myOut)
{
// Edit the input and output parameters above to fit your needs, and implement the method body as required.
log("basicIO was called!");
// Output parameters are set like this:
myOut.value = "Config was: " + config_value + "; input was: " + input1
+ " " + input2;
}
/*
* Utility function for less verbose logging
*/
private void log(String message) {
Logger.getLogger(this.getClass().getName()).log(Level.INFO, message);
}
/*
* Utility function for less verbose error message in log
*/
private void error(String message) {
Logger.getLogger(this.getClass().getName()).log(Level.SEVERE, message);
}
/*
* Utility function for less verbose error message in log
*/
private void error(IOException ex) {
Logger.getLogger(SyncExample.class.getName()).log(Level.SEVERE, null, ex);
}
}
| ["\"MY_CONFIG_VALUE\""] | [] | ["MY_CONFIG_VALUE"] | [] | ["MY_CONFIG_VALUE"] | java | 1 | 0 |
python/pyspark/pandas/groupby.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper for GroupedData to behave similar to pandas GroupBy.
"""
from abc import ABCMeta, abstractmethod
import inspect
from collections import defaultdict, namedtuple
from distutils.version import LooseVersion
from functools import partial
from itertools import product
from typing import (
Any,
Callable,
Dict,
Generic,
Iterator,
Mapping,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
TYPE_CHECKING,
)
import warnings
import pandas as pd
from pandas.api.types import is_hashable, is_list_like # type: ignore[attr-defined]
if LooseVersion(pd.__version__) >= LooseVersion("1.3.0"):
from pandas.core.common import _builtin_table # type: ignore[attr-defined]
else:
from pandas.core.base import SelectionMixin
_builtin_table = SelectionMixin._builtin_table # type: ignore[attr-defined]
from pyspark import SparkContext
from pyspark.sql import Column, DataFrame as SparkDataFrame, Window, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
NumericType,
StructField,
StructType,
StringType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, FrameLike, Label, Name
from pyspark.pandas.typedef import infer_return_type, DataFrameType, ScalarType, SeriesType
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_SERIES_NAME,
SPARK_INDEX_NAME_PATTERN,
)
from pyspark.pandas.missing.groupby import (
MissingPandasLikeDataFrameGroupBy,
MissingPandasLikeSeriesGroupBy,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.config import get_option
from pyspark.pandas.utils import (
align_diff_frames,
is_name_like_tuple,
is_name_like_value,
name_like_string,
same_anchor,
scol_for,
verify_temp_column_name,
log_advice,
)
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.exceptions import DataError
if TYPE_CHECKING:
from pyspark.pandas.window import RollingGroupby, ExpandingGroupby, ExponentialMovingGroupby
# to keep it the same as pandas
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
class GroupBy(Generic[FrameLike], metaclass=ABCMeta):
"""
:ivar _psdf: The parent dataframe that is used to perform the groupby
:type _psdf: DataFrame
:ivar _groupkeys: The list of keys that will be used to perform the grouping
:type _groupkeys: List[Series]
"""
def __init__(
self,
psdf: DataFrame,
groupkeys: List[Series],
as_index: bool,
dropna: bool,
column_labels_to_exclude: Set[Label],
agg_columns_selected: bool,
agg_columns: List[Series],
):
self._psdf = psdf
self._groupkeys = groupkeys
self._as_index = as_index
self._dropna = dropna
self._column_labels_to_exclude = column_labels_to_exclude
self._agg_columns_selected = agg_columns_selected
self._agg_columns = agg_columns
@property
def _groupkeys_scols(self) -> List[Column]:
return [s.spark.column for s in self._groupkeys]
@property
def _agg_columns_scols(self) -> List[Column]:
return [s.spark.column for s in self._agg_columns]
@abstractmethod
def _apply_series_op(
self,
op: Callable[["SeriesGroupBy"], Series],
should_resolve: bool = False,
numeric_only: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _cleanup_and_return(self, psdf: DataFrame) -> FrameLike:
pass
# TODO: Series support is not implemented yet.
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(
self,
func_or_funcs: Optional[Union[str, List[str], Dict[Name, Union[str, List[str]]]]] = None,
*args: Any,
**kwargs: Any,
) -> DataFrame:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func_or_funcs : dict, str or list
a dict mapping from column name (string) to
aggregate functions (string or list of strings).
Returns
-------
Series or DataFrame
The return can be:
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return Series or DataFrame.
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': [0.362, 0.227, 1.267, -0.562]},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1 1 0.362
1 1 2 0.227
2 2 3 1.267
3 2 4 -0.562
Different aggregations per column
>>> aggregated = df.groupby('A').agg({'B': 'min', 'C': 'sum'})
>>> aggregated[['B', 'C']].sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.589
2 3 0.705
>>> aggregated = df.groupby('A').agg({'B': ['min', 'max']})
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B
min max
A
1 1 2
2 3 4
>>> aggregated = df.groupby('A').agg('min')
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.227
2 3 -0.562
>>> aggregated = df.groupby('A').agg(['min', 'max'])
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
min max min max
A
1 1 2 0.227 0.362
2 3 4 -0.562 1.267
To control the output names with different aggregations per column, pandas-on-Spark
also supports 'named aggregation' or nested renaming in .agg. It can also be
used when applying multiple aggregation functions to specific columns.
>>> aggregated = df.groupby('A').agg(b_max=ps.NamedAgg(column='B', aggfunc='max'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max
A
1 2
2 4
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), b_min=('B', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max b_min
A
1 2 1
2 4 3
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), c_min=('C', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max c_min
A
1 2 0.227
2 4 -0.562
"""
        # The current implementation of func and arguments in pandas-on-Spark for aggregate
        # differs from pandas; once additional arguments are supported, this check could be removed.
        if func_or_funcs is None and not kwargs:
raise ValueError("No aggregation argument or function specified.")
relabeling = func_or_funcs is None and is_multi_agg_with_relabel(**kwargs)
if relabeling:
(
func_or_funcs,
columns,
order,
) = normalize_keyword_aggregation( # type: ignore[assignment]
kwargs
)
if not isinstance(func_or_funcs, (str, list)):
if not isinstance(func_or_funcs, dict) or not all(
is_name_like_value(key)
and (
isinstance(value, str)
or isinstance(value, list)
and all(isinstance(v, str) for v in value)
)
for key, value in func_or_funcs.items()
):
raise ValueError(
"aggs must be a dict mapping from column name "
"to aggregate functions (string or list of strings)."
)
else:
agg_cols = [col.name for col in self._agg_columns]
func_or_funcs = {col: func_or_funcs for col in agg_cols}
psdf: DataFrame = DataFrame(
GroupBy._spark_groupby(self._psdf, func_or_funcs, self._groupkeys)
)
if self._dropna:
psdf = DataFrame(
psdf._internal.with_new_sdf(
psdf._internal.spark_frame.dropna(
subset=psdf._internal.index_spark_column_names
)
)
)
if not self._as_index:
should_drop_index = set(
i for i, gkey in enumerate(self._groupkeys) if gkey._psdf is not self._psdf
)
if len(should_drop_index) > 0:
psdf = psdf.reset_index(level=should_drop_index, drop=True)
if len(should_drop_index) < len(self._groupkeys):
psdf = psdf.reset_index()
if relabeling:
psdf = psdf[order]
psdf.columns = columns # type: ignore[assignment]
return psdf
agg = aggregate
@staticmethod
def _spark_groupby(
psdf: DataFrame,
func: Mapping[Name, Union[str, List[str]]],
groupkeys: Sequence[Series] = (),
) -> InternalFrame:
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
multi_aggs = any(isinstance(v, list) for v in func.values())
reordered = []
data_columns = []
column_labels = []
for key, value in func.items():
label = key if is_name_like_tuple(key) else (key,)
if len(label) != psdf._internal.column_labels_level:
raise TypeError("The length of the key must be the same as the column label level.")
for aggfunc in [value] if isinstance(value, str) else value:
column_label = tuple(list(label) + [aggfunc]) if multi_aggs else label
column_labels.append(column_label)
data_col = name_like_string(column_label)
data_columns.append(data_col)
col_name = psdf._internal.spark_column_name_for(label)
if aggfunc == "nunique":
reordered.append(
F.expr("count(DISTINCT `{0}`) as `{1}`".format(col_name, data_col))
)
# Implement "quartiles" aggregate function for ``describe``.
elif aggfunc == "quartiles":
reordered.append(
F.expr(
"percentile_approx(`{0}`, array(0.25, 0.5, 0.75)) as `{1}`".format(
col_name, data_col
)
)
)
else:
reordered.append(
F.expr("{1}(`{0}`) as `{2}`".format(col_name, aggfunc, data_col))
)
sdf = psdf._internal.spark_frame.select(groupkey_scols + psdf._internal.data_spark_columns)
sdf = sdf.groupby(*groupkey_names).agg(*reordered)
return InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
)
def count(self) -> FrameLike:
"""
Compute count of group, excluding missing values.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
>>> df.groupby('A').count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 2 3
2 2 2
"""
return self._reduce_for_stat_function(F.count)
# TODO: We should fix See Also when Series implementation is finished.
def first(self, numeric_only: Optional[bool] = False) -> FrameLike:
"""
Compute first of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
.. versionadded:: 3.4.0
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 3, 4, 4], "D": ["a", "b", "b", "a"]})
>>> df
A B C D
0 1 True 3 a
1 2 False 3 b
2 1 False 4 b
3 2 True 4 a
>>> df.groupby("A").first().sort_index()
B C D
A
1 True 3 a
2 False 3 b
Include only float, int, boolean columns when set numeric_only True.
>>> df.groupby("A").first(numeric_only=True).sort_index()
B C
A
1 True 3
2 False 3
"""
return self._reduce_for_stat_function(
F.first, accepted_spark_types=(NumericType, BooleanType) if numeric_only else None
)
def last(self, numeric_only: Optional[bool] = False) -> FrameLike:
"""
Compute last of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
.. versionadded:: 3.4.0
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 3, 4, 4], "D": ["a", "b", "b", "a"]})
>>> df
A B C D
0 1 True 3 a
1 2 False 3 b
2 1 False 4 b
3 2 True 4 a
>>> df.groupby("A").last().sort_index()
B C D
A
1 False 4 b
2 True 4 a
Include only float, int, boolean columns when set numeric_only True.
>>> df.groupby("A").last(numeric_only=True).sort_index()
B C
A
1 False 4
2 True 4
"""
return self._reduce_for_stat_function(
lambda col: F.last(col, ignorenulls=True),
accepted_spark_types=(NumericType, BooleanType) if numeric_only else None,
)
def max(self, numeric_only: Optional[bool] = False) -> FrameLike:
"""
Compute max of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
.. versionadded:: 3.4.0
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 4, 3, 4], "D": ["a", "b", "b", "a"]})
>>> df.groupby("A").max().sort_index()
B C D
A
1 True 3 b
2 True 4 b
Include only float, int, boolean columns when set numeric_only True.
>>> df.groupby("A").max(numeric_only=True).sort_index()
B C
A
1 True 3
2 True 4
"""
return self._reduce_for_stat_function(
F.max, accepted_spark_types=(NumericType, BooleanType) if numeric_only else None
)
def mean(self, numeric_only: Optional[bool] = True) -> FrameLike:
"""
Compute mean of groups, excluding missing values.
Parameters
----------
        numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
.. versionadded:: 3.4.0
Returns
-------
pyspark.pandas.Series or pyspark.pandas.DataFrame
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2],
... 'D': [True, False, True, False, True]})
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C D
A
1 3.0 1.333333 0.333333
2 4.0 1.500000 1.000000
"""
        self._validate_agg_columns(numeric_only=numeric_only, function_name="mean")
return self._reduce_for_stat_function(
F.mean, accepted_spark_types=(NumericType,), bool_to_numeric=True
)
def min(self, numeric_only: Optional[bool] = False) -> FrameLike:
"""
Compute min of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
.. versionadded:: 3.4.0
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 4, 3, 4], "D": ["a", "b", "b", "a"]})
>>> df.groupby("A").min().sort_index()
B C D
A
1 False 3 a
2 False 4 a
Include only float, int, boolean columns when set numeric_only True.
>>> df.groupby("A").min(numeric_only=True).sort_index()
B C
A
1 False 3
2 False 4
"""
return self._reduce_for_stat_function(
F.min, accepted_spark_types=(NumericType, BooleanType) if numeric_only else None
)
# TODO: sync the doc.
def std(self, ddof: int = 1) -> FrameLike:
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 4, 3, 4], "D": ["a", "b", "b", "a"]})
>>> df.groupby("A").std()
B C
A
1 0.707107 0.0
2 0.707107 0.0
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
assert ddof in (0, 1)
# Raise the TypeError when all aggregation columns are of unaccepted data types
all_unaccepted = True
for _agg_col in self._agg_columns:
if isinstance(_agg_col.spark.data_type, (NumericType, BooleanType)):
all_unaccepted = False
break
if all_unaccepted:
raise TypeError(
"Unaccepted data types of aggregation columns; numeric or bool expected."
)
return self._reduce_for_stat_function(
F.stddev_pop if ddof == 0 else F.stddev_samp,
accepted_spark_types=(NumericType,),
bool_to_numeric=True,
)
def sum(self) -> FrameLike:
"""
        Compute sum of group values.
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 4, 3, 4], "D": ["a", "b", "b", "a"]})
>>> df.groupby("A").sum()
B C
A
1 1 6
2 1 8
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
return self._reduce_for_stat_function(
F.sum, accepted_spark_types=(NumericType,), bool_to_numeric=True
)
# TODO: sync the doc.
def var(self, ddof: int = 1) -> FrameLike:
"""
Compute variance of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 2], "B": [True, False, False, True],
... "C": [3, 4, 3, 4], "D": ["a", "b", "b", "a"]})
>>> df.groupby("A").var()
B C
A
1 0.5 0.0
2 0.5 0.0
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
assert ddof in (0, 1)
return self._reduce_for_stat_function(
F.var_pop if ddof == 0 else F.var_samp,
accepted_spark_types=(NumericType,),
bool_to_numeric=True,
)
def skew(self) -> FrameLike:
"""
Compute skewness of groups, excluding missing values.
.. versionadded:: 3.4.0
Examples
--------
>>> df = ps.DataFrame({"A": [1, 2, 1, 1], "B": [True, False, False, True],
... "C": [3, 4, 3, 4], "D": ["a", "b", "b", "a"]})
>>> df.groupby("A").skew()
B C
A
1 -1.732051 1.732051
2 NaN NaN
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
"""
def skew(scol: Column) -> Column:
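            # Delegate to the JVM-side helper that implements pandas-compatible skewness.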
sql_utils = SparkContext._active_spark_context._jvm.PythonSQLUtils
return Column(sql_utils.pandasSkewness(scol._jc))
return self._reduce_for_stat_function(
skew,
accepted_spark_types=(NumericType,),
bool_to_numeric=True,
)
# TODO: skipna should be implemented.
def all(self) -> FrameLike:
"""
Returns True if all values in the group are truthful, else False.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').all().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 False
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.min(F.coalesce(col.cast("boolean"), SF.lit(True)))
)
# TODO: skipna should be implemented.
def any(self) -> FrameLike:
"""
Returns True if any value in the group is truthful, else False.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').any().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 True
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.max(F.coalesce(col.cast("boolean"), SF.lit(False)))
)
# TODO: groupby multiply columns should be implemented.
def size(self) -> Series:
"""
Compute group sizes.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3, 3, 3],
... 'B': [1, 1, 2, 3, 3, 3]},
... columns=['A', 'B'])
>>> df
A B
0 1 1
1 2 1
2 2 2
3 3 3
4 3 3
5 3 3
>>> df.groupby('A').size().sort_index()
A
1 1
2 2
3 3
dtype: int64
>>> df.groupby(['A', 'B']).size().sort_index()
A B
1 1 1
2 1 1
2 1
3 3 3
dtype: int64
For Series,
>>> df.B.groupby(df.A).size().sort_index()
A
1 1
2 2
3 3
Name: B, dtype: int64
>>> df.groupby(df.A).B.size().sort_index()
A
1 1
2 2
3 3
Name: B, dtype: int64
"""
groupkeys = self._groupkeys
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_scols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
sdf = self._psdf._internal.spark_frame.select(
groupkey_scols + self._psdf._internal.data_spark_columns
)
sdf = sdf.groupby(*groupkey_names).count()
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=[None],
data_spark_columns=[scol_for(sdf, "count")],
)
return first_series(DataFrame(internal))
def diff(self, periods: int = 1) -> FrameLike:
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame group (default is the element in the same column of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame or Series
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.groupby(['b']).diff().sort_index()
a c
0 NaN NaN
1 1.0 3.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
5 NaN NaN
Difference with previous column in a group.
>>> df.groupby(['b'])['a'].diff().sort_index()
0 NaN
1 1.0
2 NaN
3 NaN
4 NaN
5 NaN
Name: a, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._diff(periods, part_cols=sg._groupkeys_scols), should_resolve=True
)
def cumcount(self, ascending: bool = True) -> Series:
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
Examples
--------
>>> df = ps.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount().sort_index()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False).sort_index()
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
ret = (
self._groupkeys[0]
.rename()
.spark.transform(lambda _: SF.lit(0))
._cum(F.count, True, part_cols=self._groupkeys_scols, ascending=ascending)
- 1
)
internal = ret._internal.resolved_copy
return first_series(DataFrame(internal))
def cummax(self) -> FrameLike:
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummax
DataFrame.cummax
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
        By default, iterates over rows and finds the maximum in each column.
>>> df.groupby("A").cummax().sort_index()
B C
0 NaN 4
1 0.1 4
2 20.0 4
3 10.0 1
It works as below in Series.
>>> df.C.groupby(df.A).cummax().sort_index()
0 4
1 4
2 4
3 1
Name: C, dtype: int64
"""
return self._apply_series_op(
lambda sg: sg._psser._cum(F.max, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cummin(self) -> FrameLike:
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummin
DataFrame.cummin
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
        By default, iterates over rows and finds the minimum in each column.
>>> df.groupby("A").cummin().sort_index()
B C
0 NaN 4
1 0.1 3
2 0.1 2
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cummin().sort_index()
0 NaN
1 0.1
2 0.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cum(F.min, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cumprod(self) -> FrameLike:
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumprod
DataFrame.cumprod
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
        By default, iterates over rows and finds the product in each column.
>>> df.groupby("A").cumprod().sort_index()
B C
0 NaN 4
1 0.1 12
2 2.0 24
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumprod().sort_index()
0 NaN
1 0.1
2 2.0
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cumprod(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def cumsum(self) -> FrameLike:
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumsum
DataFrame.cumsum
Examples
--------
>>> df = ps.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cumsum().sort_index()
B C
0 NaN 4
1 0.1 7
2 20.1 9
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumsum().sort_index()
0 NaN
1 0.1
2 20.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._cumsum(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
numeric_only=True,
)
def apply(self, func: Callable, *args: Any, **kwargs: Any) -> Union[DataFrame, Series]:
"""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a DataFrame as its first
argument and return a DataFrame. `apply` will
then take care of combining the results back together into a single
dataframe. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
        like `agg` or `transform`. pandas-on-Spark offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:
... return x[['B', 'C']] / x[['B', 'C']]
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a NumPy compound type style
as below:
>>> def pandas_div(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> pdf = pd.DataFrame({'B': [1.], 'C': [3.]})
>>> def plus_one(x) -> ps.DataFrame[
... (pdf.index.name, pdf.index.dtype), zip(pdf.columns, pdf.dtypes)]:
... return x[['B', 'C']] / x[['B', 'C']]
.. note:: the dataframe within ``func`` is actually a pandas dataframe. Therefore,
any pandas API within this function is allowed.
Parameters
----------
func : callable
A callable that takes a DataFrame as its first argument, and
returns a dataframe.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
applied : DataFrame or Series
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
DataFrame.apply : Apply a function to a DataFrame.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ps.DataFrame({'A': 'a a b'.split(),
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Below the functions passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> def plus_min(x):
... return x + x.min()
>>> g.apply(plus_min).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
0 aa 2 8
1 aa 3 10
2 bb 6 10
>>> g.apply(sum).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
A
a aa 3 10
b b 3 5
>>> g.apply(len).sort_index() # doctest: +NORMALIZE_WHITESPACE
A
a 2
b 1
dtype: int64
You can specify the type hint and prevent schema inference for better performance.
>>> def pandas_div(x) -> ps.DataFrame[int, [float, float]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
c0 c1
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
>>> def pandas_div(x) -> ps.DataFrame[("index", int), [("f1", float), ("f2", float)]]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
f1 f2
index
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
In case of Series, it works as below.
>>> def plus_max(x) -> ps.Series[np.int]:
... return x + x.max()
>>> df.B.groupby(df.A).apply(plus_max).sort_index() # doctest: +SKIP
0 6
1 3
2 4
Name: B, dtype: int64
>>> def plus_min(x):
... return x + x.min()
>>> df.B.groupby(df.A).apply(plus_min).sort_index()
0 2
1 3
2 6
Name: B, dtype: int64
        You can also return a scalar value as an aggregated value of the group:
>>> def plus_length(x) -> np.int:
... return len(x)
>>> df.B.groupby(df.A).apply(plus_length).sort_index() # doctest: +SKIP
0 1
1 2
Name: B, dtype: int64
The extra arguments to the function can be passed as below.
>>> def calculation(x, y, z) -> np.int:
... return len(x) + y * z
>>> df.B.groupby(df.A).apply(calculation, 5, z=10).sort_index() # doctest: +SKIP
0 51
1 52
Name: B, dtype: int64
"""
if not callable(func):
raise TypeError("%s object is not callable" % type(func).__name__)
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
should_retain_index = should_infer_schema
is_series_groupby = isinstance(self, SeriesGroupBy)
psdf = self._psdf
if self._agg_columns_selected:
agg_columns = self._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in self._column_labels_to_exclude
]
psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(
psdf, self._groupkeys, agg_columns
)
if is_series_groupby:
name = psdf.columns[-1]
pandas_apply = _builtin_table.get(func, func)
else:
f = _builtin_table.get(func, func)
def pandas_apply(pdf: pd.DataFrame, *a: Any, **k: Any) -> Any:
return f(pdf.drop(groupkey_names, axis=1), *a, **k)
should_return_series = False
if should_infer_schema:
            # Here we execute with the first `compute.shortcut_limit` rows to infer the return type.
            log_advice(
                "If the type hint is not specified for `groupby.apply`, "
"it is expensive to infer the data type internally."
)
limit = get_option("compute.shortcut_limit")
# Ensure sampling rows >= 2 to make sure apply's infer schema is accurate
# See related: https://github.com/pandas-dev/pandas/issues/46893
sample_limit = limit + 1 if limit else 2
pdf = psdf.head(sample_limit)._to_internal_pandas()
groupkeys = [
pdf[groupkey_name].rename(psser.name)
for groupkey_name, psser in zip(groupkey_names, self._groupkeys)
]
grouped = pdf.groupby(groupkeys)
if is_series_groupby:
pser_or_pdf = grouped[name].apply(pandas_apply, *args, **kwargs)
else:
pser_or_pdf = grouped.apply(pandas_apply, *args, **kwargs)
psser_or_psdf = ps.from_pandas(pser_or_pdf)
if len(pdf) <= limit:
if isinstance(psser_or_psdf, ps.Series) and is_series_groupby:
psser_or_psdf = psser_or_psdf.rename(cast(SeriesGroupBy, self)._psser.name)
return cast(Union[Series, DataFrame], psser_or_psdf)
if len(grouped) <= 1:
with warnings.catch_warnings():
warnings.simplefilter("always")
warnings.warn(
"The amount of data for return type inference might not be large enough. "
"Consider increasing an option `compute.shortcut_limit`."
)
if isinstance(psser_or_psdf, Series):
should_return_series = True
psdf_from_pandas = psser_or_psdf._psdf
else:
psdf_from_pandas = cast(DataFrame, psser_or_psdf)
index_fields = [
field.normalize_spark_type() for field in psdf_from_pandas._internal.index_fields
]
data_fields = [
field.normalize_spark_type() for field in psdf_from_pandas._internal.data_fields
]
return_schema = StructType([field.struct_field for field in index_fields + data_fields])
else:
return_type = infer_return_type(func)
if not is_series_groupby and isinstance(return_type, SeriesType):
raise TypeError(
"Series as a return type hint at frame groupby is not supported "
"currently; however got [%s]. Use DataFrame type hint instead." % return_sig
)
if isinstance(return_type, DataFrameType):
data_fields = return_type.data_fields
return_schema = return_type.spark_type
index_fields = return_type.index_fields
should_retain_index = len(index_fields) > 0
psdf_from_pandas = None
else:
should_return_series = True
dtype = cast(Union[SeriesType, ScalarType], return_type).dtype
spark_type = cast(Union[SeriesType, ScalarType], return_type).spark_type
if is_series_groupby:
data_fields = [
InternalField(
dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)
)
]
else:
data_fields = [
InternalField(
dtype=dtype,
struct_field=StructField(
name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type
),
)
]
return_schema = StructType([field.struct_field for field in data_fields])
def pandas_groupby_apply(pdf: pd.DataFrame) -> pd.DataFrame:
if is_series_groupby:
pdf_or_ser = pdf.groupby(groupkey_names)[name].apply(pandas_apply, *args, **kwargs)
else:
pdf_or_ser = pdf.groupby(groupkey_names).apply(pandas_apply, *args, **kwargs)
if should_return_series and isinstance(pdf_or_ser, pd.DataFrame):
pdf_or_ser = pdf_or_ser.stack()
if not isinstance(pdf_or_ser, pd.DataFrame):
return pd.DataFrame(pdf_or_ser)
else:
return pdf_or_ser
sdf = GroupBy._spark_group_map_apply(
psdf,
pandas_groupby_apply,
[psdf._internal.spark_column_for(label) for label in groupkey_labels],
return_schema,
retain_index=should_retain_index,
)
if should_retain_index:
# If schema is inferred, we can restore indexes too.
if psdf_from_pandas is not None:
internal = psdf_from_pandas._internal.with_new_sdf(
spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
)
else:
index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None
index_spark_columns = [
scol_for(sdf, index_field.struct_field.name) for index_field in index_fields
]
if not any(
[
SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)
for index_field in index_fields
]
):
index_names = [(index_field.struct_field.name,) for index_field in index_fields]
internal = InternalFrame(
spark_frame=sdf,
index_names=index_names,
index_spark_columns=index_spark_columns,
index_fields=index_fields,
data_fields=data_fields,
)
else:
# Otherwise, it loses index.
internal = InternalFrame(
spark_frame=sdf, index_spark_columns=None, data_fields=data_fields
)
if should_return_series:
psser = first_series(DataFrame(internal))
if is_series_groupby:
psser = psser.rename(cast(SeriesGroupBy, self)._psser.name)
return psser
else:
return DataFrame(internal)
# TODO: implement 'dropna' parameter
def filter(self, func: Callable[[FrameLike], FrameLike]) -> FrameLike:
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
        func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame or Series
Notes
-----
        Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = ps.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]}, columns=['A', 'B', 'C'])
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
>>> df.B.groupby(df.A).filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
"""
if not callable(func):
raise TypeError("%s object is not callable" % type(func).__name__)
is_series_groupby = isinstance(self, SeriesGroupBy)
psdf = self._psdf
if self._agg_columns_selected:
agg_columns = self._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in self._column_labels_to_exclude
]
data_schema = (
psdf[agg_columns]._internal.resolved_copy.spark_frame.drop(*HIDDEN_COLUMNS).schema
)
psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(
psdf, self._groupkeys, agg_columns
)
if is_series_groupby:
def pandas_filter(pdf: pd.DataFrame) -> pd.DataFrame:
return pd.DataFrame(pdf.groupby(groupkey_names)[pdf.columns[-1]].filter(func))
else:
f = _builtin_table.get(func, func)
def wrapped_func(pdf: pd.DataFrame) -> pd.DataFrame:
return f(pdf.drop(groupkey_names, axis=1))
def pandas_filter(pdf: pd.DataFrame) -> pd.DataFrame:
return pdf.groupby(groupkey_names).filter(wrapped_func).drop(groupkey_names, axis=1)
sdf = GroupBy._spark_group_map_apply(
psdf,
pandas_filter,
[psdf._internal.spark_column_for(label) for label in groupkey_labels],
data_schema,
retain_index=True,
)
psdf = DataFrame(self._psdf[agg_columns]._internal.with_new_sdf(sdf))
if is_series_groupby:
return cast(FrameLike, first_series(psdf))
else:
return cast(FrameLike, psdf)
@staticmethod
def _prepare_group_map_apply(
psdf: DataFrame, groupkeys: List[Series], agg_columns: List[Series]
) -> Tuple[DataFrame, List[Label], List[str]]:
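        """
        Rename the group keys to temporary '__groupkey_<i>__' labels and select them
        together with the aggregation columns. Returns the resolved DataFrame plus the
        temporary group-key labels and their flattened column names.
        """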
groupkey_labels: List[Label] = [
verify_temp_column_name(psdf, "__groupkey_{}__".format(i))
for i in range(len(groupkeys))
]
psdf = psdf[[s.rename(label) for s, label in zip(groupkeys, groupkey_labels)] + agg_columns]
groupkey_names = [label if len(label) > 1 else label[0] for label in groupkey_labels]
return DataFrame(psdf._internal.resolved_copy), groupkey_labels, groupkey_names
@staticmethod
def _spark_group_map_apply(
psdf: DataFrame,
func: Callable[[pd.DataFrame], pd.DataFrame],
groupkeys_scols: List[Column],
return_schema: StructType,
retain_index: bool,
) -> SparkDataFrame:
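        """
        Wrap ``func`` so it can rebuild the pandas frame inside the UDF, then run it
        per group with ``groupby(...).applyInPandas`` using the given return schema.
        """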
output_func = GroupBy._make_pandas_df_builder_func(psdf, func, return_schema, retain_index)
sdf = psdf._internal.spark_frame.drop(*HIDDEN_COLUMNS)
return sdf.groupby(*groupkeys_scols).applyInPandas(output_func, return_schema)
@staticmethod
def _make_pandas_df_builder_func(
psdf: DataFrame,
func: Callable[[pd.DataFrame], pd.DataFrame],
return_schema: StructType,
retain_index: bool,
) -> Callable[[pd.DataFrame], pd.DataFrame]:
"""
Creates a function that can be used inside the pandas UDF. This function can construct
the same pandas DataFrame as if the pandas-on-Spark DataFrame is collected to driver side.
The index, column labels, etc. are re-constructed within the function.
"""
from pyspark.sql.utils import is_timestamp_ntz_preferred
arguments_for_restore_index = psdf._internal.arguments_for_restore_index
prefer_timestamp_ntz = is_timestamp_ntz_preferred()
def rename_output(pdf: pd.DataFrame) -> pd.DataFrame:
pdf = InternalFrame.restore_index(pdf.copy(), **arguments_for_restore_index)
pdf = func(pdf)
            # If the schema should be inferred, we don't restore the index. pandas seems to
            # restore the index in some cases.
# When Spark output type is specified, without executing it, we don't know
# if we should restore the index or not. For instance, see the example in
# https://github.com/pyspark.pandas/issues/628.
pdf, _, _, _, _ = InternalFrame.prepare_pandas_frame(
pdf, retain_index=retain_index, prefer_timestamp_ntz=prefer_timestamp_ntz
)
# Just positionally map the column names to given schema's.
pdf.columns = return_schema.names
return pdf
return rename_output
def rank(self, method: str = "average", ascending: bool = True) -> FrameLike:
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
DataFrame with ranking of values within each group
Examples
--------
>>> df = ps.DataFrame({
... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df
a b
0 1 1
1 1 2
2 1 2
3 2 2
4 2 3
5 2 3
6 3 3
7 3 4
8 3 4
>>> df.groupby("a").rank().sort_index()
b
0 1.0
1 2.5
2 2.5
3 1.0
4 2.5
5 2.5
6 1.0
7 2.5
8 2.5
>>> df.b.groupby(df.a).rank(method='max').sort_index()
0 1.0
1 3.0
2 3.0
3 1.0
4 3.0
5 3.0
6 1.0
7 3.0
8 3.0
Name: b, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._psser._rank(method, ascending, part_cols=sg._groupkeys_scols),
should_resolve=True,
)
# TODO: add axis parameter
def idxmax(self, skipna: bool = True) -> FrameLike:
"""
Return index of first occurrence of maximum over requested axis in group.
NA/null values are excluded.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
See Also
--------
Series.idxmax
DataFrame.idxmax
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 2, 2, 3],
... 'b': [1, 2, 3, 4, 5],
... 'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])
>>> df.groupby(['a'])['b'].idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 1
2 3
3 4
Name: b, dtype: int64
>>> df.groupby(['a']).idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
a
1 1 0
2 3 2
3 4 4
"""
if self._psdf._internal.index_level != 1:
raise ValueError("idxmax only support one-level index now")
groupkey_names = ["__groupkey_{}__".format(i) for i in range(len(self._groupkeys))]
sdf = self._psdf._internal.spark_frame
for s, name in zip(self._groupkeys, groupkey_names):
sdf = sdf.withColumn(name, s.spark.column)
index = self._psdf._internal.index_spark_column_names[0]
stat_exprs = []
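        # For each aggregation column: order the group by that column (null handling
        # controlled by `skipna`), keep the index value of the top row via row_number,
        # and reduce it with max to obtain the idxmax per group.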
for psser, scol in zip(self._agg_columns, self._agg_columns_scols):
name = psser._internal.data_spark_column_names[0]
if skipna:
order_column = scol.desc_nulls_last()
else:
order_column = scol.desc_nulls_first()
window = Window.partitionBy(*groupkey_names).orderBy(
order_column, NATURAL_ORDER_COLUMN_NAME
)
sdf = sdf.withColumn(
name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)
)
stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))
sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in self._groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(self._groupkeys, groupkey_names)
],
column_labels=[psser._column_label for psser in self._agg_columns],
data_spark_columns=[
scol_for(sdf, psser._internal.data_spark_column_names[0])
for psser in self._agg_columns
],
)
return self._cleanup_and_return(DataFrame(internal))
# TODO: add axis parameter
def idxmin(self, skipna: bool = True) -> FrameLike:
"""
Return index of first occurrence of minimum over requested axis in group.
NA/null values are excluded.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
See Also
--------
Series.idxmin
DataFrame.idxmin
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 2, 2, 3],
... 'b': [1, 2, 3, 4, 5],
... 'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])
>>> df.groupby(['a'])['b'].idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 0
2 2
3 4
Name: b, dtype: int64
>>> df.groupby(['a']).idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
a
1 0 1
2 2 3
3 4 4
"""
if self._psdf._internal.index_level != 1:
raise ValueError("idxmin only support one-level index now")
groupkey_names = ["__groupkey_{}__".format(i) for i in range(len(self._groupkeys))]
sdf = self._psdf._internal.spark_frame
for s, name in zip(self._groupkeys, groupkey_names):
sdf = sdf.withColumn(name, s.spark.column)
index = self._psdf._internal.index_spark_column_names[0]
stat_exprs = []
for psser, scol in zip(self._agg_columns, self._agg_columns_scols):
name = psser._internal.data_spark_column_names[0]
if skipna:
order_column = scol.asc_nulls_last()
else:
order_column = scol.asc_nulls_first()
window = Window.partitionBy(*groupkey_names).orderBy(
order_column, NATURAL_ORDER_COLUMN_NAME
)
sdf = sdf.withColumn(
name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)
)
stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))
sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in self._groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(self._groupkeys, groupkey_names)
],
column_labels=[psser._column_label for psser in self._agg_columns],
data_spark_columns=[
scol_for(sdf, psser._internal.data_spark_column_names[0])
for psser in self._agg_columns
],
)
return self._cleanup_and_return(DataFrame(internal))
def fillna(
self,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit: Optional[int] = None,
) -> FrameLike:
"""Fill NA/NaN values in group.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series.
            pad / ffill: propagate last valid observation forward to next valid.
            backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ps.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
We can also propagate non-null values forward or backward in group.
>>> df.groupby(['A'])['B'].fillna(method='ffill').sort_index()
0 2.0
1 4.0
2 NaN
3 3.0
Name: B, dtype: float64
>>> df.groupby(['A']).fillna(method='bfill').sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 3.0 1.0 5
3 3.0 1.0 4
"""
return self._apply_series_op(
lambda sg: sg._psser._fillna(
value=value, method=method, axis=axis, limit=limit, part_cols=sg._groupkeys_scols
),
should_resolve=(method is not None),
)
def bfill(self, limit: Optional[int] = None) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` with ``method='bfill'``.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ps.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
Propagate non-null values backward.
>>> df.groupby(['A']).bfill().sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 3.0 1.0 5
3 3.0 1.0 4
"""
return self.fillna(method="bfill", limit=limit)
backfill = bfill
def ffill(self, limit: Optional[int] = None) -> FrameLike:
"""
        Synonym for `DataFrame.fillna()` with ``method='ffill'``.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ps.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
Propagate non-null values forward.
>>> df.groupby(['A']).ffill().sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 NaN NaN 5
3 3.0 1.0 4
"""
return self.fillna(method="ffill", limit=limit)
pad = ffill
def _limit(self, n: int, asc: bool) -> FrameLike:
"""
Private function for tail and head.
"""
psdf = self._psdf
if self._agg_columns_selected:
agg_columns = self._agg_columns
else:
agg_columns = [
psdf._psser_for(label)
for label in psdf._internal.column_labels
if label not in self._column_labels_to_exclude
]
psdf, groupkey_labels, _ = GroupBy._prepare_group_map_apply(
psdf,
self._groupkeys,
agg_columns,
)
groupkey_scols = [psdf._internal.spark_column_for(label) for label in groupkey_labels]
sdf = psdf._internal.spark_frame
window = Window.partitionBy(*groupkey_scols)
# This part is handled differently depending on whether it is a tail or a head.
ordered_window = (
window.orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).asc())
if asc
else window.orderBy(F.col(NATURAL_ORDER_COLUMN_NAME).desc())
)
if n >= 0 or LooseVersion(pd.__version__) < LooseVersion("1.4.0"):
tmp_row_num_col = verify_temp_column_name(sdf, "__row_number__")
sdf = (
sdf.withColumn(tmp_row_num_col, F.row_number().over(ordered_window))
.filter(F.col(tmp_row_num_col) <= n)
.drop(tmp_row_num_col)
)
else:
# Pandas supports Groupby positional indexing since v1.4.0
# https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#groupby-positional-indexing
#
            # To support groupby positional indexing, we need to add a `__tmp_lag__` column to
            # help us filter rows before the specified offset row.
#
# For example for the dataframe:
# >>> df = ps.DataFrame([["g", "g0"],
# ... ["g", "g1"],
# ... ["g", "g2"],
# ... ["g", "g3"],
# ... ["h", "h0"],
# ... ["h", "h1"]], columns=["A", "B"])
# >>> df.groupby("A").head(-1)
#
            # Below is a result showing the `__tmp_lag__` column for the above df. The limit n is
            # `-1`, so `__tmp_lag__` is set to `0` in rows[:-1] and the remaining rows are set to
            # `null`:
#
# >>> sdf.withColumn(tmp_lag_col, F.lag(F.lit(0), -1).over(ordered_window))
# +-----------------+--------------+---+---+-----------------+-----------+
# |__index_level_0__|__groupkey_0__| A| B|__natural_order__|__tmp_lag__|
# +-----------------+--------------+---+---+-----------------+-----------+
# | 0| g| g| g0| 0| 0|
# | 1| g| g| g1| 8589934592| 0|
# | 2| g| g| g2| 17179869184| 0|
# | 3| g| g| g3| 25769803776| null|
# | 4| h| h| h0| 34359738368| 0|
# | 5| h| h| h1| 42949672960| null|
# +-----------------+--------------+---+---+-----------------+-----------+
#
tmp_lag_col = verify_temp_column_name(sdf, "__tmp_lag__")
sdf = (
sdf.withColumn(tmp_lag_col, F.lag(F.lit(0), n).over(ordered_window))
.where(~F.isnull(F.col(tmp_lag_col)))
.drop(tmp_lag_col)
)
internal = psdf._internal.with_new_sdf(sdf)
return self._cleanup_and_return(DataFrame(internal).drop(groupkey_labels, axis=1))
def head(self, n: int = 5) -> FrameLike:
"""
Return first n rows of each group.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
... 'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},
... columns=['a', 'b', 'c'],
... index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6])
>>> df
a b c
7 1 2 3
2 1 3 5
4 1 1 2
1 1 4 5
3 2 6 1
4 2 9 2
9 2 8 6
10 3 10 4
5 3 7 3
6 3 5 6
>>> df.groupby('a').head(2).sort_index()
a b c
2 1 3 5
3 2 6 1
4 2 9 2
5 3 7 3
7 1 2 3
10 3 10 4
>>> df.groupby('a')['b'].head(2).sort_index()
2 3
3 6
4 9
5 7
7 2
10 10
Name: b, dtype: int64
Supports GroupBy positional indexing since pandas-on-Spark 3.4 (with pandas 1.4+):
>>> df = ps.DataFrame([["g", "g0"],
... ["g", "g1"],
... ["g", "g2"],
... ["g", "g3"],
... ["h", "h0"],
... ["h", "h1"]], columns=["A", "B"])
>>> df.groupby("A").head(-1) # doctest: +SKIP
A B
0 g g0
1 g g1
2 g g2
4 h h0
"""
return self._limit(n, asc=True)
def tail(self, n: int = 5) -> FrameLike:
"""
Return last n rows of each group.
Similar to `.apply(lambda x: x.tail(n))`, but it returns a subset of rows from
the original DataFrame with original index and order preserved (`as_index` flag is ignored).
Negative values of n are supported only with pandas-on-Spark 3.4+ (pandas 1.4+); see the example below.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
... 'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},
... columns=['a', 'b', 'c'],
... index=[7, 2, 3, 1, 3, 4, 9, 10, 5, 6])
>>> df
a b c
7 1 2 3
2 1 3 5
3 1 1 2
1 1 4 5
3 2 6 1
4 2 9 2
9 2 8 6
10 3 10 4
5 3 7 3
6 3 5 6
>>> df.groupby('a').tail(2).sort_index()
a b c
1 1 4 5
3 1 1 2
4 2 9 2
5 3 7 3
6 3 5 6
9 2 8 6
>>> df.groupby('a')['b'].tail(2).sort_index()
1 4
3 1
4 9
5 7
6 5
9 8
Name: b, dtype: int64
Supports GroupBy positional indexing since pandas-on-Spark 3.4 (with pandas 1.4+):
>>> df = ps.DataFrame([["g", "g0"],
... ["g", "g1"],
... ["g", "g2"],
... ["g", "g3"],
... ["h", "h0"],
... ["h", "h1"]], columns=["A", "B"])
>>> df.groupby("A").tail(-1) # doctest: +SKIP
A B
3 g g3
2 g g2
1 g g1
5 h h1
"""
return self._limit(n, asc=False)
def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> FrameLike:
"""
Shift each group by periods observations.
Parameters
----------
periods : integer, default 1
Number of periods to shift.
fill_value : optional
The scalar value to use for newly introduced missing values.
Returns
-------
Series or DataFrame
Object shifted within each group.
Examples
--------
>>> df = ps.DataFrame({
... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df
a b
0 1 1
1 1 2
2 1 2
3 2 2
4 2 3
5 2 3
6 3 3
7 3 4
8 3 4
>>> df.groupby('a').shift().sort_index() # doctest: +SKIP
b
0 NaN
1 1.0
2 2.0
3 NaN
4 2.0
5 3.0
6 NaN
7 3.0
8 4.0
>>> df.groupby('a').shift(periods=-1, fill_value=0).sort_index() # doctest: +SKIP
b
0 2
1 2
2 0
3 3
4 3
5 0
6 4
7 4
8 0
"""
return self._apply_series_op(
lambda sg: sg._psser._shift(periods, fill_value, part_cols=sg._groupkeys_scols),
should_resolve=True,
)
def transform(self, func: Callable[..., pd.Series], *args: Any, **kwargs: Any) -> FrameLike:
"""
Apply function column-by-column to the GroupBy object.
The function passed to `transform` must take a Series as its first
argument and return a Series. The given function is executed for
each series in each grouped data.
While `transform` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg`. pandas-on-Spark offers a wide range of methods that will
be much faster than using `transform` for their specific purposes, so try to
use them before reaching for `transform`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def convert_to_string(x) -> ps.Series[str]:
... return x.apply("a string {}".format)
When the given function has the return type annotated, the original index of the
GroupBy object will be lost and a default index will be attached to the result.
Please be careful about configuring the default index. See also `Default Index Type
<https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.
.. note:: the series within ``func`` is actually a pandas series. Therefore,
any pandas API within this function is allowed.
Parameters
----------
func : callable
A callable that takes a Series as its first argument, and
returns a Series.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
applied : DataFrame
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ps.DataFrame({'A': [0, 0, 1],
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``0`` and ``1``.
Calling `transform` in various ways, we can get different grouping results:
Below, the function passed to `transform` takes a Series as
its argument and returns a Series. `transform` applies the function on each series
in each grouped data, and combines them into a new DataFrame:
>>> def convert_to_string(x) -> ps.Series[str]:
... return x.apply("a string {}".format)
>>> g.transform(convert_to_string) # doctest: +NORMALIZE_WHITESPACE
B C
0 a string 1 a string 4
1 a string 2 a string 6
2 a string 3 a string 5
>>> def plus_max(x) -> ps.Series[np.int]:
... return x + x.max()
>>> g.transform(plus_max) # doctest: +NORMALIZE_WHITESPACE
B C
0 3 10
1 4 12
2 6 10
You can omit the type hint and let pandas-on-Spark infer its type.
>>> def plus_min(x):
... return x + x.min()
>>> g.transform(plus_min) # doctest: +NORMALIZE_WHITESPACE
B C
0 2 8
1 3 10
2 6 10
In case of Series, it works as below.
>>> df.B.groupby(df.A).transform(plus_max)
0 3
1 4
2 6
Name: B, dtype: int64
>>> (df * -1).B.groupby(df.A).transform(abs)
0 1
1 2
2 3
Name: B, dtype: int64
You can also specify extra arguments to pass to the function.
>>> def calculation(x, y, z) -> ps.Series[np.int]:
... return x + x.min() + y + z
>>> g.transform(calculation, 5, z=20) # doctest: +NORMALIZE_WHITESPACE
B C
0 27 33
1 28 35
2 31 35
"""
if not callable(func):
raise TypeError("%s object is not callable" % type(func).__name__)
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
psdf, groupkey_labels, groupkey_names = GroupBy._prepare_group_map_apply(
self._psdf, self._groupkeys, agg_columns=self._agg_columns
)
def pandas_transform(pdf: pd.DataFrame) -> pd.DataFrame:
return pdf.groupby(groupkey_names).transform(func, *args, **kwargs)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
log_advice(
"If the type hints is not specified for `grouby.transform`, "
"it is expensive to infer the data type internally."
)
limit = get_option("compute.shortcut_limit")
pdf = psdf.head(limit + 1)._to_internal_pandas()
pdf = pdf.groupby(groupkey_names).transform(func, *args, **kwargs)
psdf_from_pandas: DataFrame = DataFrame(pdf)
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(
psdf_from_pandas._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema
)
)
if len(pdf) <= limit:
return self._cleanup_and_return(psdf_from_pandas)
sdf = GroupBy._spark_group_map_apply(
psdf,
pandas_transform,
[psdf._internal.spark_column_for(label) for label in groupkey_labels],
return_schema,
retain_index=True,
)
# If schema is inferred, we can restore indexes too.
internal = psdf_from_pandas._internal.with_new_sdf(
sdf,
index_fields=[
field.copy(nullable=True) for field in psdf_from_pandas._internal.index_fields
],
data_fields=[
field.copy(nullable=True) for field in psdf_from_pandas._internal.data_fields
],
)
else:
return_type = infer_return_type(func)
if not isinstance(return_type, SeriesType):
raise TypeError(
"Expected the return type of this function to be of Series type, "
"but found type {}".format(return_type)
)
dtype = return_type.dtype
spark_type = return_type.spark_type
data_fields = [
InternalField(dtype=dtype, struct_field=StructField(name=c, dataType=spark_type))
for c in psdf._internal.data_spark_column_names
if c not in groupkey_names
]
return_schema = StructType([field.struct_field for field in data_fields])
sdf = GroupBy._spark_group_map_apply(
psdf,
pandas_transform,
[psdf._internal.spark_column_for(label) for label in groupkey_labels],
return_schema,
retain_index=False,
)
# Otherwise, it loses index.
internal = InternalFrame(
spark_frame=sdf, index_spark_columns=None, data_fields=data_fields
)
return self._cleanup_and_return(DataFrame(internal))
def nunique(self, dropna: bool = True) -> FrameLike:
"""
Return DataFrame with number of distinct observations per group for each column.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : DataFrame or Series
Examples
--------
>>> df = ps.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')}, columns=['id', 'value1', 'value2'])
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique().sort_index() # doctest: +SKIP
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
>>> df.groupby('id')['value1'].nunique().sort_index() # doctest: +NORMALIZE_WHITESPACE
id
egg 1
ham 1
spam 2
Name: value1, dtype: int64
"""
if dropna:
def stat_function(col: Column) -> Column:
return F.countDistinct(col)
else:
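# When dropna is False, count NaN/None as its own distinct value: take the
# count of distinct non-null values and add 1 if the group contains at
# least one null.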
def stat_function(col: Column) -> Column:
return F.countDistinct(col) + F.when(
F.count(F.when(col.isNull(), 1).otherwise(None)) >= 1, 1
).otherwise(0)
return self._reduce_for_stat_function(stat_function)
def rolling(
self, window: int, min_periods: Optional[int] = None
) -> "RollingGroupby[FrameLike]":
"""
Return a rolling grouper, providing rolling
functionality per group.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
See Also
--------
Series.groupby
DataFrame.groupby
"""
from pyspark.pandas.window import RollingGroupby
return RollingGroupby(self, window, min_periods=min_periods)
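# Example usage (sketch; assumes a DataFrame `df` with a grouping column 'a'
# and a numeric column 'b'):
#
#     df.groupby('a')['b'].rolling(3).sum()
#
# computes a per-group rolling sum over a fixed window of 3 rows.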
def expanding(self, min_periods: int = 1) -> "ExpandingGroupby[FrameLike]":
"""
Return an expanding grouper, providing expanding
functionality per group.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
See Also
--------
Series.groupby
DataFrame.groupby
"""
from pyspark.pandas.window import ExpandingGroupby
return ExpandingGroupby(self, min_periods=min_periods)
# TODO: 'adjust', 'axis', 'method' parameter should be implemented.
def ewm(
self,
com: Optional[float] = None,
span: Optional[float] = None,
halflife: Optional[float] = None,
alpha: Optional[float] = None,
min_periods: Optional[int] = None,
ignore_na: bool = False,
) -> "ExponentialMovingGroupby[FrameLike]":
"""
Return an ewm grouper, providing ewm functionality per group.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
.. versionadded:: 3.4.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass.
alpha = 1 / (1 + com), for com >= 0.
span : float, optional
Specify decay in terms of span.
alpha = 2 / (span + 1), for span >= 1.
halflife : float, optional
Specify decay in terms of half-life.
alpha = 1 - exp(-ln(2) / halflife), for halflife > 0.
alpha : float, optional
Specify smoothing factor alpha directly.
0 < alpha <= 1.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
ignore_na : bool, default False
Ignore missing values when calculating weights.
- When ``ignore_na=False`` (default), weights are based on absolute positions.
For example, the weights of :math:`x_0` and :math:`x_2` used in calculating
the final weighted average of [:math:`x_0`, None, :math:`x_2`] are
:math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and
:math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``.
- When ``ignore_na=True``, weights are based
on relative positions. For example, the weights of :math:`x_0` and :math:`x_2`
used in calculating the final weighted average of
[:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if
``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``.
"""
from pyspark.pandas.window import ExponentialMovingGroupby
return ExponentialMovingGroupby(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
ignore_na=ignore_na,
)
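# Example usage (sketch; assumes a DataFrame `df` with a grouping column 'a'
# and a numeric column 'b'):
#
#     df.groupby('a')['b'].ewm(com=0.5).mean()
#
# computes a per-group exponentially weighted moving average with
# alpha = 1 / (1 + com).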
def get_group(self, name: Union[Name, List[Name]]) -> FrameLike:
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
Returns
-------
group : same type as obj
Examples
--------
>>> psdf = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> psdf
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
>>> psdf.groupby("class").get_group("bird").sort_index()
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
>>> psdf.groupby("class").get_group("mammal").sort_index()
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
groupkeys = self._groupkeys
if not is_hashable(name):
raise TypeError("unhashable type: '{}'".format(type(name).__name__))
elif len(groupkeys) > 1:
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if len(groupkeys) != len(name):
raise ValueError(
"must supply a same-length tuple to get_group with multiple grouping keys"
)
if not is_list_like(name):
name = [name]
cond = SF.lit(True)
for groupkey, item in zip(groupkeys, name):
scol = groupkey.spark.column
cond = cond & (scol == item)
if self._agg_columns_selected:
internal = self._psdf._internal
spark_frame = internal.spark_frame.select(
internal.index_spark_columns + self._agg_columns_scols
).filter(cond)
internal = internal.copy(
spark_frame=spark_frame,
index_spark_columns=[
scol_for(spark_frame, col) for col in internal.index_spark_column_names
],
column_labels=[s._column_label for s in self._agg_columns],
data_spark_columns=[
scol_for(spark_frame, s._internal.data_spark_column_names[0])
for s in self._agg_columns
],
data_fields=[s._internal.data_fields[0] for s in self._agg_columns],
)
else:
internal = self._psdf._internal.with_filter(cond)
if internal.spark_frame.head() is None:
raise KeyError(name)
return self._cleanup_and_return(DataFrame(internal))
def median(self, numeric_only: Optional[bool] = True, accuracy: int = 10000) -> FrameLike:
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
.. versionadded:: 3.4.0
Returns
-------
Series or DataFrame
Median of values within each group.
Examples
--------
>>> psdf = ps.DataFrame({'a': [1., 1., 1., 1., 2., 2., 2., 3., 3., 3.],
... 'b': [2., 3., 1., 4., 6., 9., 8., 10., 7., 5.],
... 'c': [3., 5., 2., 5., 1., 2., 6., 4., 3., 6.]},
... columns=['a', 'b', 'c'],
... index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6])
>>> psdf
a b c
7 1.0 2.0 3.0
2 1.0 3.0 5.0
4 1.0 1.0 2.0
1 1.0 4.0 5.0
3 2.0 6.0 1.0
4 2.0 9.0 2.0
9 2.0 8.0 6.0
10 3.0 10.0 4.0
5 3.0 7.0 3.0
6 3.0 5.0 6.0
DataFrameGroupBy
>>> psdf.groupby('a').median().sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
a
1.0 2.0 3.0
2.0 8.0 2.0
3.0 7.0 4.0
SeriesGroupBy
>>> psdf.groupby('a')['b'].median().sort_index()
a
1.0 2.0
2.0 8.0
3.0 7.0
Name: b, dtype: float64
"""
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
self._validate_agg_columns(numeric_only=numeric_only, function_name="median")
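# percentile_approx with probability 0.5 returns an approximate median;
# larger `accuracy` values give more precise results at the cost of memory.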
def stat_function(col: Column) -> Column:
return F.percentile_approx(col, 0.5, accuracy)
return self._reduce_for_stat_function(
stat_function,
accepted_spark_types=(NumericType,),
bool_to_numeric=True,
)
def _validate_agg_columns(self, numeric_only: Optional[bool], function_name: str) -> None:
"""Validate aggregation columns and raise an error or a warning following pandas."""
has_non_numeric = False
for _agg_col in self._agg_columns:
if not isinstance(_agg_col.spark.data_type, (NumericType, BooleanType)):
has_non_numeric = True
break
if has_non_numeric:
if isinstance(self, SeriesGroupBy):
raise TypeError("Only numeric aggregation column is accepted.")
if not numeric_only:
if has_non_numeric:
warnings.warn(
"Dropping invalid columns in DataFrameGroupBy.mean is deprecated. "
"In a future version, a TypeError will be raised. "
"Before calling .%s, select only columns which should be "
"valid for the function." % function_name,
FutureWarning,
)
def _reduce_for_stat_function(
self,
sfun: Callable[[Column], Column],
accepted_spark_types: Optional[Tuple[Type[DataType], ...]] = None,
bool_to_numeric: bool = False,
) -> FrameLike:
"""Apply an aggregate function `sfun` per column and reduce to a FrameLike.
Parameters
----------
sfun : The aggregate function to apply per column.
accepted_spark_types: Accepted spark types of columns to be aggregated;
default None means all spark types are accepted.
bool_to_numeric: If True, boolean columns are converted to numeric columns, which
are accepted for all statistical functions regardless of
`accepted_spark_types`.
"""
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]
groupkey_scols = [s.alias(name) for s, name in zip(self._groupkeys_scols, groupkey_names)]
agg_columns = []
for psser in self._agg_columns:
if bool_to_numeric and isinstance(psser.spark.data_type, BooleanType):
agg_columns.append(psser.astype(int))
elif (accepted_spark_types is None) or isinstance(
psser.spark.data_type, accepted_spark_types
):
agg_columns.append(psser)
sdf = self._psdf._internal.spark_frame.select(
*groupkey_scols, *[psser.spark.column for psser in agg_columns]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in self._groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(self._groupkeys, groupkey_names)
],
data_spark_columns=[
scol_for(sdf, psser._internal.data_spark_column_names[0]) for psser in agg_columns
],
column_labels=[psser._column_label for psser in agg_columns],
data_fields=[psser._internal.data_fields[0] for psser in agg_columns],
column_label_names=self._psdf._internal.column_label_names,
)
psdf: DataFrame = DataFrame(internal)
if len(psdf._internal.column_labels) > 0:
stat_exprs = []
for label in psdf._internal.column_labels:
psser = psdf._psser_for(label)
stat_exprs.append(
sfun(psser._dtype_op.nan_to_null(psser).spark.column).alias(
psser._internal.data_spark_column_names[0]
)
)
sdf = sdf.groupby(*groupkey_names).agg(*stat_exprs)
else:
sdf = sdf.select(*groupkey_names).distinct()
internal = internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
data_spark_columns=[scol_for(sdf, col) for col in internal.data_spark_column_names],
data_fields=None,
)
psdf = DataFrame(internal)
if self._dropna:
psdf = DataFrame(
psdf._internal.with_new_sdf(
psdf._internal.spark_frame.dropna(
subset=psdf._internal.index_spark_column_names
)
)
)
if not self._as_index:
should_drop_index = set(
i for i, gkey in enumerate(self._groupkeys) if gkey._psdf is not self._psdf
)
if len(should_drop_index) > 0:
psdf = psdf.reset_index(level=should_drop_index, drop=True)
if len(should_drop_index) < len(self._groupkeys):
psdf = psdf.reset_index()
return self._cleanup_and_return(psdf)
@staticmethod
def _resolve_grouping_from_diff_dataframes(
psdf: DataFrame, by: List[Union[Series, Label]]
) -> Tuple[DataFrame, List[Series], Set[Label]]:
column_labels_level = psdf._internal.column_labels_level
column_labels = []
additional_pssers = []
additional_column_labels = []
tmp_column_labels = set()
for i, col_or_s in enumerate(by):
if isinstance(col_or_s, Series):
if col_or_s._psdf is psdf:
column_labels.append(col_or_s._column_label)
elif same_anchor(col_or_s, psdf):
temp_label = verify_temp_column_name(psdf, "__tmp_groupkey_{}__".format(i))
column_labels.append(temp_label)
additional_pssers.append(col_or_s.rename(temp_label))
additional_column_labels.append(temp_label)
else:
temp_label = verify_temp_column_name(
psdf,
tuple(
([""] * (column_labels_level - 1)) + ["__tmp_groupkey_{}__".format(i)]
),
)
column_labels.append(temp_label)
tmp_column_labels.add(temp_label)
elif isinstance(col_or_s, tuple):
psser = psdf[col_or_s]
if not isinstance(psser, Series):
raise ValueError(name_like_string(col_or_s))
column_labels.append(col_or_s)
else:
raise ValueError(col_or_s)
psdf = DataFrame(
psdf._internal.with_new_columns(
[psdf._psser_for(label) for label in psdf._internal.column_labels]
+ additional_pssers
)
)
def assign_columns(
psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]
) -> Iterator[Tuple[Series, Label]]:
raise NotImplementedError(
"Duplicated labels with groupby() and "
"'compute.ops_on_diff_frames' option are not supported currently "
"Please use unique labels in series and frames."
)
for col_or_s, label in zip(by, column_labels):
if label in tmp_column_labels:
psser = col_or_s
psdf = align_diff_frames(
assign_columns,
psdf,
psser.rename(label),
fillna=False,
how="inner",
preserve_order_column=True,
)
tmp_column_labels |= set(additional_column_labels)
new_by_series = []
for col_or_s, label in zip(by, column_labels):
if label in tmp_column_labels:
psser = col_or_s
new_by_series.append(psdf._psser_for(label).rename(psser.name))
else:
new_by_series.append(psdf._psser_for(label))
return psdf, new_by_series, tmp_column_labels
@staticmethod
def _resolve_grouping(psdf: DataFrame, by: List[Union[Series, Label]]) -> List[Series]:
new_by_series = []
for col_or_s in by:
if isinstance(col_or_s, Series):
new_by_series.append(col_or_s)
elif isinstance(col_or_s, tuple):
psser = psdf[col_or_s]
if not isinstance(psser, Series):
raise ValueError(name_like_string(col_or_s))
new_by_series.append(psser)
else:
raise ValueError(col_or_s)
return new_by_series
class DataFrameGroupBy(GroupBy[DataFrame]):
@staticmethod
def _build(
psdf: DataFrame, by: List[Union[Series, Label]], as_index: bool, dropna: bool
) -> "DataFrameGroupBy":
if any(isinstance(col_or_s, Series) and not same_anchor(psdf, col_or_s) for col_or_s in by):
(
psdf,
new_by_series,
column_labels_to_exclude,
) = GroupBy._resolve_grouping_from_diff_dataframes(psdf, by)
else:
new_by_series = GroupBy._resolve_grouping(psdf, by)
column_labels_to_exclude = set()
return DataFrameGroupBy(
psdf,
new_by_series,
as_index=as_index,
dropna=dropna,
column_labels_to_exclude=column_labels_to_exclude,
)
def __init__(
self,
psdf: DataFrame,
by: List[Series],
as_index: bool,
dropna: bool,
column_labels_to_exclude: Set[Label],
agg_columns: List[Label] = None,
):
agg_columns_selected = agg_columns is not None
if agg_columns_selected:
for label in agg_columns:
if label in column_labels_to_exclude:
raise KeyError(label)
else:
agg_columns = [
label
for label in psdf._internal.column_labels
if not any(label == key._column_label and key._psdf is psdf for key in by)
and label not in column_labels_to_exclude
]
super().__init__(
psdf=psdf,
groupkeys=by,
as_index=as_index,
dropna=dropna,
column_labels_to_exclude=column_labels_to_exclude,
agg_columns_selected=agg_columns_selected,
agg_columns=[psdf[label] for label in agg_columns],
)
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeDataFrameGroupBy, item):
property_or_func = getattr(MissingPandasLikeDataFrameGroupBy, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
return self.__getitem__(item)
def __getitem__(self, item: Any) -> GroupBy:
if self._as_index and is_name_like_value(item):
return SeriesGroupBy(
self._psdf._psser_for(item if is_name_like_tuple(item) else (item,)),
self._groupkeys,
dropna=self._dropna,
)
else:
if is_name_like_tuple(item):
item = [item]
elif is_name_like_value(item):
item = [(item,)]
else:
item = [i if is_name_like_tuple(i) else (i,) for i in item]
if not self._as_index:
groupkey_names = set(key._column_label for key in self._groupkeys)
for name in item:
if name in groupkey_names:
raise ValueError(
"cannot insert {}, already exists".format(name_like_string(name))
)
return DataFrameGroupBy(
self._psdf,
self._groupkeys,
as_index=self._as_index,
dropna=self._dropna,
column_labels_to_exclude=self._column_labels_to_exclude,
agg_columns=item,
)
def _apply_series_op(
self,
op: Callable[["SeriesGroupBy"], Series],
should_resolve: bool = False,
numeric_only: bool = False,
) -> DataFrame:
applied = []
for column in self._agg_columns:
applied.append(op(column.groupby(self._groupkeys)))
if numeric_only:
applied = [col for col in applied if isinstance(col.spark.data_type, NumericType)]
if not applied:
raise DataError("No numeric types to aggregate")
internal = self._psdf._internal.with_new_columns(applied, keep_order=False)
if should_resolve:
internal = internal.resolved_copy
return DataFrame(internal)
def _cleanup_and_return(self, psdf: DataFrame) -> DataFrame:
return psdf
# TODO: Implement 'percentiles', 'include', and 'exclude' arguments.
# TODO: Add ``DataFrame.select_dtypes`` to See Also when 'include'
# and 'exclude' arguments are implemented.
def describe(self) -> DataFrame:
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
.. note:: Unlike pandas, the percentiles in pandas-on-Spark are based upon
approximate percentile computation because computing percentiles
across a large dataset is extremely expensive.
Returns
-------
DataFrame
Summary statistics of the DataFrame provided.
See Also
--------
DataFrame.count
DataFrame.max
DataFrame.min
DataFrame.mean
DataFrame.std
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df
a b c
0 1 4 7
1 1 5 8
2 3 6 9
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> described = df.groupby('a').describe()
>>> described.sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
count mean std min 25% 50% 75% max count mean std min 25% 50% 75% max
a
1 2.0 4.5 0.707107 4.0 4.0 4.0 5.0 5.0 2.0 7.5 0.707107 7.0 7.0 7.0 8.0 8.0
3 1.0 6.0 NaN 6.0 6.0 6.0 6.0 6.0 1.0 9.0 NaN 9.0 9.0 9.0 9.0 9.0
"""
for col in self._agg_columns:
if isinstance(col.spark.data_type, StringType):
raise NotImplementedError(
"DataFrameGroupBy.describe() doesn't support for string type for now"
)
psdf = self.aggregate(["count", "mean", "std", "min", "quartiles", "max"])
sdf = psdf._internal.spark_frame
agg_column_labels = [col._column_label for col in self._agg_columns]
formatted_percentiles = ["25%", "50%", "75%"]
# Split "quartiles" columns into first, second, and third quartiles.
for label in agg_column_labels:
quartiles_col = name_like_string(tuple(list(label) + ["quartiles"]))
for i, percentile in enumerate(formatted_percentiles):
sdf = sdf.withColumn(
name_like_string(tuple(list(label) + [percentile])),
scol_for(sdf, quartiles_col)[i],
)
sdf = sdf.drop(quartiles_col)
# Reorder columns lexicographically by agg column followed by stats.
stats = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
column_labels = [tuple(list(label) + [s]) for label, s in product(agg_column_labels, stats)]
data_columns = map(name_like_string, column_labels)
# Reindex the DataFrame to reflect initial grouping and agg columns.
internal = psdf._internal.copy(
spark_frame=sdf,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_fields=None,
)
# Cast columns to ``"float64"`` to match `pandas.DataFrame.groupby`.
return DataFrame(internal).astype("float64")
class SeriesGroupBy(GroupBy[Series]):
@staticmethod
def _build(
psser: Series, by: List[Union[Series, Label]], as_index: bool, dropna: bool
) -> "SeriesGroupBy":
if any(
isinstance(col_or_s, Series) and not same_anchor(psser, col_or_s) for col_or_s in by
):
psdf, new_by_series, _ = GroupBy._resolve_grouping_from_diff_dataframes(
psser.to_frame(), by
)
return SeriesGroupBy(
first_series(psdf).rename(psser.name),
new_by_series,
as_index=as_index,
dropna=dropna,
)
else:
new_by_series = GroupBy._resolve_grouping(psser._psdf, by)
return SeriesGroupBy(psser, new_by_series, as_index=as_index, dropna=dropna)
def __init__(self, psser: Series, by: List[Series], as_index: bool = True, dropna: bool = True):
if not as_index:
raise TypeError("as_index=False only valid with DataFrame")
super().__init__(
psdf=psser._psdf,
groupkeys=by,
as_index=True,
dropna=dropna,
column_labels_to_exclude=set(),
agg_columns_selected=True,
agg_columns=[psser],
)
self._psser = psser
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeSeriesGroupBy, item):
property_or_func = getattr(MissingPandasLikeSeriesGroupBy, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
raise AttributeError(item)
def _apply_series_op(
self,
op: Callable[["SeriesGroupBy"], Series],
should_resolve: bool = False,
numeric_only: bool = False,
) -> Series:
if numeric_only and not isinstance(self._agg_columns[0].spark.data_type, NumericType):
raise DataError("No numeric types to aggregate")
psser = op(self)
if should_resolve:
internal = psser._internal.resolved_copy
return first_series(DataFrame(internal))
else:
return psser.copy()
def _cleanup_and_return(self, psdf: DataFrame) -> Series:
return first_series(psdf).rename().rename(self._psser.name)
def agg(self, *args: Any, **kwargs: Any) -> None:
return MissingPandasLikeSeriesGroupBy.agg(self, *args, **kwargs)
def aggregate(self, *args: Any, **kwargs: Any) -> None:
return MissingPandasLikeSeriesGroupBy.aggregate(self, *args, **kwargs)
def size(self) -> Series:
return super().size().rename(self._psser.name)
size.__doc__ = GroupBy.size.__doc__
# TODO: add keep parameter
def nsmallest(self, n: int = 5) -> Series:
"""
Return the smallest `n` elements.
Parameters
----------
n : int
Number of items to retrieve.
See Also
--------
pyspark.pandas.Series.nsmallest
pyspark.pandas.DataFrame.nsmallest
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df.groupby(['a'])['b'].nsmallest(1).sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 0 1
2 3 2
3 6 3
Name: b, dtype: int64
"""
if self._psser._internal.index_level > 1:
raise ValueError("nsmallest do not support multi-index now")
groupkey_col_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]
sdf = self._psser._internal.spark_frame.select(
*[scol.alias(name) for scol, name in zip(self._groupkeys_scols, groupkey_col_names)],
*[
scol.alias(SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))
for i, scol in enumerate(self._psser._internal.index_spark_columns)
],
self._psser.spark.column,
NATURAL_ORDER_COLUMN_NAME,
)
window = Window.partitionBy(*groupkey_col_names).orderBy(
scol_for(sdf, self._psser._internal.data_spark_column_names[0]).asc(),
NATURAL_ORDER_COLUMN_NAME,
)
temp_rank_column = verify_temp_column_name(sdf, "__rank__")
sdf = (
sdf.withColumn(temp_rank_column, F.row_number().over(window))
.filter(F.col(temp_rank_column) <= n)
.drop(temp_rank_column)
).drop(NATURAL_ORDER_COLUMN_NAME)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=(
[scol_for(sdf, col) for col in groupkey_col_names]
+ [
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))
for i in range(self._psdf._internal.index_level)
]
),
index_names=(
[psser._column_label for psser in self._groupkeys]
+ self._psdf._internal.index_names
),
index_fields=(
[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(self._groupkeys, groupkey_col_names)
]
+ [
field.copy(name=SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))
for i, field in enumerate(self._psdf._internal.index_fields)
]
),
column_labels=[self._psser._column_label],
data_spark_columns=[scol_for(sdf, self._psser._internal.data_spark_column_names[0])],
data_fields=[self._psser._internal.data_fields[0]],
)
return first_series(DataFrame(internal))
# TODO: add keep parameter
def nlargest(self, n: int = 5) -> Series:
"""
Return the first n rows ordered by columns in descending order in group.
Return the first n rows with the largest values in columns, in descending order.
The columns that are not specified are returned as well, but not used for ordering.
Parameters
----------
n : int
Number of items to retrieve.
See Also
--------
pyspark.pandas.Series.nlargest
pyspark.pandas.DataFrame.nlargest
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df.groupby(['a'])['b'].nlargest(1).sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 1 2
2 4 3
3 7 4
Name: b, dtype: int64
"""
if self._psser._internal.index_level > 1:
raise ValueError("nlargest do not support multi-index now")
groupkey_col_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(self._groupkeys))]
sdf = self._psser._internal.spark_frame.select(
*[scol.alias(name) for scol, name in zip(self._groupkeys_scols, groupkey_col_names)],
*[
scol.alias(SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))
for i, scol in enumerate(self._psser._internal.index_spark_columns)
],
self._psser.spark.column,
NATURAL_ORDER_COLUMN_NAME,
)
window = Window.partitionBy(*groupkey_col_names).orderBy(
scol_for(sdf, self._psser._internal.data_spark_column_names[0]).desc(),
NATURAL_ORDER_COLUMN_NAME,
)
temp_rank_column = verify_temp_column_name(sdf, "__rank__")
sdf = (
sdf.withColumn(temp_rank_column, F.row_number().over(window))
.filter(F.col(temp_rank_column) <= n)
.drop(temp_rank_column)
).drop(NATURAL_ORDER_COLUMN_NAME)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=(
[scol_for(sdf, col) for col in groupkey_col_names]
+ [
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))
for i in range(self._psdf._internal.index_level)
]
),
index_names=(
[psser._column_label for psser in self._groupkeys]
+ self._psdf._internal.index_names
),
index_fields=(
[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(self._groupkeys, groupkey_col_names)
]
+ [
field.copy(name=SPARK_INDEX_NAME_FORMAT(i + len(self._groupkeys)))
for i, field in enumerate(self._psdf._internal.index_fields)
]
),
column_labels=[self._psser._column_label],
data_spark_columns=[scol_for(sdf, self._psser._internal.data_spark_column_names[0])],
data_fields=[self._psser._internal.data_fields[0]],
)
return first_series(DataFrame(internal))
# TODO: add bins, normalize parameter
def value_counts(
self, sort: Optional[bool] = None, ascending: Optional[bool] = None, dropna: bool = True
) -> Series:
"""
Compute the counts of unique values within each group.
Parameters
----------
sort : boolean, default None
Sort by frequencies.
ascending : boolean, default None
Sort in ascending order.
dropna : boolean, default True
Don't include counts of NaN.
See Also
--------
pyspark.pandas.Series.groupby
pyspark.pandas.DataFrame.groupby
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3, 3, 3],
... 'B': [1, 1, 2, 3, 3, np.nan]},
... columns=['A', 'B'])
>>> df
A B
0 1 1.0
1 2 1.0
2 2 2.0
3 3 3.0
4 3 3.0
5 3 NaN
>>> df.groupby('A')['B'].value_counts().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
1 1.0 1
2 1.0 1
2.0 1
3 3.0 2
Name: B, dtype: int64
Include counts of NaN by setting dropna to False.
>>> df.groupby('A')['B'].value_counts(
... dropna=False).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
1 1.0 1
2 1.0 1
2.0 1
3 3.0 2
NaN 1
Name: B, dtype: int64
"""
groupkeys = self._groupkeys + self._agg_columns
groupkey_names = [SPARK_INDEX_NAME_FORMAT(i) for i in range(len(groupkeys))]
groupkey_cols = [s.spark.column.alias(name) for s, name in zip(groupkeys, groupkey_names)]
sdf = self._psdf._internal.spark_frame
agg_column = self._agg_columns[0]._internal.data_spark_column_names[0]
sdf = sdf.groupby(*groupkey_cols).count().withColumnRenamed("count", agg_column)
if self._dropna:
_groupkey_column_names = groupkey_names[: len(self._groupkeys)]
sdf = sdf.dropna(subset=_groupkey_column_names)
if dropna:
_agg_columns_names = groupkey_names[len(self._groupkeys) :]
sdf = sdf.dropna(subset=_agg_columns_names)
if sort:
if ascending:
sdf = sdf.orderBy(scol_for(sdf, agg_column).asc())
else:
sdf = sdf.orderBy(scol_for(sdf, agg_column).desc())
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in groupkey_names],
index_names=[psser._column_label for psser in groupkeys],
index_fields=[
psser._internal.data_fields[0].copy(name=name)
for psser, name in zip(groupkeys, groupkey_names)
],
column_labels=[self._agg_columns[0]._column_label],
data_spark_columns=[scol_for(sdf, agg_column)],
)
return first_series(DataFrame(internal))
def unique(self) -> Series:
"""
Return unique values in group.
Uniques are returned in an unspecified order; this method does NOT sort.
See Also
--------
pyspark.pandas.Series.unique
pyspark.pandas.Index.unique
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df.groupby(['a'])['b'].unique().sort_index() # doctest: +SKIP
a
1 [1, 2]
2 [2, 3]
3 [3, 4]
Name: b, dtype: object
"""
return self._reduce_for_stat_function(F.collect_set)
def is_multi_agg_with_relabel(**kwargs: Any) -> bool:
"""
Check whether the kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a='max')
False
>>> is_multi_agg_with_relabel(a_max=('a', 'max'),
... a_min=('a', 'min'))
True
>>> is_multi_agg_with_relabel()
False
"""
if not kwargs:
return False
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())
def normalize_keyword_aggregation(
kwargs: Dict[str, Tuple[Name, str]],
) -> Tuple[Dict[Name, List[str]], List[str], List[Tuple]]:
"""
Normalize user-provided kwargs.
Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
to the old defaultdict[str, List[scalar]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
order : List[Tuple[str, str]]
Pairs of the input and output column names.
Examples
--------
>>> normalize_keyword_aggregation({'output': ('input', 'sum')})
(defaultdict(<class 'list'>, {'input': ['sum']}), ['output'], [('input', 'sum')])
"""
aggspec: Dict[Union[Any, Tuple], List[str]] = defaultdict(list)
order: List[Tuple] = []
columns, pairs = zip(*kwargs.items())
for column, aggfunc in pairs:
if column in aggspec:
aggspec[column].append(aggfunc)
else:
aggspec[column] = [aggfunc]
order.append((column, aggfunc))
# For MultiIndex, we need to flatten the tuple, e.g. (('y', 'A'), 'max') needs to be
# flattened to ('y', 'A', 'max'), it won't do anything on normal Index.
if isinstance(order[0][0], tuple):
order = [(*levs, method) for levs, method in order]
return aggspec, list(columns), order
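# For example (sketch), a MultiIndex column spec such as
# {'out': (('y', 'A'), 'max')} is normalized to
# (defaultdict(list, {('y', 'A'): ['max']}), ['out'], [('y', 'A', 'max')]).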
def _test() -> None:
import os
import doctest
import sys
import numpy
from pyspark.sql import SparkSession
import pyspark.pandas.groupby
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.groupby.__dict__.copy()
globs["np"] = numpy
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.groupby tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.groupby,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| [] | [] | ["SPARK_HOME"] | [] | ["SPARK_HOME"] | python | 1 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/Swift5ClientCodegen.java | /*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.text.WordUtils;
import org.openapitools.codegen.*;
import org.openapitools.codegen.meta.GeneratorMetadata;
import org.openapitools.codegen.meta.Stability;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.time.OffsetDateTime;
import java.time.Instant;
import java.time.temporal.ChronoField;
import java.util.concurrent.TimeUnit;
import static org.openapitools.codegen.utils.StringUtils.camelize;
public class Swift5ClientCodegen extends DefaultCodegen implements CodegenConfig {
private final Logger LOGGER = LoggerFactory.getLogger(Swift5ClientCodegen.class);
public static final String PROJECT_NAME = "projectName";
public static final String RESPONSE_AS = "responseAs";
public static final String OBJC_COMPATIBLE = "objcCompatible";
public static final String POD_SOURCE = "podSource";
public static final String POD_AUTHORS = "podAuthors";
public static final String POD_SOCIAL_MEDIA_URL = "podSocialMediaURL";
public static final String POD_LICENSE = "podLicense";
public static final String POD_HOMEPAGE = "podHomepage";
public static final String POD_SUMMARY = "podSummary";
public static final String POD_DESCRIPTION = "podDescription";
public static final String POD_SCREENSHOTS = "podScreenshots";
public static final String POD_DOCUMENTATION_URL = "podDocumentationURL";
public static final String READONLY_PROPERTIES = "readonlyProperties";
public static final String SWIFT_USE_API_NAMESPACE = "swiftUseApiNamespace";
public static final String DEFAULT_POD_AUTHORS = "OpenAPI Generator";
public static final String LENIENT_TYPE_CAST = "lenientTypeCast";
public static final String USE_SPM_FILE_STRUCTURE = "useSPMFileStructure";
public static final String SWIFT_PACKAGE_PATH = "swiftPackagePath";
public static final String USE_CLASSES = "useClasses";
public static final String USE_BACKTICK_ESCAPES = "useBacktickEscapes";
public static final String GENERATE_MODEL_ADDITIONAL_PROPERTIES = "generateModelAdditionalProperties";
public static final String HASHABLE_MODELS = "hashableModels";
public static final String MAP_FILE_BINARY_TO_DATA = "mapFileBinaryToData";
protected static final String LIBRARY_ALAMOFIRE = "alamofire";
protected static final String LIBRARY_URLSESSION = "urlsession";
protected static final String LIBRARY_VAPOR = "vapor";
protected static final String RESPONSE_LIBRARY_PROMISE_KIT = "PromiseKit";
protected static final String RESPONSE_LIBRARY_RX_SWIFT = "RxSwift";
protected static final String RESPONSE_LIBRARY_RESULT = "Result";
protected static final String RESPONSE_LIBRARY_COMBINE = "Combine";
protected static final String[] RESPONSE_LIBRARIES = {RESPONSE_LIBRARY_PROMISE_KIT, RESPONSE_LIBRARY_RX_SWIFT, RESPONSE_LIBRARY_RESULT, RESPONSE_LIBRARY_COMBINE};
protected String projectName = "OpenAPIClient";
protected boolean nonPublicApi = false;
protected boolean objcCompatible = false;
protected boolean lenientTypeCast = false;
protected boolean readonlyProperties = false;
protected boolean swiftUseApiNamespace = false;
protected boolean useSPMFileStructure = false;
protected String swiftPackagePath = "Classes" + File.separator + "OpenAPIs";
protected boolean useClasses = false;
protected boolean useBacktickEscapes = false;
protected boolean generateModelAdditionalProperties = true;
protected boolean hashableModels = true;
protected boolean mapFileBinaryToData = false;
protected String[] responseAs = new String[0];
protected String sourceFolder = swiftPackagePath;
protected HashSet objcReservedWords;
protected String apiDocPath = "docs/";
protected String modelDocPath = "docs/";
/**
* Constructor for the swift5 language codegen module.
*/
public Swift5ClientCodegen() {
super();
this.useOneOfInterfaces = true;
generatorMetadata = GeneratorMetadata.newBuilder(generatorMetadata)
.stability(Stability.STABLE)
.build();
outputFolder = "generated-code" + File.separator + "swift";
modelTemplateFiles.put("model.mustache", ".swift");
apiTemplateFiles.put("api.mustache", ".swift");
embeddedTemplateDir = templateDir = "swift5";
apiPackage = File.separator + "APIs";
modelPackage = File.separator + "Models";
modelDocTemplateFiles.put("model_doc.mustache", ".md");
apiDocTemplateFiles.put("api_doc.mustache", ".md");
languageSpecificPrimitives = new HashSet<>(
Arrays.asList(
"Int",
"Int32",
"Int64",
"Float",
"Double",
"Bool",
"Void",
"String",
"Data",
"Date",
"Character",
"UUID",
"URL",
"AnyObject",
"Any",
"Decimal")
);
defaultIncludes = new HashSet<>(
Arrays.asList(
"Data",
"Date",
"URL", // for file
"UUID",
"Array",
"Dictionary",
"Set",
"Any",
"Empty",
"AnyObject",
"Any",
"Decimal")
);
objcReservedWords = new HashSet<>(
Arrays.asList(
// Added for Objective-C compatibility
"id", "description", "NSArray", "NSURL", "CGFloat", "NSSet", "NSString", "NSInteger", "NSUInteger",
"NSError", "NSDictionary",
// 'Property 'hash' with type 'String' cannot override a property with type 'Int' (when objcCompatible=true)
"hash",
// Cannot override with a stored property 'className'
"className"
)
);
reservedWords = new HashSet<>(
Arrays.asList(
// name used by swift client
"ErrorResponse", "Response",
// Swift keywords. This list is taken from here:
// https://developer.apple.com/library/content/documentation/Swift/Conceptual/Swift_Programming_Language/LexicalStructure.html#//apple_ref/doc/uid/TP40014097-CH30-ID410
//
// Keywords used in declarations
"associatedtype", "class", "deinit", "enum", "extension", "fileprivate", "func", "import", "init",
"inout", "internal", "let", "open", "operator", "private", "protocol", "public", "static", "struct",
"subscript", "typealias", "var",
// Keywords used in statements
"break", "case", "continue", "default", "defer", "do", "else", "fallthrough", "for", "guard", "if",
"in", "repeat", "return", "switch", "where", "while",
// Keywords used in expressions and types
"as", "Any", "catch", "false", "is", "nil", "rethrows", "super", "self", "Self", "throw", "throws", "true", "try",
// Keywords used in patterns
"_",
// Keywords that begin with a number sign
"#available", "#colorLiteral", "#column", "#else", "#elseif", "#endif", "#file", "#fileLiteral", "#function", "#if",
"#imageLiteral", "#line", "#selector", "#sourceLocation",
// Keywords reserved in particular contexts
"associativity", "convenience", "dynamic", "didSet", "final", "get", "infix", "indirect", "lazy", "left",
"mutating", "none", "nonmutating", "optional", "override", "postfix", "precedence", "prefix", "Protocol",
"required", "right", "set", "Type", "unowned", "weak", "willSet",
//
// Swift Standard Library types
// https://developer.apple.com/documentation/swift
//
// Numbers and Basic Values
"Bool", "Int", "Double", "Float", "Range", "ClosedRange", "Error", "Optional",
// Special-Use Numeric Types
"UInt", "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64", "Float80", "Float32", "Float64",
// Strings and Text
"String", "Character", "Unicode", "StaticString",
// Collections
"Array", "Dictionary", "Set", "OptionSet", "CountableRange", "CountableClosedRange",
// The following are commonly-used Foundation types
"URL", "Data", "Codable", "Encodable", "Decodable",
// The following are other words we want to reserve
"Void", "AnyObject", "Class", "dynamicType", "COLUMN", "FILE", "FUNCTION", "LINE"
)
);
typeMapping = new HashMap<>();
typeMapping.put("array", "Array");
typeMapping.put("map", "Dictionary");
typeMapping.put("set", "Set");
typeMapping.put("date", "Date");
typeMapping.put("Date", "Date");
typeMapping.put("DateTime", "Date");
typeMapping.put("boolean", "Bool");
typeMapping.put("string", "String");
typeMapping.put("char", "Character");
typeMapping.put("short", "Int");
typeMapping.put("int", "Int");
typeMapping.put("long", "Int64");
typeMapping.put("integer", "Int");
typeMapping.put("Integer", "Int");
typeMapping.put("float", "Float");
typeMapping.put("number", "Double");
typeMapping.put("double", "Double");
typeMapping.put("file", "URL");
typeMapping.put("binary", "URL");
typeMapping.put("ByteArray", "Data");
typeMapping.put("UUID", "UUID");
typeMapping.put("URI", "String");
typeMapping.put("decimal", "Decimal");
typeMapping.put("object", "AnyCodable");
typeMapping.put("AnyType", "AnyCodable");
importMapping = new HashMap<>();
cliOptions.add(new CliOption(PROJECT_NAME, "Project name in Xcode"));
cliOptions.add(new CliOption(RESPONSE_AS,
"Optionally use libraries to manage response. Currently "
+ StringUtils.join(RESPONSE_LIBRARIES, ", ")
+ " are available."));
cliOptions.add(new CliOption(CodegenConstants.NON_PUBLIC_API,
CodegenConstants.NON_PUBLIC_API_DESC
+ "(default: false)"));
cliOptions.add(new CliOption(OBJC_COMPATIBLE,
"Add additional properties and methods for Objective-C "
+ "compatibility (default: false)"));
cliOptions.add(new CliOption(POD_SOURCE, "Source information used for Podspec"));
cliOptions.add(new CliOption(CodegenConstants.POD_VERSION, "Version used for Podspec"));
cliOptions.add(new CliOption(POD_AUTHORS, "Authors used for Podspec"));
cliOptions.add(new CliOption(POD_SOCIAL_MEDIA_URL, "Social Media URL used for Podspec"));
cliOptions.add(new CliOption(POD_LICENSE, "License used for Podspec"));
cliOptions.add(new CliOption(POD_HOMEPAGE, "Homepage used for Podspec"));
cliOptions.add(new CliOption(POD_SUMMARY, "Summary used for Podspec"));
cliOptions.add(new CliOption(POD_DESCRIPTION, "Description used for Podspec"));
cliOptions.add(new CliOption(POD_SCREENSHOTS, "Screenshots used for Podspec"));
cliOptions.add(new CliOption(POD_DOCUMENTATION_URL,
"Documentation URL used for Podspec"));
cliOptions.add(new CliOption(READONLY_PROPERTIES, "Make properties "
+ "readonly (default: false)"));
cliOptions.add(new CliOption(SWIFT_USE_API_NAMESPACE,
"Flag to make all the API classes inner-class "
+ "of {{projectName}}API"));
cliOptions.add(new CliOption(CodegenConstants.HIDE_GENERATION_TIMESTAMP,
CodegenConstants.HIDE_GENERATION_TIMESTAMP_DESC)
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(LENIENT_TYPE_CAST,
"Accept and cast values for simple types (string->bool, "
+ "string->int, int->string)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(USE_BACKTICK_ESCAPES,
"Escape reserved words using backticks (default: false)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(GENERATE_MODEL_ADDITIONAL_PROPERTIES,
"Generate model additional properties (default: true)")
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(CodegenConstants.API_NAME_PREFIX, CodegenConstants.API_NAME_PREFIX_DESC));
cliOptions.add(new CliOption(USE_SPM_FILE_STRUCTURE, "Use SPM file structure"
+ " and set the source path to Sources" + File.separator + "{{projectName}} (default: false)."));
cliOptions.add(new CliOption(SWIFT_PACKAGE_PATH, "Set a custom source path instead of "
+ projectName + File.separator + "Classes" + File.separator + "OpenAPIs" + "."));
cliOptions.add(new CliOption(USE_CLASSES, "Use final classes for models instead of structs (default: false)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(HASHABLE_MODELS,
"Make hashable models (default: true)")
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(MAP_FILE_BINARY_TO_DATA,
"[WARNING] This option will be removed and enabled by default in the future once we've enhanced the code to work with `Data` in all the different situations. Map File and Binary to Data (default: false)")
.defaultValue(Boolean.FALSE.toString()));
supportedLibraries.put(LIBRARY_URLSESSION, "[DEFAULT] HTTP client: URLSession");
supportedLibraries.put(LIBRARY_ALAMOFIRE, "HTTP client: Alamofire");
supportedLibraries.put(LIBRARY_VAPOR, "HTTP client: Vapor");
CliOption libraryOption = new CliOption(CodegenConstants.LIBRARY, "Library template (sub-template) to use");
libraryOption.setEnum(supportedLibraries);
libraryOption.setDefault(LIBRARY_URLSESSION);
cliOptions.add(libraryOption);
setLibrary(LIBRARY_URLSESSION);
}
private static CodegenModel reconcileProperties(CodegenModel codegenModel,
CodegenModel parentCodegenModel) {
// To support inheritance in this generator, we will analyze
// the parent and child models, look for properties that match, and remove
// them from the child models and leave them in the parent.
// Because the child models extend the parents, the properties
// will be available via the parent.
// Get the properties for the parent and child models
final List<CodegenProperty> parentModelCodegenProperties = parentCodegenModel.vars;
List<CodegenProperty> codegenProperties = codegenModel.vars;
codegenModel.allVars = new ArrayList<CodegenProperty>(codegenProperties);
codegenModel.parentVars = parentCodegenModel.allVars;
// Iterate over all of the parent model properties
boolean removedChildProperty = false;
for (CodegenProperty parentModelCodegenProperty : parentModelCodegenProperties) {
// Now that we have found a property in the parent class,
// search the child class for the same property.
Iterator<CodegenProperty> iterator = codegenProperties.iterator();
while (iterator.hasNext()) {
CodegenProperty codegenProperty = iterator.next();
if (codegenProperty.baseName.equals(parentModelCodegenProperty.baseName)) {
// We found a property in the child class that is
// a duplicate of the one in the parent, so remove it.
iterator.remove();
removedChildProperty = true;
}
}
}
if (removedChildProperty) {
codegenModel.vars = codegenProperties;
}
return codegenModel;
}
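// Illustrative example (hypothetical schemas): if a parent model `Animal`
// declares a property `name` and a child model `Cat` redeclares `name`,
// reconcileProperties removes `name` from `Cat` so the property is
// inherited from `Animal` rather than duplicated.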
@Override
public CodegenType getTag() {
return CodegenType.CLIENT;
}
@Override
public String getName() {
return "swift5";
}
@Override
public String getHelp() {
return "Generates a Swift 5.x client library.";
}
@Override
protected void addAdditionPropertiesToCodeGenModel(CodegenModel codegenModel,
Schema schema) {
final Schema additionalProperties = getAdditionalProperties(schema);
if (additionalProperties != null) {
Schema inner = null;
if (ModelUtils.isArraySchema(schema)) {
ArraySchema ap = (ArraySchema) schema;
inner = ap.getItems();
} else if (ModelUtils.isMapSchema(schema)) {
inner = getAdditionalProperties(schema);
}
codegenModel.additionalPropertiesType = inner != null ? getTypeDeclaration(inner) : getSchemaType(additionalProperties);
}
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("SWIFT_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable SWIFT_POST_PROCESS_FILE not defined so the Swift code may not be properly formatted. To define it, try 'export SWIFT_POST_PROCESS_FILE=/usr/local/bin/swiftformat' (Linux/Mac)");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
// Setup project name
if (additionalProperties.containsKey(PROJECT_NAME)) {
setProjectName((String) additionalProperties.get(PROJECT_NAME));
} else {
additionalProperties.put(PROJECT_NAME, projectName);
}
sourceFolder = projectName + File.separator + sourceFolder;
// Setup nonPublicApi option, which generates code with reduced access
// modifiers; allows embedding elsewhere without exposing non-public API calls
// to consumers
if (additionalProperties.containsKey(CodegenConstants.NON_PUBLIC_API)) {
setNonPublicApi(convertPropertyToBooleanAndWriteBack(CodegenConstants.NON_PUBLIC_API));
}
additionalProperties.put(CodegenConstants.NON_PUBLIC_API, nonPublicApi);
// Setup objcCompatible option, which adds additional properties
// and methods for Objective-C compatibility
if (additionalProperties.containsKey(OBJC_COMPATIBLE)) {
setObjcCompatible(convertPropertyToBooleanAndWriteBack(OBJC_COMPATIBLE));
}
additionalProperties.put(OBJC_COMPATIBLE, objcCompatible);
// add objc reserved words
if (Boolean.TRUE.equals(objcCompatible)) {
reservedWords.addAll(objcReservedWords);
}
if (additionalProperties.containsKey(RESPONSE_AS)) {
Object responseAsObject = additionalProperties.get(RESPONSE_AS);
if (responseAsObject instanceof String) {
setResponseAs(((String) responseAsObject).split(","));
} else {
setResponseAs((String[]) responseAsObject);
}
}
additionalProperties.put(RESPONSE_AS, responseAs);
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_PROMISE_KIT)) {
additionalProperties.put("usePromiseKit", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_RX_SWIFT)) {
additionalProperties.put("useRxSwift", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_RESULT)) {
additionalProperties.put("useResult", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_COMBINE)) {
additionalProperties.put("useCombine", true);
}
// Setup readonlyProperties option, which declares properties so they can only
// be set at initialization
if (additionalProperties.containsKey(READONLY_PROPERTIES)) {
setReadonlyProperties(convertPropertyToBooleanAndWriteBack(READONLY_PROPERTIES));
}
additionalProperties.put(READONLY_PROPERTIES, readonlyProperties);
// Setup swiftUseApiNamespace option, which makes all the API
// classes inner-class of {{projectName}}
if (additionalProperties.containsKey(SWIFT_USE_API_NAMESPACE)) {
setSwiftUseApiNamespace(convertPropertyToBooleanAndWriteBack(SWIFT_USE_API_NAMESPACE));
}
if (!additionalProperties.containsKey(POD_AUTHORS)) {
additionalProperties.put(POD_AUTHORS, DEFAULT_POD_AUTHORS);
}
if (additionalProperties.containsKey(USE_SPM_FILE_STRUCTURE)) {
setUseSPMFileStructure(convertPropertyToBooleanAndWriteBack(USE_SPM_FILE_STRUCTURE));
sourceFolder = "Sources" + File.separator + projectName;
}
if (additionalProperties.containsKey(SWIFT_PACKAGE_PATH) && ((String)additionalProperties.get(SWIFT_PACKAGE_PATH)).length() > 0) {
setSwiftPackagePath((String)additionalProperties.get(SWIFT_PACKAGE_PATH));
sourceFolder = swiftPackagePath;
}
if (additionalProperties.containsKey(USE_BACKTICK_ESCAPES)) {
setUseBacktickEscapes(convertPropertyToBooleanAndWriteBack(USE_BACKTICK_ESCAPES));
}
if (additionalProperties.containsKey(GENERATE_MODEL_ADDITIONAL_PROPERTIES)) {
setGenerateModelAdditionalProperties(convertPropertyToBooleanAndWriteBack(GENERATE_MODEL_ADDITIONAL_PROPERTIES));
}
additionalProperties.put(GENERATE_MODEL_ADDITIONAL_PROPERTIES, generateModelAdditionalProperties);
if (additionalProperties.containsKey(HASHABLE_MODELS)) {
setHashableModels(convertPropertyToBooleanAndWriteBack(HASHABLE_MODELS));
}
additionalProperties.put(HASHABLE_MODELS, hashableModels);
if (additionalProperties.containsKey(MAP_FILE_BINARY_TO_DATA)) {
setMapFileBinaryToData(convertPropertyToBooleanAndWriteBack(MAP_FILE_BINARY_TO_DATA));
}
additionalProperties.put(MAP_FILE_BINARY_TO_DATA, mapFileBinaryToData);
if (mapFileBinaryToData) {
typeMapping.put("file", "Data");
typeMapping.put("binary", "Data");
}
if (additionalProperties.containsKey(USE_CLASSES)) {
setUseClasses(convertPropertyToBooleanAndWriteBack(USE_CLASSES));
}
additionalProperties.put(USE_CLASSES, useClasses);
setLenientTypeCast(convertPropertyToBooleanAndWriteBack(LENIENT_TYPE_CAST));
// make api and model doc path available in mustache template
additionalProperties.put("apiDocPath", apiDocPath);
additionalProperties.put("modelDocPath", modelDocPath);
if (!getLibrary().equals(LIBRARY_VAPOR)) {
supportingFiles.add(new SupportingFile("Podspec.mustache",
"",
projectName + ".podspec"));
supportingFiles.add(new SupportingFile("Cartfile.mustache",
"",
"Cartfile"));
supportingFiles.add(new SupportingFile("CodableHelper.mustache",
sourceFolder,
"CodableHelper.swift"));
supportingFiles.add(new SupportingFile("OpenISO8601DateFormatter.mustache",
sourceFolder,
"OpenISO8601DateFormatter.swift"));
supportingFiles.add(new SupportingFile("JSONDataEncoding.mustache",
sourceFolder,
"JSONDataEncoding.swift"));
supportingFiles.add(new SupportingFile("JSONEncodingHelper.mustache",
sourceFolder,
"JSONEncodingHelper.swift"));
supportingFiles.add(new SupportingFile("git_push.sh.mustache",
"",
"git_push.sh"));
supportingFiles.add(new SupportingFile("SynchronizedDictionary.mustache",
sourceFolder,
"SynchronizedDictionary.swift"));
supportingFiles.add(new SupportingFile("XcodeGen.mustache",
"",
"project.yml"));
supportingFiles.add(new SupportingFile("APIHelper.mustache",
sourceFolder,
"APIHelper.swift"));
supportingFiles.add(new SupportingFile("Models.mustache",
sourceFolder,
"Models.swift"));
}
supportingFiles.add(new SupportingFile("Package.swift.mustache",
"",
"Package.swift"));
supportingFiles.add(new SupportingFile("Configuration.mustache",
sourceFolder,
"Configuration.swift"));
supportingFiles.add(new SupportingFile("Extensions.mustache",
sourceFolder,
"Extensions.swift"));
supportingFiles.add(new SupportingFile("APIs.mustache",
sourceFolder,
"APIs.swift"));
supportingFiles.add(new SupportingFile("gitignore.mustache",
"",
".gitignore"));
supportingFiles.add(new SupportingFile("README.mustache",
"",
"README.md"));
switch (getLibrary()) {
case LIBRARY_ALAMOFIRE:
additionalProperties.put("useAlamofire", true);
supportingFiles.add(new SupportingFile("AlamofireImplementations.mustache",
sourceFolder,
"AlamofireImplementations.swift"));
break;
case LIBRARY_URLSESSION:
additionalProperties.put("useURLSession", true);
supportingFiles.add(new SupportingFile("URLSessionImplementations.mustache",
sourceFolder,
"URLSessionImplementations.swift"));
break;
case LIBRARY_VAPOR:
additionalProperties.put("useVapor", true);
break;
default:
break;
}
}
public boolean isMapFileBinaryToData() {
return mapFileBinaryToData;
}
public void setMapFileBinaryToData(boolean mapFileBinaryToData) {
this.mapFileBinaryToData = mapFileBinaryToData;
}
@Override
protected boolean isReservedWord(String word) {
return word != null && reservedWords.contains(word); //don't lowercase as super does
}
@Override
public String escapeReservedWord(String name) {
if (this.reservedWordsMappings().containsKey(name)) {
return this.reservedWordsMappings().get(name);
}
return useBacktickEscapes && !objcCompatible ? "`" + name + "`" : "_" + name;
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder
+ modelPackage().replace('.', File.separatorChar);
}
@Override
public String apiFileFolder() {
return outputFolder + File.separator + sourceFolder
+ apiPackage().replace('.', File.separatorChar);
}
@Override
public String getTypeDeclaration(Schema p) {
if (ModelUtils.isArraySchema(p)) {
ArraySchema ap = (ArraySchema) p;
Schema inner = ap.getItems();
return ModelUtils.isSet(p) ? "Set<" + getTypeDeclaration(inner) + ">" : "[" + getTypeDeclaration(inner) + "]";
} else if (ModelUtils.isMapSchema(p)) {
Schema inner = getAdditionalProperties(p);
return "[String: " + getTypeDeclaration(inner) + "]";
}
return super.getTypeDeclaration(p);
}
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
if (languageSpecificPrimitives.contains(type) || defaultIncludes.contains(type)) {
return type;
}
} else {
type = openAPIType;
}
return toModelName(type);
}
@Override
public boolean isDataTypeFile(String dataType) {
return "URL".equals(dataType);
}
@Override
public boolean isDataTypeBinary(final String dataType) {
return "Data".equals(dataType);
}
/**
* Output the proper model name (capitalized).
*
* @param name the name of the model
* @return capitalized model name
*/
@Override
public String toModelName(String name) {
// FIXME parameter should not be assigned. Also declare it as "final"
name = sanitizeName(name);
if (!StringUtils.isEmpty(modelNameSuffix)) { // set model suffix
name = name + "_" + modelNameSuffix;
}
if (!StringUtils.isEmpty(modelNamePrefix)) { // set model prefix
name = modelNamePrefix + "_" + name;
}
// camelize the model name
// phone_number => PhoneNumber
name = camelize(name);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(name)) {
String modelName = "Model" + name;
LOGGER.warn("{} (reserved word) cannot be used as model name. Renamed to {}", name, modelName);
return modelName;
}
// model name starts with number
if (name.matches("^\\d.*")) {
// e.g. 200Response => Model200Response (after camelize)
String modelName = "Model" + name;
LOGGER.warn("{} (model name starts with number) cannot be used as model name. Renamed to {}", name,
modelName);
return modelName;
}
return name;
}
/**
* Return the capitalized file name of the model.
*
* @param name the model name
* @return the file name of the model
*/
@Override
public String toModelFilename(String name) {
// should be the same as the model name
return toModelName(name);
}
@Override
public String toDefaultValue(Schema p) {
if (p.getEnum() != null && !p.getEnum().isEmpty()) {
if (p.getDefault() != null) {
if (ModelUtils.isStringSchema(p)) {
return "." + toEnumVarName(escapeText((String) p.getDefault()), p.getType());
} else {
return "." + toEnumVarName(escapeText(p.getDefault().toString()), p.getType());
}
}
}
if (p.getDefault() != null) {
if (ModelUtils.isIntegerSchema(p) || ModelUtils.isNumberSchema(p) || ModelUtils.isBooleanSchema(p)) {
return p.getDefault().toString();
} else if (ModelUtils.isDateTimeSchema(p)) {
// Datetime time stamps in Swift are expressed as Seconds with Microsecond precision.
// In Java, we need to be creative to get the Timestamp in Microseconds as a long.
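                // Illustrative output for a hypothetical default of 2021-01-01T00:00:00Z:
                // Date(timeIntervalSince1970: 1609459200000000.0 / 1_000_000)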
Instant instant = ((OffsetDateTime) p.getDefault()).toInstant();
long epochMicro = TimeUnit.SECONDS.toMicros(instant.getEpochSecond()) + (instant.get(ChronoField.MICRO_OF_SECOND));
return "Date(timeIntervalSince1970: " + String.valueOf(epochMicro) + ".0 / 1_000_000)";
} else if (ModelUtils.isStringSchema(p)) {
return "\"" + escapeText((String) p.getDefault()) + "\"";
}
// TODO: Handle more cases from `ModelUtils`, such as Date
}
return null;
}
@Override
public String toInstantiationType(Schema p) {
if (ModelUtils.isMapSchema(p)) {
return getSchemaType(getAdditionalProperties(p));
} else if (ModelUtils.isArraySchema(p)) {
ArraySchema ap = (ArraySchema) p;
String inner = getSchemaType(ap.getItems());
return ModelUtils.isSet(p) ? "Set<" + inner + ">" : "[" + inner + "]";
}
return null;
}
@Override
public String toApiName(String name) {
if (name.length() == 0) {
return "DefaultAPI";
}
return camelize(apiNamePrefix + "_" + name) + "API";
}
@Override
public String apiDocFileFolder() {
return (outputFolder + "/" + apiDocPath).replace("/", File.separator);
}
@Override
public String modelDocFileFolder() {
return (outputFolder + "/" + modelDocPath).replace("/", File.separator);
}
@Override
public String toModelDocFilename(String name) {
return toModelName(name);
}
@Override
public String toApiDocFilename(String name) {
return toApiName(name);
}
@Override
public String toOperationId(String operationId) {
operationId = camelize(sanitizeName(operationId), true);
// Throw exception if method name is empty.
// This should not happen but keep the check just in case
if (StringUtils.isEmpty(operationId)) {
throw new RuntimeException("Empty method name (operationId) not allowed");
}
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
String newOperationId = camelize(("call_" + operationId), true);
LOGGER.warn("{} (reserved word) cannot be used as method name. Renamed to {}", operationId, newOperationId);
return newOperationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
LOGGER.warn("{} (starting with a number) cannot be used as method name. Renamed to {}", operationId, camelize(sanitizeName("call_" + operationId), true));
operationId = camelize(sanitizeName("call_" + operationId), true);
}
return operationId;
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeName(name);
// if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved words surround with `` or append _
if (isReservedWord(name)) {
name = escapeReservedWord(name);
}
// for words starting with number, append _
if (name.matches("^\\d.*")) {
name = "_" + name;
}
return name;
}
@Override
public String toParamName(String name) {
// sanitize name
name = sanitizeName(name);
// replace - with _ e.g. created-at => created_at
name = name.replaceAll("-", "_");
// if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize(lower) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved words surround with ``
if (isReservedWord(name)) {
name = escapeReservedWord(name);
}
// for words starting with number, append _
if (name.matches("^\\d.*")) {
name = "_" + name;
}
return name;
}
@Override
public CodegenModel fromModel(String name, Schema model) {
Map<String, Schema> allDefinitions = ModelUtils.getSchemas(this.openAPI);
CodegenModel codegenModel = super.fromModel(name, model);
if (codegenModel.description != null) {
codegenModel.imports.add("ApiModel");
}
if (allDefinitions != null) {
String parentSchema = codegenModel.parentSchema;
// multilevel inheritance: reconcile properties of all the parents
while (parentSchema != null) {
final Schema parentModel = allDefinitions.get(parentSchema);
final CodegenModel parentCodegenModel = super.fromModel(codegenModel.parent,
parentModel);
codegenModel = Swift5ClientCodegen.reconcileProperties(codegenModel, parentCodegenModel);
// get the next parent
parentSchema = parentCodegenModel.parentSchema;
}
}
if (hashableModels) {
codegenModel.vendorExtensions.put("x-swift-hashable", true);
}
return codegenModel;
}
public void setProjectName(String projectName) {
this.projectName = projectName;
}
public void setNonPublicApi(boolean nonPublicApi) {
this.nonPublicApi = nonPublicApi;
}
public void setObjcCompatible(boolean objcCompatible) {
this.objcCompatible = objcCompatible;
}
public void setLenientTypeCast(boolean lenientTypeCast) {
this.lenientTypeCast = lenientTypeCast;
}
public void setReadonlyProperties(boolean readonlyProperties) {
this.readonlyProperties = readonlyProperties;
}
public void setResponseAs(String[] responseAs) {
this.responseAs = responseAs;
}
public void setSwiftUseApiNamespace(boolean swiftUseApiNamespace) {
this.swiftUseApiNamespace = swiftUseApiNamespace;
}
public void setUseSPMFileStructure(boolean useSPMFileStructure) {
this.useSPMFileStructure = useSPMFileStructure;
}
public void setSwiftPackagePath(String swiftPackagePath) {
this.swiftPackagePath = swiftPackagePath;
}
public void setUseClasses(boolean useClasses) {
this.useClasses = useClasses;
}
public void setUseBacktickEscapes(boolean useBacktickEscapes) {
this.useBacktickEscapes = useBacktickEscapes;
}
public void setGenerateModelAdditionalProperties(boolean generateModelAdditionalProperties) {
this.generateModelAdditionalProperties = generateModelAdditionalProperties;
}
public void setHashableModels(boolean hashableModels) {
this.hashableModels = hashableModels;
}
@Override
public String toEnumValue(String value, String datatype) {
// for string, array of string
if ("String".equals(datatype) || "[String]".equals(datatype) || "[String: String]".equals(datatype)) {
return "\"" + String.valueOf(value) + "\"";
} else {
return String.valueOf(value);
}
}
@Override
public String toEnumDefaultValue(String value, String datatype) {
return datatype + "_" + value;
}
@Override
public String toEnumVarName(String name, String datatype) {
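        // Illustrative conversions (hypothetical inputs): "foo-bar" -> "fooBar",
        // "123abc" -> "_123abc"; symbols, reserved words and numeric enum types
        // are handled separately below.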
if (name.length() == 0) {
return "empty";
}
Pattern startWithNumberPattern = Pattern.compile("^\\d+");
Matcher startWithNumberMatcher = startWithNumberPattern.matcher(name);
if (startWithNumberMatcher.find()) {
String startingNumbers = startWithNumberMatcher.group(0);
String nameWithoutStartingNumbers = name.substring(startingNumbers.length());
return "_" + startingNumbers + camelize(nameWithoutStartingNumbers, true);
}
// for symbol, e.g. $, #
if (getSymbolName(name) != null) {
return camelize(WordUtils.capitalizeFully(getSymbolName(name).toUpperCase(Locale.ROOT)), true);
}
// Camelize only when we have a structure defined below
Boolean camelized = false;
if (name.matches("[A-Z][a-z0-9]+[a-zA-Z0-9]*")) {
name = camelize(name, true);
camelized = true;
}
// Reserved Name
String nameLowercase = StringUtils.lowerCase(name);
if (isReservedWord(nameLowercase)) {
return escapeReservedWord(nameLowercase);
}
// Check for numerical conversions
if ("Int".equals(datatype) || "Int32".equals(datatype) || "Int64".equals(datatype)
|| "Float".equals(datatype) || "Double".equals(datatype)) {
String varName = "number" + camelize(name);
varName = varName.replaceAll("-", "minus");
varName = varName.replaceAll("\\+", "plus");
varName = varName.replaceAll("\\.", "dot");
return varName;
}
// If we have already camelized the word, don't progress
// any further
if (camelized) {
return name;
}
char[] separators = {'-', '_', ' ', ':', '(', ')'};
return camelize(WordUtils.capitalizeFully(StringUtils.lowerCase(name), separators)
.replaceAll("[-_ :\\(\\)]", ""),
true);
}
@Override
public String toEnumName(CodegenProperty property) {
String enumName = toModelName(property.name);
// Ensure that the enum type doesn't match a reserved word or
// the variable name doesn't match the generated enum type or the
// Swift compiler will generate an error
if (isReservedWord(property.datatypeWithEnum)
|| toVarName(property.name).equals(property.datatypeWithEnum)) {
enumName = property.datatypeWithEnum + "Enum";
}
// TODO: toModelName already does something for names starting with number,
// so this code is probably never called
if (enumName.matches("\\d.*")) { // starts with number
return "_" + enumName;
} else {
return enumName;
}
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
Map<String, Object> postProcessedModelsEnum = postProcessModelsEnum(objs);
// We iterate through the list of models, and also iterate through each of the
// properties for each model. For each property, if:
//
// CodegenProperty.name != CodegenProperty.baseName
//
// then we set
//
// CodegenProperty.vendorExtensions["x-codegen-escaped-property-name"] = true
//
// Also, if any property in the model has x-codegen-escaped-property-name=true, then we mark:
//
// CodegenModel.vendorExtensions["x-codegen-has-escaped-property-names"] = true
//
List<Object> models = (List<Object>) postProcessedModelsEnum.get("models");
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
CodegenModel cm = (CodegenModel) mo.get("model");
boolean modelHasPropertyWithEscapedName = false;
for (CodegenProperty prop : cm.allVars) {
if (!prop.name.equals(prop.baseName)) {
prop.vendorExtensions.put("x-codegen-escaped-property-name", true);
modelHasPropertyWithEscapedName = true;
}
}
if (modelHasPropertyWithEscapedName) {
cm.vendorExtensions.put("x-codegen-has-escaped-property-names", true);
}
}
return postProcessedModelsEnum;
}
@Override
public void postProcessModelProperty(CodegenModel model, CodegenProperty property) {
super.postProcessModelProperty(model, property);
boolean isSwiftScalarType = property.isInteger || property.isLong || property.isFloat
|| property.isDouble || property.isBoolean;
if ((!property.required || property.isNullable) && isSwiftScalarType) {
// Optional scalar types like Int?, Int64?, Float?, Double?, and Bool?
// do not translate to Objective-C. So we want to flag those
// properties in case we want to put special code in the templates
// which provide Objective-C compatibility.
property.vendorExtensions.put("x-swift-optional-scalar", true);
}
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*");
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String swiftPostProcessFile = System.getenv("SWIFT_POST_PROCESS_FILE");
if (StringUtils.isEmpty(swiftPostProcessFile)) {
return; // skip if SWIFT_POST_PROCESS_FILE env variable is not defined
}
// only process files with swift extension
if ("swift".equals(FilenameUtils.getExtension(file.toString()))) {
String command = swiftPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
int exitValue = p.waitFor();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit value: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: {}", command);
}
} catch (InterruptedException | IOException e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
// Restore interrupted state
Thread.currentThread().interrupt();
}
}
}
@Override
public Map<String, Object> postProcessOperationsWithModels(Map<String, Object> objs, List<Object> allModels) {
Map<String, Object> objectMap = (Map<String, Object>) objs.get("operations");
HashMap<String, CodegenModel> modelMaps = new HashMap<String, CodegenModel>();
for (Object o : allModels) {
HashMap<String, Object> h = (HashMap<String, Object>) o;
CodegenModel m = (CodegenModel) h.get("model");
modelMaps.put(m.classname, m);
}
List<CodegenOperation> operations = (List<CodegenOperation>) objectMap.get("operation");
for (CodegenOperation operation : operations) {
for (CodegenParameter cp : operation.allParams) {
cp.vendorExtensions.put("x-swift-example", constructExampleCode(cp, modelMaps, new HashSet<String>()));
}
}
return objs;
}
public String constructExampleCode(CodegenParameter codegenParameter, HashMap<String, CodegenModel> modelMaps, Set<String> visitedModels) {
if (codegenParameter.isArray) { // array
return "[" + constructExampleCode(codegenParameter.items, modelMaps, visitedModels) + "]";
} else if (codegenParameter.isMap) { // TODO: map, file type
return "\"TODO\"";
} else if (languageSpecificPrimitives.contains(codegenParameter.dataType)) { // primitive type
if ("String".equals(codegenParameter.dataType) || "Character".equals(codegenParameter.dataType)) {
                if (StringUtils.isEmpty(codegenParameter.example)) {
                    // No example provided in the spec, fall back to a placeholder built from the parameter name.
                    return "\"" + codegenParameter.paramName + "_example\"";
                } else {
                    return "\"" + codegenParameter.example + "\"";
                }
} else if ("Bool".equals(codegenParameter.dataType)) { // boolean
if (Boolean.parseBoolean(codegenParameter.example)) {
return "true";
} else {
return "false";
}
} else if ("URL".equals(codegenParameter.dataType)) { // URL
return "URL(string: \"https://example.com\")!";
            } else if ("Data".equals(codegenParameter.dataType)) { // Data
return "Data([9, 8, 7])";
} else if ("Date".equals(codegenParameter.dataType)) { // date
return "Date()";
} else { // numeric
                if (StringUtils.isEmpty(codegenParameter.example)) {
                    // No example provided in the spec, fall back to a fixed numeric placeholder.
                    return "987";
                } else {
                    return codegenParameter.example;
                }
}
} else { // model
// look up the model
if (modelMaps.containsKey(codegenParameter.dataType)) {
if (visitedModels.contains(codegenParameter.dataType)) {
// recursive/self-referencing model, simply return nil to avoid stackoverflow
return "nil";
} else {
visitedModels.add(codegenParameter.dataType);
return constructExampleCode(modelMaps.get(codegenParameter.dataType), modelMaps, visitedModels);
}
} else {
//LOGGER.error("Error in constructing examples. Failed to look up the model " + codegenParameter.dataType);
return "TODO";
}
}
}
public String constructExampleCode(CodegenProperty codegenProperty, HashMap<String, CodegenModel> modelMaps, Set<String> visitedModels) {
if (codegenProperty.isArray) { // array
return "[" + constructExampleCode(codegenProperty.items, modelMaps, visitedModels) + "]";
} else if (codegenProperty.isMap) { // TODO: map, file type
return "\"TODO\"";
} else if (languageSpecificPrimitives.contains(codegenProperty.dataType)) { // primitive type
if ("String".equals(codegenProperty.dataType) || "Character".equals(codegenProperty.dataType)) {
                if (StringUtils.isEmpty(codegenProperty.example)) {
                    // No example provided in the spec, fall back to a placeholder built from the property name.
                    return "\"" + codegenProperty.name + "_example\"";
                } else {
                    return "\"" + codegenProperty.example + "\"";
                }
} else if ("Bool".equals(codegenProperty.dataType)) { // boolean
if (Boolean.parseBoolean(codegenProperty.example)) {
return "true";
} else {
return "false";
}
} else if ("URL".equals(codegenProperty.dataType)) { // URL
return "URL(string: \"https://example.com\")!";
} else if ("Date".equals(codegenProperty.dataType)) { // date
return "Date()";
} else { // numeric
                if (StringUtils.isEmpty(codegenProperty.example)) {
                    // No example provided in the spec, fall back to a fixed numeric placeholder.
                    return "123";
                } else {
                    return codegenProperty.example;
                }
}
} else {
// look up the model
if (modelMaps.containsKey(codegenProperty.dataType)) {
if (visitedModels.contains(codegenProperty.dataType)) {
// recursive/self-referencing model, simply return nil to avoid stackoverflow
return "nil";
} else {
visitedModels.add(codegenProperty.dataType);
return constructExampleCode(modelMaps.get(codegenProperty.dataType), modelMaps, visitedModels);
}
} else {
//LOGGER.error("Error in constructing examples. Failed to look up the model " + codegenProperty.dataType);
return "\"TODO\"";
}
}
}
public String constructExampleCode(CodegenModel codegenModel, HashMap<String, CodegenModel> modelMaps, Set<String> visitedModels) {
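        // Builds an initializer-style example, e.g. (for a hypothetical model)
        // `Pet(id: 123, name: "name_example")`, recursing into nested model properties.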
String example;
example = codegenModel.name + "(";
List<String> propertyExamples = new ArrayList<>();
for (CodegenProperty codegenProperty : codegenModel.vars) {
propertyExamples.add(codegenProperty.name + ": " + constructExampleCode(codegenProperty, modelMaps, visitedModels));
}
example += StringUtils.join(propertyExamples, ", ");
example += ")";
return example;
}
@Override
public void postProcess() {
System.out.println("################################################################################");
System.out.println("# Thanks for using OpenAPI Generator. #");
System.out.println("# Please consider donation to help us maintain this project \uD83D\uDE4F #");
System.out.println("# https://opencollective.com/openapi_generator/donate #");
System.out.println("# #");
System.out.println("# swift5 generator is contributed by Bruno Coelho (https://github.com/4brunu). #");
System.out.println("# Please support his work directly via https://paypal.com/paypalme/4brunu \uD83D\uDE4F #");
System.out.println("################################################################################");
}
}
| [
"\"SWIFT_POST_PROCESS_FILE\"",
"\"SWIFT_POST_PROCESS_FILE\""
]
| []
| [
"SWIFT_POST_PROCESS_FILE"
]
| [] | ["SWIFT_POST_PROCESS_FILE"] | java | 1 | 0 | |
tests/test_http.py | import logging
import io
import pytest
import os
import json
from requests.adapters import Response
from http import HTTPStatus
from typing import Dict, Any
from unittest.mock import patch
from pypwext.pwlogging import PyPwExtLogger
from pypwext.errors import PyPwExtError, PyPwExtHTTPError
from pypwext.pwhttp import LambdaResponse, PyPwExtHTTPSession
from .test_logging import get_new_logger_name
def test_no_region_and_no_aws_regin_env_var_raises_error():
with pytest.raises(PyPwExtError):
with PyPwExtHTTPSession():
pass
def test_default_pypwext_http_session_produces_no_json_content_type_and_accept_headers():
try:
os.environ['AWS_REGION'] = 'eu-west-1'
with io.StringIO() as s:
logger = PyPwExtLogger(
service=get_new_logger_name(),
logger_handler=logging.StreamHandler(s),
level=logging.DEBUG
)
with PyPwExtHTTPSession(logger=logger, api_gateway_mapping=False) as http:
http.get(
'https://api.openaq.org/v1/cities',
params={'country': 'SE'},
verify=False
)
value = s.getvalue()
vd = json.loads(value.splitlines()[0])
assert '"url":"https://api.openaq.org/v1/cities?country=SE"' in value
assert '"Connection":"keep-alive"' in value
assert '"Content-Type":"application/json"' in value
assert '"status":200' in value
assert '"name":"openaq-api"' in value
assert f'"service":"{logger.service}"' in value
assert vd['message']['request']['headers']['Content-Type'] == 'application/json'
assert vd['message']['request']['headers']['Accept'] == 'application/json'
finally:
del os.environ['AWS_REGION']
def test_default_headers_are_merged_with_explicit_set():
try:
os.environ['AWS_REGION'] = 'eu-west-1'
with io.StringIO() as s:
logger = PyPwExtLogger(
service=get_new_logger_name(),
logger_handler=logging.StreamHandler(s),
level=logging.DEBUG
)
with PyPwExtHTTPSession(
logger=logger,
api_gateway_mapping=False,
headers={
'Content-Type': 'application/text',
'Accept': 'application/json'
}
) as http:
http.get(
'https://api.openaq.org/v1/cities',
params={'country': 'SE'},
headers={'Content-Type': 'application/json'},
verify=False
)
value = json.loads(s.getvalue().splitlines()[0])
headers = value['message']['request']['headers']
assert headers.get('Content-Type') == 'application/json'
assert headers.get('Accept') == 'application/json'
finally:
del os.environ['AWS_REGION']
@pytest.mark.skip(reason="must setup a lambda environment on github account")
def test_api_gateway_execute_adds_headers():
import requests
the_headers = None
def send(request, **kwargs):
nonlocal the_headers
the_headers = request.headers
try:
os.environ['AWS_REGION'] = 'eu-west-1'
os.environ['AWS_ACCESS_KEY_ID'] = 'test'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'test'
with patch.object(
requests.sessions.Session, 'send', side_effect=send
):
with PyPwExtHTTPSession(api_gateway_mapping=False) as http:
http.get(
'https://abc123.execute-api.eu-west-1.amazonaws.com/dev/cities',
params={'country': 'SE'}
)
assert the_headers['x-amz-date'] is not None
assert 'Credential=test' in the_headers['Authorization']
assert 'SignedHeaders=host;x-amz-date' in the_headers['Authorization']
assert 'AWS4-HMAC-SHA256' in the_headers['Authorization']
assert 'Signature=' in the_headers['Authorization']
finally:
del os.environ['AWS_REGION']
del os.environ['AWS_ACCESS_KEY_ID']
del os.environ['AWS_SECRET_ACCESS_KEY']
def test_decorator_simple():
with PyPwExtHTTPSession(api_gateway_mapping=False) as http:
@http.method(url='https://api.openaq.org/v1/cities', params={'country': '{country}'}, verify_ssl=False)
def get_cities(country: str, response: Response = None) -> str:
if response.status_code == HTTPStatus.OK.value:
return response.text
else:
raise PyPwExtHTTPError(
code=response.status_code,
message=f'Failed to get cities from {country}'
)
value = get_cities(country='SE')
assert '{"country":"SE","name":"Västernorrland","city":"Västernorrland","count":81637329,"locations":2}' in value
def test_decorated_api_gw_auth():
import requests
the_headers = None
def send(request, **kwargs):
nonlocal the_headers
the_headers = request.headers
resp = requests.Response()
resp.status_code = 200
resp._content = f"message: {request.body.decode('utf-8')}".encode('utf-8')
resp.request = request
return resp
try:
os.environ['AWS_REGION'] = 'eu-west-1'
os.environ['AWS_ACCESS_KEY_ID'] = 'test'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'test'
with patch.object(
requests.sessions.Session, 'send', side_effect=send
):
with PyPwExtHTTPSession() as http:
@http.method(
method='POST',
url='https://{gw_id}.execute-api.{AWS_REGION}.amazonaws.com/dev/cities',
params={'country': '{country}'},
body='body'
)
def do_http(gw_id: str, country: str, body: str, response: requests.Response = None) -> str:
"""Gets the cities from a specific country."""
return f'processed response: {response.text}'
value = do_http('abc123', 'SE', 'the body')
assert 'processed response: message: the body' in value
assert the_headers['x-amz-date'] is not None
assert 'Credential=test' in the_headers['Authorization']
assert 'SignedHeaders=host;x-amz-date' in the_headers['Authorization']
assert 'AWS4-HMAC-SHA256' in the_headers['Authorization']
assert 'Signature=' in the_headers['Authorization']
finally:
del os.environ['AWS_REGION']
del os.environ['AWS_ACCESS_KEY_ID']
del os.environ['AWS_SECRET_ACCESS_KEY']
@pytest.mark.skip(reason="must setup a lambda on other account to test it from GitHub Actions")
def test_decorator_lambda_func():
with PyPwExtHTTPSession(api_gateway_mapping=False) as http:
@http.method(
method='FUNC',
url='arn:aws:lambda:eu-west-1:<account>:function:mario-unit-test-function',
params={'country': '{country}'}
)
def get_cities(country: str, response: LambdaResponse = None) -> str:
if response.StatusCode == HTTPStatus.OK.value:
return response.payload_as_text()
else:
raise PyPwExtHTTPError(
code=response.StatusCode,
message=f'Failed to get cities from {country}',
details={
'error': response.payload_as_text()
}
)
try:
value = get_cities(country='SE')
except Exception as e:
value = e.details['error']
assert '{"country": "SE", "name": "Västernorrland", "city": "Västernorrland", "count": 81637329, "locations": 2}' in value
@pytest.mark.skip(reason="must setup a lambda on other account to test it from GitHub Actions")
def test_manual_lambda_func_invoke():
with PyPwExtHTTPSession(api_gateway_mapping=False) as http:
result = http.func(
url='mario-unit-test-function',
params={'country': 'SE'})
assert result.StatusCode == HTTPStatus.OK.value
assert '"city": "Västernorrland"' in result.payload_as_text()
@pytest.mark.skip(reason="must setup a lambda on other account to test it from GitHub Actions")
def test_decorator_lambda_func_partial_arn():
with PyPwExtHTTPSession(api_gateway_mapping=False) as http:
@http.method(
method='FUNC',
url='function:mario-unit-test-function',
params={'country': '{country}'}
)
def get_cities(country: str, response: LambdaResponse = None) -> str:
if response.StatusCode == HTTPStatus.OK.value:
return response.payload_as_text()
else:
raise PyPwExtHTTPError(
code=response.StatusCode,
message=f'Failed to get cities from {country}',
details={
'error': response.payload_as_text()
}
)
try:
value = get_cities(country='SE')
except Exception as e:
value = e.details['error']
assert '{"country": "SE", "name": "Västernorrland", "city": "Västernorrland", "count": 81637329, "locations": 2}' in value
@pytest.mark.skip(reason="must setup a lambda on other account to test it from GitHub Actions")
def test_decorator_lambda_event():
with PyPwExtHTTPSession(api_gateway_mapping=False) as http:
@http.method(
method='EVENT',
url='arn:aws:lambda:eu-west-1:<account>:function:mario-unit-test-function',
body='body'
)
def get_cities(body: Dict[str, Any], response: LambdaResponse = None) -> str:
if response.StatusCode == HTTPStatus.ACCEPTED.value:
return json.dumps(response.ResponseMetadata)
else:
raise PyPwExtHTTPError(
code=response.StatusCode,
message=f'Failed to get cities from {body}',
details={
'error': response.payload_as_text()
}
)
try:
value = get_cities({'country': 'SE'})
except Exception as e:
value = e.details['error']
assert ' "HTTPStatusCode": 202' in value
| []
| []
| [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION"
]
| [] | ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"] | python | 3 | 0 | |
src/app.py | import os
import layout
import callbacks # layout needs to be defined before creating callbacks
import routes
import appserver
server = appserver.app.server
if __name__ == "__main__":
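    # Debug mode is driven by the DEBUG environment variable; for example (assuming a
    # local checkout), something like `DEBUG=true python src/app.py` would enable it.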
    debug_mode = os.getenv("DEBUG", "false") == "true"
    if debug_mode:
        print("Initiating server. Debug mode enabled.")
        # appserver.app.enable_dev_tools(debug=True)
    else:
        print("Initiating server.")
appserver.app.run_server(
debug=debug_mode,
host="0.0.0.0",
port=5000
) | []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
art/test/138-duplicate-classes-check/src/FancyLoader.java | /*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.lang.reflect.InvocationTargetException;
/**
* A class loader with atypical behavior: we try to load a private
* class implementation before asking the system or boot loader. This
* is used to create multiple classes with identical names in a single VM.
*
* If DexFile is available, we use that; if not, we assume we're not in
* Dalvik and instantiate the class with defineClass().
*
* The location of the DEX files and class data is dependent upon the
* test framework.
*/
public class FancyLoader extends ClassLoader {
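    // Typical usage in these tests (illustrative only, not taken from this file):
    //   FancyLoader loader = new FancyLoader(Main.class.getClassLoader());
    //   Class cls = loader.loadClass("SomeTestClass");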
/* this is where the "alternate" .class files live */
static final String CLASS_PATH = "classes-ex/";
/* this is the "alternate" DEX/Jar file */
static final String DEX_FILE = System.getenv("DEX_LOCATION") +
"/138-duplicate-classes-check-ex.jar";
/* on Dalvik, this is a DexFile; otherwise, it's null */
private Class mDexClass;
private Object mDexFile;
/**
* Construct FancyLoader, grabbing a reference to the DexFile class
* if we're running under Dalvik.
*/
public FancyLoader(ClassLoader parent) {
super(parent);
try {
mDexClass = parent.loadClass("dalvik.system.DexFile");
} catch (ClassNotFoundException cnfe) {
// ignore -- not running Dalvik
}
}
/**
* Finds the class with the specified binary name.
*
* We search for a file in CLASS_PATH or pull an entry from DEX_FILE.
* If we don't find a match, we throw an exception.
*/
protected Class<?> findClass(String name) throws ClassNotFoundException
{
if (mDexClass != null) {
return findClassDalvik(name);
} else {
return findClassNonDalvik(name);
}
}
/**
* Finds the class with the specified binary name, from a DEX file.
*/
private Class<?> findClassDalvik(String name)
throws ClassNotFoundException {
if (mDexFile == null) {
synchronized (FancyLoader.class) {
Constructor ctor;
/*
* Construct a DexFile object through reflection.
*/
try {
ctor = mDexClass.getConstructor(new Class[] {String.class});
} catch (NoSuchMethodException nsme) {
throw new ClassNotFoundException("getConstructor failed",
nsme);
}
try {
mDexFile = ctor.newInstance(DEX_FILE);
} catch (InstantiationException ie) {
throw new ClassNotFoundException("newInstance failed", ie);
} catch (IllegalAccessException iae) {
throw new ClassNotFoundException("newInstance failed", iae);
} catch (InvocationTargetException ite) {
throw new ClassNotFoundException("newInstance failed", ite);
}
}
}
/*
* Call DexFile.loadClass(String, ClassLoader).
*/
Method meth;
try {
meth = mDexClass.getMethod("loadClass",
new Class[] { String.class, ClassLoader.class });
} catch (NoSuchMethodException nsme) {
throw new ClassNotFoundException("getMethod failed", nsme);
}
try {
meth.invoke(mDexFile, name, this);
} catch (IllegalAccessException iae) {
throw new ClassNotFoundException("loadClass failed", iae);
} catch (InvocationTargetException ite) {
throw new ClassNotFoundException("loadClass failed",
ite.getCause());
}
return null;
}
/**
* Finds the class with the specified binary name, from .class files.
*/
private Class<?> findClassNonDalvik(String name)
throws ClassNotFoundException {
String pathName = CLASS_PATH + name + ".class";
//System.out.println("--- Fancy: looking for " + pathName);
File path = new File(pathName);
RandomAccessFile raf;
try {
raf = new RandomAccessFile(path, "r");
} catch (FileNotFoundException fnfe) {
throw new ClassNotFoundException("Not found: " + pathName);
}
/* read the entire file in */
byte[] fileData;
try {
fileData = new byte[(int) raf.length()];
raf.readFully(fileData);
} catch (IOException ioe) {
throw new ClassNotFoundException("Read error: " + pathName);
} finally {
try {
raf.close();
} catch (IOException ioe) {
// drop
}
}
/* create the class */
//System.out.println("--- Fancy: defining " + name);
try {
return defineClass(name, fileData, 0, fileData.length);
} catch (Throwable th) {
throw new ClassNotFoundException("defineClass failed", th);
}
}
/**
* Load a class.
*
* Normally a class loader wouldn't override this, but we want our
* version of the class to take precedence over an already-loaded
* version.
*
* We still want the system classes (e.g. java.lang.Object) from the
* bootstrap class loader.
*/
protected Class<?> loadClass(String name, boolean resolve)
throws ClassNotFoundException
{
Class res;
/*
* 1. Invoke findLoadedClass(String) to check if the class has
* already been loaded.
*
* This doesn't change.
*/
res = findLoadedClass(name);
if (res != null) {
System.out.println("FancyLoader.loadClass: "
+ name + " already loaded");
if (resolve)
resolveClass(res);
return res;
}
/*
* 3. Invoke the findClass(String) method to find the class.
*/
try {
res = findClass(name);
if (resolve)
resolveClass(res);
}
catch (ClassNotFoundException e) {
// we couldn't find it, so eat the exception and keep going
}
/*
* 2. Invoke the loadClass method on the parent class loader. If
* the parent loader is null the class loader built-in to the
* virtual machine is used, instead.
*
* (Since we're not in java.lang, we can't actually invoke the
* parent's loadClass() method, but we passed our parent to the
* super-class which can take care of it for us.)
*/
res = super.loadClass(name, resolve); // returns class or throws
return res;
}
}
| [
"\"DEX_LOCATION\""
]
| []
| [
"DEX_LOCATION"
]
| [] | ["DEX_LOCATION"] | java | 1 | 0 | |
cvat/apps/engine/task.py | # Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
import sys
import rq
import shlex
import shutil
import tempfile
import requests
import re
import xml.etree.ElementTree as ET
from threading import Thread
from io import BytesIO
from PIL import Image
from traceback import print_exception
from ast import literal_eval
from .handle_file_s3 import copyFileToOSByThread, deleteFolder, getFileUrl, copyFileToOS, uploadFile, downloadFile, getBucketConnection
from .segmentation import process_watershed
import numpy as np
import urllib
import ssl
from imutils.video import FPS
import argparse
import imutils
import cv2
import threading
import time
import json
import errno
import skvideo.io
import mimetypes
_SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
_MEDIA_MIMETYPES_FILE = os.path.join(_SCRIPT_DIR, "media.mimetypes")
mimetypes.init(files=[_MEDIA_MIMETYPES_FILE])
from cvat.apps.engine.models import StatusChoice
from cvat.apps.engine import formatter
import django_rq
from django.forms.models import model_to_dict
from django.conf import settings
from django.core import serializers
from django.db import transaction
from django.db.models import Max
from ffmpy import FFmpeg
from pyunpack import Archive
from distutils.dir_util import copy_tree
from collections import OrderedDict
from django.contrib.auth.models import User
from . import models
from .log import slogger
############################# Global Variables
TRACKER_THREADS = {}
############################# Low Level server API
@transaction.atomic
def create_empty(params):
"""Create empty directory structure for a new task, add it to DB."""
db_task = models.Task()
db_task.name = params['task_name']
db_task.bug_tracker = params['bug_tracker_link']
db_task.path = ""
db_task.size = 0
db_task.owner = params['owner']
db_task.project = models.Projects.objects.get(pk=params['project'])
db_task.assignee = User.objects.get(pk=params['assignee'])
db_task.save()
task_path = os.path.join(settings.DATA_ROOT, str(db_task.id))
db_task.set_task_dirname(task_path)
task_path = db_task.get_task_dirname()
if os.path.isdir(task_path):
shutil.rmtree(task_path)
os.mkdir(task_path)
upload_dir = db_task.get_upload_dirname()
os.makedirs(upload_dir)
output_dir = db_task.get_data_dirname()
os.makedirs(output_dir)
return db_task
def create(tid, params):
"""Schedule the task"""
q = django_rq.get_queue('default')
q.enqueue_call(func=_create_thread, args=(tid, params),
job_id="task.create/{}".format(tid))
def check(tid):
"""Check status of the scheduled task"""
response = {}
queue = django_rq.get_queue('default')
job = queue.fetch_job("task.create/{}".format(tid))
if job is None:
response = {"state": "unknown"}
elif job.is_failed:
response = {"state": "error", "stderr": "Could not create the task. " + job.exc_info }
elif job.is_finished:
destFile = r'/home/django/data/' + str(tid) + r'/data/xml/annotations.txt'
if os.path.exists(destFile):
with open(destFile, 'r') as f:
fileData = f.read()
response = {"state": "created", "annotationFile" : fileData, "tid" : tid}
else:
response = {"state": "created"}
else:
response = {"state": "started"}
if 'status' in job.meta:
response['status'] = job.meta['status']
return response
@transaction.atomic
def delete(tid):
"""Delete the task"""
db_task = models.Task.objects.select_for_update().get(pk=tid)
if db_task:
db_task.delete()
shutil.rmtree(db_task.get_task_dirname(), ignore_errors=True)
threads = deleteFolder(db_task.get_task_dirname())
for t in threads:
t.join()
else:
raise Exception("The task doesn't exist")
@transaction.atomic
def update(tid, labels, score, assignee):
    """Update the labels, score and assignee for the task."""
db_task = models.Task.objects.select_for_update().get(pk=tid)
db_labels = list(db_task.label_set.prefetch_related('attributespec_set').all())
if (labels):
new_labels = _parse_labels(labels)
old_labels = _parse_db_labels(db_labels)
for label_name in new_labels:
if label_name in old_labels:
db_label = [l for l in db_labels if l.name == label_name][0]
for attr_name in new_labels[label_name]:
if attr_name in old_labels[label_name]:
db_attr = [attr for attr in db_label.attributespec_set.all()
if attr.get_name() == attr_name][0]
new_attr = new_labels[label_name][attr_name]
old_attr = old_labels[label_name][attr_name]
if new_attr['prefix'] != old_attr['prefix']:
raise Exception("new_attr['prefix'] != old_attr['prefix']")
if new_attr['type'] != old_attr['type']:
raise Exception("new_attr['type'] != old_attr['type']")
if set(old_attr['values']) - set(new_attr['values']):
raise Exception("set(old_attr['values']) - set(new_attr['values'])")
db_attr.text = "{}{}={}:{}".format(new_attr['prefix'],
new_attr['type'], attr_name, ",".join(new_attr['values']))
db_attr.save()
else:
db_attr = models.AttributeSpec()
attr = new_labels[label_name][attr_name]
db_attr.text = "{}{}={}:{}".format(attr['prefix'],
attr['type'], attr_name, ",".join(attr['values']))
db_attr.label = db_label
db_attr.save()
else:
db_label = models.Label()
db_label.name = label_name
db_label.task = db_task
db_label.save()
for attr_name in new_labels[label_name]:
db_attr = models.AttributeSpec()
attr = new_labels[label_name][attr_name]
db_attr.text = "{}{}={}:{}".format(attr['prefix'],
attr['type'], attr_name, ",".join(attr['values']))
db_attr.label = db_label
db_attr.save()
db_task.assignee = User.objects.get(pk=assignee)
    # A score of -1 from the client means no score was supplied (the project's has_score
    # attribute is set to true), so leave the stored score unchanged.
if (score != -1):
db_task.score = score
db_task.save()
@transaction.atomic
def updateProperties(tid, properties):
db_task = models.Task.objects.select_for_update().get(pk=tid)
newFrameProperties = _parse_frameproperties(properties)
for frameprop in newFrameProperties:
db_taskframespec = models.TaskFrameSpec()
db_taskframespec.task = db_task
db_framepropvals = models.FrameProperties.objects.get(prop=frameprop[0], value=frameprop[1], project__pk=db_task.project.pk)
db_taskframespec.propVal = db_framepropvals
if (models.TaskFrameSpec.objects.filter(task__id=db_task.id, propVal__id=db_framepropvals.id).count() == 0):
db_taskframespec.save()
def get_frame_path(tid, frame):
"""Read corresponding frame for the task"""
db_task = models.Task.objects.get(pk=tid)
path = _get_frame_path(frame, db_task.get_data_dirname())
return path
def get_frame_watershed_path(tid, frame):
"""Read corresponding frame for the task"""
db_task = models.Task.objects.get(pk=tid)
path = _get_frame_watershed_path(frame, db_task.get_data_dirname())
return path
def get(tid):
"""Get the task as dictionary of attributes"""
db_task = models.Task.objects.get(pk=tid)
if db_task:
db_labels = db_task.label_set.prefetch_related('attributespec_set').order_by('-pk').all()
im_meta_data = get_image_meta_cache(db_task)
attributes = {}
for db_label in db_labels:
attributes[db_label.id] = {}
for db_attrspec in db_label.attributespec_set.all():
attributes[db_label.id][db_attrspec.id] = db_attrspec.text
db_segments = list(db_task.segment_set.prefetch_related('job_set').all())
segment_length = max(db_segments[0].stop_frame - db_segments[0].start_frame + 1, 1)
job_indexes = []
for segment in db_segments:
db_job = segment.job_set.first()
job_indexes.append({
"job_id": db_job.id,
"max_shape_id": db_job.max_shape_id,
})
labels_colors = models.LabelDetails.objects.filter(labelType__label__in=[db_label.name for db_label in db_labels])
response = {
"status": db_task.status,
"spec": {
"labels": OrderedDict((db_label.id, db_label.name) for db_label in db_labels),
"attributes": attributes,
"segmentation": {label_color.labelType.label: {"color": label_color.color, "label_type_id": label_color.labelType.id} for label_color in labels_colors}
},
"size": db_task.size,
"taskid": db_task.id,
"name": db_task.name,
"mode": db_task.mode,
"segment_length": segment_length,
"jobs": job_indexes,
"overlap": db_task.overlap,
"z_orded": db_task.z_order,
"flipped": db_task.flipped,
"score": db_task.score,
"image_meta_data": im_meta_data,
}
else:
raise Exception("Cannot find the task: {}".format(tid))
return response
@transaction.atomic
def save_job_status(jid, status, user):
db_job = models.Job.objects.select_related("segment__task").select_for_update().get(pk = jid)
db_task = db_job.segment.task
status = StatusChoice(status)
slogger.job[jid].info('changing job status from {} to {} by an user {}'.format(db_job.status, str(status), user))
db_job.status = status.value
db_job.save()
db_segments = list(db_task.segment_set.prefetch_related('job_set').all())
db_jobs = [db_segment.job_set.first() for db_segment in db_segments]
if len(list(filter(lambda x: StatusChoice(x.status) == StatusChoice.ANNOTATION, db_jobs))) > 0:
db_task.status = StatusChoice.ANNOTATION
elif len(list(filter(lambda x: StatusChoice(x.status) == StatusChoice.VALIDATION, db_jobs))) > 0:
db_task.status = StatusChoice.VALIDATION
else:
db_task.status = StatusChoice.COMPLETED
db_task.save()
class CSRTTrackerThread(threading.Thread):
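    # Tracks a single shape with OpenCV's CSRT tracker. `data` is expected to look like
    # (illustrative values): {"id": 7, "frame": 12, "positions": {"x": 10, "y": 20, "w": 50, "h": 40}}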
def __init__(self, data, base_dir, results):
threading.Thread.__init__(self)
self.data = data
self.base_dir = base_dir
self.results = results
self._stop_event = False
def stop(self):
self._stop_event = True
def stopped(self):
return self._stop_event
def run(self):
def _frame_path(frame, base_dir):
d1 = str(frame // 10000)
d2 = str(frame // 100)
path = os.path.join(d1, d2, str(frame) + '.jpg')
if base_dir:
path = os.path.join(base_dir, path)
return path
def _get_frame(currentFrame, base_dir):
# Download the requested frame
frame_path = _frame_path(currentFrame, base_dir)
downloadFile(settings.AWS_STORAGE_BUCKET_NAME, frame_path, frame_path)
return frame_path
tracker = cv2.TrackerCSRT_create()
currentFrame = self.data['frame']
frame_path = _get_frame(currentFrame, self.base_dir)
frame = cv2.imread(frame_path)
self.results[self.data['id']] = {'results': {}}
counter = 0
x = self.data['positions']['x']
y = self.data['positions']['y']
w = self.data['positions']['w']
h = self.data['positions']['h']
bbox = (x, y, w, h)
tracker.init(frame, bbox)
if os.environ.get('WITH_OS') == 'True':
os.remove(frame_path)
while ((not self.stopped()) and (counter < 10)):
currentFrame += 1
frame_path = _get_frame(currentFrame, self.base_dir)
frame = cv2.imread(frame_path)
if frame is None:
break
ok, bbox = tracker.update(frame)
if os.environ.get('WITH_OS') == 'True':
os.remove(frame_path)
(x, y, w, h) = [int(v) for v in bbox]
if (h == 0 and w == 0):
self.results[self.data['id']]['results'][currentFrame] = {'x': x, 'y': y, 'h': h, 'w': w}
break
self.results[self.data['id']]['results'][currentFrame] = {'x': x, 'y': y, 'h': h, 'w': w}
key = cv2.waitKey(1) & 0xFF
counter += 1
def track_shapes(data, tid):
base_dir='/home/django/data/%d/data' % (tid)
results = {}
shape = data['shapes'][0]
results[shape['id']] = {'results': {}}
thread = CSRTTrackerThread(shape, base_dir, results)
thread.start()
if tid not in TRACKER_THREADS:
TRACKER_THREADS[tid] = []
TRACKER_THREADS[tid].append(thread)
thread.join()
return results
def stop_tracking(tid):
for thread in TRACKER_THREADS[tid]:
thread.stop()
def download_vid(tid, currentTask):
base_dir = '/home/django/data/%d/data' % (tid)
vid_dir_path = os.path.join(base_dir, 'video')
if not os.path.isdir(vid_dir_path):
try:
os.mkdir(vid_dir_path)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(vid_dir_path)
os.mkdir(vid_dir_path)
if os.environ.get('WITH_OS') == 'True':
bucket = getBucketConnection()
for object_summary in bucket.objects.filter(Prefix=base_dir + "/video"):
currentTask["video path"] = object_summary.key
downloadFile(settings.AWS_STORAGE_BUCKET_NAME, currentTask["video path"], currentTask["video path"])
break
else:
currentTask["video path"] = os.path.join(vid_dir_path, os.listdir(vid_dir_path)[0])
currentTask["downloaded"] = True
currentTask["startedDownloading"] = False
def track_all_video(shape, currentTask):
vs = cv2.VideoCapture(currentTask["video path"].split("?")[0])
currentFrame = shape['frame']
vs.set(1, currentFrame)
box = (shape['positions']['x'], shape['positions']['y'], shape['positions']['h'], shape['positions']['w'])
ok, frame = vs.read()
# Add a tracker to each box in the frame
tracker = cv2.TrackerCSRT_create()
tracker.init(frame, box)
while not currentTask["shapes"][shape["id"]]["stopped"]:
currentFrame += 1
ok, frame = vs.read()
if frame is None:
break
ok, box = tracker.update(frame)
(x, y, h, w) = [int(v) for v in box]
if (h == 0 and w == 0):
break
# If the predicted position is lower than 0 the box is out of bounds.
xtl = x if x > 0 else 0
ytl = y if y > 0 else 0
# If the predicted position is greater than either the frame width or height the box is out of bounds.
xbr = shape["frameWidth"] if (x + w) > shape["frameWidth"] else (x + w)
ybr = shape["frameHeight"] if (y + h) > shape["frameHeight"] else (y + h)
currentTask["shapes"][shape["id"]]["positions"][currentFrame] = {"xtl": xtl, "ytl": ytl, "xbr": xbr, "ybr": ybr, "occluded": shape["occluded"], "z_order": shape["z_order"], "outside": shape["outside"]}
currentTask["shapes"][shape["id"]]["finished"] = True
def check_video_path(tid):
base_dir = '/home/django/data/%d/data' % (tid)
if os.environ.get('WITH_OS') == 'False':
return os.path.exists(base_dir + "/video")
else:
bucket = getBucketConnection()
objs = list(bucket.objects.filter(Prefix=base_dir + "/video"))
return len(objs) > 0
def watershed(tid, frame, draws, username):
frame_path = "/home/django/data/watershed/" + str(tid) + "/" + username + "/" + str(frame) + ".jpg"
watershed_path = get_frame_path(tid, frame).replace('.jpg', '_w.png')
cvImg = cv2.imread(frame_path)
print("start process")
overlay = process_watershed(cvImg, draws, tid, frame)
print("end process")
print("start save")
save_watershed_image(overlay, watershed_path)
print("end save")
# q = django_rq.get_queue('default')
# q.enqueue_call(func=save_watershed_matrix, args=(tid, frame, matrix),
# job_id="task/{}.frame/{}.save_matrix".format(tid, frame))
#result['polygons'] = polygons
#return result
def save_watershed_image(image, path):
im = Image.fromarray(image)
im.save(path)
def compress_matrix(matrix):
compressedMatrix = []
sequenceCount = 0
prevLabel = matrix[0][0]
    # Run-length encode the matrix: each run of n consecutive identical labels
    # (iterating over the flattened matrix) becomes a two-element array [label, n].
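    # Illustrative example (hypothetical labels): np.array([[1, 1, 0], [0, 2, 2]])
    # compresses to [[1, 2], [0, 2], [2, 2]].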
for currLabel in np.nditer(matrix):
if currLabel == prevLabel:
sequenceCount += 1
else:
compressedMatrix.append([prevLabel, sequenceCount])
sequenceCount = 1
prevLabel = currLabel
    # Flush the final run, which the loop above never appends.
    compressedMatrix.append([prevLabel, sequenceCount])
    return compressedMatrix
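# Illustrative sketch (not part of the original source): for a matrix
#   [[1, 1, 2],
#    [2, 2, 3]]
# np.nditer walks it row-major as 1, 1, 2, 2, 2, 3, so with the final flush above
# the run-length encoding returned is [[1, 2], [2, 3], [3, 1]].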
def save_watershed_matrix(tid, frame, matrix):
db_task = models.Task.objects.get(pk=tid)
models.Watershed.objects.update_or_create(task=db_task, frame=frame, defaults={'task':db_task, 'frame':frame, 'watershed':compress_matrix(matrix)})
def save_paintings(tid, frame, paintings):
db_task = models.Task.objects.get(pk=tid)
models.Watershed.objects.update_or_create(task=db_task, frame=frame, defaults={'task':db_task, 'frame':frame, 'paintings':paintings})
def get_paintings(tid, frame):
db_task = models.Task.objects.get(pk=tid)
db_frame_paintings = models.Watershed.objects.filter(task=db_task, frame=frame).first()
if db_frame_paintings is None:
paintings = []
else:
paintings = db_frame_paintings.paintings
return paintings
def get_task_byjob(jid):
"""Get the task by the jobid"""
db_job = models.Job.objects.select_related("segment__task").get(id=jid)
if db_job:
db_segment = db_job.segment
db_task = db_segment.task
return get(db_task.id)
else:
raise Exception("Cannot find the job: {}".format(jid))
return {}
def get_job(jid):
"""Get the job as dictionary of attributes"""
db_job = models.Job.objects.select_related("segment__task").get(id=jid)
if db_job:
db_segment = db_job.segment
db_task = db_segment.task
im_meta_data = get_image_meta_cache(db_task)
# Truncate extra image sizes
if db_task.mode == 'annotation':
im_meta_data['original_size'] = im_meta_data['original_size'][db_segment.start_frame:db_segment.stop_frame + 1]
db_labels = db_task.label_set.prefetch_related('attributespec_set').order_by('-pk').all()
attributes = {}
for db_label in db_labels:
attributes[db_label.id] = {}
for db_attrspec in db_label.attributespec_set.all():
attributes[db_label.id][db_attrspec.id] = db_attrspec.text
framePropertiesDict = {"allProperties": {}, "keyframeSpec": {}}
# Get all of the task frame spec rows related to the requested task.
taskFrameSpecQuerySet = db_task.taskframespec_set.all()
# Save the prop name, value name, and relation id for each row in the database for the task in a dictionary
for taskFrameSpec in taskFrameSpecQuerySet:
propName = taskFrameSpec.propVal.prop
valName = taskFrameSpec.propVal.value
propValId = taskFrameSpec.propVal.pk
# If the propName is not in the dictionary yet, add an empty dictionary to it
if (propName not in framePropertiesDict["allProperties"]):
framePropertiesDict["allProperties"][propName] = {}
framePropertiesDict["allProperties"][propName][valName] = propValId
keyframes = taskFrameSpec.keyframespec_set.all()
for keyframe in keyframes:
frame = keyframe.frame
if (frame not in framePropertiesDict["keyframeSpec"]):
framePropertiesDict["keyframeSpec"][frame] = {}
framePropertiesDict["keyframeSpec"][frame][propName] = propValId
labels_colors = models.LabelDetails.objects.filter(labelType__label__in=[db_label.name for db_label in db_labels])
commentsList = list(models.Comments.objects.filter(task=db_task).values_list('frame', 'comment'))
comments = {}
for comment in commentsList:
comments[comment[0]] = comment[1]
project = serializers.serialize('json', [db_task.project])
response = {
"status": db_job.status,
"labels": OrderedDict((db_label.id, db_label.name) for db_label in db_labels),
"frameProperties": framePropertiesDict,
"comments": comments,
"segmentation": {label_color.labelType.label: {"color": label_color.color, "label_type_id": label_color.labelType.id} for label_color in labels_colors},
"stop": db_segment.stop_frame,
"taskid": db_task.id,
"slug": db_task.name,
"jobid": jid,
"start": db_segment.start_frame,
"mode": db_task.mode,
"overlap": db_task.overlap,
"attributes": attributes,
"z_order": db_task.z_order,
"flipped": db_task.flipped,
"score": db_task.score,
"project": project,
"image_meta_data": im_meta_data,
"max_shape_id": db_job.max_shape_id,
"current": models.Task.objects.get(pk=db_task.id).last_viewed_frame, # db_task.last_viewed_frame returns the previous value from the database
}
else:
raise Exception("Cannot find the job: {}".format(jid))
return response
@transaction.atomic
def rq_handler(job, exc_type, exc_value, traceback):
tid = job.id.split('/')[1]
db_task = models.Task.objects.select_for_update().get(pk=tid)
with open(db_task.get_log_path(), "wt") as log_file:
print_exception(exc_type, exc_value, traceback, file=log_file)
db_task.delete()
return False
def nextJobIdByPriority(username, status, tid):
project = models.Task.objects.get(pk=tid).project
if not username == "staff_user":
currentUser = User.objects.get(username=username)
opened_tasks = models.Task.objects.filter(project=project, assignee=currentUser, status=status)
else:
currentUser = username
opened_tasks = models.Task.objects.filter(project=project, status=status)
if opened_tasks.exists():
max_score = opened_tasks.aggregate(maxscore=Max('score'))['maxscore']
if currentUser == "staff_user":
highest_priority_task = models.Task.objects.filter(project=project, status=status, score=max_score)
else:
highest_priority_task = models.Task.objects.filter(project=project, assignee=currentUser, status=status, score=max_score)
return models.Job.objects.get(segment__task=highest_priority_task[0]).id
else:
return "No task found"
############################# Internal implementation for server API
def _make_image_meta_cache(db_task, sorted_filenames=None):
with open(db_task.get_image_meta_cache_path(), 'w') as meta_file:
cache = {
'original_size': []
}
if db_task.mode == 'interpolation':
frame_0_url = getFileUrl(get_frame_path(db_task.id, 0))
image = Image.open(frame_0_url)
cache['original_size'].append({
'width': image.size[0],
'height': image.size[1]
})
image.close()
else:
filenames = []
if sorted_filenames is None:
for root, _, files in os.walk(db_task.get_upload_dirname()):
fullnames = map(lambda f: os.path.join(root, f), files)
images = filter(lambda x: _get_mime(x) == 'image', fullnames)
filenames.extend(images)
filenames.sort()
else:
filenames = sorted_filenames
for image_path in filenames:
image = Image.open(image_path)
cache['original_size'].append({
'width': image.size[0],
'height': image.size[1]
})
image.close()
meta_file.write(str(cache))
def get_image_meta_cache(db_task):
try:
with open(db_task.get_image_meta_cache_path()) as meta_cache_file:
return literal_eval(meta_cache_file.read())
except Exception:
_make_image_meta_cache(db_task)
with open(db_task.get_image_meta_cache_path()) as meta_cache_file:
return literal_eval(meta_cache_file.read())
def _get_mime(name):
mime = mimetypes.guess_type(name)
mime_type = mime[0]
encoding = mime[1]
# zip, rar, tar, tar.gz, tar.bz2, 7z, cpio
supportedArchives = ['application/zip', 'application/x-rar-compressed',
'application/x-tar', 'application/x-7z-compressed', 'application/x-cpio',
'gzip', 'bzip2']
if mime_type is not None:
if mime_type.startswith('video'):
return 'video'
elif mime_type in supportedArchives or encoding in supportedArchives:
return 'archive'
elif mime_type.startswith('image'):
return 'image'
else:
return 'empty'
else:
if os.path.isdir(name):
return 'directory'
else:
return 'empty'
def _get_frame_path(frame, base_dir):
d1 = str(frame // 10000)
d2 = str(frame // 100)
path = os.path.join(d1, d2, str(frame) + '.jpg')
if base_dir:
path = os.path.join(base_dir, path)
return path
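# Illustrative example (not in the original source): _get_frame_path(12345, '/data')
# returns '/data/1/123/12345.jpg', since 12345 // 10000 == 1 and 12345 // 100 == 123.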
def _parse_frameproperties(frameproperties):
parsed_frameprops = []
for row in frameproperties:
if (row['parent'] != '#' and row['parent'] != '$$$'):
parsed_frameprops.append(row['original']['path'].split("/"))
return parsed_frameprops
def _get_frame_watershed_path(frame, base_dir):
d1 = str(frame // 10000)
d2 = str(frame // 100)
path = os.path.join(d1, d2, str(frame) + '_w.png')
if base_dir:
path = os.path.join(base_dir, path)
return path
def _parse_labels(labels):
parsed_labels = OrderedDict()
last_label = ""
for token in shlex.split(labels):
if token[0] != "~" and token[0] != "@":
if token in parsed_labels:
raise ValueError("labels string is not corect. " +
"`{}` label is specified at least twice.".format(token))
parsed_labels[token] = {}
last_label = token
else:
attr = models.parse_attribute(token)
attr['text'] = token
if not attr['type'] in ['checkbox', 'radio', 'number', 'text', 'select']:
raise ValueError("labels string is not corect. " +
"`{}` attribute has incorrect type {}.".format(
attr['name'], attr['type']))
values = attr['values']
if attr['type'] == 'checkbox': # <prefix>checkbox=name:true/false
if not (len(values) == 1 and values[0] in ['true', 'false']):
raise ValueError("labels string is not corect. " +
"`{}` attribute has incorrect value.".format(attr['name']))
elif attr['type'] == 'number': # <prefix>number=name:min,max,step
try:
if len(values) != 3 or float(values[2]) <= 0 or \
float(values[0]) >= float(values[1]):
raise ValueError
except ValueError:
raise ValueError("labels string is not correct. " +
"`{}` attribute has incorrect format.".format(attr['name']))
if attr['name'] in parsed_labels[last_label]:
raise ValueError("labels string is not corect. " +
"`{}` attribute is specified at least twice.".format(attr['name']))
parsed_labels[last_label][attr['name']] = attr
return parsed_labels
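# Hypothetical illustration (not from the original source) of the label string this
# parser expects, based on the branches above: a bare token declares a label, and
# tokens starting with '@' or '~' attach attributes to the most recently declared
# label, e.g. something like
#   'car @select=model:sedan,hatchback ~checkbox=parked:false'
# would yield {'car': {<two attribute specs>}}, assuming models.parse_attribute
# understands that prefix syntax.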
def _parse_db_labels(db_labels):
result = []
for db_label in db_labels:
result += [db_label.name]
result += [attr.text for attr in db_label.attributespec_set.all()]
return _parse_labels(" ".join(result))
'''
Count all files, remove garbage (unknown mime types or extra dirs)
'''
def _prepare_paths(source_paths, target_paths, storage):
counters = {
"image": 0,
"directory": 0,
"video": 0,
"archive": 0
}
share_dirs_mapping = {}
share_files_mapping = {}
if storage == 'local':
        # Files were uploaded earlier. Remove trash if it exists. Count them.
for path in target_paths:
mime = _get_mime(path)
if mime in ['video', 'archive', 'image']:
counters[mime] += 1
else:
try:
os.remove(path)
except:
os.rmdir(path)
else:
# Files are available via mount share. Count them and separate dirs.
for source_path, target_path in zip(source_paths, target_paths):
mime = _get_mime(source_path)
if mime in ['directory', 'image', 'video', 'archive']:
counters[mime] += 1
if mime == 'directory':
share_dirs_mapping[source_path] = target_path
else:
share_files_mapping[source_path] = target_path
        # Remove directories if other files from them exist in the input paths
exclude = []
for dir_name in share_dirs_mapping.keys():
for patch in share_files_mapping.keys():
if dir_name in patch:
exclude.append(dir_name)
break
for excluded_dir in exclude:
del share_dirs_mapping[excluded_dir]
counters['directory'] = len(share_dirs_mapping.keys())
return (counters, share_dirs_mapping, share_files_mapping)
'''
Check whether the file set is valid.
Valid if:
1 video, 0 images and 0 dirs (interpolation mode)
1 archive, 0 images and 0 dirs (annotation mode)
Many images or many dirs with images (annotation mode), 0 archives and 0 videos
'''
def _valid_file_set(counters):
if (counters['image'] or counters['directory']) and (counters['video'] or counters['archive']):
return False
elif counters['video'] > 1 or (counters['video'] and (counters['archive'] or counters['image'] or counters['directory'])):
return False
elif counters['archive'] > 1 or (counters['archive'] and (counters['video'] or counters['image'] or counters['directory'])):
return False
return True
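# Illustrative checks against the rules above (not part of the original source):
#   _valid_file_set({'image': 0, 'directory': 0, 'video': 1, 'archive': 0})  # True  (interpolation)
#   _valid_file_set({'image': 5, 'directory': 1, 'video': 0, 'archive': 0})  # True  (annotation)
#   _valid_file_set({'image': 5, 'directory': 0, 'video': 1, 'archive': 0})  # False (mixed types)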
'''
Copy data from share to local
'''
def _copy_data_from_share(share_files_mapping, share_dirs_mapping):
for source_path in share_dirs_mapping:
copy_tree(source_path, share_dirs_mapping[source_path])
for source_path in share_files_mapping:
target_path = share_files_mapping[source_path]
target_dir = os.path.dirname(target_path)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
shutil.copyfile(source_path, target_path)
'''
Find and unpack archive in upload dir
'''
def _find_and_unpack_archive(upload_dir):
archive = None
for root, _, files in os.walk(upload_dir):
fullnames = map(lambda f: os.path.join(root, f), files)
archives = list(filter(lambda x: _get_mime(x) == 'archive', fullnames))
if len(archives):
archive = archives[0]
break
if archive:
Archive(archive).extractall(upload_dir)
os.remove(archive)
else:
raise Exception('Type defined as archive, but archives were not found.')
return archive
'''
Search a video in upload dir and split it by frames. Copy frames to target dirs
'''
def _find_and_extract_video(upload_dir, output_dir, db_task, job):
video = None
for root, _, files in os.walk(upload_dir):
fullnames = map(lambda f: os.path.join(root, f), files)
videos = list(filter(lambda x: _get_mime(x) == 'video', fullnames))
if len(videos):
video = videos[0]
break
if video:
job.meta['status'] = 'Video is being extracted..'
job.save_meta()
_dir, vid_name = os.path.split(video)
uploadFile(video, os.path.join(output_dir, 'video', vid_name))
frame_count = extract_frames(video, output_dir)
db_task.size += frame_count
else:
raise Exception("Video files were not found")
return video
def count_frames(path):
video = cv2.VideoCapture(path)
total = 0
# Try to count the frames using opencv property.
# If opencv can't count the frames, count them manually.
try:
        # VideoCapture.get returns a float value, so we need to convert it to int.
total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
except:
total = count_frames_manual(video)
video.release()
return total
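# Usage sketch (hypothetical path and numbers, not from the original code):
# count_frames('/tmp/clip.mp4') would return roughly 1800 for a 60-second clip at
# 30 fps, falling back to the manual frame-by-frame count when
# CAP_PROP_FRAME_COUNT is unavailable.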
def count_frames_manual(video):
total = 0
    # frameExists is a boolean returned from read that indicates whether or not
    # a frame was read.
(frameExists, _) = video.read()
# Continue to iterate over the video frames until the end of the video.
while frameExists:
total += 1
        # video.read() is a function that advances the pointer of the video and
        # returns whether or not the frame exists and the frame itself.
(frameExists, _) = video.read()
return total
def get_meta_data(source_path):
meta_data = skvideo.io.ffprobe(source_path)['video']
if '@nb_frames' not in meta_data:
meta_data['@nb_frames'] = count_frames(source_path)
return meta_data
def extract_frames(source_path, output_dir):
count = 0
threads = []
output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')
target_path = os.path.join(output, '%d.jpg')
LocalImagesPath = target_path
    # create a folder for this video and for the entire dataset (if it doesn't exist)
_dir, vid_name = os.path.split(source_path)
name = os.path.splitext(vid_name)[0]
save_dir = os.path.abspath(os.path.join(LocalImagesPath, name))
os.makedirs(save_dir)
# Parse the video
for frame_count, frame in protected_reader(source_path):
if frame is not False:
            img_path = os.path.join(save_dir, str(frame_count) + '.jpg')
            # Remove combing-lines (interlacing) effect from the image
deint_image = deinterlace(frame)
cv2.imwrite(img_path, deint_image[:, :,::-1]) # save image (cv2 uses BGR color channels so reverse)
image_dest_path = _get_frame_path(frame_count, output_dir)
count += 1
dirname = os.path.dirname(image_dest_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
t = copyFileToOSByThread(img_path, image_dest_path)
t.start()
threads.append(t)
else:
break
    threads = [t for t in threads if t.is_alive()]
for t in threads:
t.join()
return count
def protected_reader(src_path, max_frames=None):
"""A wrapper reader for skvideo.io.FFmpegReader to avoid crashing on a RuntimeError exception.
:param src_path: Path to the video file to be read.
:param max_frames: (default=None) Number of frames to read. If left as None will attempt to read the entire video.
:return: A tuple of frame_count, frame
"""
frame, reader, count = False, None, 0
metadata = get_meta_data(src_path)
if max_frames is None:
max_frames = metadata['@nb_frames']
video_codec = metadata['@codec_name']
reader = skvideo.io.FFmpegReader(filename=src_path, inputdict={'-vcodec': video_codec})
gen = reader.nextFrame()
while count < int(max_frames):
try:
frame = gen.__next__()
except Exception:
frame = False
reader.close()
finally:
yield count, frame
count += 1
try:
reader.close()
except Exception:
pass
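# Usage sketch (assumed example path and consumer, not from the original code):
#   for frame_idx, frame in protected_reader('/tmp/clip.mp4', max_frames=100):
#       if frame is False:
#           break  # decode error or end of stream
#       handle(frame)  # hypothetical consumer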
def deinterlace(image):
interpolation = cv2.INTER_LINEAR # cv2.INTER_NEAREST - fast, looks ok for tagging | cv2.INTER_LINEAR - slower, looks good
# if sample of image and tags
h, w, c = image.shape
# cut image in half
temp = image[::2, :, :] if h % 2 == 0 else image[:h-1:2, :, :]
return cv2.resize(temp, (w, h), interpolation=interpolation)
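# Sketch of the effect of deinterlace() above (illustration only): a 480x640x3 frame
# keeps its 240 even-numbered rows, which are then resized back to 480x640 to smooth
# out the interlacing comb artifacts.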
'''
Recursive search for all images in upload dir and compress them to RGB jpg with specified quality. Create symlinks for them.
'''
def _find_and_compress_images(upload_dir, output_dir, db_task, compress_quality, flip_flag, job):
filenames = []
for root, _, files in os.walk(upload_dir):
fullnames = map(lambda f: os.path.join(root, f), files)
images = filter(lambda x: _get_mime(x) == 'image', fullnames)
filenames.extend(images)
filenames.sort()
_make_image_meta_cache(db_task, filenames)
if len(filenames):
for idx, name in enumerate(filenames):
job.meta['status'] = 'Images are being compressed.. {}%'.format(idx * 100 // len(filenames))
job.save_meta()
compressed_name = os.path.splitext(name)[0] + '.jpg'
image = Image.open(name).convert('RGB')
if flip_flag:
image = image.transpose(Image.ROTATE_180)
image.save(compressed_name, quality=compress_quality, optimize=True)
image.close()
if compressed_name != name:
os.remove(name)
                # PIL::save uses the filename in order to define the image extension.
                # We need to save it as jpeg for compression and then rename the file,
                # otherwise the annotation file will contain invalid file names (with other extensions).
os.rename(compressed_name, name)
threads = []
for frame, image_orig_path in enumerate(filenames):
image_dest_path = _get_frame_path(frame, output_dir)
image_orig_path = os.path.abspath(image_orig_path)
db_task.size += 1
dirname = os.path.dirname(image_dest_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(image_orig_path, image_dest_path)
t = copyFileToOSByThread(image_orig_path, image_dest_path)
t.start()
threads.append(t)
        threads = [t for t in threads if t.is_alive()]
for t in threads:
t.join()
else:
raise Exception("Image files were not found")
return filenames
def _save_task_to_db(db_task, task_params):
db_task.overlap = min(db_task.size, task_params['overlap'])
db_task.mode = task_params['mode']
db_task.z_order = task_params['z_order']
db_task.flipped = task_params['flip']
db_task.score = task_params['score'] and task_params['score'] or 0 # Set to task_params['score'] unless its undefined, then 0.
db_task.video_id = task_params['video_id']
db_task.source = task_params['data']
segment_step = task_params['segment'] - db_task.overlap
for x in range(0, db_task.size, segment_step):
start_frame = x
stop_frame = min(x + task_params['segment'] - 1, db_task.size - 1)
slogger.glob.info("New segment for task #{}: start_frame = {}, \
stop_frame = {}".format(db_task.id, start_frame, stop_frame))
db_segment = models.Segment()
db_segment.task = db_task
db_segment.start_frame = start_frame
db_segment.stop_frame = stop_frame
db_segment.save()
db_job = models.Job()
db_job.segment = db_segment
db_job.save()
parsed_frameprops = _parse_frameproperties(task_params['frame_properties'])
for frameprop in parsed_frameprops:
db_taskframespec = models.TaskFrameSpec()
db_taskframespec.task = db_task
db_framepropvals = models.FrameProperties.objects.get(prop=frameprop[0], value=frameprop[1], project__pk=db_task.project.pk)
db_taskframespec.propVal = db_framepropvals
db_taskframespec.save()
parsed_labels = _parse_labels(task_params['labels'])
for label in parsed_labels:
db_label = models.Label()
db_label.task = db_task
db_label.name = label
db_label.save()
for attr in parsed_labels[label]:
db_attrspec = models.AttributeSpec()
db_attrspec.label = db_label
db_attrspec.text = parsed_labels[label][attr]['text']
db_attrspec.save()
db_task.save()
def _save_paths_to_db(task, files):
count = 0
for currFile in files:
db_task_source = models.TaskSource()
db_task_source.task = task
db_task_source.source_name = currFile
db_task_source.frame = count
count+=1
db_task_source.save()
def parseTxtToXml(fileData, taskId):
try:
# Getting image size
frame_0_url = getFileUrl(get_frame_path(taskId, 0))
width, height = Image.open(frame_0_url).size
    except Exception as ex:
        raise ex
return (formatter.parse_format(fileData, frame_0_url, width, height))
@transaction.atomic
def _create_thread(tid, params):
def raise_exception(images, dirs, videos, archives):
        raise Exception('Only one archive, one video or many images can be downloaded simultaneously. \
{} image(s), {} dir(s), {} video(s), {} archive(s) found'.format(images, dirs, videos, archives))
slogger.glob.info("create task #{}".format(tid))
job = rq.get_current_job()
db_task = models.Task.objects.select_for_update().get(pk=tid)
upload_dir = db_task.get_upload_dirname()
output_dir = db_task.get_data_dirname()
counters, share_dirs_mapping, share_files_mapping = _prepare_paths(
params['SOURCE_PATHS'],
params['TARGET_PATHS'],
params['storage']
)
if (not _valid_file_set(counters)):
        raise Exception('Only one archive, one video or many images can be downloaded simultaneously. \
{} image(s), {} dir(s), {} video(s), {} archive(s) found'.format(
counters['image'],
counters['directory'],
counters['video'],
counters['archive']
)
)
archive = None
if counters['archive']:
job.meta['status'] = 'Archive is being unpacked..'
job.save_meta()
archive = _find_and_unpack_archive(upload_dir)
# Define task mode and other parameters
task_video_id = -1
print(params)
task_score = params['score']
if 'video_id' in params:
task_video_id = params['video_id']
task_params = {
'mode': 'annotation' if counters['image'] or counters['directory'] or counters['archive'] else 'interpolation',
'flip': params['flip_flag'].lower() == 'true',
'score': task_score,
'video_id': task_video_id,
'z_order': params['z_order'].lower() == 'true',
'compress': int(params.get('compress_quality', 50)),
'segment': int(sys.maxsize),
'labels': params['labels'],
'frame_properties': json.loads(params['frame_properties'])
}
task_params['overlap'] = int(params.get('overlap_size', 5 if task_params['mode'] == 'interpolation' else 0))
slogger.glob.info("Task #{} parameters: {}".format(tid, task_params))
files = []
if task_params['mode'] == 'interpolation':
video = _find_and_extract_video(upload_dir, output_dir, db_task, job)
task_params['data'] = os.path.relpath(video, upload_dir)
else:
files =_find_and_compress_images(upload_dir, output_dir, db_task,
task_params['compress'], task_params['flip'], job)
if archive:
task_params['data'] = os.path.relpath(archive, upload_dir)
else:
task_params['data'] = '{} images: {}, ...'.format(len(files),
", ".join([os.path.relpath(x, upload_dir) for x in files[0:2]]))
slogger.glob.info("Founded frames {} for task #{}".format(db_task.size, tid))
task_params['segment'] = db_task.size + 10
job.meta['status'] = 'Task is being saved in database'
job.save_meta()
try:
_save_task_to_db(db_task, task_params)
if task_params['mode'] == 'annotation':
# add sources paths to db
_save_paths_to_db(db_task, params['SOURCE_PATHS'])
        # Parsing tags file
if params['storage'] == 'share':
txt = parseTxtToXml(upload_dir, db_task.id)
destDir = r'/home/django/data/' + str(db_task.id) + r'/data/xml/'
os.makedirs(destDir)
with open(destDir + r'annotations.txt', 'w') as annotationFile:
annotationFile.write(txt)
except Exception:
pass
finally:
# Deleting upload dir
shutil.rmtree(upload_dir)
| []
| []
| [
"WITH_OS"
]
| [] | ["WITH_OS"] | python | 1 | 0 | |
simple_recipes/db/__init__.py | from psycopg2 import sql, connect
import psycopg2.extras
import os
from simple_recipes import app
def get_connection():
DATABASE_URL = None
if app.config['ENV'] == 'development':
DATABASE_URL = app.config['DATABASE_URL']
elif 'DATABASE_URL' in os.environ:
DATABASE_URL = os.environ['DATABASE_URL']
else:
DATABASE_URL = 'host=localhost port=5433 dbname=recipes user=postgres password=admin'
return connect(DATABASE_URL)
def get_cursor(cn):
return cn.cursor(cursor_factory = psycopg2.extras.DictCursor)
def get_table_names():
table_names = []
statement = sql.SQL( "SELECT table_name "
"FROM information_schema.tables "
"WHERE "
"table_schema = 'public' "
"AND table_type = 'BASE TABLE'")
with get_connection() as cn:
with get_cursor(cn) as cur:
cur.execute(statement)
for record in cur:
table_names.append(record[0])
return table_names
def get_table_counts():
table_names = get_table_names()
select_template = sql.SQL( "SELECT "
"{table_as_literal}, "
"COUNT(*) "
"FROM {table_as_identifier}")
table_statements = [select_template.format(
table_as_literal = sql.Literal(s),
table_as_identifier = sql.Identifier(s))
for s in table_names]
statement = sql.SQL(" UNION ALL ").join(table_statements)
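    # Hypothetical example of the statement built above, assuming tables named
    # "recipes" and "ingredients" exist in the public schema:
    #   SELECT 'recipes', COUNT(*) FROM "recipes"
    #   UNION ALL SELECT 'ingredients', COUNT(*) FROM "ingredients"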
with get_connection() as cn:
with cn.cursor() as cur:
cur.execute(statement)
return cur.fetchall() | []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
f1.py | #!/usr/bin/env python3
import argparse
import os
import http.server
import socketserver
import requests
# Arguments parsing
parser = argparse.ArgumentParser(
description="Expose acestreams from morningstreams in you local network.",
epilog="Source code: https://github.com/S1M0N38/morningstreams",
)
parser.add_argument(
"--ip",
default="127.0.0.1",
help="ip where the m3u8 will be exposed",
)
parser.add_argument(
"--port",
default=8080,
help="port where the m3u8 will be exposed",
)
parser.add_argument(
"--username",
default=os.getenv("MORNINGSTREAMS_USERNAME"),
help="your morningstreams username",
)
parser.add_argument(
"--password",
default=os.getenv("MORNINGSTREAMS_PASSWORD"),
help="your morningstreams password",
)
args = parser.parse_args()
# Check if username and password are set
if not args.username or not args.password:
raise ValueError(
"""
morningstreams credentials are not provided.
    Use the flags --username and --password or
export MORNINGSTREAMS_USERNAME and MORNINGSTREAMS_PASSWORD
environment variables."""
)
# Check if ACE stream Engine is running
try:
requests.get(f"http://{args.ip}:6878/webui/api/service")
except requests.exceptions.ConnectionError:
raise EnvironmentError("ACE stream engine is not running.")
# Login Morningstreams
login_url = "https://api.morningstreams.com/api/users/login"
credentials = {
"username": args.username,
"password": args.password,
"rememberMe": False,
}
response = requests.post(login_url, json=credentials)
assert response.json()["success"]
token = response.json()["token"]
# Update IP
update_id_url = "https://api.morningstreams.com/api/posts/update_ip"
headers = {"authorization": token}
ace_ip = {"aceIP": requests.get("https://wtfismyip.com/text").text.strip()}
response = requests.post(update_id_url, headers=headers, json=ace_ip)
assert response.json()["aceIP"] == ace_ip["aceIP"]
# ACE Stream
posts_url = "https://api.morningstreams.com/api/posts"
response = requests.get(posts_url, headers=headers)
m3u8 = "#EXTM3U\n"
for link in response.json():
try:
int(link["text"], 16) # check if is a acestream link
m3u8 += f'#EXTINF:-1,{link["title"]}\n'
m3u8 += f'http://{args.ip}:6878/ace/getstream?id={link["text"]}\n'
except ValueError:
pass
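# Illustrative playlist output (hypothetical channel name and content id): each
# acestream post contributes two lines, e.g.
#   #EXTINF:-1,Some Channel
#   http://127.0.0.1:6878/ace/getstream?id=<40-char hex id>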
# Save links in m3u8 file
with open("playlist.m3u8", "w") as f:
f.write(m3u8)
# Spawn http server
address = ("", args.port)
httpd = socketserver.TCPServer(address, http.server.SimpleHTTPRequestHandler)
print(f"Starting httpd... http://{args.ip}:{args.port}/playlist.m3u8")
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print("\nStopping httpd...")
| []
| []
| [
"MORNINGSTREAMS_PASSWORD",
"MORNINGSTREAMS_USERNAME"
]
| [] | ["MORNINGSTREAMS_PASSWORD", "MORNINGSTREAMS_USERNAME"] | python | 2 | 0 | |
test/test_tcp_address.py | from asyncdbus import Message, MessageBus
from asyncdbus._private.address import parse_address
import anyio
import pytest
import os
@pytest.mark.anyio
async def test_tcp_connection_with_forwarding():
async with anyio.create_task_group() as tg:
closables = []
host = '127.0.0.1'
port = '55556'
addr_info = parse_address(os.environ.get('DBUS_SESSION_BUS_ADDRESS'))
assert addr_info
assert 'abstract' in addr_info[0][1]
path = f'\0{addr_info[0][1]["abstract"]}'
async def handle_connection(tcp_sock):
async with await anyio.connect_unix(path) as unix_sock:
async with anyio.create_task_group() as tg:
async def handle_read():
try:
while True:
data = await tcp_sock.receive()
await unix_sock.send(data)
except (anyio.ClosedResourceError, anyio.EndOfStream):
return
async def handle_write():
try:
while True:
data = await unix_sock.receive()
await tcp_sock.send(data)
except (anyio.ClosedResourceError, anyio.EndOfStream):
return
tg.spawn(handle_read)
tg.spawn(handle_write)
listener = await anyio.create_tcp_listener(local_port=port, local_host=host)
tg.spawn(listener.serve, handle_connection)
await anyio.sleep(0.1)
try:
async with MessageBus(bus_address=f'tcp:host={host},port={port}').connect() as bus:
# basic tests to see if it works
result = await bus.call(
Message(
destination='org.freedesktop.DBus',
path='/org/freedesktop/DBus',
interface='org.freedesktop.DBus.Peer',
member='Ping'))
assert result
intr = await bus.introspect('org.freedesktop.DBus', '/org/freedesktop/DBus')
obj = await bus.get_proxy_object('org.freedesktop.DBus', '/org/freedesktop/DBus',
intr)
iface = await obj.get_interface('org.freedesktop.DBus.Peer')
await iface.call_ping()
sock = bus._sock.extra(anyio.abc.SocketAttribute.raw_socket) \
if hasattr(bus._sock,'extra') else bus._sock
assert sock.getpeername()[0] == host
assert sock.getsockname()[0] == host
assert sock.gettimeout() == 0
pass # A
finally:
tg.cancel_scope.cancel()
| []
| []
| [
"DBUS_SESSION_BUS_ADDRESS"
]
| [] | ["DBUS_SESSION_BUS_ADDRESS"] | python | 1 | 0 | |
python_modules/libraries/dagster-aws/dagster_aws_tests/s3_tests/test_intermediate_store.py | import os
import uuid
import pytest
from dagster_aws.s3.intermediate_store import S3IntermediateStore
from dagster_aws.s3.resources import s3_resource
from dagster_aws.s3.system_storage import s3_plus_default_storage_defs
from dagster import (
Bool,
InputDefinition,
Int,
List,
ModeDefinition,
OutputDefinition,
RunConfig,
SerializationStrategy,
String,
check,
execute_pipeline,
lambda_solid,
pipeline,
)
from dagster.core.events import DagsterEventType
from dagster.core.execution.api import create_execution_plan, execute_plan, scoped_pipeline_context
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.storage.type_storage import TypeStoragePlugin, TypeStoragePluginRegistry
from dagster.core.types.runtime.runtime_type import Bool as RuntimeBool
from dagster.core.types.runtime.runtime_type import RuntimeType
from dagster.core.types.runtime.runtime_type import String as RuntimeString
from dagster.core.types.runtime.runtime_type import resolve_to_runtime_type
from dagster.utils.test import yield_empty_pipeline_context
class UppercaseSerializationStrategy(SerializationStrategy): # pylint: disable=no-init
def serialize(self, value, write_file_obj):
return write_file_obj.write(bytes(value.upper().encode('utf-8')))
def deserialize(self, read_file_obj):
return read_file_obj.read().decode('utf-8').lower()
class LowercaseString(RuntimeType):
def __init__(self):
super(LowercaseString, self).__init__(
'lowercase_string',
'LowercaseString',
serialization_strategy=UppercaseSerializationStrategy('uppercase'),
)
def aws_credentials_present():
return os.getenv('AWS_ACCESS_KEY_ID') and os.getenv('AWS_SECRET_ACCESS_KEY')
nettest = pytest.mark.nettest
def define_inty_pipeline(should_throw=True):
@lambda_solid
def return_one():
return 1
@lambda_solid(input_defs=[InputDefinition('num', Int)], output_def=OutputDefinition(Int))
def add_one(num):
return num + 1
@lambda_solid
def user_throw_exception():
raise Exception('whoops')
@pipeline(
mode_defs=[
ModeDefinition(
system_storage_defs=s3_plus_default_storage_defs, resource_defs={'s3': s3_resource}
)
]
)
def basic_external_plan_execution():
add_one(return_one())
if should_throw:
user_throw_exception()
return basic_external_plan_execution
def get_step_output(step_events, step_key, output_name='result'):
for step_event in step_events:
if (
step_event.event_type == DagsterEventType.STEP_OUTPUT
and step_event.step_key == step_key
and step_event.step_output_data.output_name == output_name
):
return step_event
return None
@nettest
def test_using_s3_for_subplan(s3_bucket):
pipeline_def = define_inty_pipeline()
environment_dict = {'storage': {'s3': {'config': {'s3_bucket': s3_bucket}}}}
run_id = str(uuid.uuid4())
execution_plan = create_execution_plan(
pipeline_def, environment_dict=environment_dict, run_config=RunConfig(run_id=run_id)
)
assert execution_plan.get_step_by_key('return_one.compute')
step_keys = ['return_one.compute']
instance = DagsterInstance.ephemeral()
pipeline_run = PipelineRun.create_empty_run(
pipeline_def.name, run_id=run_id, environment_dict=environment_dict
)
return_one_step_events = list(
execute_plan(
execution_plan.build_subset_plan(step_keys),
environment_dict=environment_dict,
pipeline_run=pipeline_run,
instance=instance,
)
)
assert get_step_output(return_one_step_events, 'return_one.compute')
with scoped_pipeline_context(pipeline_def, environment_dict, pipeline_run, instance) as context:
store = S3IntermediateStore(
s3_bucket, run_id, s3_session=context.scoped_resources_builder.build().s3.session
)
assert store.has_intermediate(context, 'return_one.compute')
assert store.get_intermediate(context, 'return_one.compute', Int).obj == 1
add_one_step_events = list(
execute_plan(
execution_plan.build_subset_plan(['add_one.compute']),
environment_dict=environment_dict,
pipeline_run=pipeline_run,
instance=instance,
)
)
assert get_step_output(add_one_step_events, 'add_one.compute')
with scoped_pipeline_context(pipeline_def, environment_dict, pipeline_run, instance) as context:
assert store.has_intermediate(context, 'add_one.compute')
assert store.get_intermediate(context, 'add_one.compute', Int).obj == 2
class FancyStringS3TypeStoragePlugin(TypeStoragePlugin): # pylint:disable=no-init
@classmethod
def compatible_with_storage_def(cls, _):
# Not needed for these tests
raise NotImplementedError()
@classmethod
def set_object(cls, intermediate_store, obj, context, runtime_type, paths):
check.inst_param(intermediate_store, 'intermediate_store', S3IntermediateStore)
paths.append(obj)
return intermediate_store.set_object('', context, runtime_type, paths)
@classmethod
def get_object(cls, intermediate_store, _context, _runtime_type, paths):
check.inst_param(intermediate_store, 'intermediate_store', S3IntermediateStore)
res = intermediate_store.object_store.s3.list_objects(
Bucket=intermediate_store.object_store.bucket,
Prefix=intermediate_store.key_for_paths(paths),
)
return res['Contents'][0]['Key'].split('/')[-1]
@nettest
def test_s3_intermediate_store_with_type_storage_plugin(s3_bucket):
run_id = str(uuid.uuid4())
intermediate_store = S3IntermediateStore(
run_id=run_id,
s3_bucket=s3_bucket,
type_storage_plugin_registry=TypeStoragePluginRegistry(
{RuntimeString.inst(): FancyStringS3TypeStoragePlugin}
),
)
with yield_empty_pipeline_context(run_id=run_id) as context:
try:
intermediate_store.set_value('hello', context, RuntimeString.inst(), ['obj_name'])
assert intermediate_store.has_object(context, ['obj_name'])
assert (
intermediate_store.get_value(context, RuntimeString.inst(), ['obj_name']) == 'hello'
)
finally:
intermediate_store.rm_object(context, ['obj_name'])
@nettest
def test_s3_intermediate_store_with_composite_type_storage_plugin(s3_bucket):
run_id = str(uuid.uuid4())
intermediate_store = S3IntermediateStore(
run_id=run_id,
s3_bucket=s3_bucket,
type_storage_plugin_registry=TypeStoragePluginRegistry(
{RuntimeString.inst(): FancyStringS3TypeStoragePlugin}
),
)
with yield_empty_pipeline_context(run_id=run_id) as context:
with pytest.raises(check.NotImplementedCheckError):
intermediate_store.set_value(
['hello'], context, resolve_to_runtime_type(List[String]), ['obj_name']
)
@nettest
def test_s3_intermediate_store_composite_types_with_custom_serializer_for_inner_type(s3_bucket):
run_id = str(uuid.uuid4())
intermediate_store = S3IntermediateStore(run_id=run_id, s3_bucket=s3_bucket)
with yield_empty_pipeline_context(run_id=run_id) as context:
try:
intermediate_store.set_object(
['foo', 'bar'],
context,
resolve_to_runtime_type(List[LowercaseString]).inst(),
['list'],
)
assert intermediate_store.has_object(context, ['list'])
assert intermediate_store.get_object(
context, resolve_to_runtime_type(List[Bool]).inst(), ['list']
).obj == ['foo', 'bar']
finally:
intermediate_store.rm_object(context, ['foo'])
@nettest
def test_s3_intermediate_store_with_custom_serializer(s3_bucket):
run_id = str(uuid.uuid4())
intermediate_store = S3IntermediateStore(run_id=run_id, s3_bucket=s3_bucket)
with yield_empty_pipeline_context(run_id=run_id) as context:
try:
intermediate_store.set_object('foo', context, LowercaseString.inst(), ['foo'])
assert (
intermediate_store.object_store.s3.get_object(
Bucket=intermediate_store.object_store.bucket,
Key='/'.join([intermediate_store.root] + ['foo']),
)['Body']
.read()
.decode('utf-8')
== 'FOO'
)
assert intermediate_store.has_object(context, ['foo'])
assert (
intermediate_store.get_object(context, LowercaseString.inst(), ['foo']).obj == 'foo'
)
finally:
intermediate_store.rm_object(context, ['foo'])
@nettest
def test_s3_pipeline_with_custom_prefix(s3_bucket):
run_id = str(uuid.uuid4())
s3_prefix = 'custom_prefix'
pipe = define_inty_pipeline(should_throw=False)
environment_dict = {
'storage': {'s3': {'config': {'s3_bucket': s3_bucket, 's3_prefix': s3_prefix}}}
}
pipeline_run = PipelineRun.create_empty_run(
pipe.name, run_id=run_id, environment_dict=environment_dict
)
instance = DagsterInstance.ephemeral()
result = execute_pipeline(
pipe, environment_dict=environment_dict, run_config=RunConfig(run_id=run_id),
)
assert result.success
with scoped_pipeline_context(pipe, environment_dict, pipeline_run, instance) as context:
store = S3IntermediateStore(
run_id=run_id,
s3_bucket=s3_bucket,
s3_prefix=s3_prefix,
s3_session=context.scoped_resources_builder.build().s3.session,
)
assert store.root == '/'.join(['custom_prefix', 'storage', run_id])
assert store.get_intermediate(context, 'return_one.compute', Int).obj == 1
assert store.get_intermediate(context, 'add_one.compute', Int).obj == 2
@nettest
def test_s3_intermediate_store_with_custom_prefix(s3_bucket):
run_id = str(uuid.uuid4())
intermediate_store = S3IntermediateStore(
run_id=run_id, s3_bucket=s3_bucket, s3_prefix='custom_prefix'
)
assert intermediate_store.root == '/'.join(['custom_prefix', 'storage', run_id])
try:
with yield_empty_pipeline_context(run_id=run_id) as context:
intermediate_store.set_object(True, context, RuntimeBool.inst(), ['true'])
assert intermediate_store.has_object(context, ['true'])
assert intermediate_store.uri_for_paths(['true']).startswith(
's3://%s/custom_prefix' % s3_bucket
)
finally:
intermediate_store.rm_object(context, ['true'])
@nettest
def test_s3_intermediate_store(s3_bucket):
run_id = str(uuid.uuid4())
run_id_2 = str(uuid.uuid4())
intermediate_store = S3IntermediateStore(run_id=run_id, s3_bucket=s3_bucket)
assert intermediate_store.root == '/'.join(['dagster', 'storage', run_id])
intermediate_store_2 = S3IntermediateStore(run_id=run_id_2, s3_bucket=s3_bucket)
assert intermediate_store_2.root == '/'.join(['dagster', 'storage', run_id_2])
try:
with yield_empty_pipeline_context(run_id=run_id) as context:
intermediate_store.set_object(True, context, RuntimeBool.inst(), ['true'])
assert intermediate_store.has_object(context, ['true'])
assert intermediate_store.get_object(context, RuntimeBool.inst(), ['true']).obj is True
assert intermediate_store.uri_for_paths(['true']).startswith('s3://')
intermediate_store_2.copy_object_from_prev_run(context, run_id, ['true'])
assert intermediate_store_2.has_object(context, ['true'])
assert (
intermediate_store_2.get_object(context, RuntimeBool.inst(), ['true']).obj is True
)
finally:
intermediate_store.rm_object(context, ['true'])
intermediate_store_2.rm_object(context, ['true'])
| []
| []
| [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
]
| [] | ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | python | 2 | 0 | |
Lambda/vanilla-golang/shoppingcart-example/add-item-to-cart/main.go | package main
import (
"encoding/json"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/satori/go.uuid"
"io/ioutil"
"log"
"os"
"net/http"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
var baseUrl = os.Getenv("APIURL")
type ItemInventory struct {
Name string `json:"name"`
Stock int `json:"stock"`
Cost float64 `json:"cost"`
}
type ItemCart struct {
Name string `json:"name"`
Quantity int `json:"quantity"`
Cost float64 `json:"cost"`
}
type Affectee struct {
Name string `json:"name"`
Quantity int `json:"quantity"`
}
type Affected struct {
Name string `json:"name"`
CostPtg float64 `json:"costPtg"`
CostFixed float64 `json:"costFixed"`
}
type Promotion struct {
UUID string `json:"uuid"`
Affectee Affectee `json:"affectee"`
Affected Affected `json:"affected"`
}
type CartSession struct {
Session string `json:"session"`
Cart []ItemCart `json:"cart"`
Total float64 `json:"total"`
Promos []Promotion `json:"promos"`
}
type RequestBody struct {
Name string `json:"name"`
Quantity int `json:"quantity"`
}
func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
// ************
// Preparation
// ************
sess, err := session.NewSession(&aws.Config{
Region: aws.String("us-west-2")},
)
if err != nil {
return serverError(err)
}
// Get body of request
requestBody := new(RequestBody)
err = json.Unmarshal([]byte(request.Body), requestBody)
if err != nil {
return serverError(err)
}
// Verifying request
if requestBody.Name == "" || requestBody.Quantity <= 0 {
return parametersError()
}
// Create DynamoDB client
svc := dynamodb.New(sess)
cartSession := new(CartSession)
itemInventory := new(ItemInventory)
// ************
// Operation
// ************
// Step 1: Find existing session or create one
if request.PathParameters["session"] != "" {
cartString := getUrl("/cart/" + request.PathParameters["session"])
err := json.Unmarshal(cartString, cartSession)
if err != nil {
return serverError(err)
}
if cartSession.Session == "" {
cartSession, err = addCart(svc)
}
} else {
cartSession, err = addCart(svc)
}
// Step 2: Modify cart array
// Step 2.1: Check if inventory has enough stock
itemCart := new(ItemCart)
// Get the item from inventory
inventoryString := getUrl("/item/" + requestBody.Name)
err = json.Unmarshal(inventoryString, itemInventory)
if err != nil {
return serverError(err)
}
// Get the item from cart array
itemIndexInCart := -1
for i, item := range cartSession.Cart {
if item.Name == requestBody.Name {
itemCart = &item
itemIndexInCart = i
}
}
// Check if quantity exceeds stock
if itemInventory.Stock < (requestBody.Quantity + itemCart.Quantity) {
log.Println("Error: Not enough stock", itemInventory.Stock, requestBody.Quantity, itemCart.Quantity)
return notEnoughStockError()
}
// Step 2.2 if item isn't found in cart then create it & update cart
if itemIndexInCart == -1 {
itemCart.Name = requestBody.Name
itemCart.Quantity = requestBody.Quantity
itemCart.Cost = float64(itemCart.Quantity) * itemInventory.Cost
cartSession.Cart = append(cartSession.Cart, *itemCart)
} else {
cartSession.Cart[itemIndexInCart].Quantity += requestBody.Quantity
cartSession.Cart[itemIndexInCart].Cost += (itemCart.Cost / float64(itemCart.Quantity)) * float64(requestBody.Quantity)
}
// Step 3: Apply promotions
// Get all promotions
var promotions []Promotion
promoString := getUrl("/promo/")
err = json.Unmarshal(promoString, &promotions)
if err != nil {
return serverError(err)
}
OUTER:
for i, item := range cartSession.Cart {
for _, promo := range promotions {
			// Skip promos that were already applied to this cart,
			// UNLESS it's a promo where the affected and the affectee are the same item
alreadyApplied := false
skippable := false
for _, appliedPromo := range cartSession.Promos {
if promo.UUID == appliedPromo.UUID {
alreadyApplied = true
if appliedPromo.Affectee.Name != appliedPromo.Affected.Name {
skippable = true
} else {
skippable = false
}
break
}
}
if alreadyApplied && skippable {
alreadyApplied = false
skippable = false
continue
}
if item.Name == promo.Affected.Name {
// If an item in the cart can be affected by the promo
// then start investigating if we have the affectee
// If the item is the affected and affectee
if item.Name == promo.Affectee.Name {
// If the item does not exceed affectee quantity then there is no affected
if item.Quantity+requestBody.Quantity <= promo.Affectee.Quantity {
continue OUTER
}
// then modify the affected's cost without
// modifying the affectee
// Calculating the affectee cost
costOfAffecteeItems := itemInventory.Cost * float64(promo.Affectee.Quantity)
quantityOfAffected := cartSession.Cart[i].Quantity - promo.Affectee.Quantity
var costOfAffectedItems float64
if promo.Affected.CostPtg != 0 {
						// Calculating the affected cost
costOfAffectedItems = float64(quantityOfAffected) * (itemInventory.Cost * promo.Affected.CostPtg)
} else {
costOfAffectedItems = float64(quantityOfAffected) * promo.Affected.CostFixed
}
cartSession.Cart[i].Cost = costOfAffecteeItems + costOfAffectedItems
// Add promo to cart
if !alreadyApplied {
cartSession.Promos = append(cartSession.Promos, promo)
}
continue OUTER
} else {
for _, subItem := range cartSession.Cart {
// If we have the affectee & its quantity is equal or higher than promo
if subItem.Name == promo.Affectee.Name && subItem.Quantity >= promo.Affectee.Quantity {
// Apply the promo
if promo.Affected.CostPtg != 0 {
cartSession.Cart[i].Cost *= promo.Affected.CostPtg
} else {
cartSession.Cart[i].Cost = float64(cartSession.Cart[i].Quantity) * promo.Affected.CostFixed
}
// Add promo to cart
cartSession.Promos = append(cartSession.Promos, promo)
continue OUTER
}
}
}
}
}
}
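	// Worked example (hypothetical data, not from the original source): with a promo
	// whose affectee is {Name: "plate", Quantity: 2} and whose affected is
	// {Name: "plate", CostPtg: 0.5}, a cart holding 3 plates at 10.0 each keeps the
	// first 2 at full price and charges the 3rd at 50%, i.e. 2*10.0 + 1*5.0 = 25.0
	// for that line item.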
// Step 4: Calculate total cost
// Even though this iteration already happened in another area
// it is safer to keep different functionalities separate
// and avoid spaghetti code as long as it does not have a big
// impact on the performance
cartSession.Total = 0
for _, item := range cartSession.Cart {
cartSession.Total += item.Cost
}
// Update Cart Session
err = updateCart(svc, cartSession)
if err != nil {
return serverError(err)
}
// ************
// Return
// ************
js, err := json.Marshal(cartSession)
if err != nil {
return serverError(err)
}
return events.APIGatewayProxyResponse{
Headers: map[string]string{"content-type": "application/json"},
Body: string(js),
StatusCode: 200,
}, nil
}
func updateCart(svc *dynamodb.DynamoDB, cartSession *CartSession) error {
// Add new cart session in database
av, err := dynamodbattribute.MarshalMap(cartSession)
if err != nil {
log.Println("Got error marshalling map")
return err
}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String("Cart"),
}
_, err = svc.PutItem(input)
if err != nil {
log.Println("Got error calling PutItem")
return err
}
return nil
}
func addCart(svc *dynamodb.DynamoDB) (*CartSession, error) {
// Create UUID for new session
uid := uuid.Must(uuid.NewV4())
cartSession := CartSession{
Session: uid.String(),
}
// Add new cart session in database
av, err := dynamodbattribute.MarshalMap(cartSession)
if err != nil {
log.Println("Got error marshalling map")
serverError(err)
return nil, err
}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String("Cart"),
}
_, err = svc.PutItem(input)
if err != nil {
log.Println("Got error calling PutItem")
serverError(err)
return nil, err
}
return &cartSession, nil
}
// Function used to call other lambda functions
func getUrl(url string) []byte {
// Make a get request
rs, err := http.Get(baseUrl + url)
// Process response
if err != nil {
log.Printf("error calling url")
serverError(err)
}
defer rs.Body.Close()
bodyBytes, err := ioutil.ReadAll(rs.Body)
if err != nil {
log.Printf("error reading body from url")
serverError(err)
}
return bodyBytes
}
func serverError(err error) (events.APIGatewayProxyResponse, error) {
log.Println("Error: " + err.Error())
return events.APIGatewayProxyResponse{
StatusCode: http.StatusInternalServerError,
Body: http.StatusText(http.StatusInternalServerError),
}, nil
}
func parametersError() (events.APIGatewayProxyResponse, error) {
log.Println("Parameters requirment not met")
return events.APIGatewayProxyResponse{
StatusCode: http.StatusPreconditionFailed,
Body: http.StatusText(http.StatusPreconditionFailed),
}, nil
}
func notEnoughStockError() (events.APIGatewayProxyResponse, error) {
log.Println("Not enough stock")
return events.APIGatewayProxyResponse{
StatusCode: http.StatusForbidden,
Body: http.StatusText(http.StatusForbidden),
}, nil
}
func main() {
lambda.Start(Handler)
}
| [
"\"APIURL\""
]
| []
| [
"APIURL"
]
| [] | ["APIURL"] | go | 1 | 0 | |
data_steward/test/unit_test/achilles_test.py | import os
import unittest
from google.appengine.ext import testbed
import bq_utils
import gcs_utils
import resources
import test_util
import validation.sql_wrangle
from test_util import FAKE_HPO_ID
from validation import achilles
# This may change if we strip out unused analyses
ACHILLES_LOOKUP_COUNT = 215
ACHILLES_ANALYSIS_COUNT = 134
ACHILLES_RESULTS_COUNT = 2497
SOURCE_NAME_QUERY = """insert into synpuf_100.achilles_analysis (analysis_id, analysis_name)
values (0, 'Source name')"""
TEMP_QUERY_1 = """INTO temp.tempresults
WITH rawdata as ( select p.person_id as person_id, min(EXTRACT(YEAR from observation_period_start_date)) -
p.year_of_birth as age_value from synpuf_100.person p
join synpuf_100.observation_period op on p.person_id = op.person_id
group by p.person_id, p.year_of_birth
), overallstats as (select cast(avg(1.0 * age_value) as float64) as avg_value, cast(STDDEV(age_value) as
float64) as stdev_value, min(age_value) as min_value, max(age_value) as max_value, COUNT(*) as total from rawdata
), agestats as ( select age_value as age_value, COUNT(*) as total, row_number() over (order by age_value) as rn
from rawdata
group by 1 ), agestatsprior as ( select s.age_value as age_value, s.total as total, sum(p.total) as
accumulated from agestats s
join agestats p on p.rn <= s.rn
group by s.age_value, s.total, s.rn
)
select 103 as analysis_id, o.total as count_value, o.min_value, o.max_value, o.avg_value, o.stdev_value,
min(case when p.accumulated >= .50 * o.total then age_value end) as median_value, min(case when p.accumulated >= .10
* o.total then age_value end) as p10_value, min(case when p.accumulated >= .25 * o.total then age_value end) as
p25_value, min(case when p.accumulated >= .75 * o.total then age_value end) as p75_value, min(case when
p.accumulated >= .90 * o.total then age_value end) as p90_value
FROM agestatsprior p
cross join overallstats o
group by o.total, o.min_value, o.max_value, o.avg_value, o.stdev_value"""
TEMP_QUERY_2 = """INTO temp.rawdata_1006
SELECT ce.condition_concept_id as subject_id, p1.gender_concept_id, ce.condition_start_year - p1.year_of_birth as
count_value
FROM synpuf_100.person p1
inner join
(
select person_id, condition_concept_id, min(EXTRACT(YEAR from condition_era_start_date)) as condition_start_year
from synpuf_100.condition_era
group by 1, 2 ) ce on p1.person_id = ce.person_id"""
@unittest.skipIf(os.getenv('ALL_TESTS') == 'False', 'Skipping AchillesTest cases')
class AchillesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_app_identity_stub()
self.testbed.init_memcache_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
self.hpo_bucket = gcs_utils.get_hpo_bucket(test_util.FAKE_HPO_ID)
test_util.empty_bucket(self.hpo_bucket)
test_util.delete_all_tables(bq_utils.get_dataset_id())
def _load_dataset(self):
for cdm_table in resources.CDM_TABLES:
cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH, cdm_table + '.csv')
if os.path.exists(cdm_file_name):
test_util.write_cloud_file(self.hpo_bucket, cdm_file_name)
else:
test_util.write_cloud_str(self.hpo_bucket, cdm_table + '.csv', 'dummy\n')
bq_utils.load_cdm_csv(FAKE_HPO_ID, cdm_table)
def test_detect_commented_block(self):
commented_command = """
--1300- ORGANIZATION
--NOT APPLICABLE IN CDMV5
--insert into fake_ACHILLES_analysis (analysis_id, analysis_name, stratum_1_name)
-- values (1300, 'Number of organizations by place of service', 'place_of_service_concept_id')"""
self.assertFalse(validation.sql_wrangle.is_active_command(commented_command))
def test_load_analyses(self):
achilles.create_tables(FAKE_HPO_ID, True)
achilles.load_analyses(FAKE_HPO_ID)
cmd = validation.sql_wrangle.qualify_tables(
'SELECT DISTINCT(analysis_id) FROM %sachilles_analysis' % validation.sql_wrangle.PREFIX_PLACEHOLDER,
FAKE_HPO_ID)
result = bq_utils.query(cmd)
self.assertEqual(ACHILLES_LOOKUP_COUNT, int(result['totalRows']))
def test_get_run_analysis_commands(self):
cmd_iter = achilles._get_run_analysis_commands(FAKE_HPO_ID)
commands = list(cmd_iter)
self.assertEqual(len(commands), ACHILLES_ANALYSIS_COUNT)
def test_temp_table(self):
self.assertTrue(validation.sql_wrangle.is_to_temp_table(TEMP_QUERY_1))
self.assertTrue(validation.sql_wrangle.is_to_temp_table(TEMP_QUERY_2))
self.assertFalse(validation.sql_wrangle.is_to_temp_table(SOURCE_NAME_QUERY))
self.assertEqual(validation.sql_wrangle.get_temp_table_name(TEMP_QUERY_1), 'temp.tempresults')
self.assertEqual(validation.sql_wrangle.get_temp_table_name(TEMP_QUERY_2), 'temp.rawdata_1006')
self.assertTrue(validation.sql_wrangle.get_temp_table_query(TEMP_QUERY_1).startswith('WITH rawdata'))
self.assertTrue(
validation.sql_wrangle.get_temp_table_query(TEMP_QUERY_2).startswith('SELECT ce.condition_concept_id'))
def test_run_analyses(self):
# Long-running test
self._load_dataset()
achilles.create_tables(FAKE_HPO_ID, True)
achilles.load_analyses(FAKE_HPO_ID)
achilles.run_analyses(hpo_id=FAKE_HPO_ID)
cmd = validation.sql_wrangle.qualify_tables(
'SELECT COUNT(1) FROM %sachilles_results' % validation.sql_wrangle.PREFIX_PLACEHOLDER, FAKE_HPO_ID)
result = bq_utils.query(cmd)
self.assertEqual(int(result['rows'][0]['f'][0]['v']), ACHILLES_RESULTS_COUNT)
def test_parse_temp(self):
commands = achilles._get_run_analysis_commands(FAKE_HPO_ID)
for command in commands:
is_temp = validation.sql_wrangle.is_to_temp_table(command)
self.assertFalse(is_temp)
def tearDown(self):
test_util.delete_all_tables(bq_utils.get_dataset_id())
test_util.empty_bucket(self.hpo_bucket)
self.testbed.deactivate()
| []
| []
| [
"ALL_TESTS"
]
| [] | ["ALL_TESTS"] | python | 1 | 0 | |
src/catkin_projects/rlg_simulation/scripts/generate_dishrack_arrangements.py | import argparse
import math
import numpy as np
import os
import re
import time
import yaml
# For physical simulation
#import pybullet as p
# For projection into nonpenetration
# (pre-processing for simulation)
#import pydrake
#from pydrake.solvers import ik
# Eventually, for visualization,
# though not used yet.
#from director import viewerclient
models = {
"plate_11in": "drake/../models/dish_models/plate_11in_decomp/plate_11in_decomp.urdf",
"plate_8p5in": "drake/../models/dish_models/plate_8p5in_decomp/plate_8p5in_decomp.urdf",
"bowl_6p25in": "drake/../models/dish_models/bowl_6p25in_decomp/bowl_6p25in_decomp.urdf",
"dish_rack": "drake/../models/dish_models/dish_rack_simple.urdf",
"floor": "drake/../build/bullet3/data/plane.urdf"
}
class ObjectInstance:
def __init__(self, model, q0, fixed=False):
self.model = model
self.q0 = q0
self.fixed = fixed
def to_dict(self):
return {"model": str(self.model),
"q0": [float(x) for x in self.q0], "fixed": self.fixed}
def load_rbt_from_urdf_rel_drake_root(model_name, rbt, weld_frame = None):
urdf_filename = models[model_name]
drake_root = os.getenv("DRAKE_RESOURCE_ROOT")
urdf_string = open(drake_root + "/" + urdf_filename).read()
base_dir = os.path.dirname(drake_root + "/" + urdf_filename)
package_map = pydrake.rbtree.PackageMap()
if weld_frame is None:
floating_base_type = pydrake.rbtree.kRollPitchYaw
else:
floating_base_type = pydrake.rbtree.kFixed
pydrake.rbtree.AddModelInstanceFromUrdfStringSearchingInRosPackages(
urdf_string,
package_map,
base_dir,
floating_base_type,
weld_frame,
rbt)
class DishrackArrangement:
def __init__(self):
self.instances = [ObjectInstance("dish_rack", [0, 0, 0, 0, 0, 0], True)]
def add_instance(self, instance):
self.instances.append(instance)
def get_non_rack_instances(self):
return [instance for instance in self.instances if instance.model != "dish_rack"]
def project_instance_to_nonpenetration(self):
# Set up a rigidbodytree
r = pydrake.rbtree.RigidBodyTree()
q0 = []
# create hacky ground frame just under world frame, but a little lower so no collision
# between world and ground (which causes IK infeasibility)
# could replace with careful collision groups)
ground_frame = pydrake.rbtree.RigidBodyFrame("ground_frame", r.world(), [0, 0, -1E-3], [0, 0, 0])
# Add floor as a fixed frame.
#load_rbt_from_urdf_rel_drake_root("floor", r, ground_frame)
world_frame = pydrake.rbtree.RigidBodyFrame("world_frame", r.world(), [0, 0, 0], [0, 0, 0])
for instance in self.instances:
# I'd do this to load in the tray as a fixed mesh.
# But, Drake's bullet interface doesn't support closest points
# checks against nonconvex geometry, even though bullet does.
# So instead I'll load it as dynamic geometry, have drake take
# a chull of it, and allow this step to give feasible but
# conservative results...
if instance.fixed:
#load_rbt_from_urdf_rel_drake_root(instance.model, r, world_frame)
#[q0.append(x) for x in instance.q0]
pass
else:
load_rbt_from_urdf_rel_drake_root(instance.model, r)
[q0.append(x) for x in instance.q0]
tspan = np.array([0., 1.])
constraints = [
# Nonpenetration: no two bodies with negative min distance
# (i.e. no penetration)
ik.MinDistanceConstraint(r, 1e-3, list(), set())
# ik.QuasiStaticConstraint(r, tspan)
]
info = 13
while info != 100 and info != 1:
options = ik.IKoptions(r)
print "Feasibility: ", options.getMajorFeasibilityTolerance()
print "superbasics lim: ", options.getSuperbasicsLimit()
q0_array = np.array(q0, dtype=np.float64, ndmin=2)[0]
q0_seed = q0_array + np.random.randn(len(q0))
q0_seed[0:6] = [0.0]*6
results = ik.InverseKin(r, q0_array, q0_seed, constraints, options)
qsol = list(results.q_sol[0])
info = results.info[0]
print("Results ", qsol, " with info ", info)
            if info != 100 and info != 1:
                print "Trying again because IK failed."
            else:
                q0 = qsol
# Remap flattened state back into the individual instance states
ind = 0
for instance in self.instances:
num_states = len(instance.q0)
print("For instance %s: mapped ", instance.q0, " to ", q0[ind:(ind+num_states)])
instance.q0 = q0[ind:(ind+num_states)]
ind += num_states
def simulate_instance(self, n_secs, timestep=0.01):
# Assumes physics client already set up
p.resetSimulation()
p.setGravity(0,0,-9.81)
p.setTimeStep(timestep)
# Load in a ground
p.loadURDF(os.environ["SPARTAN_SOURCE_DIR"] + "/build/bullet3/data/plane.urdf")
# Add each model as requested
drake_resource_root = os.environ["DRAKE_RESOURCE_ROOT"]
ids = []
for instance in self.instances:
urdf = drake_resource_root + "/" + models[instance.model]
q0 = instance.q0
position = q0[0:3]
quaternion = p.getQuaternionFromEuler(q0[3:8])
fixed = instance.fixed
ids.append(p.loadURDF(urdf, position, quaternion, fixed))
# Simulate it
for i in range(int(n_secs / timestep)):
p.stepSimulation()
# Extract model states
for i, instance in enumerate(self.instances):
pos, quat = p.getBasePositionAndOrientation(i+1)
instance.q0[0:3] = pos
instance.q0[3:7] = p.getEulerFromQuaternion(quat)
def save_to_file(self, filename):
data = {}
data["models"] = models
data["with_ground"] = True
data["instances"] = []
for instance in self.instances:
data["instances"].append(instance.to_dict())
with open(filename, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
@staticmethod
def load_from_file(filename):
arrangement = DishrackArrangement()
config = yaml.load(open(filename, 'r'))
for instance in config["instances"]:
if instance["model"] != "dish_rack":
arrangement.add_instance(ObjectInstance(instance["model"], instance["q0"]))
return arrangement
# Place plates vertically, but with random yaw from [0, pi/2, pi, 3pi/2],
# within the bounds of the rack
plate_11in_params = {
"lower_bound_x": 0.017,
"upper_bound_x": 0.47,
"lower_bound_y": -0.47,
"upper_bound_y": -0.017,
"height": 0.3
}
def place_plate_11in():
center_location_x = np.random.uniform(plate_11in_params["lower_bound_x"], plate_11in_params["upper_bound_x"])
center_location_y = np.random.uniform(plate_11in_params["lower_bound_y"], plate_11in_params["upper_bound_y"])
yaw = float(np.random.randint(0, 4))*math.pi/2.
return [center_location_x, center_location_y, plate_11in_params["height"],
0, 0, yaw]
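# Illustrative note (not part of the original file): place_plate_11in() returns a
# 6-DOF pose [x, y, z, roll, pitch, yaw]; x and y are sampled inside the rack
# bounds above, z is the fixed drop height 0.3, and yaw is one of
# {0, pi/2, pi, 3*pi/2}. A sampled pose (values made up) might look like:
#   [0.25, -0.31, 0.3, 0, 0, 1.5707963267948966]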
placement_generators = {
"plate_11in": place_plate_11in
}
# Hand-written data generation script
def generate_dishrack_arrangement(max_num_dishes, allowable_dish_types):
arrangement = DishrackArrangement()
num_dishes = np.random.randint(0, max_num_dishes)
for k in range(num_dishes):
# Pick dish type
dish_type = allowable_dish_types[np.random.randint(0, len(allowable_dish_types))]
if dish_type not in placement_generators.keys():
print("Error: generator not defined for dish type %s" % (dish_type))
exit(-1)
# Generate a placement and add it to the arrangement
arrangement.add_instance(ObjectInstance(dish_type, placement_generators[dish_type](), False))
return arrangement
def load_arrangements_from_folder(folder_name, filename_regex=".*\.yaml"):
all_arrangements = []
matcher = re.compile(filename_regex)
if not os.path.isdir(folder_name):
print "Data directory ", folder_name, " doesn't exist."
exit(0)
filenames = next(os.walk(folder_name))[2]
for filename in sorted(filenames):
if matcher.match(filename):
all_arrangements.append(DishrackArrangement.load_from_file(folder_name + "/" + filename))
print "Loaded %d arrangements." % len(all_arrangements)
return all_arrangements
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("folder", help="Folder to put generated files in.", type=str)
parser.add_argument("-n", "--max_num_dishes", help="Max # of dishes to generate", type=int, default=20)
parser.add_argument("-m", "--num_samples", help="# of arrangements to generate", type=int, default=1)
parser.add_argument("-s", "--seed", help="Random seed", type=int)
args = parser.parse_args()
# Set up a simulation with a ground plane and desired timestep
physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version
if args.seed is not None:
np.random.seed(args.seed)
os.system("mkdir -p " + args.folder)
for i in range(args.num_samples):
arrangement = generate_dishrack_arrangement(args.max_num_dishes, ["plate_11in",])
arrangement.save_to_file(args.folder + "/" + "%03d_1_pre_projection.yaml" % i)
#arrangement.project_instance_to_nonpenetration()
#arrangement.save_to_file(args.folder + "/" + "%03d_2_post_projection.yaml" % i)
arrangement.simulate_instance(2.)
arrangement.save_to_file(args.folder + "/" + "%03d_3_post_simulation.yaml" % i)
| []
| []
| [
"DRAKE_RESOURCE_ROOT",
"SPARTAN_SOURCE_DIR"
]
| [] | ["DRAKE_RESOURCE_ROOT", "SPARTAN_SOURCE_DIR"] | python | 2 | 0 | |
backend/metadata_writer/src/metadata_writer.py | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import hashlib
import os
import sys
import re
import kubernetes
import yaml
from time import sleep
from metadata_helpers import *
namespace_to_watch = os.environ.get('NAMESPACE_TO_WATCH', 'default')
kubernetes.config.load_incluster_config()
k8s_api = kubernetes.client.CoreV1Api()
k8s_watch = kubernetes.watch.Watch()
patch_retries = 20
sleep_time = 0.1
def patch_pod_metadata(
namespace: str,
pod_name: str,
patch: dict,
k8s_api: kubernetes.client.CoreV1Api = None,
):
k8s_api = k8s_api or kubernetes.client.CoreV1Api()
patch = {
'metadata': patch
}
for retry in range(patch_retries):
try:
pod = k8s_api.patch_namespaced_pod(
name=pod_name,
namespace=namespace,
body=patch,
)
return pod
except Exception as e:
print(e)
sleep(sleep_time)
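# Illustrative example (not part of the original file): the `patch` argument is only
# the metadata sub-dict; patch_pod_metadata wraps it as {'metadata': patch} before
# calling patch_namespaced_pod. The namespace and pod name below are made up:
#   patch_pod_metadata(
#       namespace='kubeflow',
#       pod_name='my-pipeline-run-step-abc123',
#       patch={'labels': {'pipelines.kubeflow.org/metadata_written': 'true'}},
#   )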
#Connecting to MetadataDB
mlmd_store = connect_to_mlmd()
print("Connected to the metadata store")
PIPELINE_RUNTIME = os.getenv("PIPELINE_RUNTIME", "tekton").lower()
ARGO_OUTPUTS_ANNOTATION_KEY = 'workflows.argoproj.io/outputs'
ARGO_TEMPLATE_ANNOTATION_KEY = 'workflows.argoproj.io/template'
KFP_COMPONENT_SPEC_ANNOTATION_KEY = 'pipelines.kubeflow.org/component_spec'
METADATA_EXECUTION_ID_LABEL_KEY = 'pipelines.kubeflow.org/metadata_execution_id'
METADATA_CONTEXT_ID_LABEL_KEY = 'pipelines.kubeflow.org/metadata_context_id'
METADATA_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_artifact_ids'
METADATA_INPUT_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_input_artifact_ids'
METADATA_OUTPUT_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_output_artifact_ids'
ARGO_WORKFLOW_LABEL_KEY = 'workflows.argoproj.io/workflow'
ARGO_COMPLETED_LABEL_KEY = 'workflows.argoproj.io/completed'
METADATA_WRITTEN_LABEL_KEY = 'pipelines.kubeflow.org/metadata_written'
TEKTON_PIPELINERUN_LABEL_KEY = 'tekton.dev/pipelineRun'
TEKTON_READY_ANNOTATION_KEY = 'tekton.dev/ready'
TEKTON_TASKRUN_LABEL_KEY = 'tekton.dev/taskRun'
TEKTON_PIPELINETASK_LABEL_KEY = 'tekton.dev/pipelineTask'
TEKTON_INPUT_ARTIFACT_ANNOTATION_KEY = 'tekton.dev/input_artifacts'
TEKTON_OUTPUT_ARTIFACT_ANNOTATION_KEY = 'tekton.dev/output_artifacts'
TEKTON_BUCKET_ARTIFACT_ANNOTATION_KEY = 'tekton.dev/artifact_bucket'
PIPELINE_LABEL_KEY = TEKTON_PIPELINERUN_LABEL_KEY if PIPELINE_RUNTIME == "tekton" else ARGO_WORKFLOW_LABEL_KEY
def output_name_to_argo(name: str) -> str:
import re
# This sanitization code should be kept in sync with the code in the DSL compiler.
# See https://github.com/kubeflow/pipelines/blob/39975e3cde7ba4dcea2bca835b92d0fe40b1ae3c/sdk/python/kfp/compiler/_k8s_helper.py#L33
return re.sub('-+', '-', re.sub('[^-_0-9A-Za-z]+', '-', name)).strip('-')
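# Illustrative example (not part of the original file): output_name_to_argo
# replaces runs of characters outside [-_0-9A-Za-z] with '-', collapses repeated
# '-', and strips leading/trailing '-', e.g.
#   output_name_to_argo('My output (CSV)!')  ->  'My-output-CSV'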
def is_s3_endpoint(endpoint: str) -> bool:
    return bool(re.search('^.*s3.*amazonaws.com.*$', endpoint))
def get_object_store_provider(endpoint: str) -> str:
if is_s3_endpoint(endpoint):
return 's3'
else:
return 'minio'
def artifact_to_uri(artifact: dict) -> str:
# s3 here means s3 compatible object storage. not AWS S3.
if 's3' in artifact:
s3_artifact = artifact['s3']
return '{provider}://{bucket}/{key}'.format(
provider=get_object_store_provider(s3_artifact.get('endpoint', 'minio')),
bucket=s3_artifact.get('bucket', ''),
key=s3_artifact.get('key', ''),
)
elif 'raw' in artifact:
return None
else:
return None
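# Illustrative example (not part of the original file): for an s3-style artifact
# dict (values made up), artifact_to_uri returns an object-store URI:
#   artifact_to_uri({'s3': {'endpoint': 'minio-service:9000',
#                           'bucket': 'mlpipeline',
#                           'key': 'artifacts/run-1/step-1/output.tgz'}})
#   -> 'minio://mlpipeline/artifacts/run-1/step-1/output.tgz'
# 'raw' artifacts and anything else map to None.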
def is_tfx_pod(pod) -> bool:
main_step_name = 'step-main' if PIPELINE_RUNTIME == "tekton" else 'main'
main_containers = [container for container in pod.spec.containers if container.name == main_step_name]
if len(main_containers) != 1:
return False
main_container = main_containers[0]
return main_container.command and main_container.command[-1].endswith('tfx/orchestration/kubeflow/container_entrypoint.py')
def get_component_template(obj):
'''
Return an Argo formatted component template
'''
if PIPELINE_RUNTIME == "tekton":
artifacts = json.loads(obj.metadata.annotations[TEKTON_INPUT_ARTIFACT_ANNOTATION_KEY])
results = artifacts.get(obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY], [])
component_template = {
"name": obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY],
"inputs": {
"artifacts": [
{
"name": i["name"],
"s3": {
"bucket": obj.metadata.annotations.get(TEKTON_BUCKET_ARTIFACT_ANNOTATION_KEY, "mlpipeline"),
"key": "artifacts/%s/%s/%s.tgz" % (obj.metadata.labels[TEKTON_PIPELINERUN_LABEL_KEY],
i['parent_task'],
i["name"].replace(i['parent_task'] + '-', ''))
}
} for i in results
]
}
}
return component_template
else:
return json.loads(obj.metadata.annotations[ARGO_TEMPLATE_ANNOTATION_KEY])
def get_output_template(obj):
'''
Return an Argo formatted artifact output template
'''
if PIPELINE_RUNTIME == "tekton":
artifacts = json.loads(obj.metadata.annotations[TEKTON_OUTPUT_ARTIFACT_ANNOTATION_KEY])
results = artifacts.get(obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY], [])
artifact_prefix = obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY] + '-'
s3_key_prefix = 'artifacts/%s/%s' % (obj.metadata.labels[TEKTON_PIPELINERUN_LABEL_KEY],
obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY])
output_template = {
"name": obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY],
"artifacts": [
{
"name": i["name"],
"path": i["path"],
"s3": {
"bucket": obj.metadata.annotations.get(TEKTON_BUCKET_ARTIFACT_ANNOTATION_KEY, "mlpipeline"),
"key": "%s/%s.tgz" % (s3_key_prefix,
i["name"].replace(artifact_prefix, ''))
}
} for i in results
]
}
return output_template
else:
return json.loads(obj.metadata.annotations[ARGO_OUTPUTS_ANNOTATION_KEY])
# Caches (not expected to be persistent)
# These caches are only used to prevent race conditions. Race conditions happen because the writer can see multiple versions of a K8s object before the applied labels show up.
# They are expected to be lost when restarting the service.
# The operation of the Metadata Writer remains correct even if it's getting restarted frequently. (Kubernetes only sends the latest version of resource for new watchers.)
# Technically, we could remove the objects from cache as soon as we see that our labels have been applied successfully.
pod_name_to_execution_id = {}
workflow_name_to_context_id = {}
pods_with_written_metadata = set()
while True:
print("Start watching Kubernetes Pods created by Argo or Tekton")
try:
for event in k8s_watch.stream(
k8s_api.list_namespaced_pod,
namespace=namespace_to_watch,
label_selector=PIPELINE_LABEL_KEY,
timeout_seconds=1800, # Sometimes watch gets stuck
_request_timeout=2000, # Sometimes HTTP GET gets stuck
):
obj = event['object']
print('Kubernetes Pod event: ', event['type'], obj.metadata.name, obj.metadata.resource_version)
if event['type'] == 'ERROR':
print(event)
pod_name = obj.metadata.name
# Logging pod changes for debugging
with open('/tmp/pod_' + obj.metadata.name + '_' + obj.metadata.resource_version, 'w') as f:
f.write(yaml.dump(obj.to_dict()))
assert obj.kind == 'Pod'
if METADATA_WRITTEN_LABEL_KEY in obj.metadata.labels:
continue
# Skip TFX pods - they have their own metadata writers
if is_tfx_pod(obj):
continue
pipeline_name = obj.metadata.labels[PIPELINE_LABEL_KEY] # Should exist due to initial filtering
template = get_component_template(obj)
template_name = template['name']
component_name = template_name
component_version = component_name
output_name_to_type = {}
if KFP_COMPONENT_SPEC_ANNOTATION_KEY in obj.metadata.annotations:
component_spec_text = obj.metadata.annotations[KFP_COMPONENT_SPEC_ANNOTATION_KEY]
component_spec = json.loads(component_spec_text)
component_spec_digest = hashlib.sha256(component_spec_text.encode()).hexdigest()
component_name = component_spec.get('name', component_name)
component_version = component_name + '@sha256=' + component_spec_digest
output_name_to_type = {output['name']: output.get('type', None) for output in component_spec.get('outputs', [])}
output_name_to_type = {output_name_to_argo(k): v for k, v in output_name_to_type.items() if v}
if obj.metadata.name in pod_name_to_execution_id:
execution_id = pod_name_to_execution_id[obj.metadata.name]
context_id = workflow_name_to_context_id[pipeline_name]
elif METADATA_EXECUTION_ID_LABEL_KEY in obj.metadata.labels:
execution_id = int(obj.metadata.labels[METADATA_EXECUTION_ID_LABEL_KEY])
context_id = int(obj.metadata.labels[METADATA_CONTEXT_ID_LABEL_KEY])
print('Found execution id: {}, context id: {} for pod {}.'.format(execution_id, context_id, obj.metadata.name))
else:
run_context = get_or_create_run_context(
store=mlmd_store,
run_id=pipeline_name, # We can switch to internal run IDs once backend starts adding them
)
# Adding new execution to the database
execution = create_new_execution_in_existing_run_context(
store=mlmd_store,
context_id=run_context.id,
execution_type_name=KFP_EXECUTION_TYPE_NAME_PREFIX + component_version,
pod_name=pod_name,
pipeline_name=pipeline_name,
run_id=pipeline_name,
instance_id=component_name,
)
input_artifacts = template.get('inputs', {}).get('artifacts', [])
input_artifact_ids = []
for input_artifact in input_artifacts:
artifact_uri = artifact_to_uri(input_artifact)
if not artifact_uri:
continue
if PIPELINE_RUNTIME == "tekton":
input_name = input_artifact.get('name', '')
input_prefix = template_name + '-'
input_name = input_name[len(input_prefix):]
else:
input_name = input_artifact.get('path', '') # Every artifact should have a path in Argo
input_artifact_path_prefix = '/tmp/inputs/'
input_artifact_path_postfix = '/data'
if input_name.startswith(input_artifact_path_prefix):
input_name = input_name[len(input_artifact_path_prefix):]
if input_name.endswith(input_artifact_path_postfix):
input_name = input_name[0: -len(input_artifact_path_postfix)]
artifact = link_execution_to_input_artifact(
store=mlmd_store,
execution_id=execution.id,
uri=artifact_uri,
input_name=input_name,
)
if artifact is None:
# TODO: Maybe there is a better way to handle missing upstream artifacts
continue
input_artifact_ids.append(dict(
id=artifact.id,
name=input_name,
uri=artifact.uri,
))
print('Found Input Artifact: ' + str(dict(
input_name=input_name,
id=artifact.id,
uri=artifact.uri,
)))
execution_id = execution.id
context_id = run_context.id
obj.metadata.labels[METADATA_EXECUTION_ID_LABEL_KEY] = execution_id
obj.metadata.labels[METADATA_CONTEXT_ID_LABEL_KEY] = context_id
metadata_to_add = {
'labels': {
METADATA_EXECUTION_ID_LABEL_KEY: str(execution_id),
METADATA_CONTEXT_ID_LABEL_KEY: str(context_id),
},
'annotations': {
METADATA_INPUT_ARTIFACT_IDS_ANNOTATION_KEY: json.dumps(input_artifact_ids),
},
}
patch_pod_metadata(
namespace=obj.metadata.namespace,
pod_name=obj.metadata.name,
patch=metadata_to_add,
)
pod_name_to_execution_id[obj.metadata.name] = execution_id
workflow_name_to_context_id[pipeline_name] = context_id
print('New execution id: {}, context id: {} for pod {}.'.format(execution_id, context_id, obj.metadata.name))
print('Execution: ' + str(dict(
context_id=context_id,
context_name=pipeline_name,
execution_id=execution_id,
execution_name=obj.metadata.name,
component_name=component_name,
)))
# TODO: Log input parameters as execution options.
# Unfortunately, DSL compiler loses the information about inputs and their arguments.
if (
obj.metadata.name not in pods_with_written_metadata
and (
obj.metadata.labels.get(ARGO_COMPLETED_LABEL_KEY, 'false') == 'true'
or ARGO_OUTPUTS_ANNOTATION_KEY in obj.metadata.annotations
or TEKTON_OUTPUT_ARTIFACT_ANNOTATION_KEY in obj.metadata.annotations
)
):
artifact_ids = []
if ARGO_OUTPUTS_ANNOTATION_KEY in obj.metadata.annotations or TEKTON_OUTPUT_ARTIFACT_ANNOTATION_KEY in obj.metadata.annotations: # Should be present
outputs = get_output_template(obj)
pipeline_output_artifacts = {}
# Add logging as output artifacts if enabled.
archive_logs = os.environ.get('ARCHIVE_LOGS', 'false')
if archive_logs.lower() == 'true':
main_log_artifact = {
"name": "main-log",
"path": "/var/log/containers/",
"s3": {
"bucket": obj.metadata.annotations.get(TEKTON_BUCKET_ARTIFACT_ANNOTATION_KEY, "mlpipeline"),
"key": "artifacts/%s/%s/main-log.tgz" % (obj.metadata.labels[TEKTON_PIPELINERUN_LABEL_KEY],
obj.metadata.labels[TEKTON_PIPELINETASK_LABEL_KEY])
}
}
pipeline_output_artifacts['main-log'] = main_log_artifact
for artifact in outputs.get('artifacts', []):
art_name = artifact['name']
output_prefix = template_name + '-'
if art_name.startswith(output_prefix):
art_name = art_name[len(output_prefix):]
pipeline_output_artifacts[art_name] = artifact
output_artifacts = []
for name, art in pipeline_output_artifacts.items():
artifact_uri = artifact_to_uri(art)
if not artifact_uri:
continue
artifact_type_name = output_name_to_type.get(name, 'NoType') # Cannot be None or ''
print('Adding Output Artifact: ' + str(dict(
output_name=name,
uri=artifact_uri,
type=artifact_type_name,
)))
artifact = create_new_output_artifact(
store=mlmd_store,
execution_id=execution_id,
context_id=context_id,
uri=artifact_uri,
type_name=artifact_type_name,
output_name=name,
#run_id='Context_' + str(context_id) + '_run',
run_id=pipeline_name,
argo_artifact=art,
)
artifact_ids.append(dict(
id=artifact.id,
name=name,
uri=artifact_uri,
type=artifact_type_name,
))
metadata_to_add = {
'labels': {
METADATA_WRITTEN_LABEL_KEY: 'true',
},
'annotations': {
METADATA_OUTPUT_ARTIFACT_IDS_ANNOTATION_KEY: json.dumps(artifact_ids),
},
}
patch_pod_metadata(
namespace=obj.metadata.namespace,
pod_name=obj.metadata.name,
patch=metadata_to_add,
)
pods_with_written_metadata.add(obj.metadata.name)
except Exception as e:
import traceback
print(traceback.format_exc())
| []
| []
| [
"PIPELINE_RUNTIME",
"NAMESPACE_TO_WATCH",
"ARCHIVE_LOGS"
]
| [] | ["PIPELINE_RUNTIME", "NAMESPACE_TO_WATCH", "ARCHIVE_LOGS"] | python | 3 | 0 | |
mmic_md_gmx/components/gmx_compute_component.py | # Import models
from ..models import InputComputeGmx, OutputComputeGmx
from cmselemental.util.decorators import classproperty
# Import components
from mmic_cmd.components import CmdComponent
from mmic.components.blueprints import GenericComponent
from typing import Dict, Any, List, Tuple, Optional
from pathlib import Path
import os
import shutil
import tempfile
import ntpath
__all__ = ["ComputeGmxComponent"]
class ComputeGmxComponent(GenericComponent):
@classproperty
def input(cls):
return InputComputeGmx
@classproperty
def output(cls):
return OutputComputeGmx
@classproperty
def version(cls) -> str:
"""Finds program, extracts version, returns normalized version string.
Returns
-------
str
Return a valid, safe python version string.
"""
return ""
def execute(
self,
inputs: InputComputeGmx,
extra_outfiles: Optional[List[str]] = None,
extra_commands: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
timeout: Optional[int] = None,
) -> Tuple[bool, OutputComputeGmx]:
# Call gmx pdb2gmx, mdrun, etc. here
if isinstance(inputs, dict):
inputs = self.input(**inputs)
# Extract info from ComputeGmxInput
proc_input, mdp_file, gro_file, top_file = (
inputs.proc_input,
inputs.mdp_file,
inputs.molecule,
inputs.forcefield,
)
tpr_file = tempfile.NamedTemporaryFile(suffix=".tpr").name
        # The tpr file's name must be defined outside of the input builders
input_model = {
"proc_input": proc_input,
"mdp_file": mdp_file,
"gro_file": gro_file,
"top_file": top_file,
"tpr_file": tpr_file,
}
clean_files, cmd_input_grompp = self.build_input_grompp(input_model)
rvalue = CmdComponent.compute(cmd_input_grompp)
grompp_scratch_dir = [str(rvalue.scratch_directory)]
self.cleanup(clean_files) # Del mdp and top file in the working dir
self.cleanup([inputs.scratch_dir])
input_model = {"proc_input": proc_input, "tpr_file": tpr_file}
cmd_input_mdrun = self.build_input_mdrun(input_model)
rvalue = CmdComponent.compute(cmd_input_mdrun)
self.cleanup([tpr_file, gro_file])
self.cleanup(grompp_scratch_dir)
return True, self.parse_output(rvalue.dict(), proc_input)
@staticmethod
def cleanup(remove: List[str]):
for item in remove:
if os.path.isdir(item):
shutil.rmtree(item)
elif os.path.isfile(item):
os.remove(item)
def build_input_grompp(
self,
inputs: Dict[str, Any],
config: Optional["TaskConfig"] = None,
template: Optional[str] = None,
) -> Dict[str, Any]:
"""
Build the input for grompp
"""
assert inputs["proc_input"].engine == "gmx", "Engine must be gmx (Gromacs)!"
env = os.environ.copy()
if config:
env["MKL_NUM_THREADS"] = str(config.ncores)
env["OMP_NUM_THREADS"] = str(config.ncores)
scratch_directory = config.scratch_directory if config else None
tpr_file = inputs["tpr_file"]
clean_files = []
clean_files.append(inputs["mdp_file"])
clean_files.append(inputs["top_file"])
cmd = [
inputs["proc_input"].engine,
"grompp",
"-f",
inputs["mdp_file"],
"-c",
inputs["gro_file"],
"-p",
inputs["top_file"],
"-o",
tpr_file,
"-maxwarn",
"-1",
]
outfiles = [tpr_file]
return (
clean_files,
{
"command": cmd,
"as_binary": [tpr_file],
"infiles": [inputs["mdp_file"], inputs["gro_file"], inputs["top_file"]],
"outfiles": outfiles,
"outfiles_track": outfiles,
"scratch_directory": scratch_directory,
"environment": env,
"scratch_messy": True,
},
)
def build_input_mdrun(
self,
inputs: Dict[str, Any],
config: Optional["TaskConfig"] = None,
template: Optional[str] = None,
) -> Dict[str, Any]:
env = os.environ.copy()
if config:
env["MKL_NUM_THREADS"] = str(config.ncores)
env["OMP_NUM_THREADS"] = str(config.ncores)
scratch_directory = config.scratch_directory if config else None
log_file = tempfile.NamedTemporaryFile(suffix=".log").name
trr_file = tempfile.NamedTemporaryFile(suffix=".trr").name
edr_file = tempfile.NamedTemporaryFile(suffix=".edr").name
gro_file = tempfile.NamedTemporaryFile(suffix=".gro").name
tpr_file = inputs["tpr_file"]
tpr_fname = ntpath.basename(tpr_file)
cmd = [
inputs["proc_input"].engine, # Should here be gmx_mpi?
"mdrun",
"-s",
tpr_file, # input
"-o",
trr_file, # output
"-c",
gro_file, # output
"-e",
edr_file, # output
"-g",
log_file, # output
]
outfiles = [trr_file, gro_file, edr_file, log_file]
# For extra args
if inputs["proc_input"].keywords:
for key, val in inputs["proc_input"].keywords.items():
if val:
cmd.extend([key, val])
else:
cmd.extend([key])
return {
"command": cmd,
"as_binary": [tpr_fname, trr_file, edr_file],
"infiles": [tpr_file],
"outfiles": outfiles,
"outfiles_track": outfiles,
"scratch_directory": scratch_directory,
"environment": env,
"scratch_messy": True,
}
def parse_output(
self, output: Dict[str, str], inputs: Dict[str, Any]
) -> OutputComputeGmx:
# stdout = output["stdout"]
# stderr = output["stderr"]
outfiles = output["outfiles"]
scratch_dir = str(output["scratch_directory"])
traj, conf, energy, log = outfiles.keys()
# Deal with energy, log files later ... ?
self.cleanup([energy, log])
return self.output(
proc_input=inputs, molecule=conf, trajectory=traj, scratch_dir=scratch_dir
)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
selenium-junit4-examples/src/test/java/com/saucedemo/SimpleVisualE2ETest.java | package com.saucedemo;
import org.junit.Before;
import org.junit.Test;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.remote.CapabilityType;
import org.openqa.selenium.remote.RemoteWebDriver;
import java.net.URL;
import java.util.Map;
import static org.junit.Assert.assertEquals;
public class SimpleVisualE2ETest {
protected WebDriver webDriver;
public String sauceUsername = System.getenv("SAUCE_USERNAME");
public String sauceAccessKey = System.getenv("SAUCE_ACCESS_KEY");
public String screenerApiKey = System.getenv("SCREENER_API_KEY");
@Before
public void setUp() throws Exception {
MutableCapabilities capabilities = new MutableCapabilities();
capabilities.setCapability(CapabilityType.BROWSER_NAME, "chrome");
capabilities.setCapability(CapabilityType.BROWSER_VERSION, "latest");
capabilities.setCapability(CapabilityType.PLATFORM_NAME, "Windows 10");
MutableCapabilities sauceOptions = new MutableCapabilities();
sauceOptions.setCapability("username", sauceUsername);
sauceOptions.setCapability("accesskey", sauceAccessKey);
capabilities.setCapability("sauce:options", sauceOptions);
MutableCapabilities visualOptions = new MutableCapabilities();
visualOptions.setCapability("apiKey", screenerApiKey);
visualOptions.setCapability("projectName", "visual-e2e-test");
visualOptions.setCapability("viewportSize", "1280x1024");
capabilities.setCapability("sauce:visual", visualOptions);
URL url = new URL("https://hub.screener.io/wd/hub");
webDriver = new RemoteWebDriver(url, capabilities);
}
@Test
public void testVisualE2E() {
webDriver.get("https://screener.io");
JavascriptExecutor js = (JavascriptExecutor) webDriver;
js.executeScript("/*@visual.init*/", "My Visual Test 2");
js.executeScript("/*@visual.snapshot*/", "Home");
Map<String, Object> response = (Map<String, Object>) js.executeScript("/*@visual.end*/");
        assertEquals(true, response.get("passed"));
}
}
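// Illustrative sketch (not part of the original test): releasing the remote session
// after each test is a common addition; the @After hook below is an assumed example
// (it would also need an `import org.junit.After;`), not something defined in this file.
//
// @After
// public void tearDown() {
//     if (webDriver != null) {
//         webDriver.quit();
//     }
// }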
| [
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\"",
"\"SCREENER_API_KEY\""
]
| []
| [
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY",
"SCREENER_API_KEY"
]
| [] | ["SAUCE_USERNAME", "SAUCE_ACCESS_KEY", "SCREENER_API_KEY"] | java | 3 | 0 | |
vendor/github.com/openshift/origin/pkg/generate/app/sourcelookup.go | package app
import (
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/golang/glog"
s2iapi "github.com/openshift/source-to-image/pkg/api"
s2igit "github.com/openshift/source-to-image/pkg/scm/git"
s2iutil "github.com/openshift/source-to-image/pkg/util"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/validation"
buildapi "github.com/openshift/origin/pkg/build/api"
"github.com/openshift/origin/pkg/generate"
"github.com/openshift/origin/pkg/generate/git"
"github.com/openshift/origin/pkg/generate/source"
)
type Dockerfile interface {
AST() *parser.Node
Contents() string
}
func NewDockerfileFromFile(path string) (Dockerfile, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
if len(data) == 0 {
return nil, fmt.Errorf("Dockerfile %q is empty", path)
}
return NewDockerfile(string(data))
}
func NewDockerfile(contents string) (Dockerfile, error) {
if len(contents) == 0 {
return nil, errors.New("Dockerfile is empty")
}
node, err := parser.Parse(strings.NewReader(contents))
if err != nil {
return nil, err
}
return dockerfileContents{node, contents}, nil
}
type dockerfileContents struct {
ast *parser.Node
contents string
}
func (d dockerfileContents) AST() *parser.Node {
return d.ast
}
func (d dockerfileContents) Contents() string {
return d.contents
}
// IsPossibleSourceRepository checks whether the provided string is a source repository or not
func IsPossibleSourceRepository(s string) bool {
return IsRemoteRepository(s) || isDirectory(s)
}
// IsRemoteRepository checks whether the provided string is a remote repository or not
func IsRemoteRepository(s string) bool {
if !s2igit.New(s2iutil.NewFileSystem()).ValidCloneSpecRemoteOnly(s) {
glog.V(5).Infof("%s is not a valid remote git clone spec", s)
return false
}
url, err := url.Parse(s)
if err != nil {
glog.V(5).Infof("%s is not a valid url: %v", s, err)
return false
}
url.Fragment = ""
gitRepo := git.NewRepository()
if _, _, err := gitRepo.ListRemote(url.String()); err != nil {
glog.V(5).Infof("could not list git remotes for %s: %v", s, err)
return false
}
glog.V(5).Infof("%s is a valid remote git repository", s)
return true
}
// SourceRepository represents a code repository that may be the target of a build.
type SourceRepository struct {
location string
url url.URL
localDir string
remoteURL *url.URL
contextDir string
secrets []buildapi.SecretBuildSource
info *SourceRepositoryInfo
sourceImage ComponentReference
sourceImageFrom string
sourceImageTo string
usedBy []ComponentReference
strategy generate.Strategy
ignoreRepository bool
binary bool
forceAddDockerfile bool
requiresAuth bool
}
// NewSourceRepository creates a reference to a local or remote source code repository from
// a URL or path.
func NewSourceRepository(s string, strategy generate.Strategy) (*SourceRepository, error) {
location, err := git.ParseRepository(s)
if err != nil {
return nil, err
}
return &SourceRepository{
location: s,
url: *location,
strategy: strategy,
}, nil
}
// NewSourceRepositoryWithDockerfile creates a reference to a local source code repository with
// the provided relative Dockerfile path (defaults to "Dockerfile").
func NewSourceRepositoryWithDockerfile(s, dockerfilePath string) (*SourceRepository, error) {
r, err := NewSourceRepository(s, generate.StrategyDocker)
if err != nil {
return nil, err
}
if len(dockerfilePath) == 0 {
dockerfilePath = "Dockerfile"
}
f, err := NewDockerfileFromFile(filepath.Join(s, dockerfilePath))
if err != nil {
return nil, err
}
if r.info == nil {
r.info = &SourceRepositoryInfo{}
}
r.info.Dockerfile = f
return r, nil
}
// NewSourceRepositoryForDockerfile creates a source repository that is set up to use
// the contents of a Dockerfile as the input of the build.
func NewSourceRepositoryForDockerfile(contents string) (*SourceRepository, error) {
s := &SourceRepository{
ignoreRepository: true,
strategy: generate.StrategyDocker,
}
err := s.AddDockerfile(contents)
return s, err
}
// NewBinarySourceRepository creates a source repository that is configured for binary
// input.
func NewBinarySourceRepository(strategy generate.Strategy) *SourceRepository {
return &SourceRepository{
binary: true,
ignoreRepository: true,
strategy: strategy,
}
}
// TODO: this doesn't really match the others - this should likely be a different type of
// object that is associated with a build or component.
func NewImageSourceRepository(compRef ComponentReference, from, to string) *SourceRepository {
return &SourceRepository{
sourceImage: compRef,
sourceImageFrom: from,
sourceImageTo: to,
ignoreRepository: true,
location: compRef.Input().From,
strategy: generate.StrategySource,
}
}
// UsedBy sets up which component uses the source repository
func (r *SourceRepository) UsedBy(ref ComponentReference) {
r.usedBy = append(r.usedBy, ref)
}
// Remote checks whether the source repository is remote
func (r *SourceRepository) Remote() bool {
return r.url.Scheme != "file"
}
// InUse checks if the source repository is in use
func (r *SourceRepository) InUse() bool {
return len(r.usedBy) > 0
}
// SetStrategy sets the source repository strategy
func (r *SourceRepository) SetStrategy(strategy generate.Strategy) {
r.strategy = strategy
}
// GetStrategy returns the source repository strategy
func (r *SourceRepository) GetStrategy() generate.Strategy {
return r.strategy
}
func (r *SourceRepository) String() string {
return r.location
}
// Detect clones source locally if not already local and runs code detection
// with the given detector.
func (r *SourceRepository) Detect(d Detector, dockerStrategy bool) error {
if r.info != nil {
return nil
}
path, err := r.LocalPath()
if err != nil {
return err
}
r.info, err = d.Detect(path, dockerStrategy)
if err != nil {
return err
}
if err = r.DetectAuth(); err != nil {
return err
}
return nil
}
// SetInfo sets the source repository info. This is to facilitate certain tests.
func (r *SourceRepository) SetInfo(info *SourceRepositoryInfo) {
r.info = info
}
// Info returns the source repository info generated on code detection
func (r *SourceRepository) Info() *SourceRepositoryInfo {
return r.info
}
// LocalPath returns the local path of the source repository
func (r *SourceRepository) LocalPath() (string, error) {
if len(r.localDir) > 0 {
return r.localDir, nil
}
switch {
case r.url.Scheme == "file":
r.localDir = filepath.Join(r.url.Path, r.contextDir)
default:
gitRepo := git.NewRepository()
var err error
if r.localDir, err = ioutil.TempDir("", "gen"); err != nil {
return "", err
}
localURL, ref := cloneURLAndRef(&r.url)
r.localDir, err = CloneAndCheckoutSources(gitRepo, localURL.String(), ref, r.localDir, r.contextDir)
if err != nil {
return "", err
}
}
return r.localDir, nil
}
func cloneURLAndRef(url *url.URL) (*url.URL, string) {
localURL := *url
ref := localURL.Fragment
localURL.Fragment = ""
return &localURL, ref
}
// DetectAuth returns an error if the source repository cannot be cloned
// without the current user's environment. The following changes are made to the
// environment:
// 1) The HOME directory is set to a temporary dir to avoid loading any settings in .gitconfig
// 2) The GIT_SSH variable is set to /dev/null so the regular SSH keys are not used
// (changing the HOME directory is not enough).
// 3) GIT_CONFIG_NOSYSTEM prevents git from loading system-wide config
// 4) GIT_ASKPASS to prevent git from prompting for a user/password
func (r *SourceRepository) DetectAuth() error {
url, ok, err := r.RemoteURL()
if err != nil {
return err
}
if !ok {
return nil // No auth needed, we can't find a remote URL
}
tempHome, err := ioutil.TempDir("", "githome")
if err != nil {
return err
}
defer os.RemoveAll(tempHome)
tempSrc, err := ioutil.TempDir("", "gen")
if err != nil {
return err
}
defer os.RemoveAll(tempSrc)
env := []string{
fmt.Sprintf("HOME=%s", tempHome),
"GIT_SSH=/dev/null",
"GIT_CONFIG_NOSYSTEM=true",
"GIT_ASKPASS=true",
}
if runtime.GOOS == "windows" {
env = append(env,
fmt.Sprintf("ProgramData=%s", os.Getenv("ProgramData")),
fmt.Sprintf("SystemRoot=%s", os.Getenv("SystemRoot")),
)
}
gitRepo := git.NewRepositoryWithEnv(env)
localURL, ref := cloneURLAndRef(url)
_, err = CloneAndCheckoutSources(gitRepo, localURL.String(), ref, tempSrc, "")
if err != nil {
r.requiresAuth = true
}
return nil
}
// RemoteURL returns the remote URL of the source repository
func (r *SourceRepository) RemoteURL() (*url.URL, bool, error) {
if r.remoteURL != nil {
return r.remoteURL, true, nil
}
switch r.url.Scheme {
case "file":
gitRepo := git.NewRepository()
remote, ok, err := gitRepo.GetOriginURL(r.url.Path)
if err != nil && err != git.ErrGitNotAvailable {
return nil, false, err
}
if !ok {
return nil, ok, nil
}
ref := gitRepo.GetRef(r.url.Path)
if len(ref) > 0 {
remote = fmt.Sprintf("%s#%s", remote, ref)
}
if r.remoteURL, err = git.ParseRepository(remote); err != nil {
return nil, false, err
}
default:
r.remoteURL = &r.url
}
return r.remoteURL, true, nil
}
// SetContextDir sets the context directory to use for the source repository
func (r *SourceRepository) SetContextDir(dir string) {
r.contextDir = dir
}
// ContextDir returns the context directory of the source repository
func (r *SourceRepository) ContextDir() string {
return r.contextDir
}
// Secrets returns the secrets
func (r *SourceRepository) Secrets() []buildapi.SecretBuildSource {
return r.secrets
}
// SetSourceImage sets the source(input) image for a repository
func (r *SourceRepository) SetSourceImage(c ComponentReference) {
r.sourceImage = c
}
// SetSourceImagePath sets the source/destination to use when copying from the SourceImage
func (r *SourceRepository) SetSourceImagePath(source, dest string) {
r.sourceImageFrom = source
r.sourceImageTo = dest
}
// AddDockerfile adds the Dockerfile contents to the SourceRepository and
// configure it to build with Docker strategy. Returns an error if the contents
// are invalid.
func (r *SourceRepository) AddDockerfile(contents string) error {
dockerfile, err := NewDockerfile(contents)
if err != nil {
return err
}
if r.info == nil {
r.info = &SourceRepositoryInfo{}
}
r.info.Dockerfile = dockerfile
r.SetStrategy(generate.StrategyDocker)
r.forceAddDockerfile = true
return nil
}
// AddBuildSecrets adds the defined secrets into a build. The input format for
// the secrets is "<secretName>:<destinationDir>". The destinationDir is
// optional and when not specified the default is the current working directory.
func (r *SourceRepository) AddBuildSecrets(secrets []string) error {
injections := s2iapi.VolumeList{}
r.secrets = []buildapi.SecretBuildSource{}
for _, in := range secrets {
if err := injections.Set(in); err != nil {
return err
}
}
secretExists := func(name string) bool {
for _, s := range r.secrets {
if s.Secret.Name == name {
return true
}
}
return false
}
for _, in := range injections {
if r.GetStrategy() == generate.StrategyDocker && filepath.IsAbs(in.Destination) {
return fmt.Errorf("for the docker strategy, the secret destination directory %q must be a relative path", in.Destination)
}
if len(validation.ValidateSecretName(in.Source, false)) != 0 {
return fmt.Errorf("the %q must be valid secret name", in.Source)
}
if secretExists(in.Source) {
return fmt.Errorf("the %q secret can be used just once", in.Source)
}
r.secrets = append(r.secrets, buildapi.SecretBuildSource{
Secret: kapi.LocalObjectReference{Name: in.Source},
DestinationDir: in.Destination,
})
}
return nil
}
// SourceRepositories is a list of SourceRepository objects
type SourceRepositories []*SourceRepository
func (rr SourceRepositories) String() string {
repos := []string{}
for _, r := range rr {
repos = append(repos, r.String())
}
return strings.Join(repos, ",")
}
// NotUsed returns the list of SourceRepositories that are not used
func (rr SourceRepositories) NotUsed() SourceRepositories {
notUsed := SourceRepositories{}
for _, r := range rr {
if !r.InUse() {
notUsed = append(notUsed, r)
}
}
return notUsed
}
// SourceRepositoryInfo contains info about a source repository
type SourceRepositoryInfo struct {
Path string
Types []SourceLanguageType
Dockerfile Dockerfile
Jenkinsfile bool
}
// Terms returns which languages the source repository was
// built with
func (info *SourceRepositoryInfo) Terms() []string {
terms := []string{}
for i := range info.Types {
terms = append(terms, info.Types[i].Term())
}
return terms
}
// SourceLanguageType contains info about the type of the language
// a source repository is built in
type SourceLanguageType struct {
Platform string
Version string
}
// Term returns a search term for the given source language type
// the term will be in the form of language:version
func (t *SourceLanguageType) Term() string {
if len(t.Version) == 0 {
return t.Platform
}
return fmt.Sprintf("%s:%s", t.Platform, t.Version)
}
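// Illustrative example (not part of the original file): Term() joins platform and
// version only when a version was detected, e.g.
//   (&SourceLanguageType{Platform: "ruby", Version: "2.3"}).Term() == "ruby:2.3"
//   (&SourceLanguageType{Platform: "golang"}).Term() == "golang"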
// Detector is an interface for detecting information about a
// source repository
type Detector interface {
Detect(dir string, dockerStrategy bool) (*SourceRepositoryInfo, error)
}
// SourceRepositoryEnumerator implements the Detector interface
type SourceRepositoryEnumerator struct {
Detectors source.Detectors
DockerfileTester generate.Tester
JenkinsfileTester generate.Tester
}
// Detect extracts source code information about the provided source repository
func (e SourceRepositoryEnumerator) Detect(dir string, noSourceDetection bool) (*SourceRepositoryInfo, error) {
info := &SourceRepositoryInfo{
Path: dir,
}
// no point in doing source-type detection if the requested build strategy
// is docker or pipeline
if !noSourceDetection {
for _, d := range e.Detectors {
if detected := d(dir); detected != nil {
info.Types = append(info.Types, SourceLanguageType{
Platform: detected.Platform,
Version: detected.Version,
})
}
}
}
if path, ok, err := e.DockerfileTester.Has(dir); err == nil && ok {
dockerfile, err := NewDockerfileFromFile(path)
if err != nil {
return nil, err
}
info.Dockerfile = dockerfile
}
if _, ok, err := e.JenkinsfileTester.Has(dir); err == nil && ok {
info.Jenkinsfile = true
}
return info, nil
}
// StrategyAndSourceForRepository returns the build strategy and source code reference
// of the provided source repository
// TODO: user should be able to choose whether to download a remote source ref for
// more info
func StrategyAndSourceForRepository(repo *SourceRepository, image *ImageRef) (*BuildStrategyRef, *SourceRef, error) {
strategy := &BuildStrategyRef{
Base: image,
Strategy: repo.strategy,
}
source := &SourceRef{
Binary: repo.binary,
Secrets: repo.secrets,
RequiresAuth: repo.requiresAuth,
}
if repo.sourceImage != nil {
srcImageRef, err := InputImageFromMatch(repo.sourceImage.Input().ResolvedMatch)
if err != nil {
return nil, nil, err
}
source.SourceImage = srcImageRef
source.ImageSourcePath = repo.sourceImageFrom
source.ImageDestPath = repo.sourceImageTo
}
if (repo.ignoreRepository || repo.forceAddDockerfile) && repo.Info() != nil && repo.Info().Dockerfile != nil {
source.DockerfileContents = repo.Info().Dockerfile.Contents()
}
if !repo.ignoreRepository {
remoteURL, ok, err := repo.RemoteURL()
if err != nil {
return nil, nil, fmt.Errorf("cannot obtain remote URL for repository at %s", repo.location)
}
if ok {
source.URL = remoteURL
source.Ref = remoteURL.Fragment
} else {
source.Binary = true
}
source.ContextDir = repo.ContextDir()
}
return strategy, source, nil
}
// CloneAndCheckoutSources clones the remote repository using either regular
// git clone operation or shallow git clone, based on the "ref" provided (you
// cannot shallow clone using the 'ref').
// This function will return the full path to the buildable sources, including
// the context directory if specified.
func CloneAndCheckoutSources(repo git.Repository, remote, ref, localDir, contextDir string) (string, error) {
if len(ref) == 0 {
glog.V(5).Infof("No source ref specified, using shallow git clone")
if err := repo.CloneWithOptions(localDir, remote, git.Shallow, "--recursive"); err != nil {
return "", fmt.Errorf("shallow cloning repository %q to %q failed: %v", remote, localDir, err)
}
} else {
glog.V(5).Infof("Requested ref %q, performing full git clone and git checkout", ref)
if err := repo.Clone(localDir, remote); err != nil {
return "", fmt.Errorf("cloning repository %q to %q failed: %v", remote, localDir, err)
}
}
if len(ref) > 0 {
if err := repo.Checkout(localDir, ref); err != nil {
return "", fmt.Errorf("unable to checkout ref %q in %q repository: %v", ref, remote, err)
}
}
if len(contextDir) > 0 {
glog.V(5).Infof("Using context directory %q. The full source path is %q", contextDir, filepath.Join(localDir, contextDir))
}
return filepath.Join(localDir, contextDir), nil
}
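// Illustrative sketch (not part of the original file): a typical call, with the
// repository URL, ref, and directories assumed purely for this example:
//   gitRepo := git.NewRepository()
//   srcPath, err := CloneAndCheckoutSources(gitRepo,
//       "https://github.com/openshift/ruby-hello-world.git", "", // empty ref => shallow clone
//       "/tmp/build-src", "app")
//   if err != nil {
//       return err
//   }
//   // srcPath is "/tmp/build-src/app"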
| [
"\"ProgramData\"",
"\"SystemRoot\""
]
| []
| [
"SystemRoot",
"ProgramData"
]
| [] | ["SystemRoot", "ProgramData"] | go | 2 | 0 |