file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k)
---|---|---|---|
task2.py
|
# -*- coding: utf-8 -*-
import time
from celery_app import app
@app.task
@app.task(queue='test_celey_queue_multiply')
def
|
(x, y):
# time.sleep(0.02)
return x * y
|
multiply
|
periodic_config.go
|
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// data definitions that are used for the config file generation of periodic prow jobs
package main
import (
"bytes"
"encoding/gob"
"fmt"
"hash/fnv"
"log"
"strings"
"gopkg.in/yaml.v2"
)
const (
// Template for periodic test/release jobs.
periodicTestJob = "prow_periodic_test_job.yaml"
// Template for periodic custom jobs.
periodicCustomJob = "prow_periodic_custom_job.yaml"
// Cron strings for key jobs
goCoveragePeriodicJobCron = "0 1 * * *" // Run at 01:00 every day
recreatePerfClusterPeriodicJobCron = "30 07 * * *" // Run at 00:30PST every day (07:30 UTC)
updatePerfClusterPeriodicJobCron = "5 * * * *" // Run every hour
)
// periodicJobTemplateData contains data about a periodic Prow job.
type periodicJobTemplateData struct {
Base baseProwJobTemplateData
PeriodicJobName string
CronString string
PeriodicCommand []string
}
func (p periodicJobTemplateData) Clone() periodicJobTemplateData {
var r periodicJobTemplateData
var err error
buff := new(bytes.Buffer)
enc := gob.NewEncoder(buff)
dec := gob.NewDecoder(buff)
if err = enc.Encode(&p); err != nil {
panic(err)
}
if err = dec.Decode(&r); err != nil {
panic(err)
}
return r
}
func getUTCtime(i int) int {
r := i + 7
if r > 23 {
return r - 24
}
return r
}
func
|
(str ...string) int {
h := fnv.New32a()
for _, s := range str {
h.Write([]byte(s))
}
return int(h.Sum32()) % 60
}
// generateCron generates a cron string based on the job type. The minute offset is derived
// from the job name instead of assigning a random value, to ensure consistency among runs;
// timeout is used to determine how many hours apart runs are scheduled.
func generateCron(jobType, jobName, repoName string, timeout int) string {
minutesOffset := calculateMinuteOffset(jobType, jobName)
// Determines hourly job interval based on timeout
hours := int((timeout+5)/60) + 1 // Allow at least 5 minutes between runs
hourCron := fmt.Sprintf("%d * * * *", minutesOffset)
if hours > 1 {
hourCron = fmt.Sprintf("%d */%d * * *", minutesOffset, hours)
}
daily := func(pacificHour int) string {
return fmt.Sprintf("%d %d * * *", minutesOffset, getUTCtime(pacificHour))
}
weekly := func(pacificHour, dayOfWeek int) string {
return fmt.Sprintf("%d %d * * %d", minutesOffset, getUTCtime(pacificHour), dayOfWeek)
}
var res string
switch jobType {
case "continuous", "custom-job", "auto-release": // As much as every hour
res = hourCron
case "branch-ci":
res = daily(1) // 1 AM
case "nightly":
res = daily(2) // 2 AM
case "dot-release":
if strings.HasSuffix(repoName, "-operator") {
// Every Tuesday noon
res = weekly(12, 2)
} else {
// Every Tuesday 2 AM
res = weekly(2, 2)
}
default:
log.Printf("job type not supported for cron generation '%s'", jobName)
}
return res
}
// generatePeriodic generates periodic job configs for the given repo and configuration.
// Normally it generates one job per call,
// but for a continuous or branch-ci job it also generates a second job for beta testing of new prow-tests images
func generatePeriodic(title string, repoName string, periodicConfig yaml.MapSlice) {
var data periodicJobTemplateData
data.Base = newbaseProwJobTemplateData(repoName)
jobNameSuffix := ""
jobTemplate := readTemplate(periodicTestJob)
jobType := ""
isContinuousJob := false
project := data.Base.OrgName
repo := data.Base.RepoName
// Parse the input yaml and set data values based on it
for i, item := range periodicConfig {
jobName := getString(item.Key)
switch jobName {
case "continuous":
if !getBool(item.Value) {
return
}
jobType = getString(item.Key)
jobNameSuffix = "continuous"
isContinuousJob = true
// Use default command and arguments if none given.
if data.Base.Command == "" {
data.Base.Command = presubmitScript
}
if len(data.Base.Args) == 0 {
data.Base.Args = allPresubmitTests
}
data.Base.Timeout = 180
case "nightly":
if !getBool(item.Value) {
return
}
jobType = getString(item.Key)
jobNameSuffix = "nightly-release"
data.Base.ServiceAccount = nightlyAccount
data.Base.Command = releaseScript
data.Base.Args = releaseNightly
data.Base.Timeout = 180
case "branch-ci":
if !getBool(item.Value) {
return
}
jobType = getString(item.Key)
jobNameSuffix = "continuous"
isContinuousJob = true
data.Base.Command = releaseScript
data.Base.Args = releaseLocal
setupDockerInDockerForJob(&data.Base)
data.Base.Timeout = 180
case "dot-release", "auto-release":
if !getBool(item.Value) {
return
}
jobType = getString(item.Key)
jobNameSuffix = getString(item.Key)
data.Base.ServiceAccount = releaseAccount
data.Base.Command = releaseScript
data.Base.Args = []string{
"--" + jobNameSuffix,
"--release-gcs " + data.Base.ReleaseGcs,
"--release-gcr gcr.io/knative-releases",
"--github-token /etc/hub-token/token"}
addVolumeToJob(&data.Base, "/etc/hub-token", "hub-token", true, nil)
// For dot-release and auto-release jobs, set ORG_NAME env var if the org name is not knative, as it's needed by release.sh
if data.Base.OrgName != "knative" {
data.Base.addEnvToJob("ORG_NAME", data.Base.OrgName)
}
data.Base.Timeout = 180
case "custom-job":
jobType = getString(item.Key)
jobNameSuffix = getString(item.Value)
data.Base.Timeout = 120
case "cron":
data.CronString = getString(item.Value)
case "release":
version := getString(item.Value)
jobNameSuffix = version + "-" + jobNameSuffix
data.Base.RepoBranch = "release-" + version
if jobType == "dot-release" {
data.Base.Args = append(data.Base.Args, "--branch release-"+version)
}
default:
continue
}
// Knock-out the item, signalling it was already parsed.
periodicConfig[i] = yaml.MapItem{}
testgroupExtras := getTestgroupExtras(project, jobName)
data.Base.Annotations = generateProwJobAnnotations(repo, jobName, testgroupExtras)
}
parseBasicJobConfigOverrides(&data.Base, periodicConfig)
data.PeriodicJobName = fmt.Sprintf("ci-%s", data.Base.RepoNameForJob)
if jobNameSuffix != "" {
data.PeriodicJobName += "-" + jobNameSuffix
}
if data.CronString == "" {
data.CronString = generateCron(jobType, data.PeriodicJobName, data.Base.RepoName, data.Base.Timeout)
}
// Ensure required data exist.
if data.CronString == "" {
logFatalf("Job %q is missing cron string", data.PeriodicJobName)
}
if len(data.Base.Args) == 0 && data.Base.Command == "" {
logFatalf("Job %q is missing command", data.PeriodicJobName)
}
if jobType == "branch-ci" && data.Base.RepoBranch == "" {
logFatalf("%q jobs are intended to be used on release branches", jobType)
}
// Generate config itself.
data.PeriodicCommand = createCommand(data.Base)
if data.Base.ServiceAccount != "" {
data.Base.addEnvToJob("GOOGLE_APPLICATION_CREDENTIALS", data.Base.ServiceAccount)
data.Base.addEnvToJob("E2E_CLUSTER_REGION", "us-central1")
}
if data.Base.RepoBranch != "" && data.Base.RepoBranch != "master" {
// If it's a release version, add env var PULL_BASE_REF as ref name of the base branch.
// The reason for having it is in https://github.com/knative/test-infra/issues/780.
data.Base.addEnvToJob("PULL_BASE_REF", data.Base.RepoBranch)
}
addExtraEnvVarsToJob(extraEnvVars, &data.Base)
configureServiceAccountForJob(&data.Base)
data.Base.DecorationConfig = []string{fmt.Sprintf("timeout: %dm", data.Base.Timeout)}
// This is where the data actually gets written out
executeJobTemplate("periodic", jobTemplate, title, repoName, data.PeriodicJobName, false, data)
// If the job is a continuous run, add a duplicate for pre-release testing of the new prow-tests image.
// It will (mostly) run less often than the source job.
if isContinuousJob {
betaData := data.Clone()
// Change the name and image
betaData.PeriodicJobName += "-beta-prow-tests"
betaData.Base.Image = strings.ReplaceAll(betaData.Base.Image, ":stable", ":beta")
// Run 2 or 3 times a day because prow-tests beta testing has different desired interval than the underlying job
hours := []int{getUTCtime(1), getUTCtime(4)}
if jobType == "continuous" { // as opposed to branch-ci
// These jobs run 8-24 times per day, so it matters more if they break
// So test them slightly more often
hours = append(hours, getUTCtime(15))
}
var hoursStr []string
for _, h := range hours {
hoursStr = append(hoursStr, fmt.Sprint(h))
}
betaData.CronString = fmt.Sprintf("%d %s * * *",
calculateMinuteOffset(jobType, betaData.PeriodicJobName),
strings.Join(hoursStr, ","))
// Write out our duplicate job
executeJobTemplate("periodic", jobTemplate, title, repoName, betaData.PeriodicJobName, false, betaData)
// Setup TestGrid here
// Each job becomes one of "test_groups"
// Then we want our own "dashboard" separate from others
// With each one of the jobs (aka "test_groups") in the single dashboard group
metaData.AddNonAlignedTest(NonAlignedTestGroup{
DashboardGroup: "prow-tests",
DashboardName: "beta-prow-tests",
HumanTabName: data.PeriodicJobName, // this is purposefully not betaData, so the display name is the original CI job name
CIJobName: betaData.PeriodicJobName,
BaseOptions: testgridTabSortByFailures,
Extra: nil,
})
}
}
// generateGoCoveragePeriodic generates the go coverage periodic job config for the given repo (configuration is ignored).
func generateGoCoveragePeriodic(title string, repoName string, _ yaml.MapSlice) {
var repo *repositoryData
// Find a repository entry where repo name matches and Go Coverage is enabled
for i, repoI := range repositories {
if repoName != repoI.Name || !repoI.EnableGoCoverage {
continue
}
repo = &repositories[i]
break
}
if repo != nil && repo.EnableGoCoverage {
repo.Processed = true
var data periodicJobTemplateData
data.Base = newbaseProwJobTemplateData(repoName)
data.PeriodicJobName = fmt.Sprintf("ci-%s-go-coverage", data.Base.RepoNameForJob)
data.CronString = goCoveragePeriodicJobCron
data.Base.GoCoverageThreshold = repo.GoCoverageThreshold
data.Base.Command = "runner.sh"
data.Base.Args = []string{
"coverage",
"--artifacts=$(ARTIFACTS)",
fmt.Sprintf("--cov-threshold-percentage=%d", data.Base.GoCoverageThreshold)}
data.Base.ServiceAccount = ""
data.Base.ExtraRefs = append(data.Base.ExtraRefs, " base_ref: "+data.Base.RepoBranch)
addExtraEnvVarsToJob(extraEnvVars, &data.Base)
addMonitoringPubsubLabelsToJob(&data.Base, data.PeriodicJobName)
configureServiceAccountForJob(&data.Base)
executeJobTemplate("periodic go coverage", readTemplate(periodicCustomJob), title, repoName, data.PeriodicJobName, false, data)
betaData := data.Clone()
// Change the name and image
betaData.PeriodicJobName += "-beta-prow-tests"
betaData.Base.Image = strings.ReplaceAll(betaData.Base.Image, ":stable", ":beta")
// Run once a day because prow-tests beta testing has different desired interval than the underlying job
betaData.CronString = fmt.Sprintf("%d %s * * *",
calculateMinuteOffset("go-coverage", betaData.PeriodicJobName),
fmt.Sprint(getUTCtime(0)))
// Write out our duplicate job
executeJobTemplate("periodic go coverage", readTemplate(periodicCustomJob), title, repoName, betaData.PeriodicJobName, false, betaData)
// Setup TestGrid here
// Each job becomes one of "test_groups"
// Then we want our own "dashboard" separate from others
// With each one of the jobs (aka "test_groups") in the single dashboard group
extras := make(map[string]string)
extras["short_text_metric"] = "coverage"
metaData.AddNonAlignedTest(NonAlignedTestGroup{
DashboardGroup: "prow-tests",
DashboardName: "beta-prow-tests",
HumanTabName: data.PeriodicJobName, // this is purposefully not betaData, so the display name is the original CI job name
CIJobName: betaData.PeriodicJobName,
BaseOptions: testgridTabGroupByDir,
Extra: extras,
})
}
}
|
calculateMinuteOffset
|
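For readers skimming the periodic_config.go entry above: the masked `calculateMinuteOffset` helper hashes the job type and name into a stable minute slot, and `generateCron` folds that slot into a cron expression. A minimal, self-contained sketch of that idea follows; the job name and timeout below are illustrative values only, not taken from the excerpt.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// minuteOffset hashes its inputs with FNV-32a and maps the digest to a
// 0-59 minute slot, so regenerating the config yields the same schedule.
func minuteOffset(str ...string) int {
	h := fnv.New32a()
	for _, s := range str {
		h.Write([]byte(s))
	}
	return int(h.Sum32()) % 60
}

func main() {
	// Hypothetical job type and name, only for illustration.
	offset := minuteOffset("continuous", "ci-example-repo-continuous")
	// With a 180-minute timeout: (180+5)/60 + 1 = 4, i.e. run every 4 hours.
	hours := (180+5)/60 + 1
	fmt.Printf("%d */%d * * *\n", offset, hours)
}
```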
tf2error.py
|
from typing import Final
from alpyro_msgs import RosMessage, string, uint8
class
|
(RosMessage):
__msg_typ__ = "tf2_msgs/TF2Error"
__msg_def__ = "dWludDggTk9fRVJST1I9MAp1aW50OCBMT09LVVBfRVJST1I9MQp1aW50OCBDT05ORUNUSVZJVFlfRVJST1I9Mgp1aW50OCBFWFRSQVBPTEFUSU9OX0VSUk9SPTMKdWludDggSU5WQUxJRF9BUkdVTUVOVF9FUlJPUj00CnVpbnQ4IFRJTUVPVVRfRVJST1I9NQp1aW50OCBUUkFOU0ZPUk1fRVJST1I9Ngp1aW50OCBlcnJvcgpzdHJpbmcgZXJyb3Jfc3RyaW5nCgo="
__md5_sum__ = "bc6848fd6fd750c92e38575618a4917d"
NO_ERROR: Final[uint8] = 0
LOOKUP_ERROR: Final[uint8] = 1
CONNECTIVITY_ERROR: Final[uint8] = 2
EXTRAPOLATION_ERROR: Final[uint8] = 3
INVALID_ARGUMENT_ERROR: Final[uint8] = 4
TIMEOUT_ERROR: Final[uint8] = 5
TRANSFORM_ERROR: Final[uint8] = 6
error: uint8
error_string: string
|
TF2Error
|
container_top_test.go
|
package client // import "moby/client"
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"reflect"
"strings"
"testing"
"moby/api/types/container"
)
func TestContainerTopError(t *testing.T) {
client := &Client{
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
}
_, err := client.ContainerTop(context.Background(), "nothing", []string{})
if err == nil || err.Error() != "Error response from daemon: Server error" {
t.Fatalf("expected a Server Error, got %v", err)
}
}
func TestContainerTop(t *testing.T) {
expectedURL := "/containers/container_id/top"
expectedProcesses := [][]string{
{"p1", "p2"},
{"p3"},
}
expectedTitles := []string{"title1", "title2"}
client := &Client{
client: newMockClient(func(req *http.Request) (*http.Response, error) {
if !strings.HasPrefix(req.URL.Path, expectedURL) {
return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
}
query := req.URL.Query()
|
return nil, fmt.Errorf("args not set in URL query properly. Expected 'arg1 arg2', got %v", args)
}
b, err := json.Marshal(container.ContainerTopOKBody{
Processes: [][]string{
{"p1", "p2"},
{"p3"},
},
Titles: []string{"title1", "title2"},
})
if err != nil {
return nil, err
}
return &http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(b)),
}, nil
}),
}
processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expectedProcesses, processList.Processes) {
t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes)
}
if !reflect.DeepEqual(expectedTitles, processList.Titles) {
t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles)
}
}
|
args := query.Get("ps_args")
if args != "arg1 arg2" {
|
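The container_top_test.go entry relies on a `newMockClient` helper that is not part of the excerpt. Below is a rough sketch of how such a mock is commonly built in Go, assuming it simply wraps a request-handling function as an `http.RoundTripper`; the type name, URL, and query values are illustrative only.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// roundTripFunc lets a plain function act as an http.RoundTripper,
// which is the usual way test suites stub out an API client.
type roundTripFunc func(*http.Request) (*http.Response, error)

func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

func main() {
	client := &http.Client{
		Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
			// Echo the ps_args query parameter back as the response body.
			return &http.Response{
				StatusCode: http.StatusOK,
				Header:     make(http.Header),
				Body:       io.NopCloser(bytes.NewBufferString(req.URL.Query().Get("ps_args"))),
			}, nil
		}),
	}
	// example.invalid is never contacted; the fake transport answers directly.
	resp, err := client.Get("http://example.invalid/containers/container_id/top?ps_args=arg1+arg2")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // arg1 arg2
}
```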
ListEx1.py
|
def
|
(sentence, n):
# Only proceed if n is positive
if n > 0:
# Only proceed if n is not more than the number of words
words = sentence.split()
if n <= len(words):
return (words[n-1])
return ("")
print(get_word("This is a lesson about lists", 4)) # Should print: lesson
print(get_word("This is a lesson about lists", -4)) # Nothing
print(get_word("Now we are cooking!", 1)) # Should print: Now
print(get_word("Now we are cooking!", 5)) # Nothing
|
get_word
|
clips-sizes-selector.component.ts
|
import { Component, OnInit, Output, EventEmitter, ViewChild } from '@angular/core';
import { ClipsSizesService } from './clips-sizes.service';
import { ClipsSelectorComponent } from '../clips-selector/clips-selector.component';
@Component({
selector: 'clips-sizes-selector',
styleUrls: ['clips-sizes-selector.component.css'],
templateUrl: 'clips-sizes-selector.component.html'
})
export class
|
implements OnInit {
public sizes: Object = {};
@Output() public change = new EventEmitter<any>()
@ViewChild(ClipsSelectorComponent) private selector;
constructor(private sizesService: ClipsSizesService) { }
public whenReady() {
return this.selector.whenReady();
}
public handleChange(event) {
this.change.next({
size: event.item,
});
}
public ngOnInit() {
this.sizes = this.sizesService.getAvailableSizes();
this.selector.ready();
}
public select(index) {
this.selector.select(this.sizes[index]);
}
public random() {
this.selector.random();
}
}
|
ClipsSizesSelectorComponent
|
scene.js
|
/*jslint sloppy: true */
var settings = require("./settings.js");
module.exports = Scene;
/**
* Renders game to the canvas.
*/
function Scene(container) {
var ctx = container.getContext('2d'),
cachedHeight = container.height,
cachedWidth = container.width,
getWidth = function () {
return cachedWidth;
},
getHeight = function () {
return cachedHeight;
},
renderHandle = function (color, handle) {
ctx.fillStyle = color;
ctx.fillRect(handle.x, handle.y, handle.width, handle.height);
},
clearBackground = function (color) {
ctx.fillStyle = color;
ctx.fillRect(0, 0, getWidth(), getHeight());
},
renderBall = function (color, ball) {
ctx.beginPath();
ctx.fillStyle = color;
ctx.arc(ball.x, ball.y, settings.ballradius, 0, 2 * Math.PI, false);
ctx.fill();
|
width: function () {
return getWidth();
},
height: function () {
return getHeight();
},
render: function (leftHandle, rightHandle, ball) {
clearBackground('#000000');
renderHandle('#ccaa00', leftHandle);
renderHandle('#00aa00', rightHandle);
renderBall('#ffffff', ball);
}
};
}
|
ctx.stroke();
};
return {
|
channels.go
|
package version
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
)
// Channels provides an interface to interact with a set of release channels.
// This module is also responsible for online retrieval of the latest release
// versions.
type Channels struct {
array []channelVersion
}
const (
versionCheckURL = "https://versioncheck.linkerd.io/version.json?version=%s&uuid=%s&source=%s"
)
// NewChannels is used primarily for testing; it returns a Channels struct that
// mimics a GetLatestVersions response.
func NewChannels(channel string) (Channels, error) {
cv, err := parseChannelVersion(channel)
if err != nil {
return Channels{}, err
}
return Channels{
array: []channelVersion{cv},
}, nil
}
// Match validates whether the given version string:
// 1) is a well-formed channel-version string, for example: "edge-19.1.2"
// 2) references a known channel
// 3) matches the version in the known channel
func (c Channels) Match(actualVersion string) error {
if actualVersion == "" {
return errors.New("actual version is empty")
}
actual, err := parseChannelVersion(actualVersion)
if err != nil {
return fmt.Errorf("failed to parse actual version: %s", err)
}
for _, cv := range c.array {
if cv.channel == actual.channel {
return match(cv.String(), actualVersion)
}
}
return fmt.Errorf("unsupported version channel: %s", actualVersion)
}
// GetLatestVersions performs an online request to check for the latest Linkerd
// release channels.
func GetLatestVersions(ctx context.Context, uuid string, source string) (Channels, error) {
url := fmt.Sprintf(versionCheckURL, Version, uuid, source)
return getLatestVersions(ctx, http.DefaultClient, url)
}
func getLatestVersions(ctx context.Context, client *http.Client, url string) (Channels, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return Channels{}, err
}
rsp, err := client.Do(req.WithContext(ctx))
if err != nil {
return Channels{}, err
}
defer rsp.Body.Close()
if rsp.StatusCode != 200
|
bytes, err := ioutil.ReadAll(rsp.Body)
if err != nil {
return Channels{}, err
}
var versionRsp map[string]string
err = json.Unmarshal(bytes, &versionRsp)
if err != nil {
return Channels{}, err
}
channels := Channels{}
for c, v := range versionRsp {
cv, err := parseChannelVersion(v)
if err != nil {
return Channels{}, fmt.Errorf("unexpected versioncheck response: %s", err)
}
if c != cv.channel {
return Channels{}, fmt.Errorf("unexpected versioncheck response: channel in %s does not match %s", cv, c)
}
channels.array = append(channels.array, cv)
}
return channels, nil
}
|
{
return Channels{}, fmt.Errorf("unexpected versioncheck response: %s", rsp.Status)
}
|
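In the channels.go entry, `parseChannelVersion` is referenced but not included in the excerpt. Purely to illustrate the channel-version format described in the `Match` comment ("edge-19.1.2"), here is a guessed, minimal parser; the real implementation and type may differ.

```go
package main

import (
	"fmt"
	"strings"
)

// channelVersion mirrors the fields implied by Channels.Match above;
// this is an assumption, not the library's actual type.
type channelVersion struct {
	channel string
	version string
}

func (cv channelVersion) String() string { return cv.channel + "-" + cv.version }

// parseChannelVersion splits "edge-19.1.2" into channel "edge" and version "19.1.2".
func parseChannelVersion(s string) (channelVersion, error) {
	parts := strings.SplitN(s, "-", 2)
	if len(parts) != 2 {
		return channelVersion{}, fmt.Errorf("unsupported version format: %q", s)
	}
	return channelVersion{channel: parts[0], version: parts[1]}, nil
}

func main() {
	cv, err := parseChannelVersion("edge-19.1.2")
	fmt.Println(cv, err) // edge-19.1.2 <nil>
}
```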
views.py
|
# import the necessary packages
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse,HttpResponse
import numpy as np
import urllib
import json
import cv2
import os
from .face import dog_ear
from glob import glob
from .forms import ImgForm,UrlForm
import base64
import requests
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing import image
from keras.models import load_model
import io
import tensorflow as tf
from PIL import Image
graph = tf.get_default_graph()
# define ResNet50 model
dog_names = [item[9:-1] for item in sorted(glob("test/*/"))]
ResNet50_model = ResNet50(weights='imagenet')
InceptionV3_model=load_model('dog/saved_models/weights.best.InceptionV3.hdf5')
# define the path to the face detector
FACE_DETECTOR_PATH = r"{base_path}/haarcascades/haarcascade_frontalface_alt.xml".format(
base_path=os.path.abspath(os.path.dirname(__file__)))
def main(request):
con={'form1':ImgForm,'form2':UrlForm}
return render(request,'main.html',con)
@csrf_exempt
def detect(request):
# initialize the data dictionary to be returned by the request
global graph
with graph.as_default():
data = {"success": False}
# check to see if this is a post request
if request.method == "POST":
# check to see if an image was uploaded
if request.FILES.get("image", None) is not None:
# grab the uploaded image
image,dog = _grab_image(stream=request.FILES["image"])
ad=request.POST.get("overlay", None)
# otherwise, assume that a URL was passed in
else:
# grab the URL from the request
url = request.POST.get("url", None)
ad=request.POST.get("overlay", None)
# if the URL is None, then return an error
if url is None:
data["error"] = "No URL provided."
|
return JsonResponse(data)
# load the image and convert
image,dog = _grab_image(url=url)
# convert the image to grayscale, load the face cascade detector,
# and detect faces in the image
img = cv2.cvtColor(dog_ear(image,ad), cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
detector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
rects = detector.detectMultiScale(image)
# construct a list of bounding boxes from the detection
rects = [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in rects]
response=imgenc(img,rects)
# if len(rects)<2:
# breed = InceptionV3_predict_breed(img2)
# update the data dictionary with the faces detected
data.update({"num_faces": len(rects), "faces": rects, "success": True,"dog":str(dog),"img":response,'breed':"breed"})
return render(request,'main.html',data)
# return a JSON response
# return JsonResponse(data)
def _grab_image(path=None, stream=None, url=None):
# if the path is not None, then load the image from disk
if path is not None:
image = cv2.imread(path)
# otherwise, the image does not reside on disk
else:
# if the URL is not None, then download the image
if url is not None:
resp = urllib.request.urlopen(url)
data = resp.read()
# if the stream is not None, then the image has been uploaded
elif stream is not None:
data = stream.read()
# convert the image to a NumPy array and then read it into
# OpenCV format
image = np.asarray(bytearray(data), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
img = preprocess_input(path_to_tensor(image))
prediction = np.argmax(ResNet50_model.predict(img))
# boolean: whether a dog is present in the image
dog=((prediction <= 268) & (prediction >= 151))
# return the image and the bool dog
return image,dog
def imgenc(image,rects):
# for (startX, startY, endX, endY) in rects:
# cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)
# r = 300.0 / image.shape[1]
# dim = (300, int(image.shape[0] * r))
# # perform the actual resizing of the image and show it
# resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
CDF=Image.fromarray(image)
in_mem_file=io.BytesIO()
CDF.save(in_mem_file, format = "PNG")
# reset file pointer to start
in_mem_file.seek(0)
img_bytes = in_mem_file.read()
base64_encoded_result_bytes = base64.b64encode(img_bytes)
base64_encoded_result_str = base64_encoded_result_bytes.decode('ascii')
return "data:image/png;base64,{0} ".format(base64_encoded_result_str)
def path_to_tensor(image):
# resize the shape of image
image2 =cv2.resize(image, (224,224), interpolation = cv2.INTER_AREA)
# change the data type to float to be accepted
image2 = image2.astype(np.float32)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(image2, axis=0)
def extract_InceptionV3(tensor):
return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor))
def InceptionV3_predict_breed(image):
# extract bottleneck features
bottleneck_feature = extract_InceptionV3(path_to_tensor(image))
# obtain predicted vector
predicted_vector = InceptionV3_model.predict(bottleneck_feature)
# return dog breed that is predicted by the model
return dog_names[np.argmax(predicted_vector)]
| |
format_state.rs
|
use std::{
collections::BTreeMap,
io::{self, prelude::*},
|
use pueue_lib::{network::protocol::GenericStream, settings::Settings, task::Task};
use crate::{
cli::SubCommand,
display::{colors::Colors, print_state},
};
/// This function tries to read a map or list of JSON serialized [Task]s from `stdin`.
/// The tasks will then get deserialized and displayed as a normal `status` command.
/// The current group information is pulled from the daemon in a new `status` call.
pub async fn format_state(
stream: &mut GenericStream,
command: &SubCommand,
colors: &Colors,
settings: &Settings,
) -> Result<()> {
// Read the raw input to a buffer
let mut stdin = io::stdin();
let mut buffer = Vec::new();
stdin
.read_to_end(&mut buffer)
.context("Failed to read json from stdin.")?;
// Convert it to a valid utf8 stream. If this fails, it cannot be valid JSON.
let json = String::from_utf8(buffer).context("Failed to convert stdin input to UTF8")?;
// Try to deserialize the input as a map of tasks first.
// If this doesn't work, try a list of tasks.
let map_deserialize = serde_json::from_str::<BTreeMap<usize, Task>>(&json);
let tasks: Vec<Task> = if let Ok(map) = map_deserialize {
map.into_iter().map(|(_, task)| task).collect()
} else {
serde_json::from_str(&json).context("Failed to deserialize from JSON input.")?
};
let state = super::get_state(stream)
.await
.context("Failed to get the current state from daemon")?;
print_state(state, tasks, command, colors, settings);
Ok(())
}
|
};
use anyhow::{Context, Result};
|
keeper.ts
|
/**
This will probably move to its own repo at some point but easier to keep it here for now
*/
import * as os from 'os';
import * as fs from 'fs';
import { MangoClient } from './client';
import {
Account,
Commitment,
Connection,
PublicKey,
Transaction,
} from '@solana/web3.js';
import { getMultipleAccounts, zeroKey } from './utils';
import configFile from './ids.json';
import { Cluster, Config } from './config';
import {
makeCachePerpMarketsInstruction,
makeCachePricesInstruction,
makeCacheRootBankInstruction,
makeUpdateFundingInstruction,
makeUpdateRootBankInstruction,
} from './instruction';
import BN from 'bn.js';
import { PerpEventQueueLayout } from './layout';
import { MangoGroup, PerpMarket, promiseUndef } from '.';
import PerpEventQueue from './PerpEventQueue';
let lastRootBankCacheUpdate = 0;
const groupName = process.env.GROUP || 'mainnet.1';
const updateCacheInterval = parseInt(
process.env.UPDATE_CACHE_INTERVAL || '3000',
);
const updateRootBankCacheInterval = parseInt(
process.env.UPDATE_ROOT_BANK_CACHE_INTERVAL || '5000',
);
const processKeeperInterval = parseInt(
process.env.PROCESS_KEEPER_INTERVAL || '10000',
);
const consumeEventsInterval = parseInt(
process.env.CONSUME_EVENTS_INTERVAL || '1000',
);
const maxUniqueAccounts = parseInt(process.env.MAX_UNIQUE_ACCOUNTS || '10');
|
: true;
const cluster = (process.env.CLUSTER || 'mainnet') as Cluster;
const config = new Config(configFile);
const groupIds = config.getGroup(cluster, groupName);
if (!groupIds) {
throw new Error(`Group ${groupName} not found`);
}
const mangoProgramId = groupIds.mangoProgramId;
const mangoGroupKey = groupIds.publicKey;
const payer = new Account(
JSON.parse(
process.env.KEYPAIR ||
fs.readFileSync(os.homedir() + '/.config/solana/blw.json', 'utf-8'),
),
);
const connection = new Connection(
process.env.ENDPOINT_URL || config.cluster_urls[cluster],
'processed' as Commitment,
);
const client = new MangoClient(connection, mangoProgramId);
async function main() {
if (!groupIds) {
throw new Error(`Group ${groupName} not found`);
}
const mangoGroup = await client.getMangoGroup(mangoGroupKey);
const perpMarkets = await Promise.all(
groupIds.perpMarkets.map((m) => {
return mangoGroup.loadPerpMarket(
connection,
m.marketIndex,
m.baseDecimals,
m.quoteDecimals,
);
}),
);
processUpdateCache(mangoGroup);
processKeeperTransactions(mangoGroup, perpMarkets);
if (consumeEvents) {
processConsumeEvents(mangoGroup, perpMarkets);
}
}
console.time('processUpdateCache');
async function processUpdateCache(mangoGroup: MangoGroup) {
console.timeEnd('processUpdateCache');
try {
const batchSize = 8;
const promises: Promise<string>[] = [];
const rootBanks = mangoGroup.tokens
.map((t) => t.rootBank)
.filter((t) => !t.equals(zeroKey));
const oracles = mangoGroup.oracles.filter((o) => !o.equals(zeroKey));
const perpMarkets = mangoGroup.perpMarkets
.filter((pm) => !pm.isEmpty())
.map((pm) => pm.perpMarket);
const nowTs = Date.now();
let shouldUpdateRootBankCache = false;
if (nowTs - lastRootBankCacheUpdate > updateRootBankCacheInterval) {
shouldUpdateRootBankCache = true;
lastRootBankCacheUpdate = nowTs;
}
for (let i = 0; i < rootBanks.length / batchSize; i++) {
const startIndex = i * batchSize;
const endIndex = i * batchSize + batchSize;
const cacheTransaction = new Transaction();
if (shouldUpdateRootBankCache) {
cacheTransaction.add(
makeCacheRootBankInstruction(
mangoProgramId,
mangoGroup.publicKey,
mangoGroup.mangoCache,
rootBanks.slice(startIndex, endIndex),
),
);
}
cacheTransaction.add(
makeCachePricesInstruction(
mangoProgramId,
mangoGroup.publicKey,
mangoGroup.mangoCache,
oracles.slice(startIndex, endIndex),
),
);
cacheTransaction.add(
makeCachePerpMarketsInstruction(
mangoProgramId,
mangoGroup.publicKey,
mangoGroup.mangoCache,
perpMarkets.slice(startIndex, endIndex),
),
);
if (cacheTransaction.instructions.length > 0) {
promises.push(client.sendTransaction(cacheTransaction, payer, []));
}
}
Promise.all(promises).catch((err) => {
console.error('Error updating cache', err);
});
} finally {
console.time('processUpdateCache');
setTimeout(processUpdateCache, updateCacheInterval, mangoGroup);
}
}
async function processConsumeEvents(
mangoGroup: MangoGroup,
perpMarkets: PerpMarket[],
) {
try {
const eventQueuePks = perpMarkets.map((mkt) => mkt.eventQueue);
const eventQueueAccts = await getMultipleAccounts(
connection,
eventQueuePks,
);
const perpMktAndEventQueue = eventQueueAccts.map(
({ publicKey, accountInfo }) => {
const parsed = PerpEventQueueLayout.decode(accountInfo?.data);
const eventQueue = new PerpEventQueue(parsed);
const perpMarket = perpMarkets.find((mkt) =>
mkt.eventQueue.equals(publicKey),
);
if (!perpMarket) {
throw new Error('PerpMarket not found');
}
return { perpMarket, eventQueue };
},
);
const promises: Promise<string | void>[] = perpMktAndEventQueue.map(
({ perpMarket, eventQueue }) => {
const events = eventQueue.getUnconsumedEvents();
if (events.length === 0) {
// console.log('No events to consume');
return promiseUndef();
}
const accounts: Set<string> = new Set();
for (const event of events) {
if (event.fill) {
accounts.add(event.fill.maker.toBase58());
accounts.add(event.fill.taker.toBase58());
} else if (event.out) {
accounts.add(event.out.owner.toBase58());
}
// Limit unique accounts to first 20 or 21
if (accounts.size >= maxUniqueAccounts) {
break;
}
}
return client
.consumeEvents(
mangoGroup,
perpMarket,
Array.from(accounts)
.map((s) => new PublicKey(s))
.sort(),
payer,
consumeEventsLimit,
)
.then(() => {
console.log(
`Consumed up to ${
events.length
} events ${perpMarket.publicKey.toBase58()}`,
);
console.log(
'EVENTS:',
events.map((e) => e?.fill?.seqNum.toString()),
);
})
.catch((err) => {
console.error('Error consuming events', err);
});
},
);
Promise.all(promises);
} finally {
setTimeout(
processConsumeEvents,
consumeEventsInterval,
mangoGroup,
perpMarkets,
);
}
}
async function processKeeperTransactions(
mangoGroup: MangoGroup,
perpMarkets: PerpMarket[],
) {
try {
if (!groupIds) {
throw new Error(`Group ${groupName} not found`);
}
console.log('processKeeperTransactions');
const batchSize = 8;
const promises: Promise<string>[] = [];
const filteredPerpMarkets = perpMarkets.filter(
(pm) => !pm.publicKey.equals(zeroKey),
);
for (let i = 0; i < groupIds.tokens.length / batchSize; i++) {
const startIndex = i * batchSize;
const endIndex = i * batchSize + batchSize;
const updateRootBankTransaction = new Transaction();
groupIds.tokens.slice(startIndex, endIndex).forEach((token) => {
updateRootBankTransaction.add(
makeUpdateRootBankInstruction(
mangoProgramId,
mangoGroup.publicKey,
mangoGroup.mangoCache,
token.rootKey,
token.nodeKeys,
),
);
});
const updateFundingTransaction = new Transaction();
filteredPerpMarkets.slice(startIndex, endIndex).forEach((market) => {
if (market) {
updateFundingTransaction.add(
makeUpdateFundingInstruction(
mangoProgramId,
mangoGroup.publicKey,
mangoGroup.mangoCache,
market.publicKey,
market.bids,
market.asks,
),
);
}
});
if (updateRootBankTransaction.instructions.length > 0) {
promises.push(
client.sendTransaction(updateRootBankTransaction, payer, []),
);
}
if (updateFundingTransaction.instructions.length > 0) {
promises.push(
client.sendTransaction(updateFundingTransaction, payer, []),
);
}
}
Promise.all(promises).catch((err) => {
console.error('Error processing keeper instructions', err);
});
} finally {
setTimeout(
processKeeperTransactions,
processKeeperInterval,
mangoGroup,
perpMarkets,
);
}
}
main();
|
const consumeEventsLimit = new BN(process.env.CONSUME_EVENTS_LIMIT || '10');
const consumeEvents = process.env.CONSUME_EVENTS
? process.env.CONSUME_EVENTS === 'true'
|
hook.test.ts
|
/**
* @jest-environment jsdom
*/
import "../../__mocks__/game";
import "../../__mocks__/form-application";
import "../../__mocks__/application";
import "../../__mocks__/handlebars";
import "../../__mocks__/event";
import "../../__mocks__/crypto";
import "../../__mocks__/dialog";
import "../../__mocks__/hooks";
import SimpleCalendar from "./simple-calendar";
import Year from "./year";
import Month from "./month";
import Hook from "./hook";
import {SimpleCalendarHooks} from "../constants";
import Mock = jest.Mock;
import Moon from "./moon";
describe('Hook Tests', () => {
let y: Year;
beforeEach(()=>{
jest.spyOn(console, 'error').mockImplementation();
SimpleCalendar.instance = new SimpleCalendar();
y = new Year(0);
y.months.push(new Month('M', 1, 0, 5));
y.months.push(new Month('T', 2, 0, 15));
y.selectedYear = 0;
y.visibleYear = 0;
y.months[0].current = true;
y.months[0].selected = true;
y.months[0].visible = true;
y.months[0].days[0].current = true;
y.months[0].days[0].selected = true;
y.moons.push(new Moon('Moon', 10));
(<Mock>console.error).mockClear();
});
test('Emit Date/Time Change', ()=>{
|
Hook.emit(SimpleCalendarHooks.DateTimeChange);
expect(console.error).toHaveBeenCalledTimes(1);
Hook.emit(SimpleCalendarHooks.ClockStartStop);
expect(console.error).toHaveBeenCalledTimes(2);
SimpleCalendar.instance = new SimpleCalendar();
SimpleCalendar.instance.activeCalendar.year = y;
Hook.emit(SimpleCalendarHooks.DateTimeChange);
expect(console.error).toHaveBeenCalledTimes(2);
expect(Hooks.callAll).toHaveBeenCalledTimes(1);
y.months[0].days[0].current = false;
Hook.emit(SimpleCalendarHooks.DateTimeChange);
expect(console.error).toHaveBeenCalledTimes(2);
expect(Hooks.callAll).toHaveBeenCalledTimes(2);
y.months[0].current = false;
Hook.emit(SimpleCalendarHooks.DateTimeChange);
expect(console.error).toHaveBeenCalledTimes(2);
expect(Hooks.callAll).toHaveBeenCalledTimes(3);
Hook.emit(SimpleCalendarHooks.ClockStartStop);
expect(console.error).toHaveBeenCalledTimes(2);
expect(Hooks.callAll).toHaveBeenCalledTimes(4);
Hook.emit(SimpleCalendarHooks.PrimaryGM);
expect(console.error).toHaveBeenCalledTimes(2);
expect(Hooks.callAll).toHaveBeenCalledTimes(5);
//@ts-ignore
Hook.emit('asd');
expect(console.error).toHaveBeenCalledTimes(2);
expect(Hooks.callAll).toHaveBeenCalledTimes(6);
});
});
|
// @ts-ignore
SimpleCalendar.instance = null;
|
BF.py
|
""" This is a dummy file used only to avoid errors in ReadTheDocs. The real BF.py is created during the setup once swig is run. """
def CP():
pass
def LeP():
pass
def LaP():
pass
def HoPpro():
pass
def HoPphy():
pass
def FS():
pass
def ELMReLU():
pass
def ELMSigmoid():
pass
def ELMTanh():
pass
def ELMSin():
pass
def ELMSwish():
pass
def nCP():
pass
def nLeP():
pass
def nFS():
pass
def nELMReLU():
pass
def nELMSigmoid():
pass
def nELMTanh():
pass
def nELMSin():
pass
|
pass
|
def nELMSwish():
|
benchmarks.py
|
# -*- coding: iso-8859-1 -*-
# (c) 2009-2014 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Benchmark suite for WsgiDAV.
This test suite uses davclient to generate WebDAV requests.
A first collection of ideas
===========================
- The result is printable HTML, copy/pastable
- It also contains date, environment info (Hardware, package versions, ...)
- The suite can be run stand-alone against a running WsgiDAV server, just like
litmus.
- It uses `davclient` and generates an HTML file.
- There should be detailed results as well as a few summarizing numbers:
('Total time', 'Byte reads per second', 'Byte write per second', or something
like this), so one can compare benchmarks at a glance.
- Optional parameters allow running only a single test
- A parameter allows passing configuration info that is dumped with the result:
benchEnviron = {
"comment": "Test with caching enabled",
"server_os": "Ubuntu 9.01",
"server_cpu": "Intel 3GHz",
"server_ram": "2GB",
"wsgidav_version": "0.4.b1"
"network_bandwidth": "100MBit",
>> these can be automatically set?:
"client_os": "Windows XP",
"client_cpu": "AMD 5000",
"date": now()
}
- Allow printing profiling info (from the WsgiDAV server and from the benchmark client!)
- The result file could also contain the results of test suites ('PASSED'),
so we could use it as documentation for tests on different platforms/setups.
Questions
=========
- is lxml really faster?
- compare this to mod_dav's performance
Test cases
==========
|
- GET 100 x 1 kB
- 100 x PROPFIND depth 0
- 1 x PROPFIND depth infinity
- COPY: big file, many small files, big tree
- MOVE: big file, many small files, big tree
- DELETE: big file, many small files, big tree
- LOCK
- UNLOCK
- Check if locked
- PROPPATCH
- PROPFIND: depth 0, many small files
depth infinity
- run litmus in a timed script
- Simulate typical Windows Client request sequences:
- dir browsing
- file reading
- file editing
- http://groups.google.com/group/paste-users/t/b2afc88a86caade1?hl=en
use httperf
http://www.hpl.hp.com/research/linux/httperf/httperf-man-0.9.txt
and openwebload
http://openwebload.sourceforge.net/index.html
- makeTree(roofolderName="/bench", folderCount=10, subfolderCount=10, fileCount=10, fileSize=1024)
Big tree with 100 folders and 1000 files
bench/
folder1/
..
folder10/
subfolder10-1/
..
subfolder10-10/
file10-10-1.txt -> 1k
"""
import logging
_benchmarks = [#"proppatch_many",
#"proppatch_big",
#"proppatch_deep",
"test_scripted",
]
def _real_run_bench(bench, opts):
if bench == "*":
for bench in _benchmarks:
run_bench(bench, opts)
return
assert bench in _benchmarks
if bench == "test_scripted":
from avax.webdav.tests import test_scripted
test_scripted.main()
else:
raise ValueError()
def run_bench(bench, opts):
profile_benchmarks = opts["profile_benchmarks"]
if bench in profile_benchmarks:
# http://docs.python.org/library/profile.html#module-cProfile
import cProfile, pstats, StringIO
prof = cProfile.Profile()
prof = prof.runctx("_real_run_bench(bench, opts)", globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
# stats.sort_stats("time") # Or cumulative
stats.sort_stats("cumulative") # Or time
stats.print_stats(80) # 80 = how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
logging.warning("Profile data for '%s':\n%s" % (bench, stream.getvalue()))
else:
_real_run_bench(bench, opts)
def bench_all(opts):
run_bench("*", opts)
def main():
opts = {"num": 10,
"profile_benchmarks": ["*"],
}
bench_all(opts)
if __name__ == "__main__":
main()
|
- PUT 1 x 10 MB
- PUT 100 x 1 kB
- GET 1 x 10 MB
|
versions.go
|
package status
import (
"encoding/json"
"io/ioutil"
"net/http"
"regexp"
"strings"
"github.com/hashicorp/go-version"
kversion "k8s.io/apimachinery/pkg/version"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/kiali/kiali/config"
"github.com/kiali/kiali/kubernetes"
"github.com/kiali/kiali/log"
)
type externalService func() (*ExternalServiceInfo, error)
var (
// Example Maistra product version is:
// [email protected]/maistra-0.1.0-1-3a136c90ec5e308f236e0d7ebb5c4c5e405217f4-unknown
// Example Maistra upstream project version is:
// [email protected]:8888/openshift-istio-tech-preview-0.1.0-1-3a136c90ec5e308f236e0d7ebb5c4c5e405217f4-Custom
// Example Istio snapshot version is:
// [email protected]/istio-release-1.0-20180927-21-10-cbe9c05c470ec1924f7bcf02334b183e7e6175cb-Clean
maistraProductVersionExpr = regexp.MustCompile("maistra-([0-9]+\\.[0-9]+\\.[0-9]+)")
maistraProjectVersionExpr = regexp.MustCompile("openshift-istio.*-([0-9]+\\.[0-9]+\\.[0-9]+)")
istioVersionExpr = regexp.MustCompile("([0-9]+\\.[0-9]+\\.[0-9]+)")
istioSnapshotVersionExpr = regexp.MustCompile("istio-release-([0-9]+\\.[0-9]+)(-[0-9]{8})")
)
func getVersions() {
components := []externalService{
istioVersion,
prometheusVersion,
kubernetesVersion,
}
if config.Get().ExternalServices.Jaeger.URL != "" {
components = append(components, jaegerVersion)
}
if config.Get().ExternalServices.Grafana.URL != "" {
components = append(components, grafanaVersion)
}
for _, comp := range components {
getVersionComponent(comp)
}
}
func getVersionComponent(serviceComponent externalService) {
componentInfo, err := serviceComponent()
if err == nil {
info.ExternalServices = append(info.ExternalServices, *componentInfo)
}
}
func validateVersion(istioReq string, installedVersion string) bool {
reqWords := strings.Split(istioReq, " ")
requirementV, errReqV := version.NewVersion(reqWords[1])
installedV, errInsV := version.NewVersion(installedVersion)
if errReqV != nil || errInsV != nil {
return false
}
switch operator := reqWords[0]; operator {
case "==":
return installedV.Equal(requirementV)
case ">=":
return installedV.GreaterThan(requirementV) || installedV.Equal(requirementV)
case ">":
return installedV.GreaterThan(requirementV)
case "<=":
return installedV.LessThan(requirementV) || installedV.Equal(requirementV)
case "<":
return installedV.LessThan(requirementV)
}
return false
}
func
|
() (*ExternalServiceInfo, error) {
var (
body []byte
err error
product *ExternalServiceInfo
resp *http.Response
)
istioConfig := config.Get().ExternalServices.Istio
resp, err = http.Get(istioConfig.UrlServiceVersion)
if err == nil {
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
if err == nil {
rawVersion := string(body)
product, err = parseIstioRawVersion(rawVersion)
return product, err
}
}
return nil, err
}
func parseIstioRawVersion(rawVersion string) (*ExternalServiceInfo, error) {
product := ExternalServiceInfo{Name: "Unknown", Version: "Unknown"}
// First see if we detect Maistra (either product or upstream project).
// If it is not Maistra, see if it is upstream Istio (either a release or snapshot).
// If it is neither then it is some unknown Istio implementation that we do not support.
maistraVersionStringArr := maistraProductVersionExpr.FindStringSubmatch(rawVersion)
if maistraVersionStringArr != nil {
log.Debugf("Detected Maistra product version [%v]", rawVersion)
if len(maistraVersionStringArr) > 1 {
product.Name = "Maistra"
product.Version = maistraVersionStringArr[1] // get regex group #1, which is the "#.#.#" version string
if !validateVersion(config.MaistraVersionSupported, product.Version) {
info.WarningMessages = append(info.WarningMessages, "Maistra version "+product.Version+" is not supported, the version should be "+config.MaistraVersionSupported)
}
// we know this is Maistra - either a supported or unsupported version - return now
return &product, nil
}
}
maistraVersionStringArr = maistraProjectVersionExpr.FindStringSubmatch(rawVersion)
if maistraVersionStringArr != nil {
log.Debugf("Detected Maistra project version [%v]", rawVersion)
if len(maistraVersionStringArr) > 1 {
product.Name = "Maistra Project"
product.Version = maistraVersionStringArr[1] // get regex group #1, which is the "#.#.#" version string
if !validateVersion(config.MaistraVersionSupported, product.Version) {
info.WarningMessages = append(info.WarningMessages, "Maistra project version "+product.Version+" is not supported, the version should be "+config.MaistraVersionSupported)
}
// we know this is Maistra - either a supported or unsupported version - return now
return &product, nil
}
}
// see if it is a released version of Istio
istioVersionStringArr := istioVersionExpr.FindStringSubmatch(rawVersion)
if istioVersionStringArr != nil {
log.Debugf("Detected Istio version [%v]", rawVersion)
if len(istioVersionStringArr) > 1 {
product.Name = "Istio"
product.Version = istioVersionStringArr[1] // get regex group #1, which is the "#.#.#" version string
if !validateVersion(config.IstioVersionSupported, product.Version) {
info.WarningMessages = append(info.WarningMessages, "Istio version "+product.Version+" is not supported, the version should be "+config.IstioVersionSupported)
}
// we know this is Istio upstream - either a supported or unsupported version - return now
return &product, nil
}
}
// see if it is a snapshot version of Istio
istioVersionStringArr = istioSnapshotVersionExpr.FindStringSubmatch(rawVersion)
if istioVersionStringArr != nil {
log.Debugf("Detected Istio snapshot version [%v]", rawVersion)
if len(istioVersionStringArr) > 2 {
product.Name = "Istio Snapshot"
majorMinor := istioVersionStringArr[1] // regex group #1 is the "#.#" version numbers
snapshotStr := istioVersionStringArr[2] // regex group #2 is the date/time stamp
product.Version = majorMinor + snapshotStr
if !validateVersion(config.IstioVersionSupported, majorMinor) {
info.WarningMessages = append(info.WarningMessages, "Istio snapshot version "+product.Version+" is not supported, the version should be "+config.IstioVersionSupported)
}
// we know this is Istio upstream - either a supported or unsupported version - return now
return &product, nil
}
}
log.Debugf("Detected unknown Istio implementation version [%v]", rawVersion)
product.Name = "Unknown Istio Implementation"
product.Version = rawVersion
info.WarningMessages = append(info.WarningMessages, "Unknown Istio implementation version "+product.Version+" is not recognized, thus not supported.")
return &product, nil
}
type p8sResponseVersion struct {
Version string `json:"version"`
Revision string `json:"revision"`
}
func jaegerVersion() (*ExternalServiceInfo, error) {
product := ExternalServiceInfo{}
product.Name = "Jaeger"
product.Url = config.Get().ExternalServices.Jaeger.URL
return &product, nil
}
func grafanaVersion() (*ExternalServiceInfo, error) {
product := ExternalServiceInfo{}
product.Name = "Grafana"
product.Url = config.Get().ExternalServices.Grafana.URL
return &product, nil
}
func prometheusVersion() (*ExternalServiceInfo, error) {
product := ExternalServiceInfo{}
prometheusV := new(p8sResponseVersion)
prometheusUrl := config.Get().ExternalServices.PrometheusServiceURL
resp, err := http.Get(prometheusUrl + "/version")
if err == nil {
defer resp.Body.Close()
err = json.NewDecoder(resp.Body).Decode(&prometheusV)
if err == nil {
product.Name = "Prometheus"
product.Version = prometheusV.Version
return &product, nil
}
}
return nil, err
}
func kubernetesVersion() (*ExternalServiceInfo, error) {
var (
err error
k8sConfig *rest.Config
k8s *kube.Clientset
serverVersion *kversion.Info
)
product := ExternalServiceInfo{}
k8sConfig, err = kubernetes.ConfigClient()
if err == nil {
k8sConfig.QPS = config.Get().KubernetesConfig.QPS
k8sConfig.Burst = config.Get().KubernetesConfig.Burst
k8s, err = kube.NewForConfig(k8sConfig)
if err == nil {
serverVersion, err = k8s.Discovery().ServerVersion()
if err == nil {
product.Name = "Kubernetes"
product.Version = serverVersion.GitVersion
return &product, nil
}
}
}
return nil, err
}
|
istioVersion
|
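The `validateVersion` function in the versions.go entry leans on `github.com/hashicorp/go-version` for comparisons. A tiny runnable illustration of the same API follows; the version strings are example values only.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// Example versions only; ">=" in validateVersion maps to GreaterThan || Equal.
	required, _ := version.NewVersion("1.0.0")
	installed, _ := version.NewVersion("1.0.5")
	fmt.Println(installed.GreaterThan(required) || installed.Equal(required)) // true
}
```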
targetlist.go
|
// SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
//
// SPDX-License-Identifier: Apache-2.0
package dataobjects
import (
"encoding/json"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
lsv1alpha1 "github.com/gardener/landscaper/apis/core/v1alpha1"
lsv1alpha1helper "github.com/gardener/landscaper/apis/core/v1alpha1/helper"
kutil "github.com/gardener/landscaper/controller-utils/pkg/kubernetes"
)
// TargetList is the internal representation of a list of targets.
type TargetList struct {
Targets []*Target
}
// NewTargetList creates a new internal targetlist.
func NewTargetList() *TargetList {
return &TargetList{
Targets: []*Target{},
}
}
// NewTargetListWithSize creates a new internal targetlist with a given size.
func
|
(size int) *TargetList {
return &TargetList{
Targets: make([]*Target, size),
}
}
// SetAllSourceType sets the source type for all targets in the list.
func (t *TargetList) SetAllSourceType(sourceType lsv1alpha1.DataObjectSourceType) *TargetList {
for i := range t.Targets {
t.Targets[i].SetSourceType(sourceType)
}
return t
}
// NewFromTargetList creates a new internal targetlist instance from a list of raw targets.
func NewFromTargetList(targets []lsv1alpha1.Target) (*TargetList, error) {
res := NewTargetListWithSize(len(targets))
for i := range targets {
tmp, err := NewFromTarget(&targets[i])
if err != nil {
return nil, err
}
res.Targets[i] = tmp
}
return res, nil
}
// GetData returns the targets as list of internal go maps.
func (t *TargetList) GetData() ([]interface{}, error) {
rawTargets := make([]lsv1alpha1.Target, len(t.Targets))
for i := range t.Targets {
rawTargets[i] = *t.Targets[i].Raw
}
raw, err := json.Marshal(rawTargets)
if err != nil {
return nil, err
}
var data []interface{}
if err := json.Unmarshal(raw, &data); err != nil {
return nil, err
}
return data, nil
}
// Build creates a new data object based on the given data and metadata.
// Does not set owner references.
func (tl TargetList) Build(tlName string) ([]*lsv1alpha1.Target, error) {
newTL := make([]*lsv1alpha1.Target, len(tl.Targets))
for i := 0; i < len(newTL); i++ {
tar := tl.Targets[i]
newTarget := &lsv1alpha1.Target{}
newTarget.Name = lsv1alpha1helper.GenerateDataObjectNameWithIndex(tar.Metadata.Context, tar.Metadata.Key, i)
newTarget.Namespace = tar.Metadata.Namespace
if tar.Raw != nil {
newTarget.Spec = tar.Raw.Spec
for key, val := range tar.Raw.Annotations {
metav1.SetMetaDataAnnotation(&newTarget.ObjectMeta, key, val)
}
for key, val := range tar.Raw.Labels {
kutil.SetMetaDataLabel(newTarget, key, val)
}
}
SetMetadataFromObject(newTarget, tar.Metadata)
tar.Raw = newTarget
newTL[i] = newTarget
}
return newTL, nil
}
// Apply applies data and metadata to an existing target (except owner references).
func (tl TargetList) Apply(raw *lsv1alpha1.Target, index int) error {
t := tl.Targets[index]
raw.Name = lsv1alpha1helper.GenerateDataObjectNameWithIndex(t.Metadata.Context, t.Metadata.Key, index)
raw.Namespace = t.Metadata.Namespace
raw.Spec = t.Raw.Spec
SetMetadataFromObject(raw, t.Metadata)
return nil
}
|
NewTargetListWithSize
|
kdtree_test.go
|
package kdtree
import (
"fmt"
"testing"
)
func
|
(t *testing.T) {
listt := []*KDNode{
&KDNode{X: 2, Y: 3},
&KDNode{X: 9, Y: 6},
&KDNode{X: 8, Y: 1},
&KDNode{X: 4, Y: 7},
&KDNode{X: 5, Y: 4},
&KDNode{X: 7, Y: 2},
}
root := BuildKDTree(listt, 1)
fmt.Println(root.Left)
a := 0.0012
b := 0.0013
fmt.Println(a > b)
}
|
TestMedian
|
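`BuildKDTree` itself is outside the kdtree_test.go excerpt; the masked test name `TestMedian` suggests a median-split construction. The sketch below shows that standard approach with a locally defined node type and example points mirroring the test; it is an assumption about the library's behavior, not its actual code.

```go
package main

import (
	"fmt"
	"sort"
)

// KDNode is a simplified stand-in for the library's node type.
type KDNode struct {
	X, Y        float64
	Left, Right *KDNode
}

// buildKDTree sorts the points on the current axis (even depth: X, odd depth: Y)
// and takes the median as the split node, recursing on the two halves.
func buildKDTree(points []*KDNode, depth int) *KDNode {
	if len(points) == 0 {
		return nil
	}
	byX := depth%2 == 0
	sort.Slice(points, func(i, j int) bool {
		if byX {
			return points[i].X < points[j].X
		}
		return points[i].Y < points[j].Y
	})
	mid := len(points) / 2
	node := points[mid]
	node.Left = buildKDTree(points[:mid], depth+1)
	node.Right = buildKDTree(points[mid+1:], depth+1)
	return node
}

func main() {
	pts := []*KDNode{{X: 2, Y: 3}, {X: 9, Y: 6}, {X: 8, Y: 1}, {X: 4, Y: 7}, {X: 5, Y: 4}, {X: 7, Y: 2}}
	root := buildKDTree(pts, 0)
	fmt.Println(root.X, root.Y, root.Left != nil, root.Right != nil) // 7 2 true true
}
```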
typed_encoder.gen.go
|
// Code generated by typed_encoder.gen.go.tmpl. DO NOT EDIT.
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"unsafe"
"github.com/apache/arrow/go/v6/arrow"
"github.com/apache/arrow/go/v6/arrow/memory"
"github.com/apache/arrow/go/v6/parquet"
format "github.com/apache/arrow/go/v6/parquet/internal/gen-go/parquet"
"github.com/apache/arrow/go/v6/parquet/internal/utils"
"github.com/apache/arrow/go/v6/parquet/schema"
"golang.org/x/xerrors"
)
// fully typed encoder interfaces to enable writing against encoder/decoders
// without having to care about what encoding type is actually being used.
var (
Int32EncoderTraits int32EncoderTraits
Int32DecoderTraits int32DecoderTraits
Int64EncoderTraits int64EncoderTraits
Int64DecoderTraits int64DecoderTraits
Int96EncoderTraits int96EncoderTraits
Int96DecoderTraits int96DecoderTraits
Float32EncoderTraits float32EncoderTraits
Float32DecoderTraits float32DecoderTraits
Float64EncoderTraits float64EncoderTraits
Float64DecoderTraits float64DecoderTraits
BooleanEncoderTraits boolEncoderTraits
BooleanDecoderTraits boolDecoderTraits
ByteArrayEncoderTraits byteArrayEncoderTraits
ByteArrayDecoderTraits byteArrayDecoderTraits
FixedLenByteArrayEncoderTraits fixedLenByteArrayEncoderTraits
FixedLenByteArrayDecoderTraits fixedLenByteArrayDecoderTraits
)
// Int32Encoder is the interface for all encoding types that implement encoding
// int32 values.
type Int32Encoder interface {
TypedEncoder
Put([]int32)
PutSpaced([]int32, []byte, int64)
}
// Int32Decoder is the interface for all encoding types that implement decoding
// int32 values.
type Int32Decoder interface {
TypedDecoder
Decode([]int32) (int, error)
DecodeSpaced([]int32, int, []byte, int64) (int, error)
}
// the int32EncoderTraits struct is used to make it easy to create encoders and decoders based on type
type int32EncoderTraits struct{}
// Encoder returns an encoder for int32 type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
func (int32EncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
return &DictInt32Encoder{newDictEncoderBase(descr, NewInt32Dictionary(), mem)}
}
switch e {
case format.Encoding_PLAIN:
return &PlainInt32Encoder{encoder: newEncoderBase(e, descr, mem)}
case format.Encoding_DELTA_BINARY_PACKED:
return DeltaBitPackInt32Encoder{&deltaBitPackEncoder{
encoder: newEncoderBase(e, descr, mem)}}
default:
panic("unimplemented encoding type")
}
}
// int32DecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for int32 values
type int32DecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n int32 values.
func (int32DecoderTraits) BytesRequired(n int) int {
return arrow.Int32Traits.BytesRequired(n)
}
// Decoder returns a decoder for int32 typed data of the requested encoding type if available
func (int32DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
return &DictInt32Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}
}
switch e {
case parquet.Encodings.Plain:
return &PlainInt32Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}
case parquet.Encodings.DeltaBinaryPacked:
if mem == nil {
mem = memory.DefaultAllocator
}
return &DeltaBitPackInt32Decoder{
deltaBitPackDecoder: &deltaBitPackDecoder{
decoder: newDecoderBase(format.Encoding(e), descr),
mem: mem,
}}
default:
panic("unimplemented encoding type")
}
}
// DictInt32Encoder is an encoder for int32 data using dictionary encoding
type DictInt32Encoder struct {
dictEncoder
}
// Type returns the underlying physical type that can be encoded with this encoder
func (enc *DictInt32Encoder) Type() parquet.Type {
return parquet.Types.Int32
}
// Put encodes the values passed in, adding to the index as needed.
func (enc *DictInt32Encoder) Put(in []int32) {
for _, val := range in {
enc.dictEncoder.Put(val)
}
}
// PutSpaced is the same as Put but for when the data being encoded has slots open for
// null values, using the bitmap provided to skip values as needed.
func (enc *DictInt32Encoder) PutSpaced(in []int32, validBits []byte, validBitsOffset int64) {
utils.VisitSetBitRuns(validBits, validBitsOffset, int64(len(in)), func(pos, length int64) error {
for i := int64(0); i < length; i++ {
enc.dictEncoder.Put(in[i+pos])
}
return nil
})
}
// DictInt32Decoder is a decoder for decoding dictionary encoded data for int32 columns
type DictInt32Decoder struct {
dictDecoder
}
// Type returns the underlying physical type that can be decoded with this decoder
func (DictInt32Decoder) Type() parquet.Type {
return parquet.Types.Int32
}
// Decode populates the passed in slice with min(len(out), remaining values) values,
// decoding using the dictionary to get the actual values. Returns the number of values
// actually decoded and any error encountered.
func (d *DictInt32Decoder) Decode(out []int32) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decode(out[:vals])
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict eof exception")
}
d.nvals -= vals
return vals, nil
}
// DecodeSpaced is like Decode but will space out the data leaving slots for null values
// based on the provided bitmap.
func (d *DictInt32Decoder) DecodeSpaced(out []int32, nullCount int, validBits []byte, validBitsOffset int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decodeSpaced(out[:vals], nullCount, validBits, validBitsOffset)
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict spaced eof exception")
}
d.nvals -= vals
return vals, nil
}
// Int32DictConverter is a helper for dictionary handling which is used for converting
// run length encoded indexes into the actual values that are stored in the dictionary index page.
type Int32DictConverter struct {
valueDecoder Int32Decoder
dict []int32
zeroVal int32
}
// ensure validates that we've decoded dictionary values up to the index
// provided so that we don't need to decode the entire dictionary at start.
func (dc *Int32DictConverter) ensure(idx utils.IndexType) error {
if len(dc.dict) <= int(idx) {
if cap(dc.dict) <= int(idx) {
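			// Not enough capacity: decode the missing values into a scratch slice and append them.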
val := make([]int32, int(idx+1)-len(dc.dict))
n, err := dc.valueDecoder.Decode(val)
if err != nil {
return err
}
dc.dict = append(dc.dict, val[:n]...)
} else {
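			// Capacity is already available: decode directly into the re-sliced dictionary.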
cur := len(dc.dict)
n, err := dc.valueDecoder.Decode(dc.dict[cur : idx+1])
if err != nil {
return err
}
dc.dict = dc.dict[:cur+n]
}
}
return nil
}
// IsValid verifies that the set of indexes passed in are all valid indexes
// in the dictionary and if necessary decodes dictionary indexes up to the index
// requested.
func (dc *Int32DictConverter) IsValid(idxes ...utils.IndexType) bool {
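	// The index slice is reinterpreted as []int32 (IndexType is backed by int32)
	// so the existing min/max helper can be reused without copying.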
min, max := utils.GetMinMaxInt32(*(*[]int32)(unsafe.Pointer(&idxes)))
dc.ensure(utils.IndexType(max))
return min >= 0 && int(min) < len(dc.dict) && int(max) >= 0 && int(max) < len(dc.dict)
}
// Fill populates the slice passed in entirely with the value at dictionary index indicated by val
func (dc *Int32DictConverter) Fill(out interface{}, val utils.IndexType) error {
o := out.([]int32)
if err := dc.ensure(val); err != nil {
return err
}
o[0] = dc.dict[val]
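	// Fill the rest of the slice by repeatedly doubling the initialized prefix,
	// needing only O(log n) copy calls instead of assigning each element.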
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
return nil
}
// FillZero populates the entire slice of out with the zero value for int32
func (dc *Int32DictConverter) FillZero(out interface{}) {
o := out.([]int32)
o[0] = dc.zeroVal
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
}
// Copy populates the slice provided with the values in the dictionary at the indexes
// in the vals slice.
func (dc *Int32DictConverter) Copy(out interface{}, vals []utils.IndexType) error {
o := out.([]int32)
for idx, val := range vals {
o[idx] = dc.dict[val]
}
return nil
}
// Int64Encoder is the interface for all encoding types that implement encoding
// int64 values.
type Int64Encoder interface {
TypedEncoder
Put([]int64)
PutSpaced([]int64, []byte, int64)
}
// Int64Decoder is the interface for all encoding types that implement decoding
// int64 values.
type Int64Decoder interface {
TypedDecoder
Decode([]int64) (int, error)
DecodeSpaced([]int64, int, []byte, int64) (int, error)
}
// the int64EncoderTraits struct is used to make it easy to create encoders and decoders based on type
type int64EncoderTraits struct{}
// Encoder returns an encoder for int64 type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
func (int64EncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
return &DictInt64Encoder{newDictEncoderBase(descr, NewInt64Dictionary(), mem)}
}
switch e {
case format.Encoding_PLAIN:
return &PlainInt64Encoder{encoder: newEncoderBase(e, descr, mem)}
case format.Encoding_DELTA_BINARY_PACKED:
return DeltaBitPackInt64Encoder{&deltaBitPackEncoder{
encoder: newEncoderBase(e, descr, mem)}}
default:
panic("unimplemented encoding type")
}
}
// int64DecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for int64 values
type int64DecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n int64 values.
func (int64DecoderTraits) BytesRequired(n int) int {
return arrow.Int64Traits.BytesRequired(n)
}
// Decoder returns a decoder for int64 typed data of the requested encoding type if available
func (int64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
return &DictInt64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}
}
switch e {
case parquet.Encodings.Plain:
return &PlainInt64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}
case parquet.Encodings.DeltaBinaryPacked:
if mem == nil {
mem = memory.DefaultAllocator
}
return &DeltaBitPackInt64Decoder{
deltaBitPackDecoder: &deltaBitPackDecoder{
decoder: newDecoderBase(format.Encoding(e), descr),
mem: mem,
}}
default:
panic("unimplemented encoding type")
}
}
// DictInt64Encoder is an encoder for int64 data using dictionary encoding
type DictInt64Encoder struct {
dictEncoder
}
// Type returns the underlying physical type that can be encoded with this encoder
func (enc *DictInt64Encoder) Type() parquet.Type {
return parquet.Types.Int64
}
// Put encodes the values passed in, adding to the index as needed.
func (enc *DictInt64Encoder) Put(in []int64) {
for _, val := range in {
enc.dictEncoder.Put(val)
}
}
// PutSpaced is the same as Put but for when the data being encoded has slots open for
// null values, using the bitmap provided to skip values as needed.
func (enc *DictInt64Encoder) PutSpaced(in []int64, validBits []byte, validBitsOffset int64) {
utils.VisitSetBitRuns(validBits, validBitsOffset, int64(len(in)), func(pos, length int64) error {
for i := int64(0); i < length; i++ {
enc.dictEncoder.Put(in[i+pos])
}
return nil
})
}
// DictInt64Decoder is a decoder for decoding dictionary encoded data for int64 columns
type DictInt64Decoder struct {
dictDecoder
}
// Type returns the underlying physical type that can be decoded with this decoder
func (DictInt64Decoder) Type() parquet.Type {
return parquet.Types.Int64
}
// Decode populates the passed in slice with min(len(out), remaining values) values,
// decoding using the dictionary to get the actual values. Returns the number of values
// actually decoded and any error encountered.
func (d *DictInt64Decoder) Decode(out []int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decode(out[:vals])
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict eof exception")
}
d.nvals -= vals
return vals, nil
}
// DecodeSpaced is like Decode but will space out the data leaving slots for null values
// based on the provided bitmap.
func (d *DictInt64Decoder) DecodeSpaced(out []int64, nullCount int, validBits []byte, validBitsOffset int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decodeSpaced(out[:vals], nullCount, validBits, validBitsOffset)
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict spaced eof exception")
}
d.nvals -= vals
return vals, nil
}
// Int64DictConverter is a helper for dictionary handling which is used for converting
// run length encoded indexes into the actual values that are stored in the dictionary index page.
type Int64DictConverter struct {
valueDecoder Int64Decoder
dict []int64
zeroVal int64
}
// ensure validates that we've decoded dictionary values up to the index
// provided so that we don't need to decode the entire dictionary at start.
func (dc *Int64DictConverter) ensure(idx utils.IndexType) error {
if len(dc.dict) <= int(idx) {
if cap(dc.dict) <= int(idx) {
val := make([]int64, int(idx+1)-len(dc.dict))
n, err := dc.valueDecoder.Decode(val)
if err != nil {
return err
}
dc.dict = append(dc.dict, val[:n]...)
} else {
cur := len(dc.dict)
n, err := dc.valueDecoder.Decode(dc.dict[cur : idx+1])
if err != nil {
return err
}
dc.dict = dc.dict[:cur+n]
}
}
return nil
}
// IsValid verifies that the set of indexes passed in are all valid indexes
// in the dictionary and if necessary decodes dictionary indexes up to the index
// requested.
func (dc *Int64DictConverter) IsValid(idxes ...utils.IndexType) bool {
min, max := utils.GetMinMaxInt32(*(*[]int32)(unsafe.Pointer(&idxes)))
dc.ensure(utils.IndexType(max))
return min >= 0 && int(min) < len(dc.dict) && int(max) >= 0 && int(max) < len(dc.dict)
}
// Fill populates the slice passed in entirely with the value at dictionary index indicated by val
func (dc *Int64DictConverter) Fill(out interface{}, val utils.IndexType) error {
o := out.([]int64)
if err := dc.ensure(val); err != nil {
return err
}
o[0] = dc.dict[val]
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
return nil
}
// FillZero populates the entire slice of out with the zero value for int64
func (dc *Int64DictConverter) FillZero(out interface{}) {
o := out.([]int64)
o[0] = dc.zeroVal
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
}
// Copy populates the slice provided with the values in the dictionary at the indexes
// in the vals slice.
func (dc *Int64DictConverter) Copy(out interface{}, vals []utils.IndexType) error {
o := out.([]int64)
for idx, val := range vals {
o[idx] = dc.dict[val]
}
return nil
}
// Int96Encoder is the interface for all encoding types that implement encoding
// parquet.Int96 values.
type Int96Encoder interface {
TypedEncoder
Put([]parquet.Int96)
PutSpaced([]parquet.Int96, []byte, int64)
}
// Int96Decoder is the interface for all encoding types that implement decoding
// parquet.Int96 values.
type Int96Decoder interface {
TypedDecoder
Decode([]parquet.Int96) (int, error)
DecodeSpaced([]parquet.Int96, int, []byte, int64) (int, error)
}
// the int96EncoderTraits struct is used to make it easy to create encoders and decoders based on type
type int96EncoderTraits struct{}
// Encoder returns an encoder for int96 type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
// dictionary encoding does not exist for this type and Encoder will panic if useDict is true
func (int96EncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
panic("parquet: no parquet.Int96 dictionary encoding")
}
switch e {
case format.Encoding_PLAIN:
return &PlainInt96Encoder{encoder: newEncoderBase(e, descr, mem)}
default:
panic("unimplemented encoding type")
}
}
// int96DecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for int96 values
type int96DecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n int96 values.
func (int96DecoderTraits) BytesRequired(n int) int {
return parquet.Int96Traits.BytesRequired(n)
}
// Decoder returns a decoder for int96 typed data of the requested encoding type if available
func (int96DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
panic("dictionary decoding unimplemented for int96")
}
switch e {
case parquet.Encodings.Plain:
return &PlainInt96Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}
default:
panic("unimplemented encoding type")
}
}
// Float32Encoder is the interface for all encoding types that implement encoding
// float32 values.
type Float32Encoder interface {
TypedEncoder
Put([]float32)
PutSpaced([]float32, []byte, int64)
}
// Float32Decoder is the interface for all encoding types that implement decoding
// float32 values.
type Float32Decoder interface {
TypedDecoder
Decode([]float32) (int, error)
DecodeSpaced([]float32, int, []byte, int64) (int, error)
}
// the float32EncoderTraits struct is used to make it easy to create encoders and decoders based on type
type float32EncoderTraits struct{}
// Encoder returns an encoder for float32 type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
func (float32EncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
return &DictFloat32Encoder{newDictEncoderBase(descr, NewFloat32Dictionary(), mem)}
}
switch e {
case format.Encoding_PLAIN:
return &PlainFloat32Encoder{encoder: newEncoderBase(e, descr, mem)}
default:
panic("unimplemented encoding type")
}
}
// float32DecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for float32 values
type float32DecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n float32 values.
func (float32DecoderTraits) BytesRequired(n int) int {
return arrow.Float32Traits.BytesRequired(n)
}
// Decoder returns a decoder for float32 typed data of the requested encoding type if available
func (float32DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
return &DictFloat32Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}
}
switch e {
case parquet.Encodings.Plain:
return &PlainFloat32Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}
default:
panic("unimplemented encoding type")
}
}
// DictFloat32Encoder is an encoder for float32 data using dictionary encoding
type DictFloat32Encoder struct {
dictEncoder
}
// Type returns the underlying physical type that can be encoded with this encoder
func (enc *DictFloat32Encoder) Type() parquet.Type {
return parquet.Types.Float
}
// Put encodes the values passed in, adding to the index as needed.
func (enc *DictFloat32Encoder) Put(in []float32) {
for _, val := range in {
enc.dictEncoder.Put(val)
}
}
// PutSpaced is the same as Put but for when the data being encoded has slots open for
// null values, using the bitmap provided to skip values as needed.
func (enc *DictFloat32Encoder) PutSpaced(in []float32, validBits []byte, validBitsOffset int64) {
utils.VisitSetBitRuns(validBits, validBitsOffset, int64(len(in)), func(pos, length int64) error {
for i := int64(0); i < length; i++ {
enc.dictEncoder.Put(in[i+pos])
}
return nil
})
}
// DictFloat32Decoder is a decoder for decoding dictionary encoded data for float32 columns
type DictFloat32Decoder struct {
dictDecoder
}
// Type returns the underlying physical type that can be decoded with this decoder
func (DictFloat32Decoder) Type() parquet.Type {
return parquet.Types.Float
}
// Decode populates the passed in slice with min(len(out), remaining values) values,
// decoding using the dictionary to get the actual values. Returns the number of values
// actually decoded and any error encountered.
func (d *DictFloat32Decoder) Decode(out []float32) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decode(out[:vals])
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict eof exception")
}
d.nvals -= vals
return vals, nil
}
// DecodeSpaced is like Decode but will space out the data leaving slots for null values
// based on the provided bitmap.
func (d *DictFloat32Decoder) DecodeSpaced(out []float32, nullCount int, validBits []byte, validBitsOffset int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decodeSpaced(out[:vals], nullCount, validBits, validBitsOffset)
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict spaced eof exception")
}
d.nvals -= vals
return vals, nil
}
// Float32DictConverter is a helper for dictionary handling which is used for converting
// run length encoded indexes into the actual values that are stored in the dictionary index page.
type Float32DictConverter struct {
valueDecoder Float32Decoder
dict []float32
zeroVal float32
}
// ensure validates that we've decoded dictionary values up to the index
// provided so that we don't need to decode the entire dictionary at start.
func (dc *Float32DictConverter) ensure(idx utils.IndexType) error {
if len(dc.dict) <= int(idx) {
if cap(dc.dict) <= int(idx) {
val := make([]float32, int(idx+1)-len(dc.dict))
n, err := dc.valueDecoder.Decode(val)
if err != nil {
return err
}
dc.dict = append(dc.dict, val[:n]...)
} else {
cur := len(dc.dict)
n, err := dc.valueDecoder.Decode(dc.dict[cur : idx+1])
if err != nil {
return err
}
dc.dict = dc.dict[:cur+n]
}
}
return nil
}
// IsValid verifies that the set of indexes passed in are all valid indexes
// in the dictionary and if necessary decodes dictionary indexes up to the index
// requested.
func (dc *Float32DictConverter) IsValid(idxes ...utils.IndexType) bool {
min, max := utils.GetMinMaxInt32(*(*[]int32)(unsafe.Pointer(&idxes)))
dc.ensure(utils.IndexType(max))
return min >= 0 && int(min) < len(dc.dict) && int(max) >= 0 && int(max) < len(dc.dict)
}
// Fill populates the slice passed in entirely with the value at dictionary index indicated by val
func (dc *Float32DictConverter) Fill(out interface{}, val utils.IndexType) error {
o := out.([]float32)
if err := dc.ensure(val); err != nil {
return err
}
o[0] = dc.dict[val]
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
return nil
}
// FillZero populates the entire slice of out with the zero value for float32
func (dc *Float32DictConverter) FillZero(out interface{}) {
o := out.([]float32)
o[0] = dc.zeroVal
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
}
// Copy populates the slice provided with the values in the dictionary at the indexes
// in the vals slice.
func (dc *Float32DictConverter) Copy(out interface{}, vals []utils.IndexType) error {
o := out.([]float32)
for idx, val := range vals {
o[idx] = dc.dict[val]
}
return nil
}
// Float64Encoder is the interface for all encoding types that implement encoding
// float64 values.
type Float64Encoder interface {
TypedEncoder
Put([]float64)
PutSpaced([]float64, []byte, int64)
}
// Float64Decoder is the interface for all encoding types that implement decoding
// float64 values.
type Float64Decoder interface {
TypedDecoder
Decode([]float64) (int, error)
DecodeSpaced([]float64, int, []byte, int64) (int, error)
}
// the float64EncoderTraits struct is used to make it easy to create encoders and decoders based on type
type float64EncoderTraits struct{}
// Encoder returns an encoder for float64 type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
func (float64EncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
return &DictFloat64Encoder{newDictEncoderBase(descr, NewFloat64Dictionary(), mem)}
}
switch e {
case format.Encoding_PLAIN:
return &PlainFloat64Encoder{encoder: newEncoderBase(e, descr, mem)}
default:
panic("unimplemented encoding type")
}
}
// float64DecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for float64 values
type float64DecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n float64 values.
func (float64DecoderTraits) BytesRequired(n int) int {
return arrow.Float64Traits.BytesRequired(n)
}
// Decoder returns a decoder for float64 typed data of the requested encoding type if available
func (float64DecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
return &DictFloat64Decoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}
}
switch e {
case parquet.Encodings.Plain:
return &PlainFloat64Decoder{decoder: newDecoderBase(format.Encoding(e), descr)}
default:
panic("unimplemented encoding type")
}
}
// DictFloat64Encoder is an encoder for float64 data using dictionary encoding
type DictFloat64Encoder struct {
dictEncoder
}
// Type returns the underlying physical type that can be encoded with this encoder
func (enc *DictFloat64Encoder) Type() parquet.Type {
return parquet.Types.Double
}
// Put encodes the values passed in, adding to the index as needed.
func (enc *DictFloat64Encoder) Put(in []float64) {
for _, val := range in {
enc.dictEncoder.Put(val)
}
}
// PutSpaced is the same as Put but for when the data being encoded has slots open for
// null values, using the bitmap provided to skip values as needed.
func (enc *DictFloat64Encoder) PutSpaced(in []float64, validBits []byte, validBitsOffset int64) {
utils.VisitSetBitRuns(validBits, validBitsOffset, int64(len(in)), func(pos, length int64) error {
for i := int64(0); i < length; i++ {
enc.dictEncoder.Put(in[i+pos])
}
return nil
})
}
// DictFloat64Decoder is a decoder for decoding dictionary encoded data for float64 columns
type DictFloat64Decoder struct {
dictDecoder
}
// Type returns the underlying physical type that can be decoded with this decoder
func (DictFloat64Decoder) Type() parquet.Type {
return parquet.Types.Double
}
// Decode populates the passed in slice with min(len(out), remaining values) values,
// decoding using the dictionary to get the actual values. Returns the number of values
// actually decoded and any error encountered.
func (d *DictFloat64Decoder) Decode(out []float64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decode(out[:vals])
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict eof exception")
}
d.nvals -= vals
return vals, nil
}
// DecodeSpaced is like Decode but will space out the data leaving slots for null values
// based on the provided bitmap.
func (d *DictFloat64Decoder) DecodeSpaced(out []float64, nullCount int, validBits []byte, validBitsOffset int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decodeSpaced(out[:vals], nullCount, validBits, validBitsOffset)
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict spaced eof exception")
}
d.nvals -= vals
return vals, nil
}
// Float64DictConverter is a helper for dictionary handling which is used for converting
// run length encoded indexes into the actual values that are stored in the dictionary index page.
type Float64DictConverter struct {
valueDecoder Float64Decoder
dict []float64
zeroVal float64
}
// ensure validates that we've decoded dictionary values up to the index
// provided so that we don't need to decode the entire dictionary at start.
func (dc *Float64DictConverter) ensure(idx utils.IndexType) error {
if len(dc.dict) <= int(idx) {
if cap(dc.dict) <= int(idx) {
val := make([]float64, int(idx+1)-len(dc.dict))
n, err := dc.valueDecoder.Decode(val)
if err != nil {
return err
}
dc.dict = append(dc.dict, val[:n]...)
} else {
cur := len(dc.dict)
n, err := dc.valueDecoder.Decode(dc.dict[cur : idx+1])
if err != nil {
return err
}
dc.dict = dc.dict[:cur+n]
}
}
return nil
}
// IsValid verifies that the set of indexes passed in are all valid indexes
// in the dictionary and if necessary decodes dictionary indexes up to the index
// requested.
func (dc *Float64DictConverter) IsValid(idxes ...utils.IndexType) bool {
min, max := utils.GetMinMaxInt32(*(*[]int32)(unsafe.Pointer(&idxes)))
dc.ensure(utils.IndexType(max))
return min >= 0 && int(min) < len(dc.dict) && int(max) >= 0 && int(max) < len(dc.dict)
}
// Fill populates the slice passed in entirely with the value at dictionary index indicated by val
func (dc *Float64DictConverter) Fill(out interface{}, val utils.IndexType) error {
o := out.([]float64)
if err := dc.ensure(val); err != nil {
return err
}
o[0] = dc.dict[val]
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
return nil
}
// FillZero populates the entire slice of out with the zero value for float64
func (dc *Float64DictConverter) FillZero(out interface{}) {
o := out.([]float64)
o[0] = dc.zeroVal
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
}
// Copy populates the slice provided with the values in the dictionary at the indexes
// in the vals slice.
func (dc *Float64DictConverter) Copy(out interface{}, vals []utils.IndexType) error {
o := out.([]float64)
for idx, val := range vals {
o[idx] = dc.dict[val]
}
return nil
}
// BooleanEncoder is the interface for all encoding types that implement encoding
// bool values.
type BooleanEncoder interface {
TypedEncoder
Put([]bool)
PutSpaced([]bool, []byte, int64)
}
// BooleanDecoder is the interface for all encoding types that implement decoding
// bool values.
type BooleanDecoder interface {
TypedDecoder
Decode([]bool) (int, error)
DecodeSpaced([]bool, int, []byte, int64) (int, error)
}
// the boolEncoderTraits struct is used to make it easy to create encoders and decoders based on type
type boolEncoderTraits struct{}
// Encoder returns an encoder for bool type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
// dictionary encoding does not exist for this type and Encoder will panic if useDict is true
func (boolEncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
panic("parquet: no bool dictionary encoding")
}
switch e {
case format.Encoding_PLAIN:
return &PlainBooleanEncoder{encoder: newEncoderBase(e, descr, mem)}
default:
panic("unimplemented encoding type")
}
}
// boolDecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for bool values
type boolDecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n bool values.
func (boolDecoderTraits) BytesRequired(n int) int {
return arrow.BooleanTraits.BytesRequired(n)
}
// Decoder returns a decoder for bool typed data of the requested encoding type if available
func (boolDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
panic("dictionary decoding unimplemented for bool")
}
switch e {
case parquet.Encodings.Plain:
return &PlainBooleanDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}
default:
panic("unimplemented encoding type")
}
}
// ByteArrayEncoder is the interface for all encoding types that implement encoding
// parquet.ByteArray values.
type ByteArrayEncoder interface {
TypedEncoder
Put([]parquet.ByteArray)
PutSpaced([]parquet.ByteArray, []byte, int64)
}
// ByteArrayDecoder is the interface for all encoding types that implement decoding
// parquet.ByteArray values.
type ByteArrayDecoder interface {
TypedDecoder
Decode([]parquet.ByteArray) (int, error)
DecodeSpaced([]parquet.ByteArray, int, []byte, int64) (int, error)
}
// the byteArrayEncoderTraits struct is used to make it easy to create encoders and decoders based on type
type byteArrayEncoderTraits struct{}
// Encoder returns an encoder for byteArray type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
func (byteArrayEncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
return &DictByteArrayEncoder{newDictEncoderBase(descr, NewBinaryDictionary(mem), mem)}
}
switch e {
case format.Encoding_PLAIN:
return &PlainByteArrayEncoder{encoder: newEncoderBase(e, descr, mem)}
case format.Encoding_DELTA_LENGTH_BYTE_ARRAY:
return &DeltaLengthByteArrayEncoder{
encoder: newEncoderBase(e, descr, mem),
lengthEncoder: &DeltaBitPackInt32Encoder{
&deltaBitPackEncoder{encoder: newEncoderBase(e, descr, mem)}},
}
case format.Encoding_DELTA_BYTE_ARRAY:
return &DeltaByteArrayEncoder{
encoder: newEncoderBase(e, descr, mem),
}
default:
panic("unimplemented encoding type")
}
}
// byteArrayDecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for byteArray values
type byteArrayDecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n byteArray values.
func (byteArrayDecoderTraits) BytesRequired(n int) int {
return parquet.ByteArrayTraits.BytesRequired(n)
}
// Decoder returns a decoder for byteArray typed data of the requested encoding type if available
func (byteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
return &DictByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}
}
switch e {
case parquet.Encodings.Plain:
return &PlainByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}
case parquet.Encodings.DeltaLengthByteArray:
if mem == nil {
mem = memory.DefaultAllocator
}
return &DeltaLengthByteArrayDecoder{
decoder: newDecoderBase(format.Encoding(e), descr),
mem: mem,
}
case parquet.Encodings.DeltaByteArray:
if mem == nil {
mem = memory.DefaultAllocator
}
return &DeltaByteArrayDecoder{
DeltaLengthByteArrayDecoder: &DeltaLengthByteArrayDecoder{
decoder: newDecoderBase(format.Encoding(e), descr),
mem: mem,
}}
default:
panic("unimplemented encoding type")
}
}
// DictByteArrayEncoder is an encoder for parquet.ByteArray data using dictionary encoding
type DictByteArrayEncoder struct {
dictEncoder
}
// Type returns the underlying physical type that can be encoded with this encoder
func (enc *DictByteArrayEncoder) Type() parquet.Type {
return parquet.Types.ByteArray
}
// DictByteArrayDecoder is a decoder for decoding dictionary encoded data for parquet.ByteArray columns
type DictByteArrayDecoder struct {
dictDecoder
}
// Type returns the underlying physical type that can be decoded with this decoder
func (DictByteArrayDecoder) Type() parquet.Type {
return parquet.Types.ByteArray
}
// Decode populates the passed in slice with min(len(out), remaining values) values,
// decoding using the dictionary to get the actual values. Returns the number of values
// actually decoded and any error encountered.
func (d *DictByteArrayDecoder) Decode(out []parquet.ByteArray) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decode(out[:vals])
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict eof exception")
}
d.nvals -= vals
return vals, nil
}
// DecodeSpaced is like Decode but will space out the data leaving slots for null values
// based on the provided bitmap.
func (d *DictByteArrayDecoder) DecodeSpaced(out []parquet.ByteArray, nullCount int, validBits []byte, validBitsOffset int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decodeSpaced(out[:vals], nullCount, validBits, validBitsOffset)
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict spaced eof exception")
}
d.nvals -= vals
return vals, nil
}
// ByteArrayDictConverter is a helper for dictionary handling which is used for converting
// run length encoded indexes into the actual values that are stored in the dictionary index page.
type ByteArrayDictConverter struct {
valueDecoder ByteArrayDecoder
dict []parquet.ByteArray
zeroVal parquet.ByteArray
}
// ensure validates that we've decoded dictionary values up to the index
// provided so that we don't need to decode the entire dictionary at start.
func (dc *ByteArrayDictConverter) ensure(idx utils.IndexType) error {
if len(dc.dict) <= int(idx) {
if cap(dc.dict) <= int(idx) {
val := make([]parquet.ByteArray, int(idx+1)-len(dc.dict))
n, err := dc.valueDecoder.Decode(val)
if err != nil {
return err
}
dc.dict = append(dc.dict, val[:n]...)
} else {
cur := len(dc.dict)
n, err := dc.valueDecoder.Decode(dc.dict[cur : idx+1])
if err != nil {
return err
}
dc.dict = dc.dict[:cur+n]
}
}
return nil
}
// IsValid verifies that the set of indexes passed in are all valid indexes
// in the dictionary and if necessary decodes dictionary indexes up to the index
// requested.
func (dc *ByteArrayDictConverter) IsValid(idxes ...utils.IndexType) bool {
min, max := utils.GetMinMaxInt32(*(*[]int32)(unsafe.Pointer(&idxes)))
dc.ensure(utils.IndexType(max))
return min >= 0 && int(min) < len(dc.dict) && int(max) >= 0 && int(max) < len(dc.dict)
}
// Fill populates the slice passed in entirely with the value at dictionary index indicated by val
func (dc *ByteArrayDictConverter) Fill(out interface{}, val utils.IndexType) error {
o := out.([]parquet.ByteArray)
if err := dc.ensure(val); err != nil {
return err
}
o[0] = dc.dict[val]
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
return nil
}
// FillZero populates the entire slice of out with the zero value for parquet.ByteArray
func (dc *ByteArrayDictConverter) FillZero(out interface{}) {
o := out.([]parquet.ByteArray)
o[0] = dc.zeroVal
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
}
// Copy populates the slice provided with the values in the dictionary at the indexes
// in the vals slice.
func (dc *ByteArrayDictConverter) Copy(out interface{}, vals []utils.IndexType) error {
o := out.([]parquet.ByteArray)
for idx, val := range vals {
o[idx] = dc.dict[val]
}
return nil
}
// FixedLenByteArrayEncoder is the interface for all encoding types that implement encoding
// parquet.FixedLenByteArray values.
type FixedLenByteArrayEncoder interface {
TypedEncoder
Put([]parquet.FixedLenByteArray)
PutSpaced([]parquet.FixedLenByteArray, []byte, int64)
}
// FixedLenByteArrayDecoder is the interface for all encoding types that implement decoding
// parquet.FixedLenByteArray values.
type FixedLenByteArrayDecoder interface {
TypedDecoder
Decode([]parquet.FixedLenByteArray) (int, error)
DecodeSpaced([]parquet.FixedLenByteArray, int, []byte, int64) (int, error)
}
// the fixedLenByteArrayEncoderTraits struct is used to make it easy to create encoders and decoders based on type
type fixedLenByteArrayEncoderTraits struct{}
// Encoder returns an encoder for fixedLenByteArray type data, using the specified encoding type and whether or not
// it should be dictionary encoded.
func (fixedLenByteArrayEncoderTraits) Encoder(e format.Encoding, useDict bool, descr *schema.Column, mem memory.Allocator) TypedEncoder {
if useDict {
return &DictFixedLenByteArrayEncoder{newDictEncoderBase(descr, NewBinaryDictionary(mem), mem)}
}
switch e {
case format.Encoding_PLAIN:
return &PlainFixedLenByteArrayEncoder{encoder: newEncoderBase(e, descr, mem)}
default:
panic("unimplemented encoding type")
}
}
// fixedLenByteArrayDecoderTraits is a helper struct for providing information regardless of the type
// and used as a generic way to create a Decoder or Dictionary Decoder for fixedLenByteArray values
type fixedLenByteArrayDecoderTraits struct{}
// BytesRequired returns the number of bytes required to store n fixedLenByteArray values.
func (fixedLenByteArrayDecoderTraits) BytesRequired(n int) int {
return parquet.FixedLenByteArrayTraits.BytesRequired(n)
}
// Decoder returns a decoder for fixedLenByteArray typed data of the requested encoding type if available
func (fixedLenByteArrayDecoderTraits) Decoder(e parquet.Encoding, descr *schema.Column, useDict bool, mem memory.Allocator) TypedDecoder {
if useDict {
return &DictFixedLenByteArrayDecoder{dictDecoder{decoder: newDecoderBase(format.Encoding_RLE_DICTIONARY, descr), mem: mem}}
}
switch e {
case parquet.Encodings.Plain:
return &PlainFixedLenByteArrayDecoder{decoder: newDecoderBase(format.Encoding(e), descr)}
default:
panic("unimplemented encoding type")
}
}
// DictFixedLenByteArrayEncoder is an encoder for parquet.FixedLenByteArray data using dictionary encoding
type DictFixedLenByteArrayEncoder struct {
dictEncoder
}
// Type returns the underlying physical type that can be encoded with this encoder
func (enc *DictFixedLenByteArrayEncoder) Type() parquet.Type {
return parquet.Types.FixedLenByteArray
}
// DictFixedLenByteArrayDecoder is a decoder for decoding dictionary encoded data for parquet.FixedLenByteArray columns
type DictFixedLenByteArrayDecoder struct {
dictDecoder
}
// Type returns the underlying physical type that can be decoded with this decoder
func (DictFixedLenByteArrayDecoder) Type() parquet.Type {
return parquet.Types.FixedLenByteArray
}
// Decode populates the passed in slice with min(len(out), remaining values) values,
// decoding using the dictionary to get the actual values. Returns the number of values
// actually decoded and any error encountered.
func (d *DictFixedLenByteArrayDecoder) Decode(out []parquet.FixedLenByteArray) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decode(out[:vals])
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict eof exception")
}
d.nvals -= vals
return vals, nil
}
// DecodeSpaced is like Decode but will space out the data leaving slots for null values
// based on the provided bitmap.
func (d *DictFixedLenByteArrayDecoder) DecodeSpaced(out []parquet.FixedLenByteArray, nullCount int, validBits []byte, validBitsOffset int64) (int, error) {
vals := utils.MinInt(len(out), d.nvals)
decoded, err := d.decodeSpaced(out[:vals], nullCount, validBits, validBitsOffset)
if err != nil {
return decoded, err
}
if vals != decoded {
return decoded, xerrors.New("parquet: dict spaced eof exception")
}
d.nvals -= vals
return vals, nil
}
// FixedLenByteArrayDictConverter is a helper for dictionary handling which is used for converting
// run length encoded indexes into the actual values that are stored in the dictionary index page.
type FixedLenByteArrayDictConverter struct {
valueDecoder FixedLenByteArrayDecoder
dict []parquet.FixedLenByteArray
zeroVal parquet.FixedLenByteArray
}
// ensure validates that we've decoded dictionary values up to the index
// provided so that we don't need to decode the entire dictionary at start.
func (dc *FixedLenByteArrayDictConverter) ensure(idx utils.IndexType) error {
if len(dc.dict) <= int(idx) {
if cap(dc.dict) <= int(idx) {
val := make([]parquet.FixedLenByteArray, int(idx+1)-len(dc.dict))
n, err := dc.valueDecoder.Decode(val)
if err != nil {
return err
}
dc.dict = append(dc.dict, val[:n]...)
} else {
cur := len(dc.dict)
n, err := dc.valueDecoder.Decode(dc.dict[cur : idx+1])
if err != nil {
return err
}
dc.dict = dc.dict[:cur+n]
}
}
return nil
}
// IsValid verifies that the set of indexes passed in are all valid indexes
// in the dictionary and if necessary decodes dictionary indexes up to the index
// requested.
func (dc *FixedLenByteArrayDictConverter) IsValid(idxes ...utils.IndexType) bool {
min, max := utils.GetMinMaxInt32(*(*[]int32)(unsafe.Pointer(&idxes)))
dc.ensure(utils.IndexType(max))
return min >= 0 && int(min) < len(dc.dict) && int(max) >= 0 && int(max) < len(dc.dict)
}
// Fill populates the slice passed in entirely with the value at dictionary index indicated by val
func (dc *FixedLenByteArrayDictConverter) Fill(out interface{}, val utils.IndexType) error {
o := out.([]parquet.FixedLenByteArray)
if err := dc.ensure(val); err != nil {
return err
}
o[0] = dc.dict[val]
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
return nil
}
// FillZero populates the entire slice of out with the zero value for parquet.FixedLenByteArray
func (dc *FixedLenByteArrayDictConverter) FillZero(out interface{}) {
o := out.([]parquet.FixedLenByteArray)
o[0] = dc.zeroVal
for i := 1; i < len(o); i *= 2 {
copy(o[i:], o[:i])
}
}
// Copy populates the slice provided with the values in the dictionary at the indexes
// in the vals slice.
func (dc *FixedLenByteArrayDictConverter) Copy(out interface{}, vals []utils.IndexType) error {
o := out.([]parquet.FixedLenByteArray)
for idx, val := range vals {
o[idx] = dc.dict[val]
}
return nil
}
// NewDictConverter creates a dict converter of the appropriate type, using the passed in
// decoder as the decoder to decode the dictionary index.
func NewDictConverter(dict TypedDecoder) utils.DictionaryConverter {
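	// Each converter pre-sizes its dictionary slice to the number of values left
	// in the dictionary decoder so it can be filled lazily without reallocating.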
switch dict.Type() {
case parquet.Types.Int32:
return &Int32DictConverter{valueDecoder: dict.(Int32Decoder), dict: make([]int32, 0, dict.ValuesLeft())}
case parquet.Types.Int64:
return &Int64DictConverter{valueDecoder: dict.(Int64Decoder), dict: make([]int64, 0, dict.ValuesLeft())}
case parquet.Types.Float:
return &Float32DictConverter{valueDecoder: dict.(Float32Decoder), dict: make([]float32, 0, dict.ValuesLeft())}
case parquet.Types.Double:
return &Float64DictConverter{valueDecoder: dict.(Float64Decoder), dict: make([]float64, 0, dict.ValuesLeft())}
case parquet.Types.ByteArray:
return &ByteArrayDictConverter{valueDecoder: dict.(ByteArrayDecoder), dict: make([]parquet.ByteArray, 0, dict.ValuesLeft())}
case parquet.Types.FixedLenByteArray:
return &FixedLenByteArrayDictConverter{valueDecoder: dict.(FixedLenByteArrayDecoder), dict: make([]parquet.FixedLenByteArray, 0, dict.ValuesLeft())}
default:
return nil
}
}
// helper function to get encoding traits object for the physical type indicated
func
|
(t parquet.Type) EncoderTraits {
switch t {
case parquet.Types.Int32:
return Int32EncoderTraits
case parquet.Types.Int64:
return Int64EncoderTraits
case parquet.Types.Int96:
return Int96EncoderTraits
case parquet.Types.Float:
return Float32EncoderTraits
case parquet.Types.Double:
return Float64EncoderTraits
case parquet.Types.Boolean:
return BooleanEncoderTraits
case parquet.Types.ByteArray:
return ByteArrayEncoderTraits
case parquet.Types.FixedLenByteArray:
return FixedLenByteArrayEncoderTraits
default:
return nil
}
}
// helper function to get decoding traits object for the physical type indicated
func getDecodingTraits(t parquet.Type) DecoderTraits {
switch t {
case parquet.Types.Int32:
return Int32DecoderTraits
case parquet.Types.Int64:
return Int64DecoderTraits
case parquet.Types.Int96:
return Int96DecoderTraits
case parquet.Types.Float:
return Float32DecoderTraits
case parquet.Types.Double:
return Float64DecoderTraits
case parquet.Types.Boolean:
return BooleanDecoderTraits
case parquet.Types.ByteArray:
return ByteArrayDecoderTraits
case parquet.Types.FixedLenByteArray:
return FixedLenByteArrayDecoderTraits
default:
return nil
}
}
|
getEncodingTraits
|
ruleset.go
|
// Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package rules
import (
"errors"
"fmt"
"math"
"sort"
"time"
"github.com/m3db/m3/src/cluster/kv"
merrors "github.com/m3db/m3/src/metrics/errors"
"github.com/m3db/m3/src/metrics/filters"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
metricID "github.com/m3db/m3/src/metrics/metric/id"
"github.com/m3db/m3/src/metrics/rules/view"
"github.com/m3db/m3/src/metrics/rules/view/changes"
xerrors "github.com/m3db/m3/src/x/errors"
"github.com/pborman/uuid"
)
const (
timeNanosMax = int64(math.MaxInt64)
)
var (
errNilRuleSetProto = errors.New("nil rule set proto")
errRuleSetNotTombstoned = errors.New("ruleset is not tombstoned")
errRuleNotFound = errors.New("rule not found")
errNoRuleSnapshots = errors.New("rule has no snapshots")
ruleIDNotFoundErrorFmt = "no rule with id %v"
ruleActionErrorFmt = "cannot %s rule %s"
ruleSetActionErrorFmt = "cannot %s ruleset %s"
unknownOpTypeFmt = "unknown op type %v"
)
// RuleSet is a read-only set of rules associated with a namespace.
type RuleSet interface {
// Namespace is the metrics namespace the ruleset applies to.
Namespace() []byte
// Version returns the ruleset version.
Version() int
// CutoverNanos returns when the ruleset takes effect.
CutoverNanos() int64
	// Tombstoned returns whether the ruleset is tombstoned.
Tombstoned() bool
// CreatedAtNanos returns the creation time for this ruleset.
CreatedAtNanos() int64
// LastUpdatedAtNanos returns the time when this ruleset was last updated.
LastUpdatedAtNanos() int64
	// Proto returns the rulepb.RuleSet representation of this ruleset.
Proto() (*rulepb.RuleSet, error)
	// MappingRules returns a map of mapping rule id to states that rule has been in.
MappingRules() (view.MappingRules, error)
	// RollupRules returns a map of rollup rule id to states that rule has been in.
RollupRules() (view.RollupRules, error)
// Latest returns the latest snapshot of a ruleset containing the latest snapshots
// of each rule in the ruleset.
Latest() (view.RuleSet, error)
// ActiveSet returns the active ruleset at a given time.
ActiveSet(timeNanos int64) Matcher
// ToMutableRuleSet returns a mutable version of this ruleset.
ToMutableRuleSet() MutableRuleSet
}
// MutableRuleSet is a mutable ruleset.
type MutableRuleSet interface {
RuleSet
// Clone returns a copy of this MutableRuleSet.
Clone() MutableRuleSet
	// AddMappingRule creates a new mapping rule and adds it to this ruleset.
// Should return the id of the newly created rule.
AddMappingRule(view.MappingRule, UpdateMetadata) (string, error)
	// UpdateMappingRule updates an existing mapping rule in this ruleset.
UpdateMappingRule(view.MappingRule, UpdateMetadata) error
// DeleteMappingRule deletes a mapping rule
DeleteMappingRule(string, UpdateMetadata) error
	// AddRollupRule creates a new rollup rule and adds it to this ruleset.
// Should return the id of the newly created rule.
AddRollupRule(view.RollupRule, UpdateMetadata) (string, error)
|
// DeleteRollupRule deletes a rollup rule
DeleteRollupRule(string, UpdateMetadata) error
	// Delete tombstones this ruleset and all of its rules.
Delete(UpdateMetadata) error
// Revive removes the tombstone from this ruleset. It does not revive any rules.
Revive(UpdateMetadata) error
	// ApplyRuleSetChanges takes a set of rule set changes and applies them to this ruleset.
ApplyRuleSetChanges(changes.RuleSetChanges, UpdateMetadata) error
}
type ruleSet struct {
uuid string
version int
namespace []byte
createdAtNanos int64
lastUpdatedAtNanos int64
lastUpdatedBy string
tombstoned bool
cutoverNanos int64
mappingRules []*mappingRule
rollupRules []*rollupRule
tagsFilterOpts filters.TagsFilterOptions
newRollupIDFn metricID.NewIDFn
isRollupIDFn metricID.MatchIDFn
}
// NewRuleSetFromProto creates a new RuleSet from a proto object.
func NewRuleSetFromProto(version int, rs *rulepb.RuleSet, opts Options) (RuleSet, error) {
if rs == nil {
return nil, errNilRuleSetProto
}
tagsFilterOpts := opts.TagsFilterOptions()
mappingRules := make([]*mappingRule, 0, len(rs.MappingRules))
for _, mappingRule := range rs.MappingRules {
mc, err := newMappingRuleFromProto(mappingRule, tagsFilterOpts)
if err != nil {
return nil, err
}
mappingRules = append(mappingRules, mc)
}
rollupRules := make([]*rollupRule, 0, len(rs.RollupRules))
for _, rollupRule := range rs.RollupRules {
rc, err := newRollupRuleFromProto(rollupRule, tagsFilterOpts)
if err != nil {
return nil, err
}
rollupRules = append(rollupRules, rc)
}
return &ruleSet{
uuid: rs.Uuid,
version: version,
namespace: []byte(rs.Namespace),
createdAtNanos: rs.CreatedAtNanos,
lastUpdatedAtNanos: rs.LastUpdatedAtNanos,
lastUpdatedBy: rs.LastUpdatedBy,
tombstoned: rs.Tombstoned,
cutoverNanos: rs.CutoverNanos,
mappingRules: mappingRules,
rollupRules: rollupRules,
tagsFilterOpts: tagsFilterOpts,
newRollupIDFn: opts.NewRollupIDFn(),
isRollupIDFn: opts.IsRollupIDFn(),
}, nil
}
// NewEmptyRuleSet returns an empty ruleset to be used with a new namespace.
func NewEmptyRuleSet(namespaceName string, meta UpdateMetadata) MutableRuleSet {
rs := &ruleSet{
uuid: uuid.NewUUID().String(),
version: kv.UninitializedVersion,
namespace: []byte(namespaceName),
tombstoned: false,
mappingRules: make([]*mappingRule, 0),
rollupRules: make([]*rollupRule, 0),
}
rs.updateMetadata(meta)
return rs
}
func (rs *ruleSet) Namespace() []byte { return rs.namespace }
func (rs *ruleSet) Version() int { return rs.version }
func (rs *ruleSet) CutoverNanos() int64 { return rs.cutoverNanos }
func (rs *ruleSet) Tombstoned() bool { return rs.tombstoned }
func (rs *ruleSet) LastUpdatedAtNanos() int64 { return rs.lastUpdatedAtNanos }
func (rs *ruleSet) CreatedAtNanos() int64 { return rs.createdAtNanos }
func (rs *ruleSet) ToMutableRuleSet() MutableRuleSet { return rs }
func (rs *ruleSet) ActiveSet(timeNanos int64) Matcher {
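	// Reduce each rule to the snapshots that are relevant at or after timeNanos
	// before handing them to the matcher.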
mappingRules := make([]*mappingRule, 0, len(rs.mappingRules))
for _, mappingRule := range rs.mappingRules {
activeRule := mappingRule.activeRule(timeNanos)
mappingRules = append(mappingRules, activeRule)
}
rollupRules := make([]*rollupRule, 0, len(rs.rollupRules))
for _, rollupRule := range rs.rollupRules {
activeRule := rollupRule.activeRule(timeNanos)
rollupRules = append(rollupRules, activeRule)
}
return newActiveRuleSet(
rs.version,
mappingRules,
rollupRules,
rs.tagsFilterOpts,
rs.newRollupIDFn,
rs.isRollupIDFn,
)
}
// Proto returns the protobuf representation of a ruleset.
func (rs *ruleSet) Proto() (*rulepb.RuleSet, error) {
res := &rulepb.RuleSet{
Uuid: rs.uuid,
Namespace: string(rs.namespace),
CreatedAtNanos: rs.createdAtNanos,
LastUpdatedAtNanos: rs.lastUpdatedAtNanos,
LastUpdatedBy: rs.lastUpdatedBy,
Tombstoned: rs.tombstoned,
CutoverNanos: rs.cutoverNanos,
}
mappingRules := make([]*rulepb.MappingRule, len(rs.mappingRules))
for i, m := range rs.mappingRules {
mr, err := m.proto()
if err != nil {
return nil, err
}
mappingRules[i] = mr
}
res.MappingRules = mappingRules
rollupRules := make([]*rulepb.RollupRule, len(rs.rollupRules))
for i, r := range rs.rollupRules {
rr, err := r.proto()
if err != nil {
return nil, err
}
rollupRules[i] = rr
}
res.RollupRules = rollupRules
return res, nil
}
func (rs *ruleSet) MappingRules() (view.MappingRules, error) {
mappingRules := make(view.MappingRules, len(rs.mappingRules))
for _, m := range rs.mappingRules {
hist, err := m.history()
if err != nil {
return nil, err
}
mappingRules[m.uuid] = hist
}
return mappingRules, nil
}
func (rs *ruleSet) RollupRules() (view.RollupRules, error) {
rollupRules := make(view.RollupRules, len(rs.rollupRules))
for _, r := range rs.rollupRules {
hist, err := r.history()
if err != nil {
return nil, err
}
rollupRules[r.uuid] = hist
}
return rollupRules, nil
}
func (rs *ruleSet) Latest() (view.RuleSet, error) {
mrs, err := rs.latestMappingRules()
if err != nil {
return view.RuleSet{}, err
}
rrs, err := rs.latestRollupRules()
if err != nil {
return view.RuleSet{}, err
}
return view.RuleSet{
Namespace: string(rs.Namespace()),
Version: rs.Version(),
CutoverMillis: rs.CutoverNanos() / nanosPerMilli,
MappingRules: mrs,
RollupRules: rrs,
}, nil
}
func (rs *ruleSet) Clone() MutableRuleSet {
namespace := make([]byte, len(rs.namespace))
copy(namespace, rs.namespace)
mappingRules := make([]*mappingRule, len(rs.mappingRules))
for i, m := range rs.mappingRules {
c := m.clone()
mappingRules[i] = &c
}
rollupRules := make([]*rollupRule, len(rs.rollupRules))
for i, r := range rs.rollupRules {
c := r.clone()
rollupRules[i] = &c
}
	// The tags filter options and rollup ID functions are copied by reference,
	// so the clone shares them with the original ruleset.
return &ruleSet{
uuid: rs.uuid,
version: rs.version,
createdAtNanos: rs.createdAtNanos,
lastUpdatedAtNanos: rs.lastUpdatedAtNanos,
lastUpdatedBy: rs.lastUpdatedBy,
tombstoned: rs.tombstoned,
cutoverNanos: rs.cutoverNanos,
namespace: namespace,
mappingRules: mappingRules,
rollupRules: rollupRules,
tagsFilterOpts: rs.tagsFilterOpts,
newRollupIDFn: rs.newRollupIDFn,
isRollupIDFn: rs.isRollupIDFn,
}
}
func (rs *ruleSet) AddMappingRule(mrv view.MappingRule, meta UpdateMetadata) (string, error) {
m, err := rs.getMappingRuleByName(mrv.Name)
if err != nil && err != errRuleNotFound {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "add", mrv.Name))
}
if err == errRuleNotFound {
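		// No rule with this name exists yet: create a new rule and record its first snapshot.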
m = newEmptyMappingRule()
if err = m.addSnapshot(
mrv.Name,
mrv.Filter,
mrv.AggregationID,
mrv.StoragePolicies,
mrv.DropPolicy,
mrv.Tags,
meta,
); err != nil {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "add", mrv.Name))
}
rs.mappingRules = append(rs.mappingRules, m)
} else {
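		// A rule with this name already exists (typically tombstoned), so revive it
		// with the new snapshot rather than appending a duplicate rule.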
if err := m.revive(
mrv.Name,
mrv.Filter,
mrv.AggregationID,
mrv.StoragePolicies,
mrv.DropPolicy,
mrv.Tags,
meta,
); err != nil {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "revive", mrv.Name))
}
}
rs.updateMetadata(meta)
return m.uuid, nil
}
func (rs *ruleSet) UpdateMappingRule(mrv view.MappingRule, meta UpdateMetadata) error {
m, err := rs.getMappingRuleByID(mrv.ID)
if err != nil {
return merrors.NewInvalidInputError(fmt.Sprintf(ruleIDNotFoundErrorFmt, mrv.ID))
}
if err := m.addSnapshot(
mrv.Name,
mrv.Filter,
mrv.AggregationID,
mrv.StoragePolicies,
mrv.DropPolicy,
mrv.Tags,
meta,
); err != nil {
return xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "update", mrv.Name))
}
rs.updateMetadata(meta)
return nil
}
func (rs *ruleSet) DeleteMappingRule(id string, meta UpdateMetadata) error {
m, err := rs.getMappingRuleByID(id)
if err != nil {
return merrors.NewInvalidInputError(fmt.Sprintf(ruleIDNotFoundErrorFmt, id))
}
if err := m.markTombstoned(meta); err != nil {
return xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "delete", id))
}
rs.updateMetadata(meta)
return nil
}
func (rs *ruleSet) AddRollupRule(rrv view.RollupRule, meta UpdateMetadata) (string, error) {
r, err := rs.getRollupRuleByName(rrv.Name)
if err != nil && err != errRuleNotFound {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "add", rrv.Name))
}
targets := newRollupTargetsFromView(rrv.Targets)
if err == errRuleNotFound {
r = newEmptyRollupRule()
if err = r.addSnapshot(
rrv.Name,
rrv.Filter,
targets,
meta,
); err != nil {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "add", rrv.Name))
}
rs.rollupRules = append(rs.rollupRules, r)
} else {
if err := r.revive(
rrv.Name,
rrv.Filter,
targets,
meta,
); err != nil {
return "", xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "revive", rrv.Name))
}
}
rs.updateMetadata(meta)
return r.uuid, nil
}
func (rs *ruleSet) UpdateRollupRule(rrv view.RollupRule, meta UpdateMetadata) error {
r, err := rs.getRollupRuleByID(rrv.ID)
if err != nil {
return merrors.NewInvalidInputError(fmt.Sprintf(ruleIDNotFoundErrorFmt, rrv.ID))
}
targets := newRollupTargetsFromView(rrv.Targets)
if err = r.addSnapshot(
rrv.Name,
rrv.Filter,
targets,
meta,
); err != nil {
return xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "update", rrv.Name))
}
rs.updateMetadata(meta)
return nil
}
func (rs *ruleSet) DeleteRollupRule(id string, meta UpdateMetadata) error {
r, err := rs.getRollupRuleByID(id)
if err != nil {
return merrors.NewInvalidInputError(fmt.Sprintf(ruleIDNotFoundErrorFmt, id))
}
if err := r.markTombstoned(meta); err != nil {
return xerrors.Wrap(err, fmt.Sprintf(ruleActionErrorFmt, "delete", id))
}
rs.updateMetadata(meta)
return nil
}
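// Delete tombstones the ruleset together with all of its mapping and rollup rules.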
func (rs *ruleSet) Delete(meta UpdateMetadata) error {
if rs.tombstoned {
return fmt.Errorf("%s is already tombstoned", string(rs.namespace))
}
rs.tombstoned = true
rs.updateMetadata(meta)
// Make sure that all of the rules in the ruleset are tombstoned as well.
for _, m := range rs.mappingRules {
if t := m.tombstoned(); !t {
_ = m.markTombstoned(meta)
}
}
for _, r := range rs.rollupRules {
if t := r.tombstoned(); !t {
_ = r.markTombstoned(meta)
}
}
return nil
}
func (rs *ruleSet) ApplyRuleSetChanges(rsc changes.RuleSetChanges, meta UpdateMetadata) error {
if err := rs.applyMappingRuleChanges(rsc.MappingRuleChanges, meta); err != nil {
return err
}
return rs.applyRollupRuleChanges(rsc.RollupRuleChanges, meta)
}
func (rs *ruleSet) Revive(meta UpdateMetadata) error {
if !rs.Tombstoned() {
return xerrors.Wrap(errRuleSetNotTombstoned, fmt.Sprintf(ruleSetActionErrorFmt, "revive", string(rs.namespace)))
}
rs.tombstoned = false
rs.updateMetadata(meta)
return nil
}
func (rs *ruleSet) updateMetadata(meta UpdateMetadata) {
rs.cutoverNanos = meta.cutoverNanos
rs.lastUpdatedAtNanos = meta.updatedAtNanos
rs.lastUpdatedBy = meta.updatedBy
}
func (rs *ruleSet) getMappingRuleByName(name string) (*mappingRule, error) {
for _, m := range rs.mappingRules {
n, err := m.name()
if err != nil {
continue
}
if n == name {
return m, nil
}
}
return nil, errRuleNotFound
}
func (rs *ruleSet) getMappingRuleByID(id string) (*mappingRule, error) {
for _, m := range rs.mappingRules {
if m.uuid == id {
return m, nil
}
}
return nil, errRuleNotFound
}
func (rs *ruleSet) getRollupRuleByName(name string) (*rollupRule, error) {
for _, r := range rs.rollupRules {
n, err := r.name()
if err != nil {
return nil, err
}
if n == name {
return r, nil
}
}
return nil, errRuleNotFound
}
func (rs *ruleSet) getRollupRuleByID(id string) (*rollupRule, error) {
for _, r := range rs.rollupRules {
if r.uuid == id {
return r, nil
}
}
return nil, errRuleNotFound
}
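// latestMappingRules returns the most recent non-tombstoned snapshot of each mapping rule, sorted by rule name.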
func (rs *ruleSet) latestMappingRules() ([]view.MappingRule, error) {
mrs, err := rs.MappingRules()
if err != nil {
return nil, err
}
filtered := make([]view.MappingRule, 0, len(mrs))
for _, m := range mrs {
if len(m) > 0 && !m[0].Tombstoned {
// Rule snapshots are sorted by cutover time in descending order.
filtered = append(filtered, m[0])
}
}
sort.Sort(view.MappingRulesByNameAsc(filtered))
return filtered, nil
}
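// latestRollupRules returns the most recent non-tombstoned snapshot of each rollup rule, sorted by rule name.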
func (rs *ruleSet) latestRollupRules() ([]view.RollupRule, error) {
rrs, err := rs.RollupRules()
if err != nil {
return nil, err
}
filtered := make([]view.RollupRule, 0, len(rrs))
for _, r := range rrs {
if len(r) > 0 && !r[0].Tombstoned {
// Rule snapshots are sorted by cutover time in descending order.
filtered = append(filtered, r[0])
}
}
sort.Sort(view.RollupRulesByNameAsc(filtered))
return filtered, nil
}
func (rs *ruleSet) applyMappingRuleChanges(mrChanges []changes.MappingRuleChange, meta UpdateMetadata) error {
for _, mrChange := range mrChanges {
switch mrChange.Op {
case changes.AddOp:
if _, err := rs.AddMappingRule(*mrChange.RuleData, meta); err != nil {
return err
}
case changes.ChangeOp:
if err := rs.UpdateMappingRule(*mrChange.RuleData, meta); err != nil {
return err
}
case changes.DeleteOp:
if err := rs.DeleteMappingRule(*mrChange.RuleID, meta); err != nil {
return err
}
default:
return merrors.NewInvalidInputError(fmt.Sprintf(unknownOpTypeFmt, mrChange.Op))
}
}
return nil
}
func (rs *ruleSet) applyRollupRuleChanges(rrChanges []changes.RollupRuleChange, meta UpdateMetadata) error {
for _, rrChange := range rrChanges {
switch rrChange.Op {
case changes.AddOp:
if _, err := rs.AddRollupRule(*rrChange.RuleData, meta); err != nil {
return err
}
case changes.ChangeOp:
if err := rs.UpdateRollupRule(*rrChange.RuleData, meta); err != nil {
return err
}
case changes.DeleteOp:
if err := rs.DeleteRollupRule(*rrChange.RuleID, meta); err != nil {
return err
}
default:
return merrors.NewInvalidInputError(fmt.Sprintf(unknownOpTypeFmt, rrChange.Op))
}
}
return nil
}
// RuleSetUpdateHelper stores the necessary details to create an UpdateMetadata.
type RuleSetUpdateHelper struct {
propagationDelay time.Duration
}
// NewRuleSetUpdateHelper creates a new RuleSetUpdateHelper struct.
func NewRuleSetUpdateHelper(propagationDelay time.Duration) RuleSetUpdateHelper {
return RuleSetUpdateHelper{propagationDelay: propagationDelay}
}
// UpdateMetadata contains descriptive information that needs to be updated
// with any modification of the ruleset.
type UpdateMetadata struct {
cutoverNanos int64
updatedAtNanos int64
updatedBy string
}
// NewUpdateMetadata creates a properly initialized UpdateMetadata object.
func (r RuleSetUpdateHelper) NewUpdateMetadata(updateTime int64, updatedBy string) UpdateMetadata {
cutoverNanos := updateTime + int64(r.propagationDelay)
return UpdateMetadata{updatedAtNanos: updateTime, cutoverNanos: cutoverNanos, updatedBy: updatedBy}
}
|
// UpdateRollupRule creates a new rollup rule and adds it to this ruleset.
UpdateRollupRule(view.RollupRule, UpdateMetadata) error
|
testing.go
|
package consensus
import (
"context"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/state"
"github.com/filecoin-project/go-filecoin/types"
"gx/ipfs/QmS2aqUZLJp8kF1ihE5rvDGE5LvmKDPnx32w9Z1BW9xLV5/go-ipfs-blockstore"
"github.com/stretchr/testify/require"
)
// TestView is an implementation of stateView used for testing the chain
// manager. It provides a consistent view of the storage market
// that returns 1 for total storage and 1 for any miner's storage.
type TestView struct{}
var _ PowerTableView = &TestView{}
// Total always returns 1.
func (tv *TestView) Total(ctx context.Context, st state.Tree, bstore blockstore.Blockstore) (uint64, error) {
return uint64(1), nil
}
// Miner always returns 1.
func (tv *TestView) Miner(ctx context.Context, st state.Tree, bstore blockstore.Blockstore, mAddr address.Address) (uint64, error) {
return uint64(1), nil
}
// HasPower always returns true.
func (tv *TestView) HasPower(ctx context.Context, st state.Tree, bstore blockstore.Blockstore, mAddr address.Address) bool {
return true
}
// RequireNewTipSet instantiates and returns a new tipset of the given blocks
// and requires that the setup validation succeed.
func RequireNewTipSet(require *require.Assertions, blks ...*types.Block) types.TipSet {
ts, err := types.NewTipSet(blks...)
require.NoError(err)
return ts
}
// RequireTipSetAdd adds a block to the provided tipset and requires that this
// does not error.
func RequireTipSetAdd(require *require.Assertions, blk *types.Block, ts types.TipSet) {
err := ts.AddBlock(blk)
require.NoError(err)
}
// TestPowerTableView is an implementation of the powertable view used for testing mining
// wherein each miner has minerPower out of a total of totalPower.
type TestPowerTableView struct{ minerPower, totalPower uint64 }
// NewTestPowerTableView creates a test power view with the given miner and total power.
func NewTestPowerTableView(minerPower uint64, totalPower uint64) *TestPowerTableView {
return &TestPowerTableView{minerPower: minerPower, totalPower: totalPower}
}
// Total always returns the total power that was supplied to NewTestPowerTableView.
func (tv *TestPowerTableView) Total(ctx context.Context, st state.Tree, bstore blockstore.Blockstore) (uint64, error) {
return tv.totalPower, nil
}
// Miner always returns the miner power that was supplied to NewTestPowerTableView.
func (tv *TestPowerTableView) Miner(ctx context.Context, st state.Tree, bstore blockstore.Blockstore, mAddr address.Address) (uint64, error) {
return tv.minerPower, nil
}
// HasPower always returns true.
func (tv *TestPowerTableView) HasPower(ctx context.Context, st state.Tree, bstore blockstore.Blockstore, mAddr address.Address) bool {
return true
}
// TestSignedMessageValidator is a validator that performs no validation, to simplify message creation in tests.
type TestSignedMessageValidator struct{}
|
func (tsmv *TestSignedMessageValidator) Validate(ctx context.Context, msg *types.SignedMessage, fromActor *actor.Actor) error {
return nil
}
// TestBlockRewarder is a rewarder that doesn't actually add any rewards, to simplify state tracking in tests.
type TestBlockRewarder struct{}
var _ BlockRewarder = (*TestBlockRewarder)(nil)
// BlockReward is a noop
func (tbr *TestBlockRewarder) BlockReward(ctx context.Context, st state.Tree, minerAddr address.Address) error {
// do nothing to keep state root the same
return nil
}
// GasReward is a noop
func (tbr *TestBlockRewarder) GasReward(ctx context.Context, st state.Tree, minerAddr address.Address, msg *types.SignedMessage, gas *types.AttoFIL) error {
// do nothing to keep state root the same
return nil
}
// NewTestProcessor creates a processor with a test validator and test rewarder
func NewTestProcessor() *DefaultProcessor {
return &DefaultProcessor{
signedMessageValidator: &TestSignedMessageValidator{},
blockRewarder: &TestBlockRewarder{},
}
}
|
var _ SignedMessageValidator = (*TestSignedMessageValidator)(nil)
// Validate always returns nil
|
utils.go
|
// Copyright 2018-2021 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package utils
import (
"fmt"
"math/rand"
"net"
"net/http"
"net/url"
"os/user"
"path"
"path/filepath"
"regexp"
"strings"
"time"
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
grouppb "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/cs3org/reva/pkg/registry"
"github.com/cs3org/reva/pkg/registry/memory"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
)
var (
matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)")
matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])")
matchEmail = regexp.MustCompile(`^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$`)
// GlobalRegistry configures a service registry globally accessible. It defaults to a memory registry. The usage of
// globals is not encouraged, and this is a workaround until the PR is out of a draft state.
GlobalRegistry registry.Registry = memory.New(map[string]interface{}{})
)
// Skip evaluates whether a source endpoint contains any of the prefixes.
// e.g. /a/b/c/d/e contains the prefix /a/b/c
func Skip(source string, prefixes []string) bool {
for i := range prefixes {
if strings.HasPrefix(source, prefixes[i]) {
return true
}
}
return false
}
// GetClientIP retrieves the client IP from incoming requests
func GetClientIP(r *http.Request) (string, error) {
var clientIP string
forwarded := r.Header.Get("X-FORWARDED-FOR")
if forwarded != "" {
clientIP = forwarded
} else {
if ip, _, err := net.SplitHostPort(r.RemoteAddr); err != nil
|
else {
clientIP = ip
}
}
return clientIP, nil
}
// ToSnakeCase converts a CamelCase string to a snake_case string.
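// e.g. "CamelCase" -> "camel_case".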
func ToSnakeCase(str string) string {
snake := matchFirstCap.ReplaceAllString(str, "${1}_${2}")
snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}")
return strings.ToLower(snake)
}
// ResolvePath converts relative local paths to absolute paths
func ResolvePath(path string) (string, error) {
usr, err := user.Current()
if err != nil {
return "", err
}
homeDir := usr.HomeDir
if path == "~" {
path = homeDir
} else if strings.HasPrefix(path, "~/") {
path = filepath.Join(homeDir, path[2:])
}
path, err = filepath.Abs(path)
if err != nil {
return "", err
}
return path, nil
}
// RandString is a helper to create tokens.
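// Note: it reseeds math/rand on every call and is not suitable for cryptographic secrets.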
func RandString(n int) string {
rand.Seed(time.Now().UTC().UnixNano())
var l = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]rune, n)
for i := range b {
b[i] = l[rand.Intn(len(l))]
}
return string(b)
}
// TSToUnixNano converts a protobuf Timestamp to uint64
// with nanoseconds resolution.
func TSToUnixNano(ts *types.Timestamp) uint64 {
return uint64(time.Unix(int64(ts.Seconds), int64(ts.Nanos)).UnixNano())
}
// TSToTime converts a protobuf Timestamp to Go's time.Time.
func TSToTime(ts *types.Timestamp) time.Time {
return time.Unix(int64(ts.Seconds), int64(ts.Nanos))
}
// LaterTS returns the timestamp which occurs later.
func LaterTS(t1 *types.Timestamp, t2 *types.Timestamp) *types.Timestamp {
if TSToUnixNano(t1) > TSToUnixNano(t2) {
return t1
}
return t2
}
// ExtractGranteeID returns the ID, user or group, set in the GranteeId object
func ExtractGranteeID(grantee *provider.Grantee) (*userpb.UserId, *grouppb.GroupId) {
switch t := grantee.Id.(type) {
case *provider.Grantee_UserId:
return t.UserId, nil
case *provider.Grantee_GroupId:
return nil, t.GroupId
default:
return nil, nil
}
}
// UserEqual returns whether two users have the same field values.
func UserEqual(u, v *userpb.UserId) bool {
return u != nil && v != nil && u.Idp == v.Idp && u.OpaqueId == v.OpaqueId
}
// GroupEqual returns whether two groups have the same field values.
func GroupEqual(u, v *grouppb.GroupId) bool {
return u != nil && v != nil && u.Idp == v.Idp && u.OpaqueId == v.OpaqueId
}
// ResourceIDEqual returns whether two resources have the same field values.
func ResourceIDEqual(u, v *provider.ResourceId) bool {
return u != nil && v != nil && u.StorageId == v.StorageId && u.OpaqueId == v.OpaqueId
}
// ResourceEqual returns whether two resources have the same field values.
func ResourceEqual(u, v *provider.Reference) bool {
return u != nil && v != nil && u.Path == v.Path && ((u.ResourceId == nil && v.ResourceId == nil) || (ResourceIDEqual(u.ResourceId, v.ResourceId)))
}
// GranteeEqual returns whether two grantees have the same field values.
func GranteeEqual(u, v *provider.Grantee) bool {
if u == nil || v == nil {
return false
}
uu, ug := ExtractGranteeID(u)
vu, vg := ExtractGranteeID(v)
return u.Type == v.Type && (UserEqual(uu, vu) || GroupEqual(ug, vg))
}
// IsEmailValid checks whether the provided email has a valid format.
func IsEmailValid(e string) bool {
if len(e) < 3 || len(e) > 254 {
return false
}
return matchEmail.MatchString(e)
}
// IsValidWebAddress checks whether the provided address is a valid URL.
func IsValidWebAddress(address string) bool {
_, err := url.ParseRequestURI(address)
return err == nil
}
// IsValidPhoneNumber checks whether the provided phone number has a valid format.
func IsValidPhoneNumber(number string) bool {
re := regexp.MustCompile(`^(?:(?:\(?(?:00|\+)([1-4]\d\d|[1-9]\d?)\)?)?[\-\.\ \\\/]?)?((?:\(?\d{1,}\)?[\-\.\ \\\/]?){0,})(?:[\-\.\ \\\/]?(?:#|ext\.?|extension|x)[\-\.\ \\\/]?(\d+))?$`)
return re.MatchString(number)
}
// IsValidName checks that the given name contains only alphabetic, space or dash characters.
func IsValidName(name string) bool {
re := regexp.MustCompile(`^[A-Za-z\s\-]*$`)
return re.MatchString(name)
}
// MarshalProtoV1ToJSON marshals a proto V1 message to a JSON byte array
// TODO: update this once we start using V2 in CS3APIs
func MarshalProtoV1ToJSON(m proto.Message) ([]byte, error) {
mV2 := proto.MessageV2(m)
return protojson.Marshal(mV2)
}
// UnmarshalJSONToProtoV1 decodes a JSON byte array to a specified proto message type
// TODO: update this once we start using V2 in CS3APIs
func UnmarshalJSONToProtoV1(b []byte, m proto.Message) error {
mV2 := proto.MessageV2(m)
if err := protojson.Unmarshal(b, mV2); err != nil {
return err
}
return nil
}
// IsRelativeReference returns true if the given reference qualifies as relative
// when the resource id is set and the path starts with a .
//
// TODO(corby): Currently, if the path begins with a dot and the ResourceId is set but has an empty StorageId and OpaqueId,
// the reference is still treated as relative. We need to check whether we want that, because in some places
// we might not want to set both StorageId and OpaqueId, so we can't do a hard check that they are set.
func IsRelativeReference(ref *provider.Reference) bool {
return ref.ResourceId != nil && strings.HasPrefix(ref.Path, ".")
}
// IsAbsoluteReference returns true if the given reference qualifies as absolute
// when either only the resource id is set or only the path is set and starts with /
//
// TODO(corby): Currently, if the path is empty and the ResourceId is set but has an empty StorageId and OpaqueId,
// the reference is still treated as absolute. We need to check whether we want that, because in some places
// we might not want to set both StorageId and OpaqueId, so we can't do a hard check that they are set.
func IsAbsoluteReference(ref *provider.Reference) bool {
return (ref.ResourceId != nil && ref.Path == "") || (ref.ResourceId == nil) && strings.HasPrefix(ref.Path, "/")
}
// IsAbsolutePathReference returns true if the given reference qualifies as a global path
// when only the path is set and starts with /
func IsAbsolutePathReference(ref *provider.Reference) bool {
return ref.ResourceId == nil && strings.HasPrefix(ref.Path, "/")
}
// MakeRelativePath prefixes the path with a . to use it in a relative reference
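// e.g. "a/b" -> "./a/b" and "" -> "."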
func MakeRelativePath(p string) string {
p = path.Join("/", p)
if p == "/" {
return "."
}
return "." + p
}
// UserTypeMap translates account type string to CS3 UserType
func UserTypeMap(accountType string) userpb.UserType {
var t userpb.UserType
switch accountType {
case "primary":
t = userpb.UserType_USER_TYPE_PRIMARY
case "secondary":
t = userpb.UserType_USER_TYPE_SECONDARY
case "service":
t = userpb.UserType_USER_TYPE_SERVICE
case "application":
t = userpb.UserType_USER_TYPE_APPLICATION
case "guest":
t = userpb.UserType_USER_TYPE_GUEST
case "federated":
t = userpb.UserType_USER_TYPE_FEDERATED
case "lightweight":
t = userpb.UserType_USER_TYPE_LIGHTWEIGHT
}
return t
}
// UserTypeToString translates CS3 UserType to user-readable string
func UserTypeToString(accountType userpb.UserType) string {
var t string
switch accountType {
case userpb.UserType_USER_TYPE_PRIMARY:
t = "primary"
case userpb.UserType_USER_TYPE_SECONDARY:
t = "secondary"
case userpb.UserType_USER_TYPE_SERVICE:
t = "service"
case userpb.UserType_USER_TYPE_APPLICATION:
t = "application"
case userpb.UserType_USER_TYPE_GUEST:
t = "guest"
case userpb.UserType_USER_TYPE_FEDERATED:
t = "federated"
case userpb.UserType_USER_TYPE_LIGHTWEIGHT:
t = "lightweight"
}
return t
}
// SplitStorageSpaceID can be used to split `storagespaceid` into `storageid` and `nodeid`.
// Currently they are built using `<storageid>!<nodeid>` in the decomposedfs, but other drivers might return different ids.
// Any place in the code that relies on this function should instead use the storage registry to look up the responsible storage provider.
// Note: this would in effect change the storage registry into a storage space registry.
func SplitStorageSpaceID(ssid string) (storageid, nodeid string, err error) {
// query that specific storage provider
parts := strings.SplitN(ssid, "!", 2)
if len(parts) != 2 {
return "", "", fmt.Errorf("storage space id must be separated by '!'")
}
return parts[0], parts[1], nil
}
// GetViewMode converts a human-readable string to a view mode for opening a resource in an app.
func GetViewMode(viewMode string) gateway.OpenInAppRequest_ViewMode {
switch viewMode {
case "view":
return gateway.OpenInAppRequest_VIEW_MODE_VIEW_ONLY
case "read":
return gateway.OpenInAppRequest_VIEW_MODE_READ_ONLY
case "write":
return gateway.OpenInAppRequest_VIEW_MODE_READ_WRITE
default:
return gateway.OpenInAppRequest_VIEW_MODE_INVALID
}
}
|
{
ipObj := net.ParseIP(r.RemoteAddr)
if ipObj == nil {
return "", err
}
clientIP = ipObj.String()
}
|
v03.rs
|
use crate::trace::exporter::datadog::model::Error;
use opentelemetry::sdk::export::trace;
use opentelemetry::{Key, Value};
use std::time::SystemTime;
pub(crate) fn encode(service_name: &str, spans: Vec<trace::SpanData>) -> Result<Vec<u8>, Error> {
let mut encoded = Vec::new();
rmp::encode::write_array_len(&mut encoded, spans.len() as u32)?;
for span in spans.into_iter() {
// API supports but doesn't mandate grouping spans with the same trace ID
rmp::encode::write_array_len(&mut encoded, 1)?;
// Safe until the year 2262 when Datadog will need to change their API
let start = span
.start_time
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_nanos() as i64;
let duration = span
.end_time
.duration_since(span.start_time)
|
if let Some(Value::String(s)) = span.attributes.get(&Key::new("span.type")) {
rmp::encode::write_map_len(&mut encoded, 11)?;
rmp::encode::write_str(&mut encoded, "type")?;
rmp::encode::write_str(&mut encoded, s.as_ref())?;
} else {
rmp::encode::write_map_len(&mut encoded, 10)?;
}
// Datadog span name is OpenTelemetry component name - see module docs for more information
rmp::encode::write_str(&mut encoded, "service")?;
rmp::encode::write_str(&mut encoded, service_name)?;
rmp::encode::write_str(&mut encoded, "name")?;
rmp::encode::write_str(&mut encoded, span.instrumentation_lib.name)?;
rmp::encode::write_str(&mut encoded, "resource")?;
rmp::encode::write_str(&mut encoded, &span.name)?;
rmp::encode::write_str(&mut encoded, "trace_id")?;
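// The v0.3 trace API takes 64-bit ids, so the 128-bit OpenTelemetry trace id is truncated to its low 64 bits here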
rmp::encode::write_u64(&mut encoded, span.span_context.trace_id().to_u128() as u64)?;
rmp::encode::write_str(&mut encoded, "span_id")?;
rmp::encode::write_u64(&mut encoded, span.span_context.span_id().to_u64())?;
rmp::encode::write_str(&mut encoded, "parent_id")?;
rmp::encode::write_u64(&mut encoded, span.parent_span_id.to_u64())?;
rmp::encode::write_str(&mut encoded, "start")?;
rmp::encode::write_i64(&mut encoded, start)?;
rmp::encode::write_str(&mut encoded, "duration")?;
rmp::encode::write_i64(&mut encoded, duration)?;
rmp::encode::write_str(&mut encoded, "error")?;
rmp::encode::write_i32(&mut encoded, span.status_code as i32)?;
rmp::encode::write_str(&mut encoded, "meta")?;
rmp::encode::write_map_len(&mut encoded, span.attributes.len() as u32)?;
for (key, value) in span.attributes.iter() {
rmp::encode::write_str(&mut encoded, key.as_str())?;
rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?;
}
}
Ok(encoded)
}
|
.map(|x| x.as_nanos() as i64)
.unwrap_or(0);
|
bn_tools_t.py
|
import numpy as np
# Nonlinearity functions (Numpy implementation)
nl_linear = lambda x: x
nl_tanh = lambda x: np.tanh(x)
nl_sigmoid = lambda x: 1./(1+np.exp(-x))
nl_rect = lambda x: np.clip(x, 0, np.inf)
#nl_rect = lambda x: np.clip(x, -np.inf, np.inf)
nl_shallow_rect = lambda x: np.clip(0.1*x, 0, np.inf)
nl_clip = lambda x: np.clip(x, 0, 1)
nl_softplus = lambda x: np.log(1. + np.exp(x)) #
#'''
# Nonlinearity functions (Theano implementation)
import numpy, theano
import numpy.distutils
import numpy.distutils.__config__
import theano.tensor as T
nl_linear_t = lambda x: x
nl_tanh_t = lambda x: T.tanh(x)
nl_sigmoid_t = lambda x: T.nnet.sigmoid(x)
nl_fermi_t = lambda x: T.nnet.sigmoid(x*50)
nl_clip_t = lambda x: T.clip(x, 0., 1.)
nl_rect_t = lambda x: T.maximum(x, 0.)
nl_rect_squared_t = lambda x: T.maximum(x**2, 0.)
nl_shallow_rect_t = lambda x: T.maximum(0.1*x, 0.)
#'''
def convert_input_const_to_time(inp, num_frames):
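# Broadcasts a single-frame input of shape (1, 1, N) to shape (1, num_frames, N).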
if inp.shape[0] != 1:
raise Exception("First axis of inp has to be 1-dim.")
if inp.shape[1] != 1:
inp = inp[:, 0:1, :]
print('WARNING (bn_tools): Input has more than one frame. Only first frame will be broadcast.')
inp = np.tile(inp, (1, num_frames, 1))
return inp
def check_nonlinearities():
import matplotlib.pyplot as plt
x_np=np.arange(-5,5,0.1).astype('float32')
x=theano.shared(x_np)
# for fkt in [nl_linear_t,nl_rect_t,nl_clip_t,nl_sigmoid_t, nl_tanh_t]:
for fkt in [nl_clip_t,nl_sigmoid_t]:
y= fkt(x)
tf = theano.function([],y)
plt.plot(x_np, tf())
plt.show()
if __name__=='__main__':
|
check_nonlinearities()
|
|
row_event.go
|
package replication
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"strconv"
"time"
"github.com/pingcap/errors"
"github.com/shopspring/decimal"
"github.com/siddontang/go-log/log"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go/hack"
)
var errMissingTableMapEvent = errors.New("invalid table id, no corresponding table map event")
type TableMapEvent struct {
flavor string
tableIDSize int
TableID uint64
Flags uint16
Schema []byte
Table []byte
ColumnCount uint64
ColumnType []byte
ColumnMeta []uint16
//len = (ColumnCount + 7) / 8
NullBitmap []byte
/*
The following fields are available only from MySQL 8.0.1 or MariaDB 10.5.0 onwards,
see:
- https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html#sysvar_binlog_row_metadata
- https://mysqlhighavailability.com/more-metadata-is-written-into-binary-log/
- https://jira.mariadb.org/browse/MDEV-20477
*/
// SignednessBitmap stores signedness info for numeric columns.
SignednessBitmap []byte
// DefaultCharset/ColumnCharset stores collation info for character columns.
// DefaultCharset[0] is the default collation of character columns.
// For character columns that have different charset,
// (character column index, column collation) pairs follow.
DefaultCharset []uint64
// ColumnCharset contains collation sequence for all character columns
ColumnCharset []uint64
// SetStrValue stores values for set columns.
SetStrValue [][][]byte
setStrValueString [][]string
// EnumStrValue stores values for enum columns.
EnumStrValue [][][]byte
enumStrValueString [][]string
// ColumnName list all column names.
ColumnName [][]byte
columnNameString []string // the same as ColumnName in string type, just for reuse
// GeometryType stores real type for geometry columns.
GeometryType []uint64
// PrimaryKey is a sequence of column indexes of primary key.
PrimaryKey []uint64
// PrimaryKeyPrefix is the prefix length used for each column of primary key.
// 0 means that the whole column length is used.
PrimaryKeyPrefix []uint64
// EnumSetDefaultCharset/EnumSetColumnCharset is similar to DefaultCharset/ColumnCharset but for enum/set columns.
EnumSetDefaultCharset []uint64
EnumSetColumnCharset []uint64
}
func (e *TableMapEvent) Decode(data []byte) error {
pos := 0
e.TableID = FixedLengthInt(data[0:e.tableIDSize])
pos += e.tableIDSize
e.Flags = binary.LittleEndian.Uint16(data[pos:])
pos += 2
schemaLength := data[pos]
pos++
e.Schema = data[pos : pos+int(schemaLength)]
pos += int(schemaLength)
//skip 0x00
pos++
tableLength := data[pos]
pos++
e.Table = data[pos : pos+int(tableLength)]
pos += int(tableLength)
//skip 0x00
pos++
var n int
e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
pos += n
e.ColumnType = data[pos : pos+int(e.ColumnCount)]
pos += int(e.ColumnCount)
var err error
var metaData []byte
if metaData, _, n, err = LengthEncodedString(data[pos:]); err != nil {
return errors.Trace(err)
}
if err = e.decodeMeta(metaData); err != nil {
return errors.Trace(err)
}
pos += n
nullBitmapSize := bitmapByteSize(int(e.ColumnCount))
if len(data[pos:]) < nullBitmapSize {
return io.EOF
}
e.NullBitmap = data[pos : pos+nullBitmapSize]
pos += nullBitmapSize
if err = e.decodeOptionalMeta(data[pos:]); err != nil {
return err
}
return nil
}
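// bitmapByteSize returns the number of bytes needed to hold columnCount bits.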
func bitmapByteSize(columnCount int) int {
return int(columnCount+7) / 8
}
// see mysql sql/log_event.h
/*
0 byte
MYSQL_TYPE_DECIMAL
MYSQL_TYPE_TINY
MYSQL_TYPE_SHORT
MYSQL_TYPE_LONG
MYSQL_TYPE_NULL
MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONGLONG
MYSQL_TYPE_INT24
MYSQL_TYPE_DATE
MYSQL_TYPE_TIME
MYSQL_TYPE_DATETIME
MYSQL_TYPE_YEAR
1 byte
MYSQL_TYPE_FLOAT
MYSQL_TYPE_DOUBLE
MYSQL_TYPE_BLOB
MYSQL_TYPE_GEOMETRY
//maybe
MYSQL_TYPE_TIME2
MYSQL_TYPE_DATETIME2
MYSQL_TYPE_TIMESTAMP2
2 byte
MYSQL_TYPE_VARCHAR
MYSQL_TYPE_BIT
MYSQL_TYPE_NEWDECIMAL
MYSQL_TYPE_VAR_STRING
MYSQL_TYPE_STRING
This enumeration value is only used internally and cannot exist in a binlog.
MYSQL_TYPE_NEWDATE
MYSQL_TYPE_ENUM
MYSQL_TYPE_SET
MYSQL_TYPE_TINY_BLOB
MYSQL_TYPE_MEDIUM_BLOB
MYSQL_TYPE_LONG_BLOB
*/
func (e *TableMapEvent) decodeMeta(data []byte) error {
pos := 0
e.ColumnMeta = make([]uint16, e.ColumnCount)
for i, t := range e.ColumnType {
switch t {
case MYSQL_TYPE_STRING:
var x uint16 = uint16(data[pos]) << 8 //real type
x += uint16(data[pos+1]) //pack or field length
e.ColumnMeta[i] = x
pos += 2
case MYSQL_TYPE_NEWDECIMAL:
var x uint16 = uint16(data[pos]) << 8 //precision
x += uint16(data[pos+1]) //decimals
e.ColumnMeta[i] = x
pos += 2
case MYSQL_TYPE_VAR_STRING,
MYSQL_TYPE_VARCHAR,
MYSQL_TYPE_BIT:
e.ColumnMeta[i] = binary.LittleEndian.Uint16(data[pos:])
pos += 2
case MYSQL_TYPE_BLOB,
MYSQL_TYPE_DOUBLE,
MYSQL_TYPE_FLOAT,
MYSQL_TYPE_GEOMETRY,
MYSQL_TYPE_JSON:
e.ColumnMeta[i] = uint16(data[pos])
pos++
case MYSQL_TYPE_TIME2,
MYSQL_TYPE_DATETIME2,
MYSQL_TYPE_TIMESTAMP2:
e.ColumnMeta[i] = uint16(data[pos])
pos++
case MYSQL_TYPE_NEWDATE,
MYSQL_TYPE_ENUM,
MYSQL_TYPE_SET,
MYSQL_TYPE_TINY_BLOB,
MYSQL_TYPE_MEDIUM_BLOB,
MYSQL_TYPE_LONG_BLOB:
return errors.Errorf("unsupported type in binlog %d", t)
default:
e.ColumnMeta[i] = 0
}
}
return nil
}
func (e *TableMapEvent) decodeOptionalMeta(data []byte) (err error) {
pos := 0
for pos < len(data) {
// optional metadata fields are stored in Type, Length, Value(TLV) format
// Type takes 1 byte. Length is a packed integer value. Values takes Length bytes
t := data[pos]
pos++
l, _, n := LengthEncodedInt(data[pos:])
pos += n
v := data[pos : pos+int(l)]
pos += int(l)
switch t {
case TABLE_MAP_OPT_META_SIGNEDNESS:
e.SignednessBitmap = v
case TABLE_MAP_OPT_META_DEFAULT_CHARSET:
e.DefaultCharset, err = e.decodeDefaultCharset(v)
if err != nil {
return err
}
case TABLE_MAP_OPT_META_COLUMN_CHARSET:
e.ColumnCharset, err = e.decodeIntSeq(v)
if err != nil {
return err
}
case TABLE_MAP_OPT_META_COLUMN_NAME:
if err = e.decodeColumnNames(v); err != nil {
return err
}
case TABLE_MAP_OPT_META_SET_STR_VALUE:
e.SetStrValue, err = e.decodeStrValue(v)
if err != nil {
return err
}
case TABLE_MAP_OPT_META_ENUM_STR_VALUE:
e.EnumStrValue, err = e.decodeStrValue(v)
if err != nil {
return err
}
case TABLE_MAP_OPT_META_GEOMETRY_TYPE:
e.GeometryType, err = e.decodeIntSeq(v)
if err != nil {
return err
}
case TABLE_MAP_OPT_META_SIMPLE_PRIMARY_KEY:
if err = e.decodeSimplePrimaryKey(v); err != nil {
return err
}
case TABLE_MAP_OPT_META_PRIMARY_KEY_WITH_PREFIX:
if err = e.decodePrimaryKeyWithPrefix(v); err != nil {
return err
}
case TABLE_MAP_OPT_META_ENUM_AND_SET_DEFAULT_CHARSET:
e.EnumSetDefaultCharset, err = e.decodeDefaultCharset(v)
if err != nil {
return err
}
case TABLE_MAP_OPT_META_ENUM_AND_SET_COLUMN_CHARSET:
e.EnumSetColumnCharset, err = e.decodeIntSeq(v)
if err != nil {
return err
}
default:
// Ignore for future extension
}
}
return nil
}
func (e *TableMapEvent) decodeIntSeq(v []byte) (ret []uint64, err error) {
p := 0
for p < len(v) {
i, _, n := LengthEncodedInt(v[p:])
p += n
ret = append(ret, i)
}
return
}
func (e *TableMapEvent) decodeDefaultCharset(v []byte) (ret []uint64, err error) {
ret, err = e.decodeIntSeq(v)
if err != nil {
return
}
if len(ret)%2 != 1 {
return nil, errors.Errorf("Expect odd item in DefaultCharset but got %d", len(ret))
}
return
}
func (e *TableMapEvent) decodeColumnNames(v []byte) error {
p := 0
e.ColumnName = make([][]byte, 0, e.ColumnCount)
for p < len(v) {
n := int(v[p])
p++
e.ColumnName = append(e.ColumnName, v[p:p+n])
p += n
}
if len(e.ColumnName) != int(e.ColumnCount) {
return errors.Errorf("Expect %d column names but got %d", e.ColumnCount, len(e.ColumnName))
}
return nil
}
func (e *TableMapEvent) decodeStrValue(v []byte) (ret [][][]byte, err error) {
p := 0
for p < len(v) {
nVal, _, n := LengthEncodedInt(v[p:])
p += n
vals := make([][]byte, 0, int(nVal))
for i := 0; i < int(nVal); i++ {
val, _, n, err := LengthEncodedString(v[p:])
if err != nil {
return nil, err
}
p += n
vals = append(vals, val)
}
ret = append(ret, vals)
}
return
}
func (e *TableMapEvent) decodeSimplePrimaryKey(v []byte) error {
p := 0
for p < len(v) {
i, _, n := LengthEncodedInt(v[p:])
e.PrimaryKey = append(e.PrimaryKey, i)
e.PrimaryKeyPrefix = append(e.PrimaryKeyPrefix, 0)
p += n
}
return nil
}
func (e *TableMapEvent) decodePrimaryKeyWithPrefix(v []byte) error {
p := 0
for p < len(v) {
i, _, n := LengthEncodedInt(v[p:])
e.PrimaryKey = append(e.PrimaryKey, i)
p += n
i, _, n = LengthEncodedInt(v[p:])
e.PrimaryKeyPrefix = append(e.PrimaryKeyPrefix, i)
p += n
}
return nil
}
func (e *TableMapEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "TableID: %d\n", e.TableID)
fmt.Fprintf(w, "TableID size: %d\n", e.tableIDSize)
fmt.Fprintf(w, "Flags: %d\n", e.Flags)
fmt.Fprintf(w, "Schema: %s\n", e.Schema)
fmt.Fprintf(w, "Table: %s\n", e.Table)
fmt.Fprintf(w, "Column count: %d\n", e.ColumnCount)
fmt.Fprintf(w, "Column type: \n%s", hex.Dump(e.ColumnType))
fmt.Fprintf(w, "NULL bitmap: \n%s", hex.Dump(e.NullBitmap))
fmt.Fprintf(w, "Signedness bitmap: \n%s", hex.Dump(e.SignednessBitmap))
fmt.Fprintf(w, "Default charset: %v\n", e.DefaultCharset)
fmt.Fprintf(w, "Column charset: %v\n", e.ColumnCharset)
fmt.Fprintf(w, "Set str value: %v\n", e.SetStrValueString())
fmt.Fprintf(w, "Enum str value: %v\n", e.EnumStrValueString())
fmt.Fprintf(w, "Column name: %v\n", e.ColumnNameString())
fmt.Fprintf(w, "Geometry type: %v\n", e.GeometryType)
fmt.Fprintf(w, "Primary key: %v\n", e.PrimaryKey)
fmt.Fprintf(w, "Primary key prefix: %v\n", e.PrimaryKeyPrefix)
fmt.Fprintf(w, "Enum/set default charset: %v\n", e.EnumSetDefaultCharset)
fmt.Fprintf(w, "Enum/set column charset: %v\n", e.EnumSetColumnCharset)
unsignedMap := e.UnsignedMap()
fmt.Fprintf(w, "UnsignedMap: %#v\n", unsignedMap)
collationMap := e.CollationMap()
fmt.Fprintf(w, "CollationMap: %#v\n", collationMap)
enumSetCollationMap := e.EnumSetCollationMap()
fmt.Fprintf(w, "EnumSetCollationMap: %#v\n", enumSetCollationMap)
enumStrValueMap := e.EnumStrValueMap()
fmt.Fprintf(w, "EnumStrValueMap: %#v\n", enumStrValueMap)
setStrValueMap := e.SetStrValueMap()
fmt.Fprintf(w, "SetStrValueMap: %#v\n", setStrValueMap)
geometryTypeMap := e.GeometryTypeMap()
fmt.Fprintf(w, "GeometryTypeMap: %#v\n", geometryTypeMap)
nameMaxLen := 0
for _, name := range e.ColumnName {
if len(name) > nameMaxLen {
nameMaxLen = len(name)
}
}
nameFmt := " %s"
if nameMaxLen > 0 {
nameFmt = fmt.Sprintf(" %%-%ds", nameMaxLen)
}
primaryKey := map[int]struct{}{}
for _, pk := range e.PrimaryKey {
primaryKey[int(pk)] = struct{}{}
}
fmt.Fprintf(w, "Columns: \n")
for i := 0; i < int(e.ColumnCount); i++ {
if len(e.ColumnName) == 0 {
fmt.Fprintf(w, nameFmt, "<n/a>")
} else {
fmt.Fprintf(w, nameFmt, e.ColumnName[i])
}
fmt.Fprintf(w, " type=%-3d", e.realType(i))
if e.IsNumericColumn(i) {
if len(unsignedMap) == 0 {
fmt.Fprintf(w, " unsigned=<n/a>")
} else if unsignedMap[i] {
fmt.Fprintf(w, " unsigned=yes")
} else {
fmt.Fprintf(w, " unsigned=no ")
}
}
if e.IsCharacterColumn(i) {
if len(collationMap) == 0 {
fmt.Fprintf(w, " collation=<n/a>")
} else {
fmt.Fprintf(w, " collation=%d ", collationMap[i])
}
}
if e.IsEnumColumn(i) {
if len(enumSetCollationMap) == 0 {
fmt.Fprintf(w, " enum_collation=<n/a>")
} else {
fmt.Fprintf(w, " enum_collation=%d", enumSetCollationMap[i])
}
if len(enumStrValueMap) == 0 {
fmt.Fprintf(w, " enum=<n/a>")
} else {
fmt.Fprintf(w, " enum=%v", enumStrValueMap[i])
}
}
if e.IsSetColumn(i) {
if len(enumSetCollationMap) == 0 {
fmt.Fprintf(w, " set_collation=<n/a>")
} else {
fmt.Fprintf(w, " set_collation=%d", enumSetCollationMap[i])
}
if len(setStrValueMap) == 0 {
fmt.Fprintf(w, " set=<n/a>")
} else {
fmt.Fprintf(w, " set=%v", setStrValueMap[i])
}
}
if e.IsGeometryColumn(i) {
if len(geometryTypeMap) == 0 {
fmt.Fprintf(w, " geometry_type=<n/a>")
} else {
fmt.Fprintf(w, " geometry_type=%v", geometryTypeMap[i])
}
}
available, nullable := e.Nullable(i)
if !available {
fmt.Fprintf(w, " null=<n/a>")
} else if nullable {
fmt.Fprintf(w, " null=yes")
} else {
fmt.Fprintf(w, " null=no ")
}
if _, ok := primaryKey[i]; ok {
fmt.Fprintf(w, " pri")
}
fmt.Fprintf(w, "\n")
}
fmt.Fprintln(w)
}
// Nullable returns the nullability of the i-th column.
// If null bits are not available, available is false.
// i must be in range [0, ColumnCount).
func (e *TableMapEvent) Nullable(i int) (available, nullable bool) {
if len(e.NullBitmap) == 0 {
return
}
return true, e.NullBitmap[i/8]&(1<<uint(i%8)) != 0
}
// SetStrValueString returns values for set columns as string slices.
// nil is returned if not available or no set columns at all.
func (e *TableMapEvent) SetStrValueString() [][]string {
if e.setStrValueString == nil {
if len(e.SetStrValue) == 0 {
return nil
}
e.setStrValueString = make([][]string, 0, len(e.SetStrValue))
for _, vals := range e.SetStrValue {
e.setStrValueString = append(
e.setStrValueString,
e.bytesSlice2StrSlice(vals),
)
}
}
return e.setStrValueString
}
// EnumStrValueString returns values for enum columns as string slices.
// nil is returned if not available or no enum columns at all.
func (e *TableMapEvent) EnumStrValueString() [][]string {
if e.enumStrValueString == nil {
if len(e.EnumStrValue) == 0 {
return nil
}
e.enumStrValueString = make([][]string, 0, len(e.EnumStrValue))
for _, vals := range e.EnumStrValue {
e.enumStrValueString = append(
e.enumStrValueString,
e.bytesSlice2StrSlice(vals),
)
}
}
return e.enumStrValueString
}
// ColumnNameString returns column names as string slice.
// nil is returned if not available.
func (e *TableMapEvent) ColumnNameString() []string {
if e.columnNameString == nil {
e.columnNameString = e.bytesSlice2StrSlice(e.ColumnName)
}
return e.columnNameString
}
func (e *TableMapEvent) bytesSlice2StrSlice(src [][]byte) []string {
if src == nil {
return nil
}
ret := make([]string, 0, len(src))
for _, item := range src {
ret = append(ret, string(item))
}
return ret
}
// UnsignedMap returns a map: column index -> unsigned.
// Note that only numeric columns will be returned.
// nil is returned if not available or no numeric columns at all.
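// Bits in SignednessBitmap are MSB-first and indexed in numeric-column order rather than by column position.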
func (e *TableMapEvent) UnsignedMap() map[int]bool {
if len(e.SignednessBitmap) == 0 {
return nil
}
p := 0
ret := make(map[int]bool)
for i := 0; i < int(e.ColumnCount); i++ {
if !e.IsNumericColumn(i) {
continue
}
ret[i] = e.SignednessBitmap[p/8]&(1<<uint(7-p%8)) != 0
p++
}
return ret
}
// CollationMap returns a map: column index -> collation id.
// Note that only character columns will be returned.
// nil is returned if not available or no character columns at all.
func (e *TableMapEvent) CollationMap() map[int]uint64 {
return e.collationMap(e.IsCharacterColumn, e.DefaultCharset, e.ColumnCharset)
}
// EnumSetCollationMap returns a map: column index -> collation id.
// Note that only enum or set columns will be returned.
// nil is returned if not available or no enum/set columns at all.
func (e *TableMapEvent) EnumSetCollationMap() map[int]uint64 {
return e.collationMap(e.IsEnumOrSetColumn, e.EnumSetDefaultCharset, e.EnumSetColumnCharset)
}
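// collationMap maps column index -> collation id using either the default-charset encoding
// ([default collation, char-column index 1, collation 1, ...]) or the per-column charset list.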
func (e *TableMapEvent) collationMap(includeType func(int) bool, defaultCharset, columnCharset []uint64) map[int]uint64 {
if len(defaultCharset) != 0 {
defaultCollation := defaultCharset[0]
// character column index -> collation
collations := make(map[int]uint64)
for i := 1; i < len(defaultCharset); i += 2 {
collations[int(defaultCharset[i])] = defaultCharset[i+1]
}
p := 0
ret := make(map[int]uint64)
for i := 0; i < int(e.ColumnCount); i++ {
if !includeType(i) {
continue
}
if collation, ok := collations[p]; ok {
ret[i] = collation
} else {
ret[i] = defaultCollation
}
p++
}
return ret
}
if len(columnCharset) != 0 {
p := 0
ret := make(map[int]uint64)
for i := 0; i < int(e.ColumnCount); i++ {
if !includeType(i) {
continue
}
ret[i] = columnCharset[p]
p++
}
return ret
}
return nil
}
// EnumStrValueMap returns a map: column index -> enum string value.
// Note that only enum columns will be returned.
// nil is returned if not available or no enum columns at all.
func (e *TableMapEvent) EnumStrValueMap() map[int][]string {
return e.strValueMap(e.IsEnumColumn, e.EnumStrValueString())
}
// SetStrValueMap returns a map: column index -> set string value.
// Note that only set columns will be returned.
// nil is returned if not available or no set columns at all.
func (e *TableMapEvent) SetStrValueMap() map[int][]string {
return e.strValueMap(e.IsSetColumn, e.SetStrValueString())
}
func (e *TableMapEvent) strValueMap(includeType func(int) bool, strValue [][]string) map[int][]string {
if len(strValue) == 0 {
return nil
}
p := 0
ret := make(map[int][]string)
for i := 0; i < int(e.ColumnCount); i++ {
if !includeType(i) {
continue
}
ret[i] = strValue[p]
p++
}
return ret
}
// GeometryTypeMap returns a map: column index -> geometry type.
// Note that only geometry columns will be returned.
// nil is returned if not available or no geometry columns at all.
func (e *TableMapEvent) GeometryTypeMap() map[int]uint64 {
if len(e.GeometryType) == 0 {
return nil
}
p := 0
ret := make(map[int]uint64)
for i := 0; i < int(e.ColumnCount); i++ {
if !e.IsGeometryColumn(i) {
continue
}
ret[i] = e.GeometryType[p]
p++
}
return ret
}
// Below, realType and IsXXXColumn are based on:
// table_def::type in sql/rpl_utility.h
// Table_map_log_event::print_columns in mysql-8.0/sql/log_event.cc and mariadb-10.5/sql/log_event_client.cc
func (e *TableMapEvent) realType(i int) byte {
typ := e.ColumnType[i]
switch typ {
case MYSQL_TYPE_STRING:
rtyp := byte(e.ColumnMeta[i] >> 8)
if rtyp == MYSQL_TYPE_ENUM || rtyp == MYSQL_TYPE_SET {
return rtyp
}
case MYSQL_TYPE_DATE:
return MYSQL_TYPE_NEWDATE
}
return typ
}
func (e *TableMapEvent) IsNumericColumn(i int) bool {
switch e.realType(i) {
case MYSQL_TYPE_TINY,
MYSQL_TYPE_SHORT,
MYSQL_TYPE_INT24,
MYSQL_TYPE_LONG,
MYSQL_TYPE_LONGLONG,
MYSQL_TYPE_NEWDECIMAL,
MYSQL_TYPE_FLOAT,
MYSQL_TYPE_DOUBLE:
return true
default:
return false
}
}
// IsCharacterColumn returns true if the column type is considered as character type.
// Note that JSON/GEOMETRY types are treated as character type in mariadb.
// (JSON is an alias for LONGTEXT in mariadb: https://mariadb.com/kb/en/json-data-type/)
func (e *TableMapEvent) IsCharacterColumn(i int) bool {
switch e.realType(i) {
case MYSQL_TYPE_STRING,
MYSQL_TYPE_VAR_STRING,
MYSQL_TYPE_VARCHAR,
MYSQL_TYPE_BLOB:
return true
case MYSQL_TYPE_GEOMETRY:
if e.flavor == "mariadb" {
return true
}
return false
default:
return false
}
}
func (e *TableMapEvent) IsEnumColumn(i int) bool {
return e.realType(i) == MYSQL_TYPE_ENUM
}
func (e *TableMapEvent) IsSetColumn(i int) bool {
return e.realType(i) == MYSQL_TYPE_SET
}
func (e *TableMapEvent) IsGeometryColumn(i int) bool {
return e.realType(i) == MYSQL_TYPE_GEOMETRY
}
func (e *TableMapEvent) IsEnumOrSetColumn(i int) bool {
rtyp := e.realType(i)
return rtyp == MYSQL_TYPE_ENUM || rtyp == MYSQL_TYPE_SET
}
// RowsEventStmtEndFlag is set in the end of the statement.
const RowsEventStmtEndFlag = 0x01
type RowsEvent struct {
//0, 1, 2
Version int
tableIDSize int
tables map[uint64]*TableMapEvent
needBitmap2 bool
Table *TableMapEvent
TableID uint64
Flags uint16
//if version == 2
ExtraData []byte
//lenenc_int
ColumnCount uint64
//len = (ColumnCount + 7) / 8
ColumnBitmap1 []byte
//if UPDATE_ROWS_EVENTv1 or v2
//len = (ColumnCount + 7) / 8
ColumnBitmap2 []byte
//rows: invalid: int64, float64, bool, []byte, string
Rows [][]interface{}
parseTime bool
timestampStringLocation *time.Location
useDecimal bool
ignoreJSONDecodeErr bool
}
func (e *RowsEvent) Decode(data []byte) error {
pos := 0
e.TableID = FixedLengthInt(data[0:e.tableIDSize])
pos += e.tableIDSize
e.Flags = binary.LittleEndian.Uint16(data[pos:])
pos += 2
if e.Version == 2 {
dataLen := binary.LittleEndian.Uint16(data[pos:])
pos += 2
e.ExtraData = data[pos : pos+int(dataLen-2)]
pos += int(dataLen - 2)
}
var n int
e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
pos += n
bitCount := bitmapByteSize(int(e.ColumnCount))
e.ColumnBitmap1 = data[pos : pos+bitCount]
pos += bitCount
if e.needBitmap2 {
e.ColumnBitmap2 = data[pos : pos+bitCount]
pos += bitCount
}
var ok bool
e.Table, ok = e.tables[e.TableID]
if !ok {
if len(e.tables) > 0 {
return errors.Errorf("invalid table id %d, no corresponding table map event", e.TableID)
} else {
return errors.Annotatef(errMissingTableMapEvent, "table id %d", e.TableID)
}
}
var err error
// ... repeat rows until event-end
defer func() {
if r := recover(); r != nil {
log.Fatalf("parse rows event panic %v, data %q, parsed rows %#v, table map %#v\n%s", r, data, e, e.Table, Pstack())
}
}()
// Pre-allocate memory for rows.
rowsLen := e.ColumnCount
if e.needBitmap2 {
rowsLen += e.ColumnCount
}
e.Rows = make([][]interface{}, 0, rowsLen)
for pos < len(data) {
if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap1); err != nil {
return errors.Trace(err)
}
pos += n
if e.needBitmap2 {
if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap2); err != nil {
return errors.Trace(err)
}
pos += n
}
}
return nil
}
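// isBitSet reports whether bit i of the bitmap is set (bits are LSB-first within each byte).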
func isBitSet(bitmap []byte, i int) bool {
return bitmap[i>>3]&(1<<(uint(i)&7)) > 0
}
func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent, bitmap []byte) (int, error) {
row := make([]interface{}, e.ColumnCount)
pos := 0
// refer: https://github.com/alibaba/canal/blob/c3e38e50e269adafdd38a48c63a1740cde304c67/dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java#L63
count := 0
for i := 0; i < int(e.ColumnCount); i++ {
if isBitSet(bitmap, i) {
count++
}
}
count = (count + 7) / 8
nullBitmap := data[pos : pos+count]
pos += count
nullbitIndex := 0
var n int
var err error
for i := 0; i < int(e.ColumnCount); i++ {
if !isBitSet(bitmap, i) {
continue
}
isNull := (uint32(nullBitmap[nullbitIndex/8]) >> uint32(nullbitIndex%8)) & 0x01
nullbitIndex++
if isNull > 0 {
row[i] = nil
continue
}
row[i], n, err = e.decodeValue(data[pos:], table.ColumnType[i], table.ColumnMeta[i])
if err != nil {
return 0, err
}
pos += n
}
e.Rows = append(e.Rows, row)
return pos, nil
}
func (e *RowsEvent) parseFracTime(t interface{}) interface{} {
v, ok := t.(fracTime)
if !ok {
return t
}
if !e.parseTime {
// Don't parse time, return string directly
return v.String()
}
// return Golang time directly
return v.Time
}
// see mysql sql/log_event.cc log_event_print_value
func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{}, n int, err error) {
var length int = 0
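// For MYSQL_TYPE_STRING the high byte of meta carries the real column type; when the length
// exceeds 255, two bits of that byte are borrowed (inverted) to hold the high bits of the length.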
if tp == MYSQL_TYPE_STRING {
if meta >= 256 {
b0 := uint8(meta >> 8)
b1 := uint8(meta & 0xFF)
if b0&0x30 != 0x30 {
length = int(uint16(b1) | (uint16((b0&0x30)^0x30) << 4))
tp = byte(b0 | 0x30)
} else {
length = int(meta & 0xFF)
tp = b0
}
} else {
length = int(meta)
}
}
switch tp {
case MYSQL_TYPE_NULL:
return nil, 0, nil
case MYSQL_TYPE_LONG:
n = 4
v = ParseBinaryInt32(data)
case MYSQL_TYPE_TINY:
n = 1
v = ParseBinaryInt8(data)
case MYSQL_TYPE_SHORT:
n = 2
v = ParseBinaryInt16(data)
case MYSQL_TYPE_INT24:
n = 3
v = ParseBinaryInt24(data)
case MYSQL_TYPE_LONGLONG:
n = 8
v = ParseBinaryInt64(data)
case MYSQL_TYPE_NEWDECIMAL:
prec := uint8(meta >> 8)
scale := uint8(meta & 0xFF)
v, n, err = decodeDecimal(data, int(prec), int(scale), e.useDecimal)
case MYSQL_TYPE_FLOAT:
n = 4
v = ParseBinaryFloat32(data)
case MYSQL_TYPE_DOUBLE:
n = 8
v = ParseBinaryFloat64(data)
case MYSQL_TYPE_BIT:
nbits := ((meta >> 8) * 8) + (meta & 0xFF)
n = int(nbits+7) / 8
//use int64 for bit
v, err = decodeBit(data, int(nbits), int(n))
case MYSQL_TYPE_TIMESTAMP:
n = 4
t := binary.LittleEndian.Uint32(data)
if t == 0 {
v = formatZeroTime(0, 0)
} else {
v = e.parseFracTime(fracTime{
Time: time.Unix(int64(t), 0),
Dec: 0,
timestampStringLocation: e.timestampStringLocation,
})
}
case MYSQL_TYPE_TIMESTAMP2:
v, n, err = decodeTimestamp2(data, meta, e.timestampStringLocation)
v = e.parseFracTime(v)
case MYSQL_TYPE_DATETIME:
n = 8
i64 := binary.LittleEndian.Uint64(data)
if i64 == 0 {
v = formatZeroTime(0, 0)
} else {
d := i64 / 1000000
t := i64 % 1000000
v = e.parseFracTime(fracTime{
Time: time.Date(
int(d/10000),
time.Month((d%10000)/100),
int(d%100),
int(t/10000),
int((t%10000)/100),
int(t%100),
0,
time.UTC,
),
Dec: 0,
})
}
case MYSQL_TYPE_DATETIME2:
v, n, err = decodeDatetime2(data, meta)
v = e.parseFracTime(v)
case MYSQL_TYPE_TIME:
n = 3
i32 := uint32(FixedLengthInt(data[0:3]))
if i32 == 0 {
v = "00:00:00"
} else {
sign := ""
if i32 < 0 {
sign = "-"
}
v = fmt.Sprintf("%s%02d:%02d:%02d", sign, i32/10000, (i32%10000)/100, i32%100)
}
case MYSQL_TYPE_TIME2:
v, n, err = decodeTime2(data, meta)
case MYSQL_TYPE_DATE:
n = 3
i32 := uint32(FixedLengthInt(data[0:3]))
if i32 == 0 {
v = "0000-00-00"
} else {
v = fmt.Sprintf("%04d-%02d-%02d", i32/(16*32), i32/32%16, i32%32)
}
case MYSQL_TYPE_YEAR:
n = 1
year := int(data[0])
if year == 0 {
v = year
} else {
v = year + 1900
}
case MYSQL_TYPE_ENUM:
l := meta & 0xFF
switch l {
case 1:
v = int64(data[0])
n = 1
case 2:
v = int64(binary.LittleEndian.Uint16(data))
n = 2
default:
err = fmt.Errorf("Unknown ENUM packlen=%d", l)
}
case MYSQL_TYPE_SET:
n = int(meta & 0xFF)
nbits := n * 8
v, err = littleDecodeBit(data, nbits, n)
case MYSQL_TYPE_BLOB:
v, n, err = decodeBlob(data, meta)
case MYSQL_TYPE_VARCHAR,
MYSQL_TYPE_VAR_STRING:
length = int(meta)
v, n = decodeString(data, length)
case MYSQL_TYPE_STRING:
v, n = decodeString(data, length)
case MYSQL_TYPE_JSON:
// Refer: https://github.com/shyiko/mysql-binlog-connector-java/blob/master/src/main/java/com/github/shyiko/mysql/binlog/event/deserialization/AbstractRowsEventDataDeserializer.java#L404
length = int(FixedLengthInt(data[0:meta]))
n = length + int(meta)
v, err = e.decodeJsonBinary(data[meta:n])
case MYSQL_TYPE_GEOMETRY:
// MySQL saves Geometry as Blob in the binlog.
// The binary format appears to be SRID (4 bytes) + WKB; callers can use
// MySQL's GeomFromWKB or similar functions to recreate the geometry data.
// Refer https://dev.mysql.com/doc/refman/5.7/en/gis-wkb-functions.html
// Some Go libraries can also handle WKB if needed,
// see https://github.com/twpayne/go-geom or https://github.com/paulmach/go.geo
v, n, err = decodeBlob(data, meta)
default:
err = fmt.Errorf("unsupported type %d in binlog, don't know how to handle it", tp)
}
return
}
func decodeString(data []byte, length int) (v string, n int) {
if length < 256 {
length = int(data[0])
n = int(length) + 1
//v = hack.String(data[1:n])
v = string([]byte(data[1:n]))
} else {
length = int(binary.LittleEndian.Uint16(data[0:]))
n = length + 2
//v = hack.String(data[2:n])
v = string([]byte(data[2:n]))
}
return
}
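// MySQL packs DECIMAL values in groups of up to 9 decimal digits per 4 bytes;
// compressedBytes[d] is the number of bytes used for a leftover group of d digits.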
const digitsPerInteger int = 9
var compressedBytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4}
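// decodeDecimalDecompressValue decodes a partial digit group: each byte is XORed
// with the sign mask and the result is read as a big-endian integer.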
func decodeDecimalDecompressValue(compIndx int, data []byte, mask uint8) (size int, value uint32) {
size = compressedBytes[compIndx]
databuff := make([]byte, size)
for i := 0; i < size; i++ {
databuff[i] = data[i] ^ mask
}
value = uint32(BFixedLengthInt(databuff))
return
}
func decodeDecimal(data []byte, precision int, decimals int, useDecimal bool) (interface{}, int, error) {
//see python mysql replication and https://github.com/jeremycole/mysql_binlog
integral := (precision - decimals)
uncompIntegral := int(integral / digitsPerInteger)
uncompFractional := int(decimals / digitsPerInteger)
compIntegral := integral - (uncompIntegral * digitsPerInteger)
compFractional := decimals - (uncompFractional * digitsPerInteger)
binSize := uncompIntegral*4 + compressedBytes[compIntegral] +
uncompFractional*4 + compressedBytes[compFractional]
buf := make([]byte, binSize)
copy(buf, data[:binSize])
//must copy the data for later change
data = buf
// Support negative
// The sign is encoded in the high bit of the first byte,
// but this bit can also be used in the value.
value := uint32(data[0])
var res bytes.Buffer
var mask uint32 = 0
if value&0x80 == 0 {
mask = uint32((1 << 32) - 1)
res.WriteString("-")
}
//clear sign
data[0] ^= 0x80
pos, value := decodeDecimalDecompressValue(compIntegral, data, uint8(mask))
res.WriteString(fmt.Sprintf("%d", value))
for i := 0; i < uncompIntegral; i++ {
value = binary.BigEndian.Uint32(data[pos:]) ^ mask
pos += 4
res.WriteString(fmt.Sprintf("%09d", value))
}
res.WriteString(".")
for i := 0; i < uncompFractional; i++ {
value = binary.BigEndian.Uint32(data[pos:]) ^ mask
pos += 4
res.WriteString(fmt.Sprintf("%09d", value))
}
if size, value := decodeDecimalDecompressValue(compFractional, data[pos:], uint8(mask)); size > 0 {
res.WriteString(fmt.Sprintf("%0*d", compFractional, value))
pos += size
}
if useDecimal {
f, err := decimal.NewFromString(hack.String(res.Bytes()))
return f, pos, err
}
f, err := strconv.ParseFloat(hack.String(res.Bytes()), 64)
return f, pos, err
}
func decodeBit(data []byte, nbits int, length int) (value int64, err error) {
if nbits > 1 {
switch length {
case 1:
value = int64(data[0])
case 2:
value = int64(binary.BigEndian.Uint16(data))
case 3:
value = int64(BFixedLengthInt(data[0:3]))
case 4:
value = int64(binary.BigEndian.Uint32(data))
case 5:
value = int64(BFixedLengthInt(data[0:5]))
case 6:
value = int64(BFixedLengthInt(data[0:6]))
case 7:
value = int64(BFixedLengthInt(data[0:7]))
case 8:
value = int64(binary.BigEndian.Uint64(data))
default:
err = fmt.Errorf("invalid bit length %d", length)
}
} else {
if length != 1 {
err = fmt.Errorf("invalid bit length %d", length)
} else {
value = int64(data[0])
}
}
return
}
func littleDecodeBit(data []byte, nbits int, length int) (value int64, err error) {
if nbits > 1 {
switch length {
case 1:
value = int64(data[0])
case 2:
value = int64(binary.LittleEndian.Uint16(data))
case 3:
value = int64(FixedLengthInt(data[0:3]))
case 4:
value = int64(binary.LittleEndian.Uint32(data))
case 5:
value = int64(FixedLengthInt(data[0:5]))
case 6:
value = int64(FixedLengthInt(data[0:6]))
case 7:
value = int64(FixedLengthInt(data[0:7]))
case 8:
value = int64(binary.LittleEndian.Uint64(data))
default:
err = fmt.Errorf("invalid bit length %d", length)
}
} else {
if length != 1 {
err = fmt.Errorf("invalid bit length %d", length)
} else {
value = int64(data[0])
}
}
return
}
func decodeTimestamp2(data []byte, dec uint16, timestampStringLocation *time.Location) (interface{}, int, error) {
//get timestamp binary length
n := int(4 + (dec+1)/2)
sec := int64(binary.BigEndian.Uint32(data[0:4]))
usec := int64(0)
switch dec {
case 1, 2:
usec = int64(data[4]) * 10000
case 3, 4:
usec = int64(binary.BigEndian.Uint16(data[4:])) * 100
case 5, 6:
usec = int64(BFixedLengthInt(data[4:7]))
}
if sec == 0 {
return formatZeroTime(int(usec), int(dec)), n, nil
}
return fracTime{
Time: time.Unix(sec, usec*1000),
Dec: int(dec),
timestampStringLocation: timestampStringLocation,
}, n, nil
}
const DATETIMEF_INT_OFS int64 = 0x8000000000
func
|
(data []byte, dec uint16) (interface{}, int, error) {
//get datetime binary length
n := int(5 + (dec+1)/2)
intPart := int64(BFixedLengthInt(data[0:5])) - DATETIMEF_INT_OFS
var frac int64 = 0
switch dec {
case 1, 2:
frac = int64(data[5]) * 10000
case 3, 4:
frac = int64(binary.BigEndian.Uint16(data[5:7])) * 100
case 5, 6:
frac = int64(BFixedLengthInt(data[5:8]))
}
if intPart == 0 {
return formatZeroTime(int(frac), int(dec)), n, nil
}
tmp := intPart<<24 + frac
//handle sign???
if tmp < 0 {
tmp = -tmp
}
// var secPart int64 = tmp % (1 << 24)
ymdhms := tmp >> 24
ymd := ymdhms >> 17
ym := ymd >> 5
hms := ymdhms % (1 << 17)
day := int(ymd % (1 << 5))
month := int(ym % 13)
year := int(ym / 13)
second := int(hms % (1 << 6))
minute := int((hms >> 6) % (1 << 6))
hour := int((hms >> 12))
// DATETIME encoding for nonfractional part after MySQL 5.6.4
// https://dev.mysql.com/doc/internals/en/date-and-time-data-type-representation.html
// integer value for 1970-01-01 00:00:00 is
// year*13+month = 25611 = 0b110010000001011
// day = 1 = 0b00001
// hour = 0 = 0b00000
// minute = 0 = 0b000000
// second = 0 = 0b000000
// integer value = 0b1100100000010110000100000000000000000 = 107420450816
if intPart < 107420450816 {
return formatBeforeUnixZeroTime(year, month, day, hour, minute, second, int(frac), int(dec)), n, nil
}
return fracTime{
Time: time.Date(year, time.Month(month), day, hour, minute, second, int(frac*1000), time.UTC),
Dec: int(dec),
}, n, nil
}
const TIMEF_OFS int64 = 0x800000000000
const TIMEF_INT_OFS int64 = 0x800000
func decodeTime2(data []byte, dec uint16) (string, int, error) {
//time binary length
n := int(3 + (dec+1)/2)
tmp := int64(0)
intPart := int64(0)
frac := int64(0)
switch dec {
case 1, 2:
intPart = int64(BFixedLengthInt(data[0:3])) - TIMEF_INT_OFS
frac = int64(data[3])
if intPart < 0 && frac > 0 {
/*
Negative values are stored with reverse fractional part order,
for binary sort compatibility.
Disk value intpart frac Time value Memory value
800000.00 0 0 00:00:00.00 0000000000.000000
7FFFFF.FF -1 255 -00:00:00.01 FFFFFFFFFF.FFD8F0
7FFFFF.9D -1 99 -00:00:00.99 FFFFFFFFFF.F0E4D0
7FFFFF.00 -1 0 -00:00:01.00 FFFFFFFFFF.000000
7FFFFE.FF -1 255 -00:00:01.01 FFFFFFFFFE.FFD8F0
7FFFFE.F6 -2 246 -00:00:01.10 FFFFFFFFFE.FE7960
Formula to convert fractional part from disk format
(now stored in "frac" variable) to absolute value: "0x100 - frac".
To reconstruct in-memory value, we shift
			to the next integer value and then subtract the fractional part.
*/
intPart++ /* Shift to the next integer value */
frac -= 0x100 /* -(0x100 - frac) */
}
tmp = intPart<<24 + frac*10000
	case 3, 4:
intPart = int64(BFixedLengthInt(data[0:3])) - TIMEF_INT_OFS
frac = int64(binary.BigEndian.Uint16(data[3:5]))
if intPart < 0 && frac > 0 {
/*
Fix reverse fractional part order: "0x10000 - frac".
See comments for FSP=1 and FSP=2 above.
*/
intPart++ /* Shift to the next integer value */
frac -= 0x10000 /* -(0x10000-frac) */
}
tmp = intPart<<24 + frac*100
	case 5, 6:
		tmp = int64(BFixedLengthInt(data[0:6])) - TIMEF_OFS
		// the hms part lives in the upper bits; extract it so the zero check
		// below also works for fsp 5 and 6
		intPart = tmp >> 24
default:
intPart = int64(BFixedLengthInt(data[0:3])) - TIMEF_INT_OFS
tmp = intPart << 24
}
if intPart == 0 {
return "00:00:00", n, nil
}
hms := int64(0)
sign := ""
if tmp < 0 {
tmp = -tmp
sign = "-"
}
hms = tmp >> 24
hour := (hms >> 12) % (1 << 10) /* 10 bits starting at 12th */
minute := (hms >> 6) % (1 << 6) /* 6 bits starting at 6th */
second := hms % (1 << 6) /* 6 bits starting at 0th */
secPart := tmp % (1 << 24)
if secPart != 0 {
return fmt.Sprintf("%s%02d:%02d:%02d.%06d", sign, hour, minute, second, secPart), n, nil
}
return fmt.Sprintf("%s%02d:%02d:%02d", sign, hour, minute, second), n, nil
}
func decodeBlob(data []byte, meta uint16) (v []byte, n int, err error) {
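	// meta is the number of bytes used to encode the blob length on disk:
	// 1 (TINYBLOB/TINYTEXT), 2 (BLOB/TEXT), 3 (MEDIUMBLOB/MEDIUMTEXT), 4 (LONGBLOB/LONGTEXT).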
var length int
switch meta {
case 1:
length = int(data[0])
v = data[1 : 1+length]
n = length + 1
case 2:
length = int(binary.LittleEndian.Uint16(data))
v = data[2 : 2+length]
n = length + 2
case 3:
length = int(FixedLengthInt(data[0:3]))
v = data[3 : 3+length]
n = length + 3
case 4:
length = int(binary.LittleEndian.Uint32(data))
v = data[4 : 4+length]
n = length + 4
default:
err = fmt.Errorf("invalid blob packlen = %d", meta)
}
return
}
func (e *RowsEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "TableID: %d\n", e.TableID)
fmt.Fprintf(w, "Flags: %d\n", e.Flags)
fmt.Fprintf(w, "Column count: %d\n", e.ColumnCount)
fmt.Fprintf(w, "Values:\n")
for _, rows := range e.Rows {
fmt.Fprintf(w, "--\n")
for j, d := range rows {
if _, ok := d.([]byte); ok {
fmt.Fprintf(w, "%d:%q\n", j, d)
} else {
fmt.Fprintf(w, "%d:%#v\n", j, d)
}
}
}
fmt.Fprintln(w)
}
type RowsQueryEvent struct {
Query []byte
}
func (e *RowsQueryEvent) Decode(data []byte) error {
	// skip the 1-byte length prefix; the rest of the payload is the query text
e.Query = data[1:]
return nil
}
func (e *RowsQueryEvent) Dump(w io.Writer) {
fmt.Fprintf(w, "Query: %s\n", e.Query)
fmt.Fprintln(w)
}
|
decodeDatetime2
|
_phiscs.py
|
import os
import click
|
import scphylo as scp
@click.command(short_help="Run PhISCS (CSP version).")
@click.argument(
"genotype_file",
required=True,
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
@click.argument(
"alpha",
required=True,
type=float,
)
@click.argument(
"beta",
required=True,
type=float,
)
def phiscsb(genotype_file, alpha, beta):
"""PhISCS-B.
A combinatorial approach for subperfect
    tumor phylogeny reconstruction via integrative use of
single-cell and bulk sequencing data :cite:`PhISCS`.
scphylo phiscsb input.SC 0.0001 0.1
"""
outfile = os.path.splitext(genotype_file)[0]
scp.settings.verbosity = "info"
scp.settings.logfile = f"{outfile}.phiscsb.log"
df_in = scp.io.read(genotype_file)
df_out = scp.tl.phiscsb(df_in, alpha=alpha, beta=beta)
scp.io.write(df_out, f"{outfile}.phiscsb.CFMatrix")
return None
@click.command(short_help="Run PhISCS (ILP version).")
@click.argument(
"genotype_file",
required=True,
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
@click.argument(
"alpha",
required=True,
type=float,
)
@click.argument(
"beta",
required=True,
type=float,
)
@click.option(
"--time_limit",
"-t",
default=86400,
type=int,
show_default=True,
    help="Time limit of the program (in seconds).",
)
@click.option(
"--n_threads",
"-p",
default=1,
type=int,
show_default=True,
help="Number of threads.",
)
def phiscsi(genotype_file, alpha, beta, time_limit, n_threads):
"""PhISCS-I.
A combinatorial approach for subperfect
    tumor phylogeny reconstruction via integrative use of
single-cell and bulk sequencing data :cite:`PhISCS`.
scphylo phiscsi input.SC 0.0001 0.1 -t 3600 -p 8
"""
outfile = os.path.splitext(genotype_file)[0]
scp.settings.verbosity = "info"
scp.settings.logfile = f"{outfile}.phiscsi.log"
df_in = scp.io.read(genotype_file)
df_out = scp.tl.phiscsi(
df_in, alpha=alpha, beta=beta, time_limit=time_limit, n_threads=n_threads
)
scp.io.write(df_out, f"{outfile}.phiscsi.CFMatrix")
return None
| |
expressroutecircuits.go
|
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ExpressRouteCircuitsClient is the network Client
type ExpressRouteCircuitsClient struct {
BaseClient
}
// NewExpressRouteCircuitsClient creates an instance of the ExpressRouteCircuitsClient client.
func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient {
return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the ExpressRouteCircuitsClient client.
func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient
|
// CreateOrUpdate creates or updates an express route circuit.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the circuit.
// parameters - parameters supplied to the create or update express route circuit operation.
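// Illustrative usage of the returned long-running-operation future (not generated
// code; assumes the standard go-autorest future helpers):
//
//	future, err := client.CreateOrUpdate(ctx, "my-rg", "my-circuit", parameters)
//	if err != nil {
//		return err
//	}
//	if err = future.WaitForCompletionRef(ctx, client.Client); err != nil {
//		return err
//	}
//	circuit, err := future.Result(client)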
func (client ExpressRouteCircuitsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (result ExpressRouteCircuitsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, circuitName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (future ExpressRouteCircuitsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuit, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the specified express route circuit.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the express route circuit.
func (client ExpressRouteCircuitsClient) Delete(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.Delete")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, circuitName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ExpressRouteCircuitsClient) DeletePreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (future ExpressRouteCircuitsDeleteFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets information about the specified express route circuit.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of express route circuit.
func (client ExpressRouteCircuitsClient) Get(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, circuitName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client ExpressRouteCircuitsClient) GetPreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuit, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetPeeringStats gets all stats from an express route circuit in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the express route circuit.
// peeringName - the name of the peering.
func (client ExpressRouteCircuitsClient) GetPeeringStats(ctx context.Context, resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitStats, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.GetPeeringStats")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPeeringStatsPreparer(ctx, resourceGroupName, circuitName, peeringName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", nil, "Failure preparing request")
return
}
resp, err := client.GetPeeringStatsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure sending request")
return
}
result, err = client.GetPeeringStatsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure responding to request")
}
return
}
// GetPeeringStatsPreparer prepares the GetPeeringStats request.
func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetPeeringStatsSender sends the GetPeeringStats request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) GetPeeringStatsSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetPeeringStatsResponder handles the response to the GetPeeringStats request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetStats gets all the stats from an express route circuit in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the express route circuit.
func (client ExpressRouteCircuitsClient) GetStats(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitStats, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.GetStats")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetStatsPreparer(ctx, resourceGroupName, circuitName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", nil, "Failure preparing request")
return
}
resp, err := client.GetStatsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure sending request")
return
}
result, err = client.GetStatsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure responding to request")
}
return
}
// GetStatsPreparer prepares the GetStats request.
func (client ExpressRouteCircuitsClient) GetStatsPreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetStatsSender sends the GetStats request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) GetStatsSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetStatsResponder handles the response to the GetStats request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) GetStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List gets all the express route circuits in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client ExpressRouteCircuitsClient) List(ctx context.Context, resourceGroupName string) (result ExpressRouteCircuitListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.List")
defer func() {
sc := -1
if result.erclr.Response.Response != nil {
sc = result.erclr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.erclr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request")
return
}
result.erclr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client ExpressRouteCircuitsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client ExpressRouteCircuitsClient) listNextResults(ctx context.Context, lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) {
req, err := lastResults.expressRouteCircuitListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ExpressRouteCircuitsClient) ListComplete(ctx context.Context, resourceGroupName string) (result ExpressRouteCircuitListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx, resourceGroupName)
return
}
// ListAll gets all the express route circuits in a subscription.
func (client ExpressRouteCircuitsClient) ListAll(ctx context.Context) (result ExpressRouteCircuitListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListAll")
defer func() {
sc := -1
if result.erclr.Response.Response != nil {
sc = result.erclr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listAllNextResults
req, err := client.ListAllPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request")
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.erclr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request")
return
}
result.erclr, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to request")
}
return
}
// ListAllPreparer prepares the ListAll request.
func (client ExpressRouteCircuitsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListAllSender sends the ListAll request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListAllResponder handles the response to the ListAll request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) ListAllResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listAllNextResults retrieves the next set of results, if any.
func (client ExpressRouteCircuitsClient) listAllNextResults(ctx context.Context, lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) {
req, err := lastResults.expressRouteCircuitListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", resp, "Failure sending next results request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", resp, "Failure responding to next results request")
}
return
}
// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
func (client ExpressRouteCircuitsClient) ListAllComplete(ctx context.Context) (result ExpressRouteCircuitListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListAll")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListAll(ctx)
return
}
// ListArpTable gets the currently advertised ARP table associated with the express route circuit in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the express route circuit.
// peeringName - the name of the peering.
// devicePath - the path of the device.
func (client ExpressRouteCircuitsClient) ListArpTable(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string) (result ExpressRouteCircuitsListArpTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListArpTable")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListArpTablePreparer(ctx, resourceGroupName, circuitName, peeringName, devicePath)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request")
return
}
result, err = client.ListArpTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", result.Response(), "Failure sending request")
return
}
return
}
// ListArpTablePreparer prepares the ListArpTable request.
func (client ExpressRouteCircuitsClient) ListArpTablePreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"devicePath": autorest.Encode("path", devicePath),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListArpTableSender sends the ListArpTable request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (future ExpressRouteCircuitsListArpTableFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// ListArpTableResponder handles the response to the ListArpTable request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result ExpressRouteCircuitsArpTableListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListRoutesTable gets the currently advertised routes table associated with the express route circuit in a resource
// group.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the express route circuit.
// peeringName - the name of the peering.
// devicePath - the path of the device.
func (client ExpressRouteCircuitsClient) ListRoutesTable(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string) (result ExpressRouteCircuitsListRoutesTableFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListRoutesTable")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListRoutesTablePreparer(ctx, resourceGroupName, circuitName, peeringName, devicePath)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure preparing request")
return
}
result, err = client.ListRoutesTableSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", result.Response(), "Failure sending request")
return
}
return
}
// ListRoutesTablePreparer prepares the ListRoutesTable request.
func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"devicePath": autorest.Encode("path", devicePath),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListRoutesTableSender sends the ListRoutesTable request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (future ExpressRouteCircuitsListRoutesTableFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListRoutesTableSummary gets the currently advertised routes table summary associated with the express route circuit
// in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the express route circuit.
// peeringName - the name of the peering.
// devicePath - the path of the device.
func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string) (result ExpressRouteCircuitsListRoutesTableSummaryFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListRoutesTableSummary")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListRoutesTableSummaryPreparer(ctx, resourceGroupName, circuitName, peeringName, devicePath)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure preparing request")
return
}
result, err = client.ListRoutesTableSummarySender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", result.Response(), "Failure sending request")
return
}
return
}
// ListRoutesTableSummaryPreparer prepares the ListRoutesTableSummary request.
func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, devicePath string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"devicePath": autorest.Encode("path", devicePath),
"peeringName": autorest.Encode("path", peeringName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListRoutesTableSummarySender sends the ListRoutesTableSummary request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.Request) (future ExpressRouteCircuitsListRoutesTableSummaryFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// ListRoutesTableSummaryResponder handles the response to the ListRoutesTableSummary request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableSummaryListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// UpdateTags updates an express route circuit tags.
// Parameters:
// resourceGroupName - the name of the resource group.
// circuitName - the name of the circuit.
// parameters - parameters supplied to update express route circuit tags.
func (client ExpressRouteCircuitsClient) UpdateTags(ctx context.Context, resourceGroupName string, circuitName string, parameters TagsObject) (result ExpressRouteCircuitsUpdateTagsFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.UpdateTags")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, circuitName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "UpdateTags", nil, "Failure preparing request")
return
}
result, err = client.UpdateTagsSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "UpdateTags", result.Response(), "Failure sending request")
return
}
return
}
// UpdateTagsPreparer prepares the UpdateTags request.
func (client ExpressRouteCircuitsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, circuitName string, parameters TagsObject) (*http.Request, error) {
pathParameters := map[string]interface{}{
"circuitName": autorest.Encode("path", circuitName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-01-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateTagsSender sends the UpdateTags request. The method will close the
// http.Response Body if it receives an error.
func (client ExpressRouteCircuitsClient) UpdateTagsSender(req *http.Request) (future ExpressRouteCircuitsUpdateTagsFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// UpdateTagsResponder handles the response to the UpdateTags request. The method always
// closes the http.Response Body.
func (client ExpressRouteCircuitsClient) UpdateTagsResponder(resp *http.Response) (result ExpressRouteCircuit, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
|
{
return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
|
trace.go
|
package internal
import (
"net/http"
"strconv"
"github.com/networknext/dd-trace-go/tracer"
"github.com/networknext/dd-trace-go/tracer/ext"
)
// TraceAndServe will apply tracing to the given http.Handler using the passed tracer under the given service and resource.
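// Illustrative usage (hypothetical handler and tracer; t is a *tracer.Tracer created elsewhere):
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
//		w.Write([]byte("pong"))
//	})
//	traced := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//		internal.TraceAndServe(mux, w, r, "my-service", r.URL.Path, t)
//	})
//	http.ListenAndServe(":8080", traced)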
func TraceAndServe(h http.Handler, w http.ResponseWriter, r *http.Request, service, resource string, t *tracer.Tracer) {
// bail out if tracing isn't enabled
if !t.Enabled() {
h.ServeHTTP(w, r)
return
}
span, ctx := t.NewChildSpanWithContext("http.request", r.Context())
defer span.Finish()
span.Type = ext.HTTPType
span.Service = service
span.Resource = resource
span.SetMeta(ext.HTTPMethod, r.Method)
span.SetMeta(ext.HTTPURL, r.URL.Path)
traceRequest := r.WithContext(ctx)
traceWriter := NewResponseWriter(w, span)
h.ServeHTTP(traceWriter, traceRequest)
}
// ResponseWriter is a small wrapper around an http.ResponseWriter that
// intercepts and stores the status code of the response.
// It implements the http.ResponseWriter interface.
type ResponseWriter struct {
http.ResponseWriter
span *tracer.Span
status int
}
// NewResponseWriter allocates and returns a new ResponseWriter.
func NewResponseWriter(w http.ResponseWriter, span *tracer.Span) *ResponseWriter {
return &ResponseWriter{w, span, 0}
}
// Write writes the data to the connection as part of an HTTP reply.
// We explicitly call WriteHeader with the 200 status code
// in order to get it reported into the span.
func (w *ResponseWriter) Write(b []byte) (int, error) {
if w.status == 0 {
w.WriteHeader(http.StatusOK)
}
return w.ResponseWriter.Write(b)
}
|
// It also sets the status code to the span.
func (w *ResponseWriter) WriteHeader(status int) {
w.ResponseWriter.WriteHeader(status)
w.status = status
w.span.SetMeta(ext.HTTPCode, strconv.Itoa(status))
if status >= 500 && status < 600 {
w.span.Error = 1
}
}
|
// WriteHeader sends an HTTP response header with status code.
|
backup.go
|
package main
import (
"log"
"net/url"
"os/exec"
"path"
"sync"
homedir "github.com/mitchellh/go-homedir"
"github.com/spf13/afero"
)
// We define these here so that we can override them in the tests
var execCommand = exec.Command
var appFS = afero.NewOsFs()
var gitCommand = "git"
// Check whether we already have a copy of the repo: if we do, we update it;
// otherwise we do a fresh clone.
func backUp(backupDir string, repo *Repository, wg *sync.WaitGroup) ([]byte, error) {
defer wg.Done()
repoDir := path.Join(backupDir, repo.Namespace, repo.Name+".git")
_, err := appFS.Stat(repoDir)
var stdoutStderr []byte
if err == nil {
log.Printf("%s exists, updating. \n", repo.Name)
cmd := execCommand(gitCommand, "-C", repoDir, "remote", "update", "--prune")
stdoutStderr, err = cmd.CombinedOutput()
} else {
log.Printf("Cloning %s\n", repo.Name)
log.Printf("%#v\n", repo)
if repo.Private && useHTTPSClone != nil && *useHTTPSClone && ignorePrivate != nil && !*ignorePrivate {
// Add username and token to the clone URL
// https://gitlab.com/amitsaha/testproject1 => https://amitsaha:[email protected]/amitsaha/testproject1
u, err := url.Parse(repo.CloneURL)
if err != nil {
log.Fatalf("Invalid clone URL: %v\n", err)
}
repo.CloneURL = u.Scheme + "://" + gitHostUsername + ":" + gitHostToken + "@" + u.Host + u.Path
}
cmd := execCommand(gitCommand, "clone", "--mirror", repo.CloneURL, repoDir)
stdoutStderr, err = cmd.CombinedOutput()
}
return stdoutStderr, err
}
func setupBackupDir(backupDir string, service string, githostURL string) string
|
{
if len(backupDir) == 0 {
homeDir, err := homedir.Dir()
if err == nil {
service = service + ".com"
backupDir = path.Join(homeDir, ".gitbackup", service)
} else {
log.Fatal("Could not determine home directory and backup directory not specified")
}
} else {
if len(githostURL) == 0 {
service = service + ".com"
backupDir = path.Join(backupDir, service)
} else {
u, err := url.Parse(githostURL)
if err != nil {
panic(err)
}
backupDir = path.Join(backupDir, u.Host)
}
}
_, err := appFS.Stat(backupDir)
if err != nil {
log.Printf("%s doesn't exist, creating it\n", backupDir)
err := appFS.MkdirAll(backupDir, 0771)
if err != nil {
log.Fatal(err)
}
}
return backupDir
}
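// Illustrative call sequence (hypothetical values; repo would come from the git host
// client elsewhere in this program):
//
//	backupDir := setupBackupDir("", "github", "")
//	var wg sync.WaitGroup
//	wg.Add(1)
//	stdoutStderr, err := backUp(backupDir, repo, &wg)
//	if err != nil {
//		log.Printf("backup failed: %s\n", stdoutStderr)
//	}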
|
|
apps.py
|
from django.apps import apps
from django.db.models.signals import post_delete, post_save, pre_delete
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.classes import ModelPermission
from mayan.apps.acls.permissions import permission_acl_edit, permission_acl_view
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.classes import ModelCopy
from mayan.apps.common.menus import (
menu_list_facet, menu_main, menu_object, menu_related, menu_secondary,
menu_setup, menu_tools
)
from mayan.apps.documents.links.document_type_links import link_document_type_list
from mayan.apps.documents.signals import signal_post_initial_document_type
from mayan.apps.events.classes import EventModelRegistry, ModelEventType
from mayan.apps.navigation.classes import SourceColumn
from mayan.apps.rest_api.fields import DynamicSerializerField
from mayan.apps.views.html_widgets import TwoStateWidget
from .events import event_index_template_created, event_index_template_edited
from .handlers import (
handler_create_default_document_index, handler_delete_empty,
handler_index_document, handler_remove_document
)
from .html_widgets import (
get_instance_link, index_instance_item_link, node_level
)
from .links import (
link_document_index_instance_list, link_document_type_index_templates,
link_index_instance_menu, link_index_instance_rebuild,
link_index_instances_reset, link_index_template_setup,
link_index_template_create, link_index_template_document_types,
link_index_template_delete, link_index_template_edit,
link_index_template_list, link_index_template_node_tree_view,
link_index_instances_rebuild, link_index_template_node_create,
link_index_template_node_delete, link_index_template_node_edit
)
from .permissions import (
permission_index_template_delete, permission_index_template_edit,
permission_index_instance_view,
permission_index_template_rebuild, permission_index_template_view
)
class DocumentIndexingApp(MayanAppConfig):
app_namespace = 'indexing'
app_url = 'indexing'
has_rest_api = True
has_tests = True
name = 'mayan.apps.document_indexing'
verbose_name = _('Document indexing')
def
|
(self):
super().ready()
Document = apps.get_model(
app_label='documents', model_name='Document'
)
DocumentType = apps.get_model(
app_label='documents', model_name='DocumentType'
)
DocumentIndexInstanceNode = self.get_model(
model_name='DocumentIndexInstanceNode'
)
IndexInstance = self.get_model(model_name='IndexInstance')
IndexInstanceNode = self.get_model(model_name='IndexInstanceNode')
IndexInstanceNodeSearchResult = self.get_model(
model_name='IndexInstanceNodeSearchResult'
)
IndexTemplate = self.get_model(model_name='IndexTemplate')
IndexTemplateNode = self.get_model(model_name='IndexTemplateNode')
DynamicSerializerField.add_serializer(
klass=IndexTemplate,
serializer_class='mayan.apps.document_indexing.serializers.IndexTemplateSerializer'
)
EventModelRegistry.register(model=IndexTemplate)
ModelCopy(
model=IndexTemplateNode, excludes={'parent__isnull': False},
extra_kwargs={'get_or_create': True}
).add_fields(
field_names=(
'index', 'expression', 'enabled', 'link_documents'
),
)
ModelCopy(
model=IndexTemplate, bind_link=True, register_permission=True
).add_fields(
field_names=(
'label', 'slug', 'enabled', 'document_types',
'index_template_nodes'
),
)
ModelEventType.register(
event_types=(
event_index_template_created, event_index_template_edited
), model=IndexTemplate
)
ModelPermission.register(
model=Document, permissions=(
permission_index_instance_view,
)
)
ModelPermission.register(
model=IndexTemplate, permissions=(
permission_acl_edit, permission_acl_view,
permission_index_template_delete,
permission_index_template_edit,
permission_index_instance_view,
permission_index_template_rebuild,
permission_index_template_view,
)
)
ModelPermission.register_inheritance(
model=IndexTemplateNode, related='index'
)
ModelPermission.register_inheritance(
model=IndexInstanceNode, related='index_template_node__index'
)
# Document Index Instance Node
SourceColumn(
func=lambda context: get_instance_link(
index_instance_node=context['object'],
), include_label=True, is_sortable=True, label=_('Level'),
sort_field='value', source=DocumentIndexInstanceNode
)
# Index instance
SourceColumn(
attribute='get_level_count', include_label=True,
label=_('Depth'), source=IndexInstance
)
SourceColumn(
attribute='get_descendants_count', include_label=True,
label=_('Total nodes'), source=IndexInstance
)
SourceColumn(
func=lambda context: context[
'object'
].get_descendants_document_count(
user=context['request'].user
), include_label=True, label=_('Total documents'),
help_text=_(
'Number of unique documents this item contains.'
), source=IndexInstance
)
# Index instance node
column_index_instance_node_level = SourceColumn(
func=lambda context: index_instance_item_link(context['object']),
is_identifier=True, is_sortable=True, label=_('Level'),
sort_field='value', source=IndexInstanceNode
)
column_index_instance_node_level.add_exclude(
source=DocumentIndexInstanceNode
)
column_index_instance_node_level_count = SourceColumn(
attribute='get_level_count', include_label=True,
label=_('Depth'), source=IndexInstanceNode
)
column_index_instance_node_level_count.add_exclude(
source=DocumentIndexInstanceNode
)
column_index_instance_node_count = SourceColumn(
attribute='get_descendants_count',
include_label=True, label=_('Nodes'), source=IndexInstanceNode
)
column_index_instance_node_count.add_exclude(
source=DocumentIndexInstanceNode
)
column_index_instance_node_document_count = SourceColumn(
func=lambda context: context[
'object'
].get_descendants_document_count(
user=context['request'].user
), include_label=True, label=_('Documents'),
help_text=_(
'Number of unique documents this item contains.'
), source=IndexInstanceNode
)
column_index_instance_node_document_count.add_exclude(
source=DocumentIndexInstanceNode
)
SourceColumn(
func=lambda context: index_instance_item_link(context['object']),
is_identifier=True, is_sortable=True, label=_('Level'),
sort_field='value', source=IndexInstanceNodeSearchResult
)
SourceColumn(
attribute='get_full_path', source=IndexInstanceNodeSearchResult
)
# Index template
column_index_label = SourceColumn(
attribute='label', is_identifier=True, is_sortable=True,
source=IndexTemplate
)
column_index_label.add_exclude(source=IndexInstance)
SourceColumn(
attribute='label', is_object_absolute_url=True,
is_identifier=True, is_sortable=True, source=IndexInstance
)
column_index_slug = SourceColumn(
attribute='slug', include_label=True, is_sortable=True,
source=IndexTemplate
)
column_index_slug.add_exclude(source=IndexInstance)
column_index_enabled = SourceColumn(
attribute='enabled', include_label=True, is_sortable=True,
source=IndexTemplate, widget=TwoStateWidget
)
column_index_enabled.add_exclude(source=IndexInstance)
# Index template node
SourceColumn(
func=lambda context: node_level(context['object']),
include_label=True, is_identifier=True, label=_('Level'),
source=IndexTemplateNode
)
SourceColumn(
attribute='enabled', include_label=True, is_sortable=True,
source=IndexTemplateNode, widget=TwoStateWidget
)
SourceColumn(
attribute='link_documents', include_label=True, is_sortable=True,
source=IndexTemplateNode, widget=TwoStateWidget
)
menu_list_facet.bind_links(
links=(
link_document_index_instance_list,
), sources=(Document,)
)
menu_list_facet.bind_links(
links=(link_document_type_index_templates,),
sources=(DocumentType,)
)
menu_list_facet.bind_links(
links=(
link_index_template_document_types,
link_index_template_node_tree_view
), sources=(IndexTemplate,)
)
menu_object.bind_links(
links=(
link_index_template_delete, link_index_template_edit,
link_index_instance_rebuild
), sources=(IndexTemplate,)
)
menu_object.bind_links(
links=(
link_index_template_node_create, link_index_template_node_edit,
link_index_template_node_delete
), sources=(IndexTemplateNode,)
)
menu_main.bind_links(links=(link_index_instance_menu,), position=50)
menu_related.bind_links(
links=(link_index_template_list,),
sources=(
DocumentType, 'documents:document_type_list',
'documents:document_type_create'
)
)
menu_related.bind_links(
links=(link_document_type_list,),
sources=(
IndexTemplate, 'indexing:index_template_list',
'indexing:index_template_create'
)
)
menu_secondary.bind_links(
links=(link_index_template_list, link_index_template_create),
sources=(
IndexTemplate, 'indexing:index_template_list',
'indexing:index_template_create'
)
)
menu_setup.bind_links(links=(link_index_template_setup,))
menu_tools.bind_links(
links=(link_index_instances_rebuild, link_index_instances_reset)
)
post_save.connect(
dispatch_uid='document_indexing_handler_index_document',
receiver=handler_index_document,
sender=Document
)
post_delete.connect(
dispatch_uid='document_indexing_handler_delete_empty',
receiver=handler_delete_empty,
sender=Document
)
pre_delete.connect(
dispatch_uid='document_indexing_handler_remove_document',
receiver=handler_remove_document,
sender=Document
)
signal_post_initial_document_type.connect(
dispatch_uid='document_indexing_handler_create_default_document_index',
receiver=handler_create_default_document_index,
sender=DocumentType
)
|
ready
|
traverse.go
|
package manager
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/blang/semver"
"github.com/rancher/catalog-service/helm"
"github.com/rancher/catalog-service/model"
"github.com/rancher/catalog-service/parse"
)
func traverseFiles(repoPath, kind string, catalogType CatalogType) ([]model.Template, []error, error) {
if kind == "" || kind == RancherTemplateType {
return traverseGitFiles(repoPath)
}
if kind == HelmTemplateType {
if catalogType == CatalogTypeHelmGitRepo {
return traverseHelmGitFiles(repoPath)
}
return traverseHelmFiles(repoPath)
}
return nil, nil, fmt.Errorf("Unknown kind %s", kind)
}
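// traverseHelmGitFiles walks the "stable" directory of a Helm chart git checkout and builds one catalog template per chart folder from its Chart.yaml, README and chart files.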
func traverseHelmGitFiles(repoPath string) ([]model.Template, []error, error) {
fullpath := path.Join(repoPath, "stable")
templates := []model.Template{}
var template *model.Template
errors := []error{}
err := filepath.Walk(fullpath, func(path string, info os.FileInfo, err error) error {
if len(path) == len(fullpath) {
return nil
}
relPath := path[len(fullpath)+1:]
components := strings.Split(relPath, "/")
if len(components) == 1 {
if template != nil {
templates = append(templates, *template)
}
template = new(model.Template)
template.Versions = make([]model.Version, 0)
template.Versions = append(template.Versions, model.Version{
Files: make([]model.File, 0),
})
template.Base = HelmTemplateBaseType
}
if info.IsDir() {
return nil
}
if strings.HasSuffix(info.Name(), "Chart.yaml") {
metadata, err := helm.LoadMetadata(path)
if err != nil {
return err
}
template.Description = metadata.Description
template.DefaultVersion = metadata.Version
if len(metadata.Sources) > 0 {
template.ProjectURL = metadata.Sources[0]
}
iconData, iconFilename, err := parse.ParseIcon(metadata.Icon)
if err != nil {
errors = append(errors, err)
}
rev := 0
template.Icon = iconData
template.IconFilename = iconFilename
template.FolderName = components[0]
template.Name = components[0]
template.Versions[0].Revision = &rev
template.Versions[0].Version = metadata.Version
}
file, err := helm.LoadFile(path)
if err != nil {
return err
}
file.Name = relPath
if strings.HasSuffix(info.Name(), "README.md") {
template.Versions[0].Readme = file.Contents
return nil
}
template.Versions[0].Files = append(template.Versions[0].Files, *file)
return nil
})
return templates, errors, err
}
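// traverseHelmFiles reads the index of a Helm chart repository and turns every chart entry and each of its versions into a catalog template, fetching the chart files per version.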
func traverseHelmFiles(repoPath string) ([]model.Template, []error, error) {
index, err := helm.LoadIndex(repoPath)
if err != nil {
return nil, nil, err
}
|
for chart, metadata := range index.IndexFile.Entries {
template := model.Template{
Name: chart,
}
template.Description = metadata[0].Description
template.DefaultVersion = metadata[0].Version
if len(metadata[0].Sources) > 0 {
template.ProjectURL = metadata[0].Sources[0]
}
iconData, iconFilename, err := parse.ParseIcon(metadata[0].Icon)
if err != nil {
errors = append(errors, err)
}
template.Icon = iconData
template.IconFilename = iconFilename
template.Base = HelmTemplateBaseType
versions := make([]model.Version, 0)
for i, version := range metadata {
v := model.Version{
Revision: &i,
Version: version.Version,
}
files, err := helm.FetchFiles(version.URLs)
if err != nil {
fmt.Println(err)
errors = append(errors, err)
continue
}
filesToAdd := []model.File{}
for _, file := range files {
if strings.EqualFold(fmt.Sprintf("%s/%s", chart, "readme.md"), file.Name) {
v.Readme = file.Contents
continue
}
filesToAdd = append(filesToAdd, file)
}
v.Files = filesToAdd
versions = append(versions, v)
}
template.FolderName = chart
template.Versions = versions
templates = append(templates, template)
}
return templates, nil, nil
}
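// traverseGitFiles walks a Rancher catalog git checkout, indexes templates by path, merges per-version metadata from rancher-compose.yml or template-version.yml, and drops versions without a version string.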
func traverseGitFiles(repoPath string) ([]model.Template, []error, error) {
templateIndex := map[string]*model.Template{}
var errors []error
if err := filepath.Walk(repoPath, func(fullPath string, f os.FileInfo, err error) error {
if f == nil || !f.Mode().IsRegular() {
return nil
}
relativePath, err := filepath.Rel(repoPath, fullPath)
if err != nil {
return err
}
_, _, parsedCorrectly := parse.TemplatePath(relativePath)
if !parsedCorrectly {
return nil
}
_, filename := path.Split(relativePath)
if err = handleFile(templateIndex, fullPath, relativePath, filename); err != nil {
errors = append(errors, fmt.Errorf("%s: %v", fullPath, err))
}
return nil
}); err != nil {
return nil, nil, err
}
templates := []model.Template{}
for _, template := range templateIndex {
for i, version := range template.Versions {
var readme string
for _, file := range version.Files {
if strings.ToLower(file.Name) == "readme.md" {
readme = file.Contents
}
}
var rancherCompose string
var templateVersion string
for _, file := range version.Files {
if file.Name == "rancher-compose.yml" {
rancherCompose = file.Contents
}
if file.Name == "template-version.yml" {
templateVersion = file.Contents
}
}
newVersion := version
if rancherCompose != "" || templateVersion != "" {
var err error
if rancherCompose != "" {
newVersion, err = parse.CatalogInfoFromRancherCompose([]byte(rancherCompose))
}
if templateVersion != "" {
newVersion, err = parse.CatalogInfoFromTemplateVersion([]byte(templateVersion))
}
if err != nil {
var id string
if template.Base == "" {
id = fmt.Sprintf("%s:%d", template.FolderName, i)
} else {
id = fmt.Sprintf("%s*%s:%d", template.Base, template.FolderName, i)
}
errors = append(errors, fmt.Errorf("Failed to parse rancher-compose.yml for %s: %v", id, err))
continue
}
newVersion.Revision = version.Revision
// If rancher-compose.yml contains version, use this instead of folder version
if newVersion.Version == "" {
newVersion.Version = version.Version
}
newVersion.Files = version.Files
}
newVersion.Readme = readme
template.Versions[i] = newVersion
}
var filteredVersions []model.Version
for _, version := range template.Versions {
if version.Version != "" {
filteredVersions = append(filteredVersions, version)
}
}
template.Versions = filteredVersions
templates = append(templates, *template)
}
return templates, errors, nil
}
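// handleFile routes a single catalog file into the template index based on its filename: template metadata (config.yml/template.yml), icons, READMEs, or per-version files.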
func handleFile(templateIndex map[string]*model.Template, fullPath, relativePath, filename string) error {
switch {
case filename == "config.yml" || filename == "template.yml":
base, templateName, parsedCorrectly := parse.TemplatePath(relativePath)
if !parsedCorrectly {
return nil
}
contents, err := ioutil.ReadFile(fullPath)
if err != nil {
return err
}
var template model.Template
if template, err = parse.TemplateInfo(contents); err != nil {
return err
}
template.Base = base
template.FolderName = templateName
key := base + templateName
if existingTemplate, ok := templateIndex[key]; ok {
template.Icon = existingTemplate.Icon
template.IconFilename = existingTemplate.IconFilename
template.Readme = existingTemplate.Readme
template.Versions = existingTemplate.Versions
}
templateIndex[key] = &template
case strings.HasPrefix(filename, "catalogIcon") || strings.HasPrefix(filename, "icon"):
base, templateName, parsedCorrectly := parse.TemplatePath(relativePath)
if !parsedCorrectly {
return nil
}
contents, err := ioutil.ReadFile(fullPath)
if err != nil {
return err
}
key := base + templateName
if _, ok := templateIndex[key]; !ok {
templateIndex[key] = &model.Template{}
}
templateIndex[key].Icon = base64.StdEncoding.EncodeToString([]byte(contents))
templateIndex[key].IconFilename = filename
case strings.HasPrefix(strings.ToLower(filename), "readme.md"):
base, templateName, parsedCorrectly := parse.TemplatePath(relativePath)
if !parsedCorrectly {
return nil
}
_, _, _, parsedCorrectly = parse.VersionPath(relativePath)
if parsedCorrectly {
return handleVersionFile(templateIndex, fullPath, relativePath, filename)
}
contents, err := ioutil.ReadFile(fullPath)
if err != nil {
return err
}
key := base + templateName
if _, ok := templateIndex[key]; !ok {
templateIndex[key] = &model.Template{}
}
templateIndex[key].Readme = string(contents)
default:
return handleVersionFile(templateIndex, fullPath, relativePath, filename)
}
return nil
}
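// handleVersionFile attaches a file to the matching template version; the version folder name is interpreted as either a numeric revision or a semver version.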
func handleVersionFile(templateIndex map[string]*model.Template, fullPath, relativePath, filename string) error {
base, templateName, folderName, parsedCorrectly := parse.VersionPath(relativePath)
if !parsedCorrectly {
return nil
}
contents, err := ioutil.ReadFile(fullPath)
if err != nil {
return err
}
key := base + templateName
file := model.File{
Name: filename,
Contents: string(contents),
}
if _, ok := templateIndex[key]; !ok {
templateIndex[key] = &model.Template{}
}
// Handle case where folder name is a revision (just a number)
revision, err := strconv.Atoi(folderName)
if err == nil {
for i, version := range templateIndex[key].Versions {
if version.Revision != nil && *version.Revision == revision {
templateIndex[key].Versions[i].Files = append(version.Files, file)
return nil
}
}
templateIndex[key].Versions = append(templateIndex[key].Versions, model.Version{
Revision: &revision,
Files: []model.File{file},
})
return nil
}
// Handle case where folder name is version (must be in semver format)
_, err = semver.Parse(strings.Trim(folderName, "v"))
if err == nil {
for i, version := range templateIndex[key].Versions {
if version.Version == folderName {
templateIndex[key].Versions[i].Files = append(version.Files, file)
return nil
}
}
templateIndex[key].Versions = append(templateIndex[key].Versions, model.Version{
Version: folderName,
Files: []model.File{file},
})
return nil
}
return nil
}
|
templates := []model.Template{}
var errors []error
|
utils.py
|
import asyncio
from datetime import datetime
from typing import Union, Callable, Optional, List, TYPE_CHECKING
from ..config import Side, TradingType, ExitRoutine, InstrumentType
from ..core import Trade, Instrument, ExchangeType, Order, OrderBook
from ..exchange import Exchange
from ..engine.managers import Periodic
if TYPE_CHECKING:
from mxts.engine import StrategyManager
class StrategyUtilsMixin(object):
_manager: "StrategyManager"
def orders(
self,
instrument: Instrument = None,
exchange: ExchangeType = None,
side: Side = None,
) -> List[Order]:
"""select all open orders
Args:
instrument (Optional[Instrument]): filter open orders by instrument
exchange (Optional[ExchangeType]): filter open orders by exchange
side (Optional[Side]): filter open orders by side
Returns:
list (Order): list of open orders
"""
return self._manager.orders(self, instrument, exchange, side) # type: ignore # mixin
def pastOrders(
self,
instrument: Instrument = None,
exchange: ExchangeType = None,
side: Side = None,
) -> List[Order]:
"""select all past orders
Args:
instrument (Optional[Instrument]): filter past orders by instrument
exchange (Optional[ExchangeType]): filter past orders by exchange
side (Optional[Side]): filter past orders by side
Returns:
            list (Order): list of past orders
"""
return self._manager.pastOrders(self, instrument, exchange, side) # type: ignore # mixin
def trades(
self,
instrument: Instrument = None,
exchange: ExchangeType = None,
side: Side = None,
) -> List[Trade]:
"""select all past trades
Args:
instrument (Optional[Instrument]): filter trades by instrument
exchange (Optional[ExchangeType]): filter trades by exchange
side (Optional[Side]): filter trades by side
Returns:
list (Trade): list of trades
"""
return self._manager.trades(self, instrument, exchange, side) # type: ignore # mixin
#################
# Other Methods #
#################
def
|
(self) -> TradingType:
"""Return the trading type, from TradingType enum"""
return self._manager.tradingType()
def loop(self) -> asyncio.AbstractEventLoop:
"""Return the event loop"""
return self._manager.loop()
def now(self) -> datetime:
"""Return the current datetime. Useful to avoid code changes between
live trading and backtesting. Defaults to `datetime.now`"""
return self._manager.now()
def instruments(
self, type: InstrumentType = None, exchange: ExchangeType = None
) -> List[Instrument]:
"""Return list of all available instruments"""
return Instrument._instrumentdb.instruments(type=type, exchange=exchange)
def exchanges(self, instrument_type: InstrumentType = None) -> List[Exchange]:
"""Return list of all available exchanges"""
return list(
set(
__
for _ in Instrument._instrumentdb.instruments(type=instrument_type)
for __ in _.exchanges
)
)
def accounts(
self, type: InstrumentType = None, exchange: ExchangeType = None
) -> None: # TODO
"""Return list of all accounts"""
raise NotImplementedError()
async def subscribe(self, instrument: Instrument) -> None:
"""Subscribe to market data for the given instrument"""
return await self._manager.subscribe(instrument=instrument, strategy=self) # type: ignore # mixin
async def lookup(
self, instrument: Optional[Instrument], exchange: ExchangeType = None
) -> List[Instrument]:
"""Return list of all available instruments that match the instrument given"""
return await self._manager.lookup(instrument, exchange=exchange)
async def book(self, instrument: Instrument) -> Optional[OrderBook]:
"""Return list of all available instruments that match the instrument given"""
return await self._manager.book(instrument)
def periodic(
self,
function: Callable,
second: Union[int, str] = 0,
minute: Union[int, str] = "*",
hour: Union[int, str] = "*",
) -> Periodic:
"""periodically run a given async function.
NOTE: precise timing is NOT guaranteed due to event loop scheduling.
Args:
function (callable); function to call periodically
second (Union[int, str]); second to align periodic to, or '*' for every second
minute (Union[int, str]); minute to align periodic to, or '*' for every minute
hour (Union[int, str]); hour to align periodic to, or '*' for every hour
NOTE: this is a rudimentary scheme but should be sufficient. For more
complicated scheduling, just install multiple instances of the same periodic
e.g. for running on :00, :15, :30, and :45 install
periodic(0, 0, '*')
periodic(0, 15, '*')
periodic(0, 30, '*')
periodic(0, 45, '*')
"""
return self._manager.periodic(function, second, minute, hour)
def restrictTradingHours(
self,
start_second: Optional[int] = None,
start_minute: Optional[int] = None,
start_hour: Optional[int] = None,
end_second: Optional[int] = None,
end_minute: Optional[int] = None,
end_hour: Optional[int] = None,
on_end_of_day: ExitRoutine = ExitRoutine.NONE,
) -> None:
"""Restrict a strategy's trading hours to
[start_hour:start_minute:start_second, end_hour:end_minute:end_second]
NOTE: precise timing is NOT guaranteed due to event loop scheduling.
Args:
start_second (Optional[int]); starting second
start_minute (Optional[int]); starting minute
            start_hour (Optional[int]); starting hour
            end_second (Optional[int]); ending second
            end_minute (Optional[int]); ending minute
            end_hour (Optional[int]); ending hour
on_end_of_day (ExitRoutine); what to do when you hit the end time
"""
self._manager.restrictTradingHours(
self, # type: ignore # mixin
start_second=start_second,
start_minute=start_minute,
start_hour=start_hour,
end_second=end_second,
end_minute=end_minute,
end_hour=end_hour,
on_end_of_day=on_end_of_day,
)
def slippage(self, trade: Trade) -> None:
"""method to inject slippage when backtesting
Args:
trade (Trade): the completed trade to adjust
Returns:
trade (Trade): the modified trade
"""
pass
def transactionCost(self, trade: Trade) -> None:
"""method to inject transaction costs when backtesting
Args:
trade (Trade): the completed trade to adjust
Returns:
trade (Trade): the modified trade
"""
pass
|
tradingType
|
vtgate_buffer.py
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Test the vtgate master buffer.
During a master failover, vtgate should automatically buffer (stall) requests
for a configured time and retry them after the failover is over.
The test reproduces such a scenario as follows:
- two threads constantly execute a critical read and a write (UPDATE), respectively
- vtctl PlannedReparentShard runs a master failover
- both threads should not see any error despite the failover
"""
import logging
import Queue
import random
import threading
import time
import unittest
import environment
import tablet
import utils
from mysql_flavor import mysql_flavor
KEYSPACE = 'ks1'
SHARD = '0'
SCHEMA = '''CREATE TABLE buffer(
id BIGINT NOT NULL,
msg VARCHAR(64) NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB'''
CRITICAL_READ_ROW_ID = 1
UPDATE_ROW_ID = 2
class AbstractVtgateThread(threading.Thread):
"""Thread which constantly executes a query on vtgate.
Implement the execute() method for the specific query.
"""
def __init__(self, vtgate, name, writable=False):
super(AbstractVtgateThread, self).__init__(name=name)
self.vtgate = vtgate
self.writable = writable
self.quit = False
# Number of queries successfully executed.
self.rpcs = 0
# Number of failed queries.
self.errors = 0
# Queue used to notify the main thread that this thread executed
# "self.notify_after_n_successful_rpcs" RPCs successfully.
# Then "True" will be put exactly once on the queue.
self.wait_for_notification = Queue.Queue(maxsize=1)
# notify_lock guards the two fields below.
self.notify_lock = threading.Lock()
# If 0, notifications are disabled.
self.notify_after_n_successful_rpcs = 0
# Number of RPCs at the time a notification was requested.
self.rpcs_so_far = 0
self.start()
def run(self):
|
def execute(self, cursor):
raise NotImplementedError('Child class needs to implement this')
def set_notify_after_n_successful_rpcs(self, n):
with self.notify_lock:
self.notify_after_n_successful_rpcs = n
self.rpcs_so_far = self.rpcs
def stop(self):
self.quit = True
class ReadThread(AbstractVtgateThread):
def __init__(self, vtgate):
super(ReadThread, self).__init__(vtgate, 'ReadThread')
def execute(self, cursor):
row_count = cursor.execute('SELECT * FROM buffer WHERE id = :id',
{'id': CRITICAL_READ_ROW_ID})
logging.debug('read returned %d row(s).', row_count)
class UpdateThread(AbstractVtgateThread):
def __init__(self, vtgate, ignore_error_func=None):
self.ignore_error_func = ignore_error_func
# Value used in next UPDATE query. Increased after every query.
self.i = 1
self._commit_errors = 0
super(UpdateThread, self).__init__(vtgate, 'UpdateThread', writable=True)
def execute(self, cursor):
attempt = self.i
self.i += 1
try:
commit_started = False
cursor.begin()
# Do not use a bind variable for "msg" to make sure that the value shows
# up in the logs.
row_count = cursor.execute('UPDATE buffer SET msg=\'update %d\' '
'WHERE id = :id' % attempt,
{'id': UPDATE_ROW_ID})
# Sleep between [0, 1] seconds to prolong the time the transaction is in
# flight. This is more realistic because applications are going to keep
# their transactions open for longer as well.
time.sleep(random.randint(0, 1000) / 1000.0)
commit_started = True
cursor.commit()
logging.debug('UPDATE %d affected %d row(s).', attempt, row_count)
except Exception as e: # pylint: disable=broad-except
try:
# Rollback to free the transaction in vttablet.
cursor.rollback()
except Exception as e: # pylint: disable=broad-except
logging.warn('rollback failed: %s', str(e))
if not commit_started:
logging.debug('UPDATE %d failed before COMMIT. This should not happen.'
' Re-raising exception.', attempt)
raise
if self.ignore_error_func and self.ignore_error_func(e):
logging.debug('UPDATE %d failed during COMMIT. But we cannot buffer'
                      ' this error and we ignore it. err: %s', attempt, str(e))
else:
self._commit_errors += 1
if self._commit_errors > 1:
raise
logging.debug('UPDATE %d failed during COMMIT. This is okay once'
' because we do not support buffering it. err: %s',
attempt, str(e))
def commit_errors(self):
return self._commit_errors
master = tablet.Tablet()
replica = tablet.Tablet()
all_tablets = [master, replica]
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
utils.run_vtctl(['CreateKeyspace', KEYSPACE])
# Start tablets.
db_name = 'vt_' + KEYSPACE
for t in all_tablets:
t.create_db(db_name)
master.start_vttablet(wait_for_state=None,
init_tablet_type='replica',
init_keyspace=KEYSPACE, init_shard=SHARD,
tablet_index=0)
replica.start_vttablet(wait_for_state=None,
init_tablet_type='replica',
init_keyspace=KEYSPACE, init_shard=SHARD,
tablet_index=1)
for t in all_tablets:
t.wait_for_vttablet_state('NOT_SERVING')
# Reparent to choose an initial master and enable replication.
utils.run_vtctl(['InitShardMaster', '-force', '%s/%s' % (KEYSPACE, SHARD),
master.tablet_alias])
# Create the schema.
utils.run_vtctl(['ApplySchema', '-sql=' + SCHEMA, KEYSPACE])
start_vtgate()
# Insert two rows for the later threads (critical read, update).
with utils.vtgate.write_transaction(keyspace=KEYSPACE, shards=[SHARD],
tablet_type='master') as tx:
tx.execute('INSERT INTO buffer (id, msg) VALUES (:id, :msg)',
{'id': CRITICAL_READ_ROW_ID, 'msg': 'critical read'})
tx.execute('INSERT INTO buffer (id, msg) VALUES (:id, :msg)',
{'id': UPDATE_ROW_ID, 'msg': 'update'})
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in [master, replica]]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
def start_vtgate():
utils.VtGate().start(extra_args=[
'-enable_buffer',
# Long timeout in case failover is slow.
'-buffer_window', '10m',
'-buffer_max_failover_duration', '10m',
'-buffer_min_time_between_failovers', '20m'],
tablets=all_tablets)
class TestBufferBase(unittest.TestCase):
def _test_buffer(self, reparent_func, enable_read_thread=True,
ignore_error_func=None):
# Start both threads.
if enable_read_thread:
read_thread = ReadThread(utils.vtgate)
else:
logging.debug('ReadThread explicitly disabled in this test.')
update_thread = UpdateThread(utils.vtgate, ignore_error_func)
try:
# Verify they got at least 2 RPCs through.
if enable_read_thread:
read_thread.set_notify_after_n_successful_rpcs(2)
update_thread.set_notify_after_n_successful_rpcs(2)
if enable_read_thread:
read_thread.wait_for_notification.get()
update_thread.wait_for_notification.get()
# Execute the failover.
if enable_read_thread:
read_thread.set_notify_after_n_successful_rpcs(10)
update_thread.set_notify_after_n_successful_rpcs(10)
reparent_func()
# Failover is done. Swap master and replica for the next test.
global master, replica
master, replica = replica, master
if enable_read_thread:
read_thread.wait_for_notification.get()
update_thread.wait_for_notification.get()
except:
# Something went wrong. Kill vtgate first to unblock any buffered requests
# which would further block the two threads.
utils.vtgate.kill()
raise
finally:
# Stop threads.
if enable_read_thread:
read_thread.stop()
update_thread.stop()
if enable_read_thread:
read_thread.join()
update_thread.join()
# Both threads must not see any error.
if enable_read_thread:
self.assertEqual(0, read_thread.errors)
self.assertEqual(0, update_thread.errors)
# At least one thread should have been buffered.
# TODO(mberlin): This may fail if a failover is too fast. Add retries then.
v = utils.vtgate.get_vars()
labels = '%s.%s' % (KEYSPACE, SHARD)
in_flight_max = v['BufferLastRequestsInFlightMax'].get(labels, 0)
if in_flight_max == 0:
# Missed buffering is okay when we observed the failover during the
# COMMIT (which cannot trigger the buffering).
self.assertGreater(update_thread.commit_errors(), 0,
'No buffering took place and the update thread saw no'
                         ' error during COMMIT. But one of the two must happen.')
else:
self.assertGreater(in_flight_max, 0)
# There was a failover and the HealthCheck module must have seen it.
master_promoted_count = v['HealthcheckMasterPromoted'].get(labels, 0)
self.assertGreater(master_promoted_count, 0)
if labels in v['BufferFailoverDurationSumMs']:
# Buffering was actually started.
logging.debug('Failover was buffered for %d milliseconds.',
v['BufferFailoverDurationSumMs'][labels])
# Number of buffering stops must be equal to the number of seen failovers.
buffering_stops = v['BufferStops'].get('%s.NewMasterSeen' % labels, 0)
self.assertEqual(master_promoted_count, buffering_stops)
def external_reparent(self):
# Demote master.
start = time.time()
master.mquery('', mysql_flavor().demote_master_commands(), log_query=True)
if master.semi_sync_enabled():
master.set_semi_sync_enabled(master=False)
# Wait for replica to catch up to master.
utils.wait_for_replication_pos(master, replica)
    # Wait for at least one second to artificially prolong the failover and give
# the buffer a chance to observe it.
d = time.time() - start
min_unavailability_s = 1
if d < min_unavailability_s:
w = min_unavailability_s - d
logging.debug('Waiting for %.1f seconds because the failover was too fast'
' (took only %.3f seconds)', w, d)
time.sleep(w)
# Promote replica to new master.
replica.mquery('', mysql_flavor().promote_slave_commands(),
log_query=True)
if replica.semi_sync_enabled():
replica.set_semi_sync_enabled(master=True)
old_master = master
new_master = replica
# Configure old master to use new master.
new_pos = mysql_flavor().master_position(new_master)
logging.debug('New master position: %s', str(new_pos))
# Use 'localhost' as hostname because Travis CI worker hostnames
# are too long for MySQL replication.
change_master_cmds = mysql_flavor().change_master_commands(
'localhost', new_master.mysql_port, new_pos)
old_master.mquery('', ['RESET SLAVE'] + change_master_cmds +
['START SLAVE'], log_query=True)
# Notify the new vttablet master about the reparent.
utils.run_vtctl(['TabletExternallyReparented', new_master.tablet_alias],
auto_log=True)
class TestBuffer(TestBufferBase):
def setUp(self):
utils.vtgate.kill()
# Restart vtgate between each test or the feature
# --buffer_min_time_between_failovers
# will ignore subsequent failovers.
start_vtgate()
def test_buffer_planned_reparent(self):
def planned_reparent():
utils.run_vtctl(['PlannedReparentShard', '-keyspace_shard',
'%s/%s' % (KEYSPACE, SHARD),
'-new_master', replica.tablet_alias])
self._test_buffer(planned_reparent)
def test_buffer_external_reparent(self):
self._test_buffer(self.external_reparent)
if __name__ == '__main__':
utils.main()
|
with self.vtgate.create_connection() as conn:
c = conn.cursor(keyspace=KEYSPACE, shards=[SHARD], tablet_type='master',
writable=self.writable)
while not self.quit:
try:
self.execute(c)
self.rpcs += 1
# If notifications are requested, check if we already executed the
# required number of successful RPCs.
# Use >= instead of == because we can miss the exact point due to
# slow thread scheduling.
with self.notify_lock:
if (self.notify_after_n_successful_rpcs != 0 and
self.rpcs >= (self.notify_after_n_successful_rpcs +
self.rpcs_so_far)):
self.wait_for_notification.put(True)
self.notify_after_n_successful_rpcs = 0
except Exception as e: # pylint: disable=broad-except
self.errors += 1
logging.debug('thread: %s query failed: %s', self.name, str(e))
        # Wait 10 ms between two attempts.
time.sleep(0.01)
|
payload.rs
|
//! Payload stream
use bytes::{Bytes, BytesMut};
#[cfg(not(test))]
use futures::task::current as current_task;
use futures::task::Task;
use futures::{Async, Poll, Stream};
use std::cell::RefCell;
use std::cmp;
use std::collections::VecDeque;
use std::rc::{Rc, Weak};
use error::PayloadError;
/// max buffer size 32k
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq)]
pub(crate) enum PayloadStatus {
Read,
Pause,
Dropped,
}
/// Buffered stream of bytes chunks
///
/// Payload stores chunks in a vector. First chunk can be received with
/// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
///
/// Payload stream can be used as `HttpResponse` body stream.
#[derive(Debug)]
pub struct Payload {
inner: Rc<RefCell<Inner>>,
}
impl Payload {
/// Create payload stream.
///
    /// This method constructs two objects responsible for bytes stream
/// generation.
///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
pub fn
|
(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof)));
(
PayloadSender {
inner: Rc::downgrade(&shared),
},
Payload { inner: shared },
)
}
/// Create empty payload
#[doc(hidden)]
pub fn empty() -> Payload {
Payload {
inner: Rc::new(RefCell::new(Inner::new(true))),
}
}
/// Length of the data in this payload
#[cfg(test)]
pub fn len(&self) -> usize {
self.inner.borrow().len()
}
/// Is payload empty
#[cfg(test)]
pub fn is_empty(&self) -> bool {
self.inner.borrow().len() == 0
}
/// Put unused data back to payload
#[inline]
pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data);
}
#[cfg(test)]
pub(crate) fn readall(&self) -> Option<Bytes> {
self.inner.borrow_mut().readall()
}
#[inline]
/// Set read buffer capacity
///
/// Default buffer capacity is 32Kb.
pub fn set_read_buffer_capacity(&mut self, cap: usize) {
self.inner.borrow_mut().capacity = cap;
}
}
impl Stream for Payload {
type Item = Bytes;
type Error = PayloadError;
#[inline]
fn poll(&mut self) -> Poll<Option<Bytes>, PayloadError> {
self.inner.borrow_mut().readany()
}
}
impl Clone for Payload {
fn clone(&self) -> Payload {
Payload {
inner: Rc::clone(&self.inner),
}
}
}
/// Payload writer interface.
pub(crate) trait PayloadWriter {
/// Set stream error.
fn set_error(&mut self, err: PayloadError);
/// Write eof into a stream which closes reading side of a stream.
fn feed_eof(&mut self);
/// Feed bytes into a payload stream
fn feed_data(&mut self, data: Bytes);
/// Need read data
fn need_read(&self) -> PayloadStatus;
}
/// Sender part of the payload stream
pub struct PayloadSender {
inner: Weak<RefCell<Inner>>,
}
impl PayloadWriter for PayloadSender {
#[inline]
fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().set_error(err)
}
}
#[inline]
fn feed_eof(&mut self) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_eof()
}
}
#[inline]
fn feed_data(&mut self, data: Bytes) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_data(data)
}
}
#[inline]
fn need_read(&self) -> PayloadStatus {
// we check need_read only if Payload (other side) is alive,
        // otherwise report the payload as dropped
if let Some(shared) = self.inner.upgrade() {
if shared.borrow().need_read {
PayloadStatus::Read
} else {
#[cfg(not(test))]
{
if shared.borrow_mut().io_task.is_none() {
shared.borrow_mut().io_task = Some(current_task());
}
}
PayloadStatus::Pause
}
} else {
PayloadStatus::Dropped
}
}
}
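/// Shared state behind `Payload` and `PayloadSender`: buffered chunks, stream status and the tasks to wake up.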
#[derive(Debug)]
struct Inner {
len: usize,
eof: bool,
err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>,
capacity: usize,
task: Option<Task>,
io_task: Option<Task>,
}
impl Inner {
fn new(eof: bool) -> Self {
Inner {
eof,
len: 0,
err: None,
items: VecDeque::new(),
need_read: true,
capacity: MAX_BUFFER_SIZE,
task: None,
io_task: None,
}
}
#[inline]
fn set_error(&mut self, err: PayloadError) {
self.err = Some(err);
}
#[inline]
fn feed_eof(&mut self) {
self.eof = true;
}
#[inline]
fn feed_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_back(data);
self.need_read = self.len < self.capacity;
if let Some(task) = self.task.take() {
task.notify()
}
}
#[cfg(test)]
fn len(&self) -> usize {
self.len
}
#[cfg(test)]
pub(crate) fn readall(&mut self) -> Option<Bytes> {
let len = self.items.iter().map(|b| b.len()).sum();
if len > 0 {
let mut buf = BytesMut::with_capacity(len);
for item in &self.items {
buf.extend_from_slice(item);
}
self.items = VecDeque::new();
self.len = 0;
Some(buf.take().freeze())
} else {
self.need_read = true;
None
}
}
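    /// Pop the next buffered chunk; when the buffer is empty, return an error, EOF, or register the current task and return `NotReady`.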
fn readany(&mut self) -> Poll<Option<Bytes>, PayloadError> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
self.need_read = self.len < self.capacity;
#[cfg(not(test))]
{
if self.need_read && self.task.is_none() {
self.task = Some(current_task());
}
if let Some(task) = self.io_task.take() {
task.notify()
}
}
Ok(Async::Ready(Some(data)))
} else if let Some(err) = self.err.take() {
Err(err)
} else if self.eof {
Ok(Async::Ready(None))
} else {
self.need_read = true;
#[cfg(not(test))]
{
if self.task.is_none() {
self.task = Some(current_task());
}
if let Some(task) = self.io_task.take() {
task.notify()
}
}
Ok(Async::NotReady)
}
}
fn unread_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_front(data);
}
}
/// Payload buffer
pub struct PayloadBuffer<S> {
len: usize,
items: VecDeque<Bytes>,
stream: S,
}
impl<S> PayloadBuffer<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
{
/// Create new `PayloadBuffer` instance
pub fn new(stream: S) -> Self {
PayloadBuffer {
len: 0,
items: VecDeque::new(),
stream,
}
}
/// Get mutable reference to an inner stream.
pub fn get_mut(&mut self) -> &mut S {
&mut self.stream
}
#[inline]
fn poll_stream(&mut self) -> Poll<bool, PayloadError> {
self.stream.poll().map(|res| match res {
Async::Ready(Some(data)) => {
self.len += data.len();
self.items.push_back(data);
Async::Ready(true)
}
Async::Ready(None) => Async::Ready(false),
Async::NotReady => Async::NotReady,
})
}
/// Read first available chunk of bytes
#[inline]
pub fn readany(&mut self) -> Poll<Option<Bytes>, PayloadError> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
Ok(Async::Ready(Some(data)))
} else {
match self.poll_stream()? {
Async::Ready(true) => self.readany(),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
}
/// Check if buffer contains enough bytes
#[inline]
pub fn can_read(&mut self, size: usize) -> Poll<Option<bool>, PayloadError> {
if size <= self.len {
Ok(Async::Ready(Some(true)))
} else {
match self.poll_stream()? {
Async::Ready(true) => self.can_read(size),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
}
/// Return reference to the first chunk of data
#[inline]
pub fn get_chunk(&mut self) -> Poll<Option<&[u8]>, PayloadError> {
if self.items.is_empty() {
match self.poll_stream()? {
Async::Ready(true) => (),
Async::Ready(false) => return Ok(Async::Ready(None)),
Async::NotReady => return Ok(Async::NotReady),
}
}
match self.items.front().map(|c| c.as_ref()) {
Some(chunk) => Ok(Async::Ready(Some(chunk))),
None => Ok(Async::NotReady),
}
}
/// Read exact number of bytes
#[inline]
pub fn read_exact(&mut self, size: usize) -> Poll<Option<Bytes>, PayloadError> {
if size <= self.len {
self.len -= size;
let mut chunk = self.items.pop_front().unwrap();
if size < chunk.len() {
let buf = chunk.split_to(size);
self.items.push_front(chunk);
Ok(Async::Ready(Some(buf)))
} else if size == chunk.len() {
Ok(Async::Ready(Some(chunk)))
} else {
let mut buf = BytesMut::with_capacity(size);
buf.extend_from_slice(&chunk);
while buf.len() < size {
let mut chunk = self.items.pop_front().unwrap();
let rem = cmp::min(size - buf.len(), chunk.len());
buf.extend_from_slice(&chunk.split_to(rem));
if !chunk.is_empty() {
self.items.push_front(chunk);
}
}
Ok(Async::Ready(Some(buf.freeze())))
}
} else {
match self.poll_stream()? {
Async::Ready(true) => self.read_exact(size),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
}
    /// Remove the specified number of bytes from the buffer
#[inline]
pub fn drop_bytes(&mut self, size: usize) {
if size <= self.len {
self.len -= size;
let mut len = 0;
while len < size {
let mut chunk = self.items.pop_front().unwrap();
let rem = cmp::min(size - len, chunk.len());
len += rem;
if rem < chunk.len() {
chunk.split_to(rem);
self.items.push_front(chunk);
}
}
}
}
/// Copy buffered data
pub fn copy(&mut self, size: usize) -> Poll<Option<BytesMut>, PayloadError> {
if size <= self.len {
let mut buf = BytesMut::with_capacity(size);
for chunk in &self.items {
if buf.len() < size {
let rem = cmp::min(size - buf.len(), chunk.len());
buf.extend_from_slice(&chunk[..rem]);
}
if buf.len() == size {
return Ok(Async::Ready(Some(buf)));
}
}
}
match self.poll_stream()? {
Async::Ready(true) => self.copy(size),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
/// Read until specified ending, returning the ending as well.
pub fn read_until(&mut self, line: &[u8]) -> Poll<Option<Bytes>, PayloadError> {
let mut idx = 0;
let mut num = 0;
let mut offset = 0;
let mut found = false;
let mut length = 0;
for no in 0..self.items.len() {
{
let chunk = &self.items[no];
for (pos, ch) in chunk.iter().enumerate() {
if *ch == line[idx] {
idx += 1;
if idx == line.len() {
num = no;
offset = pos + 1;
length += pos + 1;
found = true;
break;
}
} else {
idx = 0
}
}
if !found {
length += chunk.len()
}
}
if found {
let mut buf = BytesMut::with_capacity(length);
if num > 0 {
for _ in 0..num {
buf.extend_from_slice(&self.items.pop_front().unwrap());
}
}
if offset > 0 {
let mut chunk = self.items.pop_front().unwrap();
buf.extend_from_slice(&chunk.split_to(offset));
if !chunk.is_empty() {
self.items.push_front(chunk)
}
}
self.len -= length;
return Ok(Async::Ready(Some(buf.freeze())));
}
}
match self.poll_stream()? {
Async::Ready(true) => self.read_until(line),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
/// Read bytes until new line delimiter
pub fn readline(&mut self) -> Poll<Option<Bytes>, PayloadError> {
self.read_until(b"\n")
}
/// Put unprocessed data back to the buffer
pub fn unprocessed(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_front(data);
}
/// Get remaining data from the buffer
pub fn remaining(&mut self) -> Bytes {
self.items
.iter_mut()
.fold(BytesMut::new(), |mut b, c| {
b.extend_from_slice(c);
b
})
.freeze()
}
}
#[cfg(test)]
mod tests {
use super::*;
use failure::Fail;
use futures::future::{lazy, result};
use std::io;
use tokio::runtime::current_thread::Runtime;
#[test]
fn test_error() {
let err: PayloadError =
io::Error::new(io::ErrorKind::Other, "ParseError").into();
assert_eq!(format!("{}", err), "ParseError");
assert_eq!(format!("{}", err.cause().unwrap()), "ParseError");
let err = PayloadError::Incomplete;
assert_eq!(
format!("{}", err),
"A payload reached EOF, but is not complete."
);
}
#[test]
fn test_basic() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (_, payload) = Payload::new(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(payload.len, 0);
assert_eq!(Async::NotReady, payload.readany().ok().unwrap());
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_eof() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::new(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.readany().ok().unwrap());
sender.feed_data(Bytes::from("data"));
sender.feed_eof();
assert_eq!(
Async::Ready(Some(Bytes::from("data"))),
payload.readany().ok().unwrap()
);
assert_eq!(payload.len, 0);
assert_eq!(Async::Ready(None), payload.readany().ok().unwrap());
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_err() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::new(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.readany().ok().unwrap());
sender.set_error(PayloadError::Incomplete);
payload.readany().err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_readany() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::new(false);
let mut payload = PayloadBuffer::new(payload);
sender.feed_data(Bytes::from("line1"));
sender.feed_data(Bytes::from("line2"));
assert_eq!(
Async::Ready(Some(Bytes::from("line1"))),
payload.readany().ok().unwrap()
);
assert_eq!(payload.len, 0);
assert_eq!(
Async::Ready(Some(Bytes::from("line2"))),
payload.readany().ok().unwrap()
);
assert_eq!(payload.len, 0);
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_readexactly() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::new(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.read_exact(2).ok().unwrap());
sender.feed_data(Bytes::from("line1"));
sender.feed_data(Bytes::from("line2"));
assert_eq!(
Async::Ready(Some(Bytes::from_static(b"li"))),
payload.read_exact(2).ok().unwrap()
);
assert_eq!(payload.len, 3);
assert_eq!(
Async::Ready(Some(Bytes::from_static(b"ne1l"))),
payload.read_exact(4).ok().unwrap()
);
assert_eq!(payload.len, 4);
sender.set_error(PayloadError::Incomplete);
payload.read_exact(10).err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_readuntil() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::new(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.read_until(b"ne").ok().unwrap());
sender.feed_data(Bytes::from("line1"));
sender.feed_data(Bytes::from("line2"));
assert_eq!(
Async::Ready(Some(Bytes::from("line"))),
payload.read_until(b"ne").ok().unwrap()
);
assert_eq!(payload.len, 1);
assert_eq!(
Async::Ready(Some(Bytes::from("1line2"))),
payload.read_until(b"2").ok().unwrap()
);
assert_eq!(payload.len, 0);
sender.set_error(PayloadError::Incomplete);
payload.read_until(b"b").err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_unread_data() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (_, mut payload) = Payload::new(false);
payload.unread_data(Bytes::from("data"));
assert!(!payload.is_empty());
assert_eq!(payload.len(), 4);
assert_eq!(
Async::Ready(Some(Bytes::from("data"))),
payload.poll().ok().unwrap()
);
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
}
|
new
|
Cacher.py
|
import os
import pickle
from .Utils import purify, staticPath
def cacheIn(dir, name, data):
"""
Store given `data` under ./cache/dir/name.pickle file.
    Note that `dir` and `name` are "purified" before they are used!
-dir: string of sub-directory to be created. Cache-file will be stored in it.
It shouldn't be None.
-name: string of filename without any extension. Cache-file will be named
after it. It shouldn't be None.
-data: python object to be cached.
"""
path = staticPath(__file__, "cache")
dir = purify(dir)
name = purify(name)
path = os.path.join(path, dir)
# If specified file exists, overwrite it without errors or warnings.
os.makedirs(path, exist_ok=True)
filename = name + ".pickle"
path = os.path.join(path, filename)
with open(path, "wb") as file:
pickle.dump(data, file)
def
|
(dir, name):
"""
Try to retrieve cached data under `./cache/dir/name.pickle`. If the
    cache-file doesn't exist, None is returned.
    Note that `dir` and `name` are "purified" before they are used!
    -dir: string of sub-directory to be searched for the cache-file. It shouldn't be
None.
-name: string of filename to be searched without any extension. It shouldn't
be None.
"""
data = None
path = staticPath(__file__, "cache")
dir = purify(dir)
name = purify(name)
filename = name + ".pickle"
path = os.path.join(path, dir, filename)
if os.path.isfile(path):
with open(path, "rb") as file:
data = pickle.load(file)
return data
|
cacheOut
|
domains.rs
|
#[macro_use]
extern crate postgres_derive;
#[macro_use]
extern crate postgres;
use postgres::{Connection, TlsMode};
use postgres::types::WrongType;
mod util;
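// Without a #[postgres(name = "...")] attribute the derive uses the Rust type name as the Postgres type name.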
#[test]
fn defaults() {
#[derive(FromSql, ToSql, Debug, PartialEq)]
struct SessionId(Vec<u8>);
let conn = Connection::connect("postgres://postgres:password@localhost", TlsMode::None)
.unwrap();
conn.execute(
"CREATE DOMAIN pg_temp.\"SessionId\" AS bytea CHECK(octet_length(VALUE) = 16);",
&[],
).unwrap();
util::test_type(
&conn,
"\"SessionId\"",
&[
(
SessionId(b"0123456789abcdef".to_vec()),
"'0123456789abcdef'",
),
],
);
}
#[test]
fn
|
() {
#[derive(FromSql, ToSql, Debug, PartialEq)]
#[postgres(name = "session_id")]
struct SessionId(Vec<u8>);
let conn = Connection::connect("postgres://postgres:password@localhost", TlsMode::None)
.unwrap();
conn.execute(
"CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16);",
&[],
).unwrap();
util::test_type(
&conn,
"session_id",
&[
(
SessionId(b"0123456789abcdef".to_vec()),
"'0123456789abcdef'",
),
],
);
}
#[test]
fn wrong_name() {
#[derive(FromSql, ToSql, Debug, PartialEq)]
struct SessionId(Vec<u8>);
let conn = Connection::connect("postgres://postgres:password@localhost", TlsMode::None)
.unwrap();
conn.execute(
"CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16);",
&[],
).unwrap();
let err = conn.execute("SELECT $1::session_id", &[&SessionId(vec![])])
.unwrap_err();
assert!(err.as_conversion().unwrap().is::<WrongType>());
}
#[test]
fn wrong_type() {
#[derive(FromSql, ToSql, Debug, PartialEq)]
#[postgres(name = "session_id")]
struct SessionId(i32);
let conn = Connection::connect("postgres://postgres:password@localhost", TlsMode::None)
.unwrap();
conn.execute(
"CREATE DOMAIN pg_temp.session_id AS bytea CHECK(octet_length(VALUE) = 16);",
&[],
).unwrap();
let err = conn.execute("SELECT $1::session_id", &[&SessionId(0)])
.unwrap_err();
assert!(err.as_conversion().unwrap().is::<WrongType>());
}
#[test]
fn domain_in_composite() {
#[derive(FromSql, ToSql, Debug, PartialEq)]
#[postgres(name = "domain")]
struct Domain(String);
#[derive(FromSql, ToSql, Debug, PartialEq)]
#[postgres(name = "composite")]
struct Composite {
domain: Domain,
}
let conn = Connection::connect("postgres://postgres:password@localhost", TlsMode::None)
.unwrap();
conn.batch_execute(
"
CREATE DOMAIN pg_temp.domain AS TEXT;\
CREATE TYPE pg_temp.composite AS (
domain domain
);
",
).unwrap();
util::test_type(
&conn,
"composite",
&[
(Composite { domain: Domain("hello".to_string()) }, "ROW('hello')"),
],
);
}
|
name_overrides
|
util.go
|
package server
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"io"
mr "math/rand"
"net/http"
"os"
"os/exec"
"time"
)
// createV4UUID returns a V4 RFC4122 compliant UUID.
func createV4UUID() string {
u := make([]byte, 16)
rand.Read(u)
// 13th char must be 4 and 17th must be in [89AB]
u[8] = (u[8] | 0x80) & 0xBF
u[6] = (u[6] | 0x40) & 0x4F
return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
// randomString returns a random string of n characters.
func randomString(n int) string {
const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
mr.Seed(time.Now().UTC().UnixNano())
result := make([]byte, n)
for i := 0; i < n; i++ {
result[i] = chars[mr.Intn(len(chars))]
}
return string(result)
|
// execCmd executes an os command and formats any output from stdout/err
func execCmd(cmd *exec.Cmd) (string, error) {
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
err := cmd.Run()
result := stdout.String()
if err := stderr.String(); err != "" {
return "", errors.New(err)
}
return result, err
}
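// downloadFile fetches the contents of url and writes them to a newly created file at filepath.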
func downloadFile(filepath string, url string) error {
// Create the file
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close()
// Get the data
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
	// Write the body to the file
_, err = io.Copy(out, resp.Body)
if err != nil {
return err
}
return nil
}
|
}
|
relationship.py
|
import redis
from keys import key_list as default_key_list
class Relationship(object):
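    """Redis-backed helper for follow/unfollow and block/unblock relationships between user ids, stored as Redis sets keyed as "user:<id>:<key>"."""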
def __init__(self, redis_connection=None, key_list=None, actor=None):
if key_list:
self.key_list = default_key_list.copy()
self.key_list.update(key_list)
else:
self.key_list = default_key_list
if redis_connection:
self.redis_connection = redis_connection
else:
self.redis_connection = redis.StrictRedis(
host='localhost',
port=6379,
db=0
)
self.actor = actor
def __call__(self, *args, **kwargs):
self.actor = args[0]
return self
def _action_call(self, command, from_id, to_id, operation_key):
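        # Apply a Redis set command (sadd/srem/sismember) to "user:<from_id>:<operation_key>" with to_id as the member.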
command_values = ':'.join(('user', str(from_id), operation_key)), to_id
return getattr(self.redis_connection, command)(*command_values)
def _list_call(self, operation_key):
return self.redis_connection.smembers(
'user:{}:{}'.format(self._get_actor(), operation_key)
)
def _count_call(self, operation_key):
return self.redis_connection.scard(
'user:{}:{}'.format(
self._get_actor(),
operation_key
)
)
def _get_actor(self):
if hasattr(self, 'actor'):
return self.actor
raise ValueError("actor is not defined")
def block(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["blocked_by"])
def unblock(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["blocked"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["blocked_by"])
def follow(self, to_id):
self._action_call('sadd', self._get_actor(), to_id, self.key_list["following"])
self._action_call('sadd', to_id, self._get_actor(), self.key_list["followers"])
def unfollow(self, to_id):
self._action_call('srem', self._get_actor(), to_id, self.key_list["following"])
self._action_call('srem', to_id, self._get_actor(), self.key_list["followers"])
def friends(self):
return self.redis_connection.sinter(
"user:{}:{}".format(self._get_actor(), self.key_list["following"]),
"user:{}:{}".format(self._get_actor(), self.key_list["followers"]),
)
def mutual_friends(self, to_id):
actor_friends, to_id_friends = self(self._get_actor()).friends(), self(to_id).friends()
return actor_friends.intersection(to_id_friends)
def followers(self):
|
def following(self):
return self._list_call(self.key_list["following"])
def blocks(self):
return self._list_call(self.key_list["blocked"])
def blocked(self):
return self._list_call(self.key_list["blocked_by"])
def follower_count(self):
return self._count_call(self.key_list["followers"])
def following_count(self):
return self._count_call(self.key_list["following"])
def block_count(self):
return self._count_call(self.key_list["blocked"])
def blocked_count(self):
return self._count_call(self.key_list["blocked_by"])
def is_follower(self, follower_id):
return self._action_call('sismember', self._get_actor(), follower_id, self.key_list["followers"])
def is_following(self, following_id):
return self._action_call('sismember', self._get_actor(), following_id, self.key_list["following"])
def is_blocked(self, blocked_id):
return self._action_call('sismember', self._get_actor(), blocked_id, self.key_list["blocked"])
def is_blocked_by(self, blocked_by_id):
return self._action_call('sismember', self._get_actor(), blocked_by_id,self.key_list["blocked_by"])
def get_network(self, output):
user_id = self._get_actor()
try:
import pydot
except ImportError:
raise ImportError("You need pydot library to get network functionality.")
graph = pydot.Dot('network_of_user_{}'.format(user_id), graph_type='digraph')
target_node = pydot.Node(user_id)
for _id in self(user_id).following():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(target_node, user_node))
for _id in self(user_id).followers():
user_node = pydot.Node(_id)
graph.add_edge(pydot.Edge(user_node, target_node))
graph.write_png(output)
|
return self._list_call(self.key_list["followers"])
|
pipelines.py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class PaperSpiderPipeline(object):
def process_item(self, item, spider):
|
return item
|
|
utils.ts
|
import { ValidationError } from '@nestjs/common';
import { plainToClass } from 'class-transformer';
import { validate } from 'class-validator';
import { registerDecorator, ValidationOptions } from 'class-validator';
import zxcvbn from 'zxcvbn';
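// Returns true only when every enumerable property of obj is null or an empty string.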
export function checkPropertiesExists(obj: Record<string, unknown>) {
for (const key in obj) {
if (obj[key] !== null && obj[key] != '') return false;
}
return true;
}
export async function validateObject(obj: any, transformToType: any) {
const transformedObj = plainToClass(transformToType, obj);
const validationErrors: ValidationError[] = await validate(transformedObj);
const errors = validationErrors.map((v) => {
const error = {};
error[`${v.property}Errors`] = Object.values(v.constraints);
return error;
});
return errors;
}
export function IsPasswordValid(validationOptions?: ValidationOptions) {
return function (object: any, propertyName: string) {
registerDecorator({
target: object.constructor,
propertyName: propertyName,
constraints: [],
options: validationOptions,
validator: {
validate(value: any) {
const result = zxcvbn(value);
if (result.score === 0) {
this.error = 'password is too weak';
return false;
}
return true;
},
defaultMessage(): string {
return this.error || 'something went wrong';
},
},
});
};
}
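// Merge objects whose values are string arrays, concatenating the arrays for keys that appear in more than one input.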
export function
|
(...objects: Record<string, string[]>[]) {
const returnValue = {};
for (const obj of objects) {
Object.keys(obj).forEach((key) => (returnValue[key] = []));
}
for (const obj of objects) {
for (const [key, value] of Object.entries(obj)) {
returnValue[key] = [...returnValue[key], ...value];
}
}
return returnValue;
}
|
mergeObjects
|
config.py
|
import os
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.getcwd() + '/blog.db'
SECRET_KEY = 'secret'
|
||
database.py
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from os import getenv
|
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
|
SQLALCHEMY_DATABASE_URL = getenv("DATABASE_URL")
|
test_to_json.py
|
import re
def test_repr(tracer, rpc_stub):
class A:
|
tracer.start()
match = re.match("foo", "foobar")
a = A()
tracer.stop()
from utils import return_GetFrame
frame_proto = return_GetFrame(rpc_stub, "test_repr")
binding_match_event = frame_proto.events[0]
assert (
binding_match_event.binding.repr
== "<re.Match object; span=(0, 3), match='foo'>"
)
assert (
binding_match_event.binding.value
== '{"repr": "<re.Match object; span=(0, 3), match=\'foo\'>"}'
)
binding_a_event = frame_proto.events[2]
assert binding_a_event.binding.repr == "<test_to_json.test_repr.<locals>.A object>"
assert binding_a_event.binding.value == "{}"
|
pass
|
func.go
|
package gosrc
import (
"fmt"
"github.com/davyxu/tabtoy/v3/model"
"strings"
"text/template"
)
var UsefulFunc = template.FuncMap{}
// Convert the types used in definitions into the corresponding composite type for each target language.
func init() {
UsefulFunc["GoType"] = func(tf *mo
|
TypeDefine) string {
convertedType := model.LanguagePrimitive(tf.FieldType, "go")
if tf.IsArray() {
return "[]" + convertedType
}
return convertedType
}
UsefulFunc["GoTabTag"] = func(fieldType *model.TypeDefine) string {
var sb strings.Builder
var kv []string
if fieldType.Name != "" {
kv = append(kv, fmt.Sprintf("tb_name:\"%s\"", fieldType.Name))
}
if len(kv) > 0 {
sb.WriteString("`")
for _, s := range kv {
sb.WriteString(s)
}
sb.WriteString("`")
}
return sb.String()
}
UsefulFunc["JsonTabOmit"] = func() string {
return "`json:\"-\"`"
}
}
|
del.
|
fileserver.go
|
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"image"
"image/jpeg"
"image/png"
"io"
"io/ioutil"
slog "log"
random "math/rand"
"mime/multipart"
"net/http"
_ "net/http/pprof"
"net/smtp"
"net/url"
"os"
"os/signal"
"path"
"path/filepath"
"regexp"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/astaxie/beego/httplib"
"github.com/deckarep/golang-set"
_ "github.com/eventials/go-tus"
"github.com/json-iterator/go"
"github.com/nfnt/resize"
"github.com/sjqzhang/googleAuthenticator"
"github.com/sjqzhang/goutil"
log "github.com/sjqzhang/seelog"
"github.com/sjqzhang/tusd"
"github.com/sjqzhang/tusd/filestore"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
var staticHandler http.Handler
var json = jsoniter.ConfigCompatibleWithStandardLibrary
var server *Server
var logacc log.LoggerInterface
var FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
var CONST_QUEUE_SIZE = 10000
var (
VERSION string
BUILD_TIME string
GO_VERSION string
GIT_VERSION string
v = flag.Bool("v", false, "display version")
)
var (
FileName string
ptr unsafe.Pointer
DOCKER_DIR = ""
STORE_DIR = STORE_DIR_NAME
CONF_DIR = CONF_DIR_NAME
LOG_DIR = LOG_DIR_NAME
DATA_DIR = DATA_DIR_NAME
STATIC_DIR = STATIC_DIR_NAME
LARGE_DIR_NAME = "haystack"
LARGE_DIR = STORE_DIR + "/haystack"
CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
logConfigStr = `
<seelog type="asynctimer" asyncinterval="1000" minlevel="trace" maxlevel="error">
<outputs formatid="common">
<buffered formatid="common" size="1048576" flushperiod="1000">
<rollingfile type="size" filename="{DOCKER_DIR}log/fileserver.log" maxsize="104857600" maxrolls="10"/>
</buffered>
</outputs>
<formats>
<format id="common" format="%Date %Time [%LEV] [%File:%Line] [%Func] %Msg%n" />
</formats>
</seelog>
`
logAccessConfigStr = `
<seelog type="asynctimer" asyncinterval="1000" minlevel="trace" maxlevel="error">
<outputs formatid="common">
<buffered formatid="common" size="1048576" flushperiod="1000">
<rollingfile type="size" filename="{DOCKER_DIR}log/access.log" maxsize="104857600" maxrolls="10"/>
</buffered>
</outputs>
<formats>
<format id="common" format="%Date %Time [%LEV] [%File:%Line] [%Func] %Msg%n" />
</formats>
</seelog>
`
)
const (
STORE_DIR_NAME = "files"
LOG_DIR_NAME = "log"
DATA_DIR_NAME = "data"
CONF_DIR_NAME = "conf"
STATIC_DIR_NAME = "static"
CONST_STAT_FILE_COUNT_KEY = "fileCount"
CONST_BIG_UPLOAD_PATH_SUFFIX = "/big/upload/"
CONST_STAT_FILE_TOTAL_SIZE_KEY = "totalSize"
CONST_Md5_ERROR_FILE_NAME = "errors.md5"
CONST_Md5_QUEUE_FILE_NAME = "queue.md5"
CONST_FILE_Md5_FILE_NAME = "files.md5"
CONST_REMOME_Md5_FILE_NAME = "removes.md5"
CONST_SMALL_FILE_SIZE = 1024 * 1024
CONST_MESSAGE_CLUSTER_IP = "Can only be called by the cluster ip or 127.0.0.1 or admin_ips(cfg.json),current ip:%s"
cfgJson = `{
"绑定端号": "端口",
"addr": ":8080",
"PeerID": "集群内唯一,请使用0-9的单字符,默认自动生成",
"peer_id": "%s",
"本主机地址": "本机http地址,默认自动生成(注意端口必须与addr中的端口一致),必段为内网,自动生成不为内网请自行修改,下同",
"host": "%s",
"集群": "集群列表,注意为了高可用,IP必须不能是同一个,同一不会自动备份,且不能为127.0.0.1,且必须为内网IP,默认自动生成",
"peers": ["%s"],
"组号": "用于区别不同的集群(上传或下载)与support_group_manage配合使用,带在下载路径中",
"group": "group1",
"是否支持按组(集群)管理,主要用途是Nginx支持多集群": "默认不支持,不支持时路径为http://10.1.5.4:8080/action,支持时为http://10.1.5.4:8080/group(配置中的group参数)/action,action为动作名,如status,delete,sync等",
"support_group_manage": false,
"是否合并小文件": "默认不合并,合并可以解决inode不够用的情况(当前对于小于1M文件)进行合并",
"enable_merge_small_file": false,
"允许后缀名": "允许可以上传的文件后缀名,如jpg,jpeg,png等。留空允许所有。",
"extensions": [],
"重试同步失败文件的时间": "单位秒",
"refresh_interval": 1800,
"是否自动重命名": "默认不自动重命名,使用原文件名",
"rename_file": false,
"是否支持web上传,方便调试": "默认支持web上传",
"enable_web_upload": true,
"是否支持非日期路径": "默认支持非日期路径,也即支持自定义路径,需要上传文件时指定path",
"enable_custom_path": true,
"下载域名": "用于外网下载文件的域名,不包含http://",
"download_domain": "",
"场景列表": "当设定后,用户指的场景必项在列表中,默认不做限制(注意:如果想开启场景认功能,格式如下:'场景名:googleauth_secret' 如 default:N7IET373HB2C5M6D ",
"scenes": [],
"默认场景": "默认default",
"default_scene": "default",
"是否显示目录": "默认显示,方便调试用,上线时请关闭",
"show_dir": true,
"邮件配置": "",
"mail": {
"user": "[email protected]",
"password": "abc",
"host": "smtp.163.com:25"
},
"告警接收邮件列表": "接收人数组",
"alarm_receivers": [],
"告警接收URL": "方法post,参数:subject,message",
"alarm_url": "",
"下载是否需带token": "真假",
"download_use_token": false,
"下载token过期时间": "单位秒",
"download_token_expire": 600,
"是否自动修复": "在超过1亿文件时出现性能问题,取消此选项,请手动按天同步,请查看FAQ",
"auto_repair": true,
"文件去重算法md5可能存在冲突,默认md5": "sha1|md5",
"file_sum_arithmetic": "md5",
"管理ip列表": "用于管理集的ip白名单,",
"admin_ips": ["127.0.0.1"],
"是否启用迁移": "默认不启用",
"enable_migrate": false,
"文件是否去重": "默认去重",
"enable_distinct_file": true,
"是否开启跨站访问": "默认开启",
"enable_cross_origin": true,
"是否开启Google认证,实现安全的上传、下载": "默认不开启",
"enable_google_auth": false,
"认证url": "当url不为空时生效,注意:普通上传中使用http参数 auth_token 作为认证参数, 在断点续传中通过HTTP头Upload-Metadata中的auth_token作为认证参数,认证流程参考认证架构图",
"auth_url": "",
"下载是否认证": "默认不认证(注意此选项是在auth_url不为空的情况下生效)",
"enable_download_auth": false,
"默认是否下载": "默认下载",
"default_download": true,
"本机是否只读": "默认可读可写",
"read_only": false,
"是否开启断点续传": "默认开启",
"enable_tus": true,
"同步单一文件超时时间(单位秒)": "默认为0,程序自动计算,在特殊情况下,自已设定",
"sync_timeout": 0
}
`
)
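// cfgJson above is the built-in default configuration template; ParseConfig
// falls back to it when no config file path is given. The three %s placeholders
// (peer_id, host, peers) are presumably filled in by startup code outside this
// excerpt before the template is first parsed.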
type Server struct {
ldb *leveldb.DB
logDB *leveldb.DB
util *goutil.Common
statMap *goutil.CommonMap
sumMap *goutil.CommonMap
queueToPeers chan FileInfo
queueFromPeers chan FileInfo
queueFileLog chan *FileLog
lockMap *goutil.CommonMap
sceneMap *goutil.CommonMap
searchMap *goutil.CommonMap
curDate string
host string
}
type FileInfo struct {
Name string `json:"name"`
ReName string `json:"rename"`
Path string `json:"path"`
Md5 string `json:"md5"`
Size int64 `json:"size"`
Peers []string `json:"peers"`
Scene string `json:"scene"`
TimeStamp int64 `json:"timeStamp"`
OffSet int64 `json:"offset"`
}
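// A note on OffSet as it is used throughout this file: -1 appears to mark a
// regular standalone file, -2 a file picked up by migration/repair from disk,
// and a value >= 0 the byte offset of a small file inside a merged "haystack"
// block (see SaveSmallFile and DownloadFromPeer).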
type FileLog struct {
FileInfo *FileInfo
FileName string
}
type JsonResult struct {
Message string `json:"message"`
Status string `json:"status"`
Data interface{} `json:"data"`
}
type FileResult struct {
Url string `json:"url"`
Md5 string `json:"md5"`
Path string `json:"path"`
Domain string `json:"domain"`
Scene string `json:"scene"`
Size int64 `json:"size"`
ModTime int64 `json:"mtime"`
//Just for Compatibility
Scenes string `json:"scenes"`
Retmsg string `json:"retmsg"`
Retcode int `json:"retcode"`
Src string `json:"src"`
}
type Mail struct {
User string `json:"user"`
Password string `json:"password"`
Host string `json:"host"`
}
type StatDateFileInfo struct {
Date string `json:"date"`
TotalSize int64 `json:"totalSize"`
FileCount int64 `json:"fileCount"`
}
type GloablConfig struct {
Addr string `json:"addr"`
Peers []string `json:"peers"`
Group string `json:"group"`
RenameFile bool `json:"rename_file"`
ShowDir bool `json:"show_dir"`
Extensions []string `json:"extensions"`
RefreshInterval int `json:"refresh_interval"`
EnableWebUpload bool `json:"enable_web_upload"`
DownloadDomain string `json:"download_domain"`
EnableCustomPath bool `json:"enable_custom_path"`
Scenes []string `json:"scenes"`
AlarmReceivers []string `json:"alarm_receivers"`
DefaultScene string `json:"default_scene"`
Mail Mail `json:"mail"`
AlarmUrl string `json:"alarm_url"`
DownloadUseToken bool `json:"download_use_token"`
DownloadTokenExpire int `json:"download_token_expire"`
QueueSize int `json:"queue_size"`
AutoRepair bool `json:"auto_repair"`
Host string `json:"host"`
FileSumArithmetic string `json:"file_sum_arithmetic"`
PeerId string `json:"peer_id"`
SupportGroupManage bool `json:"support_group_manage"`
AdminIps []string `json:"admin_ips"`
EnableMergeSmallFile bool `json:"enable_merge_small_file"`
EnableMigrate bool `json:"enable_migrate"`
EnableDistinctFile bool `json:"enable_distinct_file"`
ReadOnly bool `json:"read_only"`
EnableCrossOrigin bool `json:"enable_cross_origin"`
EnableGoogleAuth bool `json:"enable_google_auth"`
AuthUrl string `json:"auth_url"`
EnableDownloadAuth bool `json:"enable_download_auth"`
DefaultDownload bool `json:"default_download"`
EnableTus bool `json:"enable_tus"`
SyncTimeout int64 `json:"sync_timeout"`
}
type FileInfoResult struct {
Name string `json:"name"`
Md5 string `json:"md5"`
Path string `json:"path"`
Size int64 `json:"size"`
ModTime int64 `json:"mtime"`
IsDir bool `json:"is_dir"`
}
func NewServer() *Server {
var (
server *Server
err error
)
server = &Server{
util: &goutil.Common{},
statMap: goutil.NewCommonMap(0),
lockMap: goutil.NewCommonMap(0),
sceneMap: goutil.NewCommonMap(0),
searchMap: goutil.NewCommonMap(0),
queueToPeers: make(chan FileInfo, CONST_QUEUE_SIZE),
queueFromPeers: make(chan FileInfo, CONST_QUEUE_SIZE),
queueFileLog: make(chan *FileLog, CONST_QUEUE_SIZE),
sumMap: goutil.NewCommonMap(365 * 3),
}
defaultTransport := &http.Transport{
DisableKeepAlives: true,
Dial: httplib.TimeoutDialer(time.Second*6, time.Second*300),
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
}
settings := httplib.BeegoHTTPSettings{
UserAgent: "Go-FastDFS",
ConnectTimeout: 10 * time.Second,
ReadWriteTimeout: 10 * time.Second,
Gzip: true,
DumpBody: true,
Transport: defaultTransport,
}
httplib.SetDefaultSetting(settings)
server.statMap.Put(CONST_STAT_FILE_COUNT_KEY, int64(0))
server.statMap.Put(CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0))
server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_COUNT_KEY, int64(0))
server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0))
server.curDate = server.util.GetToDay()
opts := &opt.Options{
CompactionTableSize: 1024 * 1024 * 20,
WriteBuffer: 1024 * 1024 * 20,
}
server.ldb, err = leveldb.OpenFile(CONST_LEVELDB_FILE_NAME, opts)
if err != nil {
fmt.Println(err)
log.Error(err)
panic(err)
}
server.logDB, err = leveldb.OpenFile(CONST_LOG_LEVELDB_FILE_NAME, opts)
if err != nil {
fmt.Println(err)
log.Error(err)
panic(err)
}
return server
}
func Config() *GloablConfig {
return (*GloablConfig)(atomic.LoadPointer(&ptr))
}
func ParseConfig(filePath string) {
var (
data []byte
)
if filePath == "" {
data = []byte(strings.TrimSpace(cfgJson))
} else {
file, err := os.Open(filePath)
if err != nil {
panic(fmt.Sprintln("open file path:", filePath, "error:", err))
}
defer file.Close()
FileName = filePath
data, err = ioutil.ReadAll(file)
if err != nil {
panic(fmt.Sprintln("file path:", filePath, " read all error:", err))
}
}
var c GloablConfig
if err := json.Unmarshal(data, &c); err != nil {
panic(fmt.Sprintln("file path:", filePath, "json unmarshal error:", err))
}
log.Info(c)
atomic.StorePointer(&ptr, unsafe.Pointer(&c))
log.Info("config parse success")
}
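// BackUpMetaDataByDate (below) rebuilds two per-day backup files from logDB:
// files.md5, whose lines look like "md5|size|timestamp|path", and meta.data,
// whose lines are "key<TAB>json" keyed both by the file md5 and by the md5 of
// its path.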
func (this *Server) BackUpMetaDataByDate(date string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("BackUpMetaDataByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
msg string
name string
fileInfo FileInfo
logFileName string
fileLog *os.File
fileMeta *os.File
metaFileName string
fi os.FileInfo
)
logFileName = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
this.lockMap.LockKey(logFileName)
defer this.lockMap.UnLockKey(logFileName)
metaFileName = DATA_DIR + "/" + date + "/" + "meta.data"
os.MkdirAll(DATA_DIR+"/"+date, 0775)
if this.util.IsExist(logFileName) {
os.Remove(logFileName)
}
if this.util.IsExist(metaFileName) {
os.Remove(metaFileName)
}
fileLog, err = os.
|
gFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
log.Error(err)
return
}
defer fileLog.Close()
fileMeta, err = os.OpenFile(metaFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
if err != nil {
log.Error(err)
return
}
defer fileMeta.Close()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
defer iter.Release()
for iter.Next() {
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
name = fileInfo.Name
if fileInfo.ReName != "" {
name = fileInfo.ReName
}
msg = fmt.Sprintf("%s\t%s\n", fileInfo.Md5, string(iter.Value()))
if _, err = fileMeta.WriteString(msg); err != nil {
log.Error(err)
}
msg = fmt.Sprintf("%s\t%s\n", this.util.MD5(fileInfo.Path+"/"+name), string(iter.Value()))
if _, err = fileMeta.WriteString(msg); err != nil {
log.Error(err)
}
msg = fmt.Sprintf("%s|%d|%d|%s\n", fileInfo.Md5, fileInfo.Size, fileInfo.TimeStamp, fileInfo.Path+"/"+name)
if _, err = fileLog.WriteString(msg); err != nil {
log.Error(err)
}
}
if fi, err = fileLog.Stat(); err != nil {
log.Error(err)
} else if fi.Size() == 0 {
fileLog.Close()
os.Remove(logFileName)
}
if fi, err = fileMeta.Stat(); err != nil {
log.Error(err)
} else if fi.Size() == 0 {
fileMeta.Close()
os.Remove(metaFileName)
}
}
func (this *Server) RepairFileInfoFromFile() {
var (
pathPrefix string
err error
fi os.FileInfo
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("RepairFileInfoFromFile")
log.Error(re)
log.Error(string(buffer))
}
}()
if this.lockMap.IsLock("RepairFileInfoFromFile") {
log.Warn("Lock RepairFileInfoFromFile")
return
}
this.lockMap.LockKey("RepairFileInfoFromFile")
defer this.lockMap.UnLockKey("RepairFileInfoFromFile")
handlefunc := func(file_path string, f os.FileInfo, err error) error {
var (
files []os.FileInfo
fi os.FileInfo
fileInfo FileInfo
sum string
pathMd5 string
)
if f.IsDir() {
files, err = ioutil.ReadDir(file_path)
if err != nil {
return err
}
for _, fi = range files {
if fi.IsDir() || fi.Size() == 0 {
continue
}
file_path = strings.Replace(file_path, "\\", "/", -1)
if DOCKER_DIR != "" {
file_path = strings.Replace(file_path, DOCKER_DIR, "", 1)
}
if pathPrefix != "" {
file_path = strings.Replace(file_path, pathPrefix, STORE_DIR_NAME, 1)
}
if strings.HasPrefix(file_path, STORE_DIR_NAME+"/"+LARGE_DIR_NAME) {
log.Info(fmt.Sprintf("ignore small file file %s", file_path+"/"+fi.Name()))
continue
}
pathMd5 = this.util.MD5(file_path + "/" + fi.Name())
//if finfo, _ := this.GetFileInfoFromLevelDB(pathMd5); finfo != nil && finfo.Md5 != "" {
// log.Info(fmt.Sprintf("exist ignore file %s", file_path+"/"+fi.Name()))
// continue
//}
//sum, err = this.util.GetFileSumByName(file_path+"/"+fi.Name(), Config().FileSumArithmetic)
sum = pathMd5
if err != nil {
log.Error(err)
continue
}
fileInfo = FileInfo{
Size: fi.Size(),
Name: fi.Name(),
Path: file_path,
Md5: sum,
TimeStamp: fi.ModTime().Unix(),
Peers: []string{this.host},
OffSet: -2,
}
//log.Info(fileInfo)
log.Info(file_path, "/", fi.Name())
this.AppendToQueue(&fileInfo)
//this.postFileToPeer(&fileInfo)
this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb)
//this.SaveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME)
}
}
return nil
}
pathname := STORE_DIR
pathPrefix, err = os.Readlink(pathname)
if err == nil {
//link
pathname = pathPrefix
if strings.HasSuffix(pathPrefix, "/") {
//bugfix fullpath
pathPrefix = pathPrefix[0 : len(pathPrefix)-1]
}
}
fi, err = os.Stat(pathname)
if err != nil {
log.Error(err)
return
}
if fi.IsDir() {
filepath.Walk(pathname, handlefunc)
}
log.Info("RepairFileInfoFromFile is finish.")
}
func (this *Server) RepairStatByDate(date string) StatDateFileInfo {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("RepairStatByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
fileInfo FileInfo
fileCount int64
fileSize int64
stat StatDateFileInfo
)
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
defer iter.Release()
for iter.Next() {
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
fileCount = fileCount + 1
fileSize = fileSize + fileInfo.Size
}
this.statMap.Put(date+"_"+CONST_STAT_FILE_COUNT_KEY, fileCount)
this.statMap.Put(date+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileSize)
this.SaveStat()
stat.Date = date
stat.FileCount = fileCount
stat.TotalSize = fileSize
return stat
}
func (this *Server) GetFilePathByInfo(fileInfo *FileInfo, withDocker bool) string {
var (
fn string
)
fn = fileInfo.Name
if fileInfo.ReName != "" {
fn = fileInfo.ReName
}
if withDocker {
return DOCKER_DIR + fileInfo.Path + "/" + fn
}
return fileInfo.Path + "/" + fn
}
func (this *Server) CheckFileExistByInfo(md5s string, fileInfo *FileInfo) bool {
var (
err error
fullpath string
fi os.FileInfo
info *FileInfo
)
if fileInfo == nil {
return false
}
if fileInfo.OffSet >= 0 {
//small file
if info, err = this.GetFileInfoFromLevelDB(fileInfo.Md5); err == nil && info.Md5 == fileInfo.Md5 {
return true
} else {
return false
}
}
fullpath = this.GetFilePathByInfo(fileInfo, true)
if fi, err = os.Stat(fullpath); err != nil {
return false
}
if fi.Size() == fileInfo.Size {
return true
} else {
return false
}
}
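// ParseSmallFile (below) expects a merged small-file name of the form
// "blockName,offset,size[,ext]" (for example "153,4096,20480,.jpg", the format
// produced by SaveSmallFile) and returns the block name, offset and length.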
func (this *Server) ParseSmallFile(filename string) (string, int64, int, error) {
var (
err error
offset int64
length int
)
err = errors.New("unvalid small file")
if len(filename) < 3 {
return filename, -1, -1, err
}
if strings.Contains(filename, "/") {
filename = filename[strings.LastIndex(filename, "/")+1:]
}
pos := strings.Split(filename, ",")
if len(pos) < 3 {
return filename, -1, -1, err
}
offset, err = strconv.ParseInt(pos[1], 10, 64)
if err != nil {
return filename, -1, -1, err
}
if length, err = strconv.Atoi(pos[2]); err != nil {
return filename, offset, -1, err
}
if length > CONST_SMALL_FILE_SIZE || offset < 0 {
err = errors.New("invalid filesize or offset")
return filename, -1, -1, err
}
return pos[0], offset, length, nil
}
func (this *Server) DownloadFromPeer(peer string, fileInfo *FileInfo) {
var (
err error
filename string
fpath string
fpathTmp string
fi os.FileInfo
sum string
data []byte
downloadUrl string
)
if Config().ReadOnly {
log.Warn("ReadOnly", fileInfo)
return
}
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
if Config().EnableDistinctFile && this.CheckFileExistByInfo(fileInfo.Md5, fileInfo) {
log.Info("DownloadFromPeer file Exist")
return
}
if !Config().EnableDistinctFile && this.util.FileExists(this.GetFilePathByInfo(fileInfo, true)) {
if fi, err = os.Stat(this.GetFilePathByInfo(fileInfo, true)); err == nil {
if fi.ModTime().Unix() > fileInfo.TimeStamp {
log.Info(fmt.Sprintf("ignore file sync path:%s", this.GetFilePathByInfo(fileInfo, false)))
fileInfo.TimeStamp = fi.ModTime().Unix()
this.postFileToPeer(fileInfo) // keep newer
return
}
os.Remove(this.GetFilePathByInfo(fileInfo, true))
}
}
if _, err = os.Stat(fileInfo.Path); err != nil {
os.MkdirAll(DOCKER_DIR+fileInfo.Path, 0775)
}
//fmt.Println("downloadFromPeer",fileInfo)
p := strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1)
//filename=this.util.UrlEncode(filename)
downloadUrl = peer + "/" + Config().Group + "/" + p + "/" + filename
log.Info("DownloadFromPeer: ", downloadUrl)
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
fpathTmp = DOCKER_DIR + fileInfo.Path + "/" + fmt.Sprintf("%s_%s", "tmp_", filename)
timeout := fileInfo.Size/1024/1024/1 + 30
if Config().SyncTimeout > 0 {
timeout = Config().SyncTimeout
}
this.lockMap.LockKey(fpath)
defer this.lockMap.UnLockKey(fpath)
download_key := fmt.Sprintf("downloading_%d_%s", time.Now().Unix(), fpath)
this.ldb.Put([]byte(download_key), []byte(""), nil)
defer func() {
this.ldb.Delete([]byte(download_key), nil)
}()
if fileInfo.OffSet == -2 {
//migrate file
if fi, err = os.Stat(fpath); err == nil && fi.Size() == fileInfo.Size {
//prevent double download
this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb)
//log.Info(fmt.Sprintf("file '%s' has download", fpath))
return
}
req := httplib.Get(downloadUrl)
req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout))
if err = req.ToFile(fpathTmp); err != nil {
os.Remove(fpathTmp)
log.Error(err)
return
}
if os.Rename(fpathTmp, fpath) == nil {
//this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb)
}
return
}
req := httplib.Get(downloadUrl)
req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout))
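// Small files (OffSet >= 0) live inside a merged block that stores a leading
// one-byte "1" marker per entry (see SaveSmallFile), so the downloaded bytes
// are re-prefixed with '1' before being written back at the recorded offset.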
if fileInfo.OffSet >= 0 {
//small file download
data, err = req.Bytes()
if err != nil {
log.Error(err)
return
}
data2 := make([]byte, len(data)+1)
data2[0] = '1'
copy(data2[1:], data)
data = data2
if int64(len(data)) != fileInfo.Size {
log.Warn("file size is error")
return
}
fpath = strings.Split(fpath, ",")[0]
err = this.util.WriteFileByOffSet(fpath, fileInfo.OffSet, data)
if err != nil {
log.Warn(err)
return
}
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
return
}
if err = req.ToFile(fpathTmp); err != nil {
os.Remove(fpathTmp)
log.Error(err)
return
}
if fi, err = os.Stat(fpathTmp); err != nil {
os.Remove(fpathTmp)
return
}
_ = sum
//if Config().EnableDistinctFile {
// //DistinctFile
// if sum, err = this.util.GetFileSumByName(fpathTmp, Config().FileSumArithmetic); err != nil {
// log.Error(err)
// return
// }
//} else {
// //DistinctFile By path
// sum = this.util.MD5(this.GetFilePathByInfo(fileInfo, false))
//}
if fi.Size() != fileInfo.Size { // maybe has bug remove || sum != fileInfo.Md5
log.Error("file sum check error")
os.Remove(fpathTmp)
return
}
if os.Rename(fpathTmp, fpath) == nil {
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
}
}
func (this *Server) CrossOrigin(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, X-Requested-By, If-Modified-Since, X-File-Name, X-File-Type, Cache-Control, Origin")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PUT, DELETE")
w.Header().Set("Access-Control-Expose-Headers", "Authorization")
//https://blog.csdn.net/yanzisu_congcong/article/details/80552155
}
func (this *Server) SetDownloadHeader(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment")
}
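// CheckAuth (below) forwards the caller's form values and headers to the
// configured auth_url and treats either a bare "ok" body or a JSON body whose
// "data" field equals "ok" as successful authentication.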
func (this *Server) CheckAuth(w http.ResponseWriter, r *http.Request) bool {
var (
err error
req *httplib.BeegoHTTPRequest
result string
jsonResult JsonResult
)
if err = r.ParseForm(); err != nil {
log.Error(err)
return false
}
req = httplib.Post(Config().AuthUrl)
req.SetTimeout(time.Second*10, time.Second*10)
for k := range r.Form {
req.Param(k, r.FormValue(k))
}
for k, v := range r.Header {
req.Header(k, v[0])
}
result, err = req.String()
result = strings.TrimSpace(result)
if strings.HasPrefix(result, "{") && strings.HasSuffix(result, "}") {
if err = json.Unmarshal([]byte(result), &jsonResult); err != nil {
log.Error(err)
return false
}
if jsonResult.Data != "ok" {
log.Warn(result)
return false
}
} else {
if result != "ok" {
log.Warn(result)
return false
}
}
return true
}
func (this *Server) NotPermit(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(401)
}
func (this *Server) GetFilePathFromRequest(w http.ResponseWriter, r *http.Request) (string, string) {
var (
err error
fullpath string
smallPath string
)
fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)]
fullpath = strings.Split(fullpath, "?")[0] // just path
fullpath = DOCKER_DIR + STORE_DIR_NAME + "/" + fullpath
if strings.HasPrefix(r.RequestURI, "/"+Config().Group+"/"+LARGE_DIR_NAME+"/") {
smallPath = fullpath //notice order
fullpath = strings.Split(fullpath, ",")[0]
}
if fullpath, err = url.PathUnescape(fullpath); err != nil {
log.Error(err)
}
return fullpath, smallPath
}
func (this *Server) CheckDownloadAuth(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
maxTimestamp int64
minTimestamp int64
ts int64
token string
timestamp string
fullpath string
smallPath string
pathMd5 string
fileInfo *FileInfo
scene string
secret interface{}
code string
ok bool
)
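// Download token scheme, as checked below: the client sends a unix timestamp
// within +/- download_token_expire seconds and token = MD5(fileMd5 + timestamp).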
CheckToken := func(token string, md5sum string, timestamp string) bool {
if this.util.MD5(md5sum+timestamp) != token {
return false
}
return true
}
if Config().EnableDownloadAuth && Config().AuthUrl != "" && !this.IsPeer(r) && !this.CheckAuth(w, r) {
return false, errors.New("auth fail")
}
if Config().DownloadUseToken && !this.IsPeer(r) {
token = r.FormValue("token")
timestamp = r.FormValue("timestamp")
if token == "" || timestamp == "" {
return false, errors.New("unvalid request")
}
maxTimestamp = time.Now().Add(time.Second *
time.Duration(Config().DownloadTokenExpire)).Unix()
minTimestamp = time.Now().Add(-time.Second *
time.Duration(Config().DownloadTokenExpire)).Unix()
if ts, err = strconv.ParseInt(timestamp, 10, 64); err != nil {
return false, errors.New("unvalid timestamp")
}
if ts > maxTimestamp || ts < minTimestamp {
return false, errors.New("timestamp expire")
}
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
if smallPath != "" {
pathMd5 = this.util.MD5(smallPath)
} else {
pathMd5 = this.util.MD5(fullpath)
}
if fileInfo, err = this.GetFileInfoFromLevelDB(pathMd5); err != nil {
// TODO
} else {
ok := CheckToken(token, fileInfo.Md5, timestamp)
if !ok {
return ok, errors.New("unvalid token")
}
return ok, nil
}
}
if Config().EnableGoogleAuth && !this.IsPeer(r) {
fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)]
fullpath = strings.Split(fullpath, "?")[0] // just path
scene = strings.Split(fullpath, "/")[0]
code = r.FormValue("code")
if secret, ok = this.sceneMap.GetValue(scene); ok {
if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) {
return false, errors.New("invalid google code")
}
}
}
return true, nil
}
func (this *Server) GetSmallFileByURI(w http.ResponseWriter, r *http.Request) ([]byte, bool, error) {
var (
err error
data []byte
offset int64
length int
fullpath string
info os.FileInfo
)
fullpath, _ = this.GetFilePathFromRequest(w, r)
if _, offset, length, err = this.ParseSmallFile(r.RequestURI); err != nil {
return nil, false, err
}
if info, err = os.Stat(fullpath); err != nil {
return nil, false, err
}
if info.Size() < offset+int64(length) {
return nil, true, errors.New("noFound")
} else {
data, err = this.util.ReadFileByOffSet(fullpath, offset, length)
if err != nil {
return nil, false, err
}
return data, false, err
}
}
func (this *Server) DownloadSmallFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
data []byte
isDownload bool
imgWidth int
imgHeight int
width string
height string
notFound bool
)
r.ParseForm()
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
width = r.FormValue("width")
height = r.FormValue("height")
if width != "" {
imgWidth, err = strconv.Atoi(width)
if err != nil {
log.Error(err)
}
}
if height != "" {
imgHeight, err = strconv.Atoi(height)
if err != nil {
log.Error(err)
}
}
data, notFound, err = this.GetSmallFileByURI(w, r)
_ = notFound
if data != nil && string(data[0]) == "1" {
if isDownload {
this.SetDownloadHeader(w, r)
}
if imgWidth != 0 || imgHeight != 0 {
this.ResizeImageByBytes(w, data[1:], uint(imgWidth), uint(imgHeight))
return true, nil
}
w.Write(data[1:])
return true, nil
}
return false, errors.New("not found")
}
func (this *Server) DownloadNormalFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) {
var (
err error
isDownload bool
imgWidth int
imgHeight int
width string
height string
)
r.ParseForm()
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
width = r.FormValue("width")
height = r.FormValue("height")
if width != "" {
imgWidth, err = strconv.Atoi(width)
if err != nil {
log.Error(err)
}
}
if height != "" {
imgHeight, err = strconv.Atoi(height)
if err != nil {
log.Error(err)
}
}
if isDownload {
this.SetDownloadHeader(w, r)
}
fullpath, _ := this.GetFilePathFromRequest(w, r)
if imgWidth != 0 || imgHeight != 0 {
this.ResizeImage(w, fullpath, uint(imgWidth), uint(imgHeight))
return true, nil
}
staticHandler.ServeHTTP(w, r)
return true, nil
}
func (this *Server) DownloadNotFound(w http.ResponseWriter, r *http.Request) {
var (
err error
fullpath string
smallPath string
isDownload bool
pathMd5 string
peer string
fileInfo *FileInfo
)
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
isDownload = true
if r.FormValue("download") == "" {
isDownload = Config().DefaultDownload
}
if r.FormValue("download") == "0" {
isDownload = false
}
if smallPath != "" {
pathMd5 = this.util.MD5(smallPath)
} else {
pathMd5 = this.util.MD5(fullpath)
}
for _, peer = range Config().Peers {
if fileInfo, err = this.checkPeerFileExist(peer, pathMd5, fullpath); err != nil {
log.Error(err)
continue
}
if fileInfo.Md5 != "" {
go this.DownloadFromPeer(peer, fileInfo)
//http.Redirect(w, r, peer+r.RequestURI, 302)
if isDownload {
this.SetDownloadHeader(w, r)
}
this.DownloadFileToResponse(peer+r.RequestURI, w, r)
return
}
}
w.WriteHeader(404)
return
}
func (this *Server) Download(w http.ResponseWriter, r *http.Request) {
var (
err error
ok bool
fullpath string
smallPath string
fi os.FileInfo
)
if ok, err = this.CheckDownloadAuth(w, r); !ok {
log.Error(err)
this.NotPermit(w, r)
return
}
if Config().EnableCrossOrigin {
this.CrossOrigin(w, r)
}
fullpath, smallPath = this.GetFilePathFromRequest(w, r)
if smallPath == "" {
if fi, err = os.Stat(fullpath); err != nil {
this.DownloadNotFound(w, r)
return
}
if !Config().ShowDir && fi.IsDir() {
w.Write([]byte("list dir deny"))
return
}
//staticHandler.ServeHTTP(w, r)
this.DownloadNormalFileByURI(w, r)
return
}
if smallPath != "" {
if ok, err = this.DownloadSmallFileByURI(w, r); !ok {
this.DownloadNotFound(w, r)
return
}
return
}
}
func (this *Server) DownloadFileToResponse(url string, w http.ResponseWriter, r *http.Request) {
var (
err error
req *httplib.BeegoHTTPRequest
resp *http.Response
)
req = httplib.Get(url)
req.SetTimeout(time.Second*20, time.Second*600)
resp, err = req.DoRequest()
if err != nil {
log.Error(err)
return
}
defer resp.Body.Close()
_, err = io.Copy(w, resp.Body)
if err != nil {
log.Error(err)
}
}
func (this *Server) ResizeImageByBytes(w http.ResponseWriter, data []byte, width, height uint) {
var (
img image.Image
err error
imgType string
)
reader := bytes.NewReader(data)
img, imgType, err = image.Decode(reader)
if err != nil {
log.Error(err)
return
}
img = resize.Resize(width, height, img, resize.Lanczos3)
if imgType == "jpg" || imgType == "jpeg" {
jpeg.Encode(w, img, nil)
} else if imgType == "png" {
png.Encode(w, img)
} else {
w.Write(data)
}
}
func (this *Server) ResizeImage(w http.ResponseWriter, fullpath string, width, height uint) {
var (
img image.Image
err error
imgType string
file *os.File
)
file, err = os.Open(fullpath)
if err != nil {
log.Error(err)
return
}
img, imgType, err = image.Decode(file)
if err != nil {
log.Error(err)
return
}
file.Close()
img = resize.Resize(width, height, img, resize.Lanczos3)
if imgType == "jpg" || imgType == "jpeg" {
jpeg.Encode(w, img, nil)
} else if imgType == "png" {
png.Encode(w, img)
} else {
file.Seek(0, 0)
io.Copy(w, file)
}
}
func (this *Server) GetServerURI(r *http.Request) string {
return fmt.Sprintf("http://%s/", r.Host)
}
func (this *Server) CheckFileAndSendToPeer(date string, filename string, isForceUpload bool) {
var (
md5set mapset.Set
err error
md5s []interface{}
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CheckFileAndSendToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
if md5set, err = this.GetMd5sByDate(date, filename); err != nil {
log.Error(err)
return
}
md5s = md5set.ToSlice()
for _, md := range md5s {
if md == nil {
continue
}
if fileInfo, _ := this.GetFileInfoFromLevelDB(md.(string)); fileInfo != nil && fileInfo.Md5 != "" {
if isForceUpload {
fileInfo.Peers = []string{}
}
if len(fileInfo.Peers) > len(Config().Peers) {
continue
}
if !this.util.Contains(this.host, fileInfo.Peers) {
fileInfo.Peers = append(fileInfo.Peers, this.host) // peer is null
}
if filename == CONST_Md5_QUEUE_FILE_NAME {
this.AppendToDownloadQueue(fileInfo)
} else {
this.AppendToQueue(fileInfo)
}
}
}
}
func (this *Server) postFileToPeer(fileInfo *FileInfo) {
var (
err error
peer string
filename string
info *FileInfo
postURL string
result string
fi os.FileInfo
i int
data []byte
fpath string
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("postFileToPeer")
log.Error(re)
log.Error(string(buffer))
}
}()
//fmt.Println("postFile",fileInfo)
for i, peer = range Config().Peers {
_ = i
if fileInfo.Peers == nil {
fileInfo.Peers = []string{}
}
if this.util.Contains(peer, fileInfo.Peers) {
continue
}
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
if fileInfo.OffSet != -1 {
filename = strings.Split(fileInfo.ReName, ",")[0]
}
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
if !this.util.FileExists(fpath) {
log.Warn(fmt.Sprintf("file '%s' not found", fpath))
continue
} else {
if fileInfo.Size == 0 {
if fi, err = os.Stat(fpath); err != nil {
log.Error(err)
} else {
fileInfo.Size = fi.Size()
}
}
}
if fileInfo.OffSet != -2 && Config().EnableDistinctFile {
//not migrate file should check or update file
// where not EnableDistinctFile should check
if info, err = this.checkPeerFileExist(peer, fileInfo.Md5, ""); info.Md5 != "" {
fileInfo.Peers = append(fileInfo.Peers, peer)
if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error(err)
}
continue
}
}
postURL = fmt.Sprintf("%s%s", peer, this.getRequestURI("syncfile_info"))
b := httplib.Post(postURL)
b.SetTimeout(time.Second*30, time.Second*30)
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
return
}
b.Param("fileInfo", string(data))
result, err = b.String()
if !strings.HasPrefix(result, "http://") || err != nil {
this.SaveFileMd5Log(fileInfo, CONST_Md5_ERROR_FILE_NAME)
}
if strings.HasPrefix(result, "http://") {
log.Info(result)
if !this.util.Contains(peer, fileInfo.Peers) {
fileInfo.Peers = append(fileInfo.Peers, peer)
if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error(err)
}
}
}
if err != nil {
log.Error(err)
}
}
}
func (this *Server) SaveFileMd5Log(fileInfo *FileInfo, filename string) {
var (
info FileInfo
)
for len(this.queueFileLog)+len(this.queueFileLog)/10 > CONST_QUEUE_SIZE {
time.Sleep(time.Second * 1)
}
info = *fileInfo
this.queueFileLog <- &FileLog{FileInfo: &info, FileName: filename}
}
func (this *Server) saveFileMd5Log(fileInfo *FileInfo, filename string) {
var (
err error
outname string
logDate string
ok bool
fullpath string
md5Path string
logKey string
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("saveFileMd5Log")
log.Error(re)
log.Error(string(buffer))
}
}()
if fileInfo == nil || fileInfo.Md5 == "" || filename == "" {
log.Warn("saveFileMd5Log", fileInfo, filename)
return
}
logDate = this.util.GetDayFromTimeStamp(fileInfo.TimeStamp)
outname = fileInfo.Name
if fileInfo.ReName != "" {
outname = fileInfo.ReName
}
fullpath = fileInfo.Path + "/" + outname
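// logDB keys follow the layout "YYYYMMDD_<logfile>_<md5>", which lets other
// routines scan a single day's records with a key-prefix iterator
// (see GetMd5sByDate and BackUpMetaDataByDate).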
logKey = fmt.Sprintf("%s_%s_%s", logDate, filename, fileInfo.Md5)
if filename == CONST_FILE_Md5_FILE_NAME {
//this.searchMap.Put(fileInfo.Md5, fileInfo.Name)
if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); !ok {
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, 1)
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileInfo.Size)
this.SaveStat()
}
if _, err = this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB); err != nil {
log.Error(err)
}
if _, err := this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil {
log.Error("saveToLevelDB", err, fileInfo)
}
if _, err = this.SaveFileInfoToLevelDB(this.util.MD5(fullpath), fileInfo, this.ldb); err != nil {
log.Error("saveToLevelDB", err, fileInfo)
}
return
}
if filename == CONST_REMOME_Md5_FILE_NAME {
//this.searchMap.Remove(fileInfo.Md5)
if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); ok {
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, -1)
this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, -fileInfo.Size)
this.SaveStat()
}
this.RemoveKeyFromLevelDB(logKey, this.logDB)
md5Path = this.util.MD5(fullpath)
if err := this.RemoveKeyFromLevelDB(fileInfo.Md5, this.ldb); err != nil {
log.Error("RemoveKeyFromLevelDB", err, fileInfo)
}
if err = this.RemoveKeyFromLevelDB(md5Path, this.ldb); err != nil {
log.Error("RemoveKeyFromLevelDB", err, fileInfo)
}
// remove files.md5 for stat info(repair from logDB)
logKey = fmt.Sprintf("%s_%s_%s", logDate, CONST_FILE_Md5_FILE_NAME, fileInfo.Md5)
this.RemoveKeyFromLevelDB(logKey, this.logDB)
return
}
this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB)
}
func (this *Server) checkPeerFileExist(peer string, md5sum string, fpath string) (*FileInfo, error) {
var (
err error
fileInfo FileInfo
)
req := httplib.Post(fmt.Sprintf("%s%s?md5=%s", peer, this.getRequestURI("check_file_exist"), md5sum))
req.Param("path", fpath)
req.Param("md5", md5sum)
req.SetTimeout(time.Second*5, time.Second*10)
if err = req.ToJSON(&fileInfo); err != nil {
return &FileInfo{}, err
}
if fileInfo.Md5 == "" {
return &fileInfo, errors.New("not found")
}
return &fileInfo, nil
}
func (this *Server) CheckFileExist(w http.ResponseWriter, r *http.Request) {
var (
data []byte
err error
fileInfo *FileInfo
fpath string
fi os.FileInfo
)
r.ParseForm()
md5sum := ""
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); fileInfo != nil {
if fileInfo.OffSet != -1 {
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
}
w.Write(data)
return
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name
if fileInfo.ReName != "" {
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName
}
if this.util.IsExist(fpath) {
if data, err = json.Marshal(fileInfo); err == nil {
w.Write(data)
return
} else {
log.Error(err)
}
} else {
if fileInfo.OffSet == -1 {
this.RemoveKeyFromLevelDB(md5sum, this.ldb) // when file delete,delete from leveldb
}
}
} else {
if fpath != "" {
fi, err = os.Stat(fpath)
if err == nil {
sum := this.util.MD5(fpath)
//if Config().EnableDistinctFile {
// sum, err = this.util.GetFileSumByName(fpath, Config().FileSumArithmetic)
// if err != nil {
// log.Error(err)
// }
//}
fileInfo = &FileInfo{
Path: path.Dir(fpath),
Name: path.Base(fpath),
Size: fi.Size(),
Md5: sum,
Peers: []string{Config().Host},
OffSet: -1, //very important
TimeStamp: fi.ModTime().Unix(),
}
data, err = json.Marshal(fileInfo)
w.Write(data)
return
}
}
}
data, _ = json.Marshal(FileInfo{})
w.Write(data)
return
}
func (this *Server) CheckFilesExist(w http.ResponseWriter, r *http.Request) {
var (
data []byte
err error
fileInfo *FileInfo
fileInfos []*FileInfo
fpath string
result JsonResult
)
r.ParseForm()
md5sum := ""
md5sum = r.FormValue("md5s")
md5s := strings.Split(md5sum, ",")
for _, m := range md5s {
if fileInfo, err = this.GetFileInfoFromLevelDB(m); fileInfo != nil {
if fileInfo.OffSet != -1 {
if data, err = json.Marshal(fileInfo); err != nil {
log.Error(err)
}
//w.Write(data)
//return
fileInfos = append(fileInfos, fileInfo)
continue
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name
if fileInfo.ReName != "" {
fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName
}
if this.util.IsExist(fpath) {
if data, err = json.Marshal(fileInfo); err == nil {
fileInfos = append(fileInfos, fileInfo)
//w.Write(data)
//return
continue
} else {
log.Error(err)
}
} else {
if fileInfo.OffSet == -1 {
this.RemoveKeyFromLevelDB(md5sum, this.ldb) // when file delete,delete from leveldb
}
}
}
}
result.Data = fileInfos
data, _ = json.Marshal(result)
w.Write(data)
return
}
func (this *Server) Sync(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
r.ParseForm()
result.Status = "fail"
if !this.IsPeer(r) {
result.Message = "client must be in cluster"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date := ""
force := ""
inner := ""
isForceUpload := false
force = r.FormValue("force")
date = r.FormValue("date")
inner = r.FormValue("inner")
if force == "1" {
isForceUpload = true
}
if inner != "1" {
for _, peer := range Config().Peers {
req := httplib.Post(peer + this.getRequestURI("sync"))
req.Param("force", force)
req.Param("inner", "1")
req.Param("date", date)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
if date == "" {
result.Message = "require paramete date &force , ?date=20181230"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date = strings.Replace(date, ".", "", -1)
if isForceUpload {
go this.CheckFileAndSendToPeer(date, CONST_FILE_Md5_FILE_NAME, isForceUpload)
} else {
go this.CheckFileAndSendToPeer(date, CONST_Md5_ERROR_FILE_NAME, isForceUpload)
}
result.Status = "ok"
result.Message = "job is running"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) IsExistFromLevelDB(key string, db *leveldb.DB) (bool, error) {
return db.Has([]byte(key), nil)
}
func (this *Server) GetFileInfoFromLevelDB(key string) (*FileInfo, error) {
var (
err error
data []byte
fileInfo FileInfo
)
if data, err = this.ldb.Get([]byte(key), nil); err != nil {
return nil, err
}
if err = json.Unmarshal(data, &fileInfo); err != nil {
return nil, err
}
return &fileInfo, nil
}
func (this *Server) SaveStat() {
SaveStatFunc := func() {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("SaveStatFunc")
log.Error(re)
log.Error(string(buffer))
}
}()
stat := this.statMap.Get()
if v, ok := stat[CONST_STAT_FILE_COUNT_KEY]; ok {
switch v.(type) {
case int64, int32, int, float64, float32:
if v.(int64) >= 0 {
if data, err := json.Marshal(stat); err != nil {
log.Error(err)
} else {
this.util.WriteBinFile(CONST_STAT_FILE_NAME, data)
}
}
}
}
}
SaveStatFunc()
}
func (this *Server) RemoveKeyFromLevelDB(key string, db *leveldb.DB) error {
var (
err error
)
err = db.Delete([]byte(key), nil)
return err
}
func (this *Server) SaveFileInfoToLevelDB(key string, fileInfo *FileInfo, db *leveldb.DB) (*FileInfo, error) {
var (
err error
data []byte
)
if fileInfo == nil || db == nil {
return nil, errors.New("fileInfo is null or db is null")
}
if data, err = json.Marshal(fileInfo); err != nil {
return fileInfo, err
}
if err = db.Put([]byte(key), data, nil); err != nil {
return fileInfo, err
}
if db == this.ldb { //search slow ,write fast, double write logDB
logDate := this.util.GetDayFromTimeStamp(fileInfo.TimeStamp)
logKey := fmt.Sprintf("%s_%s_%s", logDate, CONST_FILE_Md5_FILE_NAME, fileInfo.Md5)
this.logDB.Put([]byte(logKey), data, nil)
}
return fileInfo, nil
}
func (this *Server) IsPeer(r *http.Request) bool {
var (
ip string
peer string
bflag bool
)
//return true
ip = this.util.GetClientIp(r)
if ip == "127.0.0.1" || ip == this.util.GetPulicIP() {
return true
}
if this.util.Contains(ip, Config().AdminIps) {
return true
}
ip = "http://" + ip
bflag = false
for _, peer = range Config().Peers {
if strings.HasPrefix(peer, ip) {
bflag = true
break
}
}
return bflag
}
func (this *Server) ReceiveMd5s(w http.ResponseWriter, r *http.Request) {
var (
err error
md5str string
fileInfo *FileInfo
md5s []string
)
if !this.IsPeer(r) {
log.Warn(fmt.Sprintf("ReceiveMd5s %s", this.util.GetClientIp(r)))
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
r.ParseForm()
md5str = r.FormValue("md5s")
md5s = strings.Split(md5str, ",")
AppendFunc := func(md5s []string) {
for _, m := range md5s {
if m != "" {
if fileInfo, err = this.GetFileInfoFromLevelDB(m); err != nil {
log.Error(err)
continue
}
this.AppendToQueue(fileInfo)
}
}
}
go AppendFunc(md5s)
}
func (this *Server) GetClusterNotPermitMessage(r *http.Request) string {
var (
message string
)
message = fmt.Sprintf(CONST_MESSAGE_CLUSTER_IP, this.util.GetClientIp(r))
return message
}
func (this *Server) GetMd5sForWeb(w http.ResponseWriter, r *http.Request) {
var (
date string
err error
result mapset.Set
lines []string
md5s []interface{}
)
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
date = r.FormValue("date")
if result, err = this.GetMd5sByDate(date, CONST_FILE_Md5_FILE_NAME); err != nil {
log.Error(err)
return
}
md5s = result.ToSlice()
for _, line := range md5s {
if line != nil && line != "" {
lines = append(lines, line.(string))
}
}
w.Write([]byte(strings.Join(lines, ",")))
}
func (this *Server) GetMd5File(w http.ResponseWriter, r *http.Request) {
var (
date string
fpath string
data []byte
err error
)
if !this.IsPeer(r) {
return
}
r.ParseForm()
date = r.FormValue("date")
fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
if !this.util.FileExists(fpath) {
w.WriteHeader(404)
return
}
if data, err = ioutil.ReadFile(fpath); err != nil {
w.WriteHeader(500)
return
}
w.Write(data)
}
func (this *Server) GetMd5sMapByDate(date string, filename string) (*goutil.CommonMap, error) {
var (
err error
result *goutil.CommonMap
fpath string
content string
lines []string
line string
cols []string
data []byte
)
result = goutil.NewCommonMap(0)
if filename == "" {
fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME
} else {
fpath = DATA_DIR + "/" + date + "/" + filename
}
if !this.util.FileExists(fpath) {
return result, errors.New(fmt.Sprintf("fpath %s not found", fpath))
}
if data, err = ioutil.ReadFile(fpath); err != nil {
return result, err
}
content = string(data)
lines = strings.Split(content, "\n")
for _, line = range lines {
cols = strings.Split(line, "|")
if len(cols) > 2 {
if _, err = strconv.ParseInt(cols[1], 10, 64); err != nil {
continue
}
result.Add(cols[0])
}
}
return result, nil
}
func (this *Server) GetMd5sByDate(date string, filename string) (mapset.Set, error) {
var (
keyPrefix string
md5set mapset.Set
keys []string
)
md5set = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
keys = strings.Split(string(iter.Key()), "_")
if len(keys) >= 3 {
md5set.Add(keys[2])
}
}
iter.Release()
return md5set, nil
}
func (this *Server) SyncFileInfo(w http.ResponseWriter, r *http.Request) {
var (
err error
fileInfo FileInfo
fileInfoStr string
filename string
)
r.ParseForm()
if !this.IsPeer(r) {
return
}
fileInfoStr = r.FormValue("fileInfo")
if err = json.Unmarshal([]byte(fileInfoStr), &fileInfo); err != nil {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
log.Error(err)
return
}
if fileInfo.OffSet == -2 {
// optimize migrate
this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb)
} else {
this.SaveFileMd5Log(&fileInfo, CONST_Md5_QUEUE_FILE_NAME)
}
this.AppendToDownloadQueue(&fileInfo)
filename = fileInfo.Name
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
p := strings.Replace(fileInfo.Path, STORE_DIR+"/", "", 1)
downloadUrl := fmt.Sprintf("http://%s/%s", r.Host, Config().Group+"/"+p+"/"+filename)
log.Info("SyncFileInfo: ", downloadUrl)
w.Write([]byte(downloadUrl))
}
func (this *Server) CheckScene(scene string) (bool, error) {
var (
scenes []string
)
if len(Config().Scenes) == 0 {
return true, nil
}
for _, s := range Config().Scenes {
scenes = append(scenes, strings.Split(s, ":")[0])
}
if !this.util.Contains(scene, scenes) {
return false, errors.New("not valid scene")
}
return true, nil
}
func (this *Server) GetFileInfo(w http.ResponseWriter, r *http.Request) {
var (
fpath string
md5sum string
fileInfo *FileInfo
err error
result JsonResult
)
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
result.Status = "fail"
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
md5sum = r.FormValue("md5")
if fpath != "" {
fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1)
md5sum = this.util.MD5(fpath)
}
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
result.Status = "ok"
result.Data = fileInfo
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
func (this *Server) RemoveFile(w http.ResponseWriter, r *http.Request) {
var (
err error
md5sum string
fileInfo *FileInfo
fpath string
delUrl string
result JsonResult
inner string
name string
)
_ = delUrl
_ = inner
r.ParseForm()
md5sum = r.FormValue("md5")
fpath = r.FormValue("path")
inner = r.FormValue("inner")
result.Status = "fail"
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if Config().AuthUrl != "" && !this.CheckAuth(w, r) {
this.NotPermit(w, r)
return
}
if fpath != "" && md5sum == "" {
fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1)
md5sum = this.util.MD5(fpath)
}
if inner != "1" {
for _, peer := range Config().Peers {
delFile := func(peer string, md5sum string, fileInfo *FileInfo) {
delUrl = fmt.Sprintf("%s%s", peer, this.getRequestURI("delete"))
req := httplib.Post(delUrl)
req.Param("md5", md5sum)
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*10)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
go delFile(peer, md5sum, fileInfo)
}
}
if len(md5sum) < 32 {
result.Message = "md5 unvalid"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if fileInfo.OffSet >= 0 {
result.Message = "small file delete not support"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
name = fileInfo.Name
if fileInfo.ReName != "" {
name = fileInfo.ReName
}
fpath = fileInfo.Path + "/" + name
if fileInfo.Path != "" && this.util.FileExists(DOCKER_DIR+fpath) {
this.SaveFileMd5Log(fileInfo, CONST_REMOME_Md5_FILE_NAME)
if err = os.Remove(DOCKER_DIR + fpath); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
} else {
result.Message = "remove success"
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
}
result.Message = "fail remove"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) getRequestURI(action string) string {
var (
uri string
)
if Config().SupportGroupManage {
uri = "/" + Config().Group + "/" + action
} else {
uri = "/" + action
}
return uri
}
func (this *Server) BuildFileResult(fileInfo *FileInfo, r *http.Request) FileResult {
var (
outname string
fileResult FileResult
p string
downloadUrl string
domain string
)
if Config().DownloadDomain != "" {
domain = fmt.Sprintf("http://%s", Config().DownloadDomain)
} else {
domain = fmt.Sprintf("http://%s", r.Host)
}
outname = fileInfo.Name
if fileInfo.ReName != "" {
outname = fileInfo.ReName
}
p = strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1)
p = Config().Group + "/" + p + "/" + outname
downloadUrl = fmt.Sprintf("http://%s/%s", r.Host, p)
if Config().DownloadDomain != "" {
downloadUrl = fmt.Sprintf("http://%s/%s", Config().DownloadDomain, p)
}
fileResult.Url = downloadUrl
fileResult.Md5 = fileInfo.Md5
fileResult.Path = "/" + p
fileResult.Domain = domain
fileResult.Scene = fileInfo.Scene
fileResult.Size = fileInfo.Size
fileResult.ModTime = fileInfo.TimeStamp
// Just for Compatibility
fileResult.Src = fileResult.Path
fileResult.Scenes = fileInfo.Scene
return fileResult
}
func (this *Server) SaveUploadFile(file multipart.File, header *multipart.FileHeader, fileInfo *FileInfo, r *http.Request) (*FileInfo, error) {
var (
err error
outFile *os.File
folder string
fi os.FileInfo
)
defer file.Close()
_, fileInfo.Name = filepath.Split(header.Filename)
// bugfix for ie upload file contain fullpath
if len(Config().Extensions) > 0 && !this.util.Contains(path.Ext(fileInfo.Name), Config().Extensions) {
return fileInfo, errors.New("(error)file extension mismatch")
}
if Config().RenameFile {
fileInfo.ReName = this.util.MD5(this.util.GetUUID()) + path.Ext(fileInfo.Name)
}
folder = time.Now().Format("20060102/15/04")
if Config().PeerId != "" {
folder = fmt.Sprintf(folder+"/%s", Config().PeerId)
}
if fileInfo.Scene != "" {
folder = fmt.Sprintf(STORE_DIR+"/%s/%s", fileInfo.Scene, folder)
} else {
folder = fmt.Sprintf(STORE_DIR+"/%s", folder)
}
if fileInfo.Path != "" {
if strings.HasPrefix(fileInfo.Path, STORE_DIR) {
folder = fileInfo.Path
} else {
folder = STORE_DIR + "/" + fileInfo.Path
}
}
if !this.util.FileExists(folder) {
os.MkdirAll(folder, 0775)
}
outPath := fmt.Sprintf(folder+"/%s", fileInfo.Name)
if Config().RenameFile {
outPath = fmt.Sprintf(folder+"/%s", fileInfo.ReName)
}
if this.util.FileExists(outPath) && Config().EnableDistinctFile {
for i := 0; i < 10000; i++ {
outPath = fmt.Sprintf(folder+"/%d_%s", i, header.Filename)
fileInfo.Name = fmt.Sprintf("%d_%s", i, header.Filename)
if !this.util.FileExists(outPath) {
break
}
}
}
log.Info(fmt.Sprintf("upload: %s", outPath))
if outFile, err = os.Create(outPath); err != nil {
return fileInfo, err
}
defer outFile.Close()
if err != nil {
log.Error(err)
return fileInfo, errors.New("(error)fail," + err.Error())
}
if _, err = io.Copy(outFile, file); err != nil {
log.Error(err)
return fileInfo, errors.New("(error)fail," + err.Error())
}
if fi, err = outFile.Stat(); err != nil {
log.Error(err)
return fileInfo, err
}
fileInfo.Size = fi.Size()
if fi.Size() != header.Size {
return fileInfo, errors.New("(error)file incomplete")
}
v := this.util.GetFileSum(outFile, Config().FileSumArithmetic)
fileInfo.Md5 = v
//fileInfo.Path = folder //strings.Replace( folder,DOCKER_DIR,"",1)
fileInfo.Path = strings.Replace(folder, DOCKER_DIR, "", 1)
fileInfo.Peers = append(fileInfo.Peers, this.host)
//fmt.Println("upload",fileInfo)
return fileInfo, nil
}
func (this *Server) Upload(w http.ResponseWriter, r *http.Request) {
var (
err error
ok bool
// pathname string
md5sum string
fileInfo FileInfo
uploadFile multipart.File
uploadHeader *multipart.FileHeader
scene string
output string
fileResult FileResult
data []byte
code string
secret interface{}
)
output = r.FormValue("output")
if Config().EnableCrossOrigin {
this.CrossOrigin(w, r)
}
if Config().AuthUrl != "" {
if !this.CheckAuth(w, r) {
log.Warn("auth fail", r.Form)
this.NotPermit(w, r)
w.Write([]byte("auth fail"))
return
}
}
if r.Method == "POST" {
md5sum = r.FormValue("md5")
output = r.FormValue("output")
if Config().ReadOnly {
w.Write([]byte("(error) readonly"))
return
}
if Config().EnableCustomPath {
fileInfo.Path = r.FormValue("path")
fileInfo.Path = strings.Trim(fileInfo.Path, "/")
}
scene = r.FormValue("scene")
code = r.FormValue("code")
if scene == "" {
//Just for Compatibility
scene = r.FormValue("scenes")
}
if Config().EnableGoogleAuth && scene != "" {
if secret, ok = this.sceneMap.GetValue(scene); ok {
if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) {
this.NotPermit(w, r)
w.Write([]byte("invalid request,error google code"))
return
}
}
}
fileInfo.Md5 = md5sum
fileInfo.OffSet = -1
if uploadFile, uploadHeader, err = r.FormFile("file"); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
return
}
fileInfo.Peers = []string{}
fileInfo.TimeStamp = time.Now().Unix()
if scene == "" {
scene = Config().DefaultScene
}
if output == "" {
output = "text"
}
if !this.util.Contains(output, []string{"json", "text"}) {
w.Write([]byte("output just support json or text"))
return
}
fileInfo.Scene = scene
if _, err = this.CheckScene(scene); err != nil {
w.Write([]byte(err.Error()))
return
}
if err != nil {
log.Error(err)
http.Redirect(w, r, "/", http.StatusMovedPermanently)
return
}
if _, err = this.SaveUploadFile(uploadFile, uploadHeader, &fileInfo, r); err != nil {
w.Write([]byte(err.Error()))
return
}
if Config().EnableDistinctFile {
if v, _ := this.GetFileInfoFromLevelDB(fileInfo.Md5); v != nil && v.Md5 != "" {
fileResult = this.BuildFileResult(v, r)
if Config().RenameFile {
os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName)
} else {
os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name)
}
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
return
}
}
if fileInfo.Md5 == "" {
log.Warn(" fileInfo.Md5 is null")
return
}
if md5sum != "" && fileInfo.Md5 != md5sum {
log.Warn(" fileInfo.Md5 and md5sum !=")
return
}
if !Config().EnableDistinctFile {
// bugfix filecount stat
fileInfo.Md5 = this.util.MD5(this.GetFilePathByInfo(&fileInfo, false))
}
if Config().EnableMergeSmallFile && fileInfo.Size < CONST_SMALL_FILE_SIZE {
if err = this.SaveSmallFile(&fileInfo); err != nil {
log.Error(err)
return
}
}
this.saveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME) //maybe slow
go this.postFileToPeer(&fileInfo)
if fileInfo.Size <= 0 {
log.Error("file size is zero")
return
}
fileResult = this.BuildFileResult(&fileInfo, r)
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
return
} else {
md5sum = r.FormValue("md5")
output = r.FormValue("output")
if md5sum == "" {
w.Write([]byte("(error) if you want to upload fast md5 is require" +
",and if you want to upload file,you must use post method "))
return
}
if v, _ := this.GetFileInfoFromLevelDB(md5sum); v != nil && v.Md5 != "" {
fileResult = this.BuildFileResult(v, r)
}
if output == "json" {
if data, err = json.Marshal(fileResult); err != nil {
log.Error(err)
w.Write([]byte(err.Error()))
}
w.Write(data)
} else {
w.Write([]byte(fileResult.Url))
}
}
}
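// SaveSmallFile (below) appends an uploaded small file to a shared "haystack"
// block under LARGE_DIR: it writes a one-byte "1" marker, records the resulting
// offset, and rewrites ReName as "blockName,offset,size,ext" so the file can be
// located inside the block later.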
func (this *Server) SaveSmallFile(fileInfo *FileInfo) error {
var (
err error
filename string
fpath string
srcFile *os.File
desFile *os.File
largeDir string
destPath string
reName string
fileExt string
)
filename = fileInfo.Name
fileExt = path.Ext(filename)
if fileInfo.ReName != "" {
filename = fileInfo.ReName
}
fpath = DOCKER_DIR + fileInfo.Path + "/" + filename
largeDir = LARGE_DIR + "/" + Config().PeerId
if !this.util.FileExists(largeDir) {
os.MkdirAll(largeDir, 0775)
}
reName = fmt.Sprintf("%d", this.util.RandInt(100, 300))
destPath = largeDir + "/" + reName
this.lockMap.LockKey(destPath)
defer this.lockMap.UnLockKey(destPath)
if this.util.FileExists(fpath) {
srcFile, err = os.OpenFile(fpath, os.O_CREATE|os.O_RDONLY, 06666)
if err != nil {
return err
}
defer srcFile.Close()
desFile, err = os.OpenFile(destPath, os.O_CREATE|os.O_RDWR, 06666)
if err != nil {
return err
}
defer desFile.Close()
fileInfo.OffSet, err = desFile.Seek(0, 2)
if _, err = desFile.Write([]byte("1")); err != nil {
//first byte set 1
return err
}
fileInfo.OffSet, err = desFile.Seek(0, 2)
if err != nil {
return err
}
fileInfo.OffSet = fileInfo.OffSet - 1 //minus 1 byte
fileInfo.Size = fileInfo.Size + 1
fileInfo.ReName = fmt.Sprintf("%s,%d,%d,%s", reName, fileInfo.OffSet, fileInfo.Size, fileExt)
if _, err = io.Copy(desFile, srcFile); err != nil {
return err
}
srcFile.Close()
os.Remove(fpath)
fileInfo.Path = strings.Replace(largeDir, DOCKER_DIR, "", 1)
}
return nil
}
func (this *Server) SendToMail(to, subject, body, mailtype string) error {
host := Config().Mail.Host
user := Config().Mail.User
password := Config().Mail.Password
hp := strings.Split(host, ":")
auth := smtp.PlainAuth("", user, password, hp[0])
var contentType string
if mailtype == "html" {
contentType = "Content-Type: text/" + mailtype + "; charset=UTF-8"
} else {
contentType = "Content-Type: text/plain" + "; charset=UTF-8"
}
msg := []byte("To: " + to + "\r\nFrom: " + user + ">\r\nSubject: " + "\r\n" + contentType + "\r\n\r\n" + body)
sendTo := strings.Split(to, ";")
err := smtp.SendMail(host, auth, user, sendTo, msg)
return err
}
func (this *Server) BenchMark(w http.ResponseWriter, r *http.Request) {
t := time.Now()
batch := new(leveldb.Batch)
for i := 0; i < 100000000; i++ {
f := FileInfo{}
f.Peers = []string{"http://192.168.0.1", "http://192.168.2.5"}
f.Path = "20190201/19/02"
s := strconv.Itoa(i)
s = this.util.MD5(s)
f.Name = s
f.Md5 = s
if data, err := json.Marshal(&f); err == nil {
batch.Put([]byte(s), data)
}
if i%10000 == 0 {
if batch.Len() > 0 {
server.ldb.Write(batch, nil)
// batch = new(leveldb.Batch)
batch.Reset()
}
fmt.Println(i, time.Since(t).Seconds())
}
//fmt.Println(server.GetFileInfoFromLevelDB(s))
}
this.util.WriteFile("time.txt", time.Since(t).String())
fmt.Println(time.Since(t).String())
}
func (this *Server) RepairStatWeb(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
date string
inner string
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
date = r.FormValue("date")
inner = r.FormValue("inner")
if date == "" {
date = this.util.GetToDay()
}
if ok, err := regexp.MatchString("\\d{8}", date); err != nil || !ok || len(date) != 8 {
result.Message = "invalid date"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if inner != "1" {
for _, peer := range Config().Peers {
req := httplib.Post(peer + this.getRequestURI("repair_stat"))
req.Param("inner", "1")
req.Param("date", date)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
result.Data = this.RepairStatByDate(date)
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Stat(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
inner string
echart string
category []string
barCount []int64
barSize []int64
dataMap map[string]interface{}
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
r.ParseForm()
inner = r.FormValue("inner")
echart = r.FormValue("echart")
data := this.GetStat()
result.Status = "ok"
result.Data = data
if echart == "1" {
dataMap = make(map[string]interface{}, 3)
for _, v := range data {
barCount = append(barCount, v.FileCount)
barSize = append(barSize, v.TotalSize)
category = append(category, v.Date)
}
dataMap["category"] = category
dataMap["barCount"] = barCount
dataMap["barSize"] = barSize
result.Data = dataMap
}
if inner == "1" {
w.Write([]byte(this.util.JsonEncodePretty(data)))
} else {
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) GetStat() []StatDateFileInfo {
var (
min int64
max int64
err error
i int64
rows []StatDateFileInfo
total StatDateFileInfo
)
min = 20190101
max = 20190101
for k := range this.statMap.Get() {
ks := strings.Split(k, "_")
if len(ks) == 2 {
if i, err = strconv.ParseInt(ks[0], 10, 64); err != nil {
continue
}
if i >= max {
max = i
}
if i < min {
min = i
}
}
}
for i := min; i <= max; i++ {
s := fmt.Sprintf("%d", i)
if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_TOTAL_SIZE_KEY); ok {
var info StatDateFileInfo
info.Date = s
switch v.(type) {
case int64:
info.TotalSize = v.(int64)
total.TotalSize = total.TotalSize + v.(int64)
}
if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_COUNT_KEY); ok {
switch v.(type) {
case int64:
info.FileCount = v.(int64)
total.FileCount = total.FileCount + v.(int64)
}
}
rows = append(rows, info)
}
}
total.Date = "all"
rows = append(rows, total)
return rows
}
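// RegisterExit closes the LevelDB handle cleanly when the process receives
// SIGHUP, SIGINT, SIGTERM or SIGQUIT, then exits.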
func (this *Server) RegisterExit() {
c := make(chan os.Signal)
signal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
go func() {
for s := range c {
switch s {
case syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT:
this.ldb.Close()
log.Info("Exit", s)
os.Exit(1)
}
}
}()
}
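// AppendToQueue pushes a file onto the sync-to-peers queue, sleeping in a loop while the
// queue is within 10% of its capacity to provide simple backpressure.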
func (this *Server) AppendToQueue(fileInfo *FileInfo) {
for (len(this.queueToPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE {
time.Sleep(time.Millisecond * 50)
}
this.queueToPeers <- *fileInfo
}
func (this *Server) AppendToDownloadQueue(fileInfo *FileInfo) {
for (len(this.queueFromPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE {
time.Sleep(time.Millisecond * 50)
}
this.queueFromPeers <- *fileInfo
}
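// ConsumerDownLoad starts 200 worker goroutines that drain queueFromPeers and download each
// file from the first listed peer that is not this host (127.0.0.1 peers are skipped).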
func (this *Server) ConsumerDownLoad() {
ConsumerFunc := func() {
for {
fileInfo := <-this.queueFromPeers
if len(fileInfo.Peers) <= 0 {
log.Warn("Peer is null", fileInfo)
continue
}
for _, peer := range fileInfo.Peers {
if strings.Contains(peer, "127.0.0.1") {
log.Warn("sync error with 127.0.0.1", fileInfo)
continue
}
if peer != this.host {
this.DownloadFromPeer(peer, &fileInfo)
break
}
}
}
}
for i := 0; i < 200; i++ {
go ConsumerFunc()
}
}
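// RemoveDownloading periodically scans LevelDB keys prefixed with "downloading_" and deletes
// partial files whose download started more than ten minutes ago.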
func (this *Server) RemoveDownloading() {
RemoveDownloadFunc := func() {
for {
iter := this.ldb.NewIterator(util.BytesPrefix([]byte("downloading_")), nil)
for iter.Next() {
key := iter.Key()
keys := strings.Split(string(key), "_")
if len(keys) == 3 {
if t, err := strconv.ParseInt(keys[1], 10, 64); err == nil && time.Now().Unix()-t > 60*10 {
os.Remove(DOCKER_DIR + keys[2])
}
}
}
iter.Release()
time.Sleep(time.Minute * 3)
}
}
go RemoveDownloadFunc()
}
func (this *Server) ConsumerLog() {
go func() {
var (
fileLog *FileLog
)
for {
fileLog = <-this.queueFileLog
this.saveFileMd5Log(fileLog.FileInfo, fileLog.FileName)
}
}()
}
func (this *Server) LoadSearchDict() {
go func() {
log.Info("Load search dict ....")
f, err := os.Open(CONST_SEARCH_FILE_NAME)
if err != nil {
log.Error(err)
return
}
defer f.Close()
r := bufio.NewReader(f)
for {
line, _, err := r.ReadLine()
if err != nil {
break
}
kvs := strings.Split(string(line), "\t")
if len(kvs) == 2 {
this.searchMap.Put(kvs[0], kvs[1])
}
}
log.Info("finished loading search dict")
}()
}
func (this *Server) SaveSearchDict() {
var (
err error
fp *os.File
searchDict map[string]interface{}
k string
v interface{}
)
this.lockMap.LockKey(CONST_SEARCH_FILE_NAME)
defer this.lockMap.UnLockKey(CONST_SEARCH_FILE_NAME)
searchDict = this.searchMap.Get()
fp, err = os.OpenFile(CONST_SEARCH_FILE_NAME, os.O_RDWR, 0755)
if err != nil {
log.Error(err)
return
}
defer fp.Close()
for k, v = range searchDict {
fp.WriteString(fmt.Sprintf("%s\t%s\n", k, v.(string)))
}
}
func (this *Server) ConsumerPostToPeer() {
ConsumerFunc := func() {
for {
fileInfo := <-this.queueToPeers
this.postFileToPeer(&fileInfo)
}
}
for i := 0; i < 200; i++ {
go ConsumerFunc()
}
}
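// AutoRepair compares this node's per-date file counts with each peer. When they differ (or
// forceRepair is set) it fetches the peer's md5 list for that date, reports the md5s this node
// is missing back to the peer via /receive_md5s, and queues the files that only exist locally
// to be posted to the peers.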
func (this *Server) AutoRepair(forceRepair bool) {
if this.lockMap.IsLock("AutoRepair") {
log.Warn("Lock AutoRepair")
return
}
this.lockMap.LockKey("AutoRepair")
defer this.lockMap.UnLockKey("AutoRepair")
AutoRepairFunc := func(forceRepair bool) {
var (
dateStats []StatDateFileInfo
err error
countKey string
md5s string
localSet mapset.Set
remoteSet mapset.Set
allSet mapset.Set
tmpSet mapset.Set
fileInfo *FileInfo
)
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("AutoRepair")
log.Error(re)
log.Error(string(buffer))
}
}()
Update := func(peer string, dateStat StatDateFileInfo) {
// pull the data for this date from the remote peer
req := httplib.Get(fmt.Sprintf("%s%s?date=%s&force=%s", peer, this.getRequestURI("sync"), dateStat.Date, "1"))
req.SetTimeout(time.Second*5, time.Second*5)
if _, err = req.String(); err != nil {
log.Error(err)
}
log.Info(fmt.Sprintf("sync file from %s date %s", peer, dateStat.Date))
}
for _, peer := range Config().Peers {
req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("stat")))
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*15)
if err = req.ToJSON(&dateStats); err != nil {
log.Error(err)
continue
}
for _, dateStat := range dateStats {
if dateStat.Date == "all" {
continue
}
countKey = dateStat.Date + "_" + CONST_STAT_FILE_COUNT_KEY
if v, ok := this.statMap.GetValue(countKey); ok {
switch v.(type) {
case int64:
if v.(int64) != dateStat.FileCount || forceRepair {
// counts differ: compute the set difference between local and remote md5s
//TODO
req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("get_md5s_by_date")))
req.SetTimeout(time.Second*15, time.Second*60)
req.Param("date", dateStat.Date)
if md5s, err = req.String(); err != nil {
continue
}
if localSet, err = this.GetMd5sByDate(dateStat.Date, CONST_FILE_Md5_FILE_NAME); err != nil {
log.Error(err)
continue
}
remoteSet = this.util.StrToMapSet(md5s, ",")
allSet = localSet.Union(remoteSet)
md5s = this.util.MapSetToStr(allSet.Difference(localSet), ",")
req = httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("receive_md5s")))
req.SetTimeout(time.Second*15, time.Second*60)
req.Param("md5s", md5s)
req.String()
tmpSet = allSet.Difference(remoteSet)
for v := range tmpSet.Iter() {
if v != nil {
if fileInfo, err = this.GetFileInfoFromLevelDB(v.(string)); err != nil {
log.Error(err)
continue
}
this.AppendToQueue(fileInfo)
}
}
//Update(peer,dateStat)
}
}
} else {
Update(peer, dateStat)
}
}
}
}
AutoRepairFunc(forceRepair)
}
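// CleanLogLevelDBByDate iterates the log DB entries whose keys start with "<date>_<filename>_",
// collects their values, and removes the corresponding keys from the log DB.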
func (this *Server) CleanLogLevelDBByDate(date string, filename string) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CleanLogLevelDBByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
keys mapset.Set
)
keys = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
keys.Add(string(iter.Value()))
}
iter.Release()
for key := range keys.Iter() {
err = this.RemoveKeyFromLevelDB(key.(string), this.logDB)
if err != nil {
log.Error(err)
}
}
}
func (this *Server) CleanAndBackUp() {
Clean := func() {
var (
filenames []string
yesterday string
)
if this.curDate != this.util.GetToDay() {
filenames = []string{CONST_Md5_QUEUE_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_REMOME_Md5_FILE_NAME}
yesterday = this.util.GetDayFromTimeStamp(time.Now().AddDate(0, 0, -1).Unix())
for _, filename := range filenames {
this.CleanLogLevelDBByDate(yesterday, filename)
}
this.BackUpMetaDataByDate(yesterday)
this.curDate = this.util.GetToDay()
}
}
go func() {
for {
time.Sleep(time.Hour * 6)
Clean()
}
}()
}
func (this *Server) LoadFileInfoByDate(date string, filename string) (mapset.Set, error) {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("LoadFileInfoByDate")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
err error
keyPrefix string
fileInfos mapset.Set
)
fileInfos = mapset.NewSet()
keyPrefix = "%s_%s_"
keyPrefix = fmt.Sprintf(keyPrefix, date, filename)
iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
for iter.Next() {
var fileInfo FileInfo
if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
continue
}
fileInfos.Add(&fileInfo)
}
iter.Release()
return fileInfos, nil
}
func (this *Server) LoadQueueSendToPeer() {
if queue, err := this.LoadFileInfoByDate(this.util.GetToDay(), CONST_Md5_QUEUE_FILE_NAME); err != nil {
log.Error(err)
} else {
for fileInfo := range queue.Iter() {
//this.queueFromPeers <- *fileInfo.(*FileInfo)
this.AppendToDownloadQueue(fileInfo.(*FileInfo))
}
}
}
func (this *Server) CheckClusterStatus() {
check := func() {
defer func() {
if re := recover(); re != nil {
buffer := debug.Stack()
log.Error("CheckClusterStatus")
log.Error(re)
log.Error(string(buffer))
}
}()
var (
status JsonResult
err error
subject string
body string
req *httplib.BeegoHTTPRequest
)
for _, peer := range Config().Peers {
req = httplib.Get(fmt.Sprintf("%s%s", peer, this.getRequestURI("status")))
req.SetTimeout(time.Second*5, time.Second*5)
err = req.ToJSON(&status)
if status.Status != "ok" {
for _, to := range Config().AlarmReceivers {
subject = "fastdfs server error"
if err != nil {
body = fmt.Sprintf("%s\nserver:%s\nerror:\n%s", subject, peer, err.Error())
} else {
body = fmt.Sprintf("%s\nserver:%s\n", subject, peer)
}
if err = this.SendToMail(to, subject, body, "text"); err != nil {
log.Error(err)
}
}
if Config().AlarmUrl != "" {
req = httplib.Post(Config().AlarmUrl)
req.SetTimeout(time.Second*10, time.Second*10)
req.Param("message", body)
req.Param("subject", subject)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
}
}
}
go func() {
for {
time.Sleep(time.Minute * 10)
check()
}
}()
}
func (this *Server) RepairFileInfo(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
if !Config().EnableMigrate {
w.Write([]byte("please set enable_migrate=true"))
return
}
result.Status = "ok"
result.Message = "repair job start,don't try again,very danger "
go this.RepairFileInfoFromFile()
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Reload(w http.ResponseWriter, r *http.Request) {
var (
err error
data []byte
cfg GloablConfig
action string
cfgjson string
result JsonResult
)
result.Status = "fail"
r.ParseForm()
if !this.IsPeer(r) {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
return
}
cfgjson = r.FormValue("cfg")
action = r.FormValue("action")
_ = cfgjson
if action == "get" {
result.Data = Config()
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "set" {
if cfgjson == "" {
result.Message = "(error)parameter cfg(json) require"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if err = json.Unmarshal([]byte(cfgjson), &cfg); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
result.Status = "ok"
cfgjson = this.util.JsonEncodePretty(cfg)
this.util.WriteFile(CONST_CONF_FILE_NAME, cfgjson)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "reload" {
if data, err = ioutil.ReadFile(CONST_CONF_FILE_NAME); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if err = json.Unmarshal(data, &cfg); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
ParseConfig(CONST_CONF_FILE_NAME)
this.initComponent(true)
result.Status = "ok"
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if action == "" {
w.Write([]byte("(error)action support set(json) get reload"))
}
}
func (this *Server) RemoveEmptyDir(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
result.Status = "ok"
if this.IsPeer(r) {
go this.util.RemoveEmptyDir(DATA_DIR)
go this.util.RemoveEmptyDir(STORE_DIR)
result.Message = "clean job start ..,don't try again!!!"
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) BackUp(w http.ResponseWriter, r *http.Request) {
var (
err error
date string
result JsonResult
inner string
url string
)
result.Status = "ok"
r.ParseForm()
date = r.FormValue("date")
inner = r.FormValue("inner")
if date == "" {
date = this.util.GetToDay()
}
if this.IsPeer(r) {
if inner != "1" {
for _, peer := range Config().Peers {
backUp := func(peer string, date string) {
url = fmt.Sprintf("%s%s", peer, this.getRequestURI("backup"))
req := httplib.Post(url)
req.Param("date", date)
req.Param("inner", "1")
req.SetTimeout(time.Second*5, time.Second*600)
if _, err = req.String(); err != nil {
log.Error(err)
}
}
go backUp(peer, date)
}
}
go this.BackUpMetaDataByDate(date)
result.Message = "back job start..."
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
// Notice: performance is poor; this full LevelDB scan is only suitable for small data sets but uses little memory. For higher performance, search via searchMap instead, at the cost of holding the dictionary in memory.
func (this *Server) Search(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
err error
kw string
count int
fileInfos []FileInfo
md5s []string
)
kw = r.FormValue("kw")
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
iter := this.ldb.NewIterator(nil, nil)
for iter.Next() {
var fileInfo FileInfo
value := iter.Value()
if err = json.Unmarshal(value, &fileInfo); err != nil {
log.Error(err)
continue
}
if strings.Contains(fileInfo.Name, kw) && !this.util.Contains(fileInfo.Md5, md5s) {
count = count + 1
fileInfos = append(fileInfos, fileInfo)
md5s = append(md5s, fileInfo.Md5)
}
if count >= 100 {
break
}
}
iter.Release()
err = iter.Error()
if err != nil {
log.Error(err)
}
//fileInfos=this.SearchDict(kw) // search files from the in-memory map for huge capacity
result.Status = "ok"
result.Data = fileInfos
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) SearchDict(kw string) []FileInfo {
var (
fileInfos []FileInfo
fileInfo *FileInfo
)
for dict := range this.searchMap.Iter() {
if strings.Contains(dict.Val.(string), kw) {
if fileInfo, _ = this.GetFileInfoFromLevelDB(dict.Key); fileInfo != nil {
fileInfos = append(fileInfos, *fileInfo)
}
}
}
return fileInfos
}
func (this *Server) ListDir(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
dir string
filesInfo []os.FileInfo
err error
filesResult []FileInfoResult
tmpDir string
)
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
dir = r.FormValue("dir")
//if dir == "" {
// result.Message = "dir can't null"
// w.Write([]byte(this.util.JsonEncodePretty(result)))
// return
//}
dir = strings.Replace(dir, ".", "", -1)
if tmpDir, err = os.Readlink(dir); err == nil {
dir = tmpDir
}
filesInfo, err = ioutil.ReadDir(DOCKER_DIR + STORE_DIR_NAME + "/" + dir)
if err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
for _, f := range filesInfo {
fi := FileInfoResult{
Name: f.Name(),
Size: f.Size(),
IsDir: f.IsDir(),
ModTime: f.ModTime().Unix(),
Path: dir,
Md5: this.util.MD5(STORE_DIR_NAME + "/" + dir + "/" + f.Name()),
}
filesResult = append(filesResult, fi)
}
result.Status = "ok"
result.Data = filesResult
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
func (this *Server) VerifyGoogleCode(secret string, code string, discrepancy int64) bool {
var (
goauth *googleAuthenticator.GAuth
)
goauth = googleAuthenticator.NewGAuth()
if ok, err := goauth.VerifyCode(secret, code, discrepancy); ok {
return ok
} else {
log.Error(err)
return ok
}
}
func (this *Server) GenGoogleCode(w http.ResponseWriter, r *http.Request) {
var (
err error
result JsonResult
secret string
goauth *googleAuthenticator.GAuth
)
r.ParseForm()
goauth = googleAuthenticator.NewGAuth()
secret = r.FormValue("secret")
result.Status = "ok"
result.Message = "ok"
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
if result.Data, err = goauth.GetCode(secret); err != nil {
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) GenGoogleSecret(w http.ResponseWriter, r *http.Request) {
var (
result JsonResult
)
result.Status = "ok"
result.Message = "ok"
if !this.IsPeer(r) {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
}
GetSeed := func(length int) string {
seeds := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
s := ""
random.Seed(time.Now().UnixNano())
for i := 0; i < length; i++ {
s += string(seeds[random.Intn(32)])
}
return s
}
result.Data = GetSeed(16)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
func (this *Server) Report(w http.ResponseWriter, r *http.Request) {
var (
reportFileName string
result JsonResult
html string
)
result.Status = "ok"
r.ParseForm()
if this.IsPeer(r) {
reportFileName = STATIC_DIR + "/report.html"
if this.util.IsExist(reportFileName) {
if data, err := this.util.ReadBinFile(reportFileName); err != nil {
log.Error(err)
result.Message = err.Error()
w.Write([]byte(this.util.JsonEncodePretty(result)))
return
} else {
html = string(data)
if Config().SupportGroupManage {
html = strings.Replace(html, "{group}", "/"+Config().Group, 1)
} else {
html = strings.Replace(html, "{group}", "", 1)
}
w.Write([]byte(html))
return
}
} else {
w.Write([]byte(fmt.Sprintf("%s is not found", reportFileName)))
}
} else {
w.Write([]byte(this.GetClusterNotPermitMessage(r)))
}
}
func (this *Server) Repair(w http.ResponseWriter, r *http.Request) {
var (
force string
forceRepair bool
result JsonResult
)
result.Status = "ok"
r.ParseForm()
force = r.FormValue("force")
if force == "1" {
forceRepair = true
}
if this.IsPeer(r) {
go this.AutoRepair(forceRepair)
result.Message = "repair job start..."
w.Write([]byte(this.util.JsonEncodePretty(result)))
} else {
result.Message = this.GetClusterNotPermitMessage(r)
w.Write([]byte(this.util.JsonEncodePretty(result)))
}
}
func (this *Server) Status(w http.ResponseWriter, r *http.Request) {
var (
status JsonResult
sts map[string]interface{}
today string
sumset mapset.Set
ok bool
v interface{}
)
memStat := new(runtime.MemStats)
runtime.ReadMemStats(memStat)
today = this.util.GetToDay()
sts = make(map[string]interface{})
sts["Fs.QueueFromPeers"] = len(this.queueFromPeers)
sts["Fs.QueueToPeers"] = len(this.queueToPeers)
sts["Fs.QueueFileLog"] = len(this.queueFileLog)
for _, k := range []string{CONST_FILE_Md5_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_Md5_QUEUE_FILE_NAME} {
k2 := fmt.Sprintf("%s_%s", today, k)
if v, ok = this.sumMap.GetValue(k2); ok {
sumset = v.(mapset.Set)
if k == CONST_Md5_QUEUE_FILE_NAME {
sts["Fs.QueueSetSize"] = sumset.Cardinality()
}
if k == CONST_Md5_ERROR_FILE_NAME {
sts["Fs.ErrorSetSize"] = sumset.Cardinality()
}
if k == CONST_FILE_Md5_FILE_NAME {
sts["Fs.FileSetSize"] = sumset.Cardinality()
}
}
}
sts["Fs.AutoRepair"] = Config().AutoRepair
sts["Fs.RefreshInterval"] = Config().RefreshInterval
sts["Fs.Peers"] = Config().Peers
sts["Fs.Local"] = this.host
sts["Fs.FileStats"] = this.GetStat()
sts["Fs.ShowDir"] = Config().ShowDir
sts["Sys.NumGoroutine"] = runtime.NumGoroutine()
sts["Sys.NumCpu"] = runtime.NumCPU()
sts["Sys.Alloc"] = memStat.Alloc
sts["Sys.TotalAlloc"] = memStat.TotalAlloc
sts["Sys.HeapAlloc"] = memStat.HeapAlloc
sts["Sys.Frees"] = memStat.Frees
sts["Sys.HeapObjects"] = memStat.HeapObjects
sts["Sys.NumGC"] = memStat.NumGC
sts["Sys.GCCPUFraction"] = memStat.GCCPUFraction
sts["Sys.GCSys"] = memStat.GCSys
//sts["Sys.MemInfo"] = memStat
status.Status = "ok"
status.Data = sts
w.Write([]byte(this.util.JsonEncodePretty(status)))
}
func (this *Server) HeartBeat(w http.ResponseWriter, r *http.Request) {
}
func (this *Server) Index(w http.ResponseWriter, r *http.Request) {
var (
uploadUrl string
uploadBigUrl string
uppy string
)
uploadUrl = "/upload"
uploadBigUrl = CONST_BIG_UPLOAD_PATH_SUFFIX
if Config().EnableWebUpload {
if Config().SupportGroupManage {
uploadUrl = fmt.Sprintf("/%s/upload", Config().Group)
uploadBigUrl = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
}
uppy = `<html>
<head>
<meta charset="utf-8" />
<title>go-fastdfs</title>
<style>form { bargin } .form-line { display:block;height: 30px;margin:8px; } #stdUpload {background: #fafafa;border-radius: 10px;width: 745px; }</style>
<link href="https://transloadit.edgly.net/releases/uppy/v0.30.0/dist/uppy.min.css" rel="stylesheet"></head>
<body>
<div>Standard upload (strongly recommended)</div>
<div id="stdUpload">
<form action="%s" method="post" enctype="multipart/form-data">
<span class="form-line">文件(file):
<input type="file" id="file" name="file" /></span>
<span class="form-line">场景(scene):
<input type="text" id="scene" name="scene" value="%s" /></span>
<span class="form-line">输出(output):
<input type="text" id="output" name="output" value="json" /></span>
<span class="form-line">自定义路径(path):
<input type="text" id="path" name="path" value="" /></span>
<span class="form-line">google认证码(code):
<input type="text" id="code" name="code" value="" /></span>
<span class="form-line">自定义认证(auth_token):
<input type="text" id="auth_token" name="auth_token" value="" /></span>
<input type="submit" name="submit" value="upload" />
</form>
</div>
<div>Resumable upload (consider this for very large files)</div>
<div>
<div id="drag-drop-area"></div>
<script src="https://transloadit.edgly.net/releases/uppy/v0.30.0/dist/uppy.min.js"></script>
<script>var uppy = Uppy.Core().use(Uppy.Dashboard, {
inline: true,
target: '#drag-drop-area'
}).use(Uppy.Tus, {
endpoint: '%s'
})
uppy.on('complete', (result) => {
// console.log(result) console.log('Upload complete! We’ve uploaded these files:', result.successful)
})
uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca',callback_url:'http://127.0.0.1/callback' })// pass the upload auth parameters here; in the callback_url request, id is the file ID and info is the file's basic information as JSON
</script>
</div>
</body>
</html>`
uppyFileName := STATIC_DIR + "/uppy.html"
if this.util.IsExist(uppyFileName) {
if data, err := this.util.ReadBinFile(uppyFileName); err != nil {
log.Error(err)
} else {
uppy = string(data)
}
} else {
this.util.WriteFile(uppyFileName, uppy)
}
fmt.Fprintf(w,
fmt.Sprintf(uppy, uploadUrl, Config().DefaultScene, uploadBigUrl))
} else {
w.Write([]byte("web upload deny"))
}
}
func init() {
flag.Parse()
if *v {
fmt.Printf("%s\n%s\n%s\n%s\n", VERSION, BUILD_TIME, GO_VERSION, GIT_VERSION)
os.Exit(0)
}
DOCKER_DIR = os.Getenv("GO_FASTDFS_DIR")
if DOCKER_DIR != "" {
if !strings.HasSuffix(DOCKER_DIR, "/") {
DOCKER_DIR = DOCKER_DIR + "/"
}
}
STORE_DIR = DOCKER_DIR + STORE_DIR_NAME
CONF_DIR = DOCKER_DIR + CONF_DIR_NAME
DATA_DIR = DOCKER_DIR + DATA_DIR_NAME
LOG_DIR = DOCKER_DIR + LOG_DIR_NAME
STATIC_DIR = DOCKER_DIR + STATIC_DIR_NAME
LARGE_DIR_NAME = "haystack"
LARGE_DIR = STORE_DIR + "/haystack"
CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
logAccessConfigStr = strings.Replace(logAccessConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
logConfigStr = strings.Replace(logConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
for _, folder := range FOLDERS {
os.MkdirAll(folder, 0775)
}
server = NewServer()
peerId := fmt.Sprintf("%d", server.util.RandInt(0, 9))
if !server.util.FileExists(CONST_CONF_FILE_NAME) {
peer := "http://" + server.util.GetPulicIP() + ":8080"
cfg := fmt.Sprintf(cfgJson, peerId, peer, peer)
server.util.WriteFile(CONST_CONF_FILE_NAME, cfg)
}
if logger, err := log.LoggerFromConfigAsBytes([]byte(logConfigStr)); err != nil {
panic(err)
} else {
log.ReplaceLogger(logger)
}
if _logacc, err := log.LoggerFromConfigAsBytes([]byte(logAccessConfigStr)); err == nil {
logacc = _logacc
log.Info("succes init log access")
} else {
log.Error(err.Error())
}
ParseConfig(CONST_CONF_FILE_NAME)
if Config().QueueSize == 0 {
Config().QueueSize = CONST_QUEUE_SIZE
}
if Config().PeerId == "" {
Config().PeerId = peerId
}
staticHandler = http.StripPrefix("/"+Config().Group+"/", http.FileServer(http.Dir(STORE_DIR)))
server.initComponent(false)
}
func (this *Server) test() {
testLock := func() {
wg := sync.WaitGroup{}
tt := func(i int, wg *sync.WaitGroup) {
//if server.lockMap.IsLock("xx") {
// return
//}
//fmt.Println("timeer len",len(server.lockMap.Get()))
//time.Sleep(time.Nanosecond*10)
server.lockMap.LockKey("xx")
defer server.lockMap.UnLockKey("xx")
//time.Sleep(time.Nanosecond*1)
//fmt.Println("xx", i)
wg.Done()
}
go func() {
for {
time.Sleep(time.Second * 1)
fmt.Println("timeer len", len(server.lockMap.Get()), server.lockMap.Get())
}
}()
fmt.Println(len(server.lockMap.Get()))
for i := 0; i < 10000; i++ {
wg.Add(1)
go tt(i, &wg)
}
fmt.Println(len(server.lockMap.Get()))
fmt.Println(len(server.lockMap.Get()))
server.lockMap.LockKey("abc")
fmt.Println("lock")
time.Sleep(time.Second * 5)
server.lockMap.UnLockKey("abc")
server.lockMap.LockKey("abc")
server.lockMap.UnLockKey("abc")
}
_ = testLock
testFile := func() {
var (
err error
f *os.File
)
f, err = os.OpenFile("tt", os.O_CREATE|os.O_RDWR, 0777)
if err != nil {
fmt.Println(err)
}
f.WriteAt([]byte("1"), 100)
f.Seek(0, 2)
f.Write([]byte("2"))
//fmt.Println(f.Seek(0, 2))
//fmt.Println(f.Seek(3, 2))
//fmt.Println(f.Seek(3, 0))
//fmt.Println(f.Seek(3, 1))
//fmt.Println(f.Seek(3, 0))
//f.Write([]byte("1"))
}
_ = testFile
//testFile()
//testLock()
}
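// hookDataStore wraps the tusd DataStore so that NewUpload can reject uploads: when AuthUrl is
// configured, the auth_token from the tus Upload-Metadata header is posted to that URL and the
// upload is refused with HTTP 401 unless the auth service answers "ok".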
type hookDataStore struct {
tusd.DataStore
}
type httpError struct {
error
statusCode int
}
func (err httpError) StatusCode() int {
return err.statusCode
}
func (err httpError) Body() []byte {
return []byte(err.Error())
}
func (store hookDataStore) NewUpload(info tusd.FileInfo) (id string, err error) {
var (
jsonResult JsonResult
)
if Config().AuthUrl != "" {
if auth_token, ok := info.MetaData["auth_token"]; !ok {
msg := "token auth fail,auth_token is not in http header Upload-Metadata," +
"in uppy uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca' })"
log.Error(msg, fmt.Sprintf("current header:%v", info.MetaData))
return "", httpError{error: errors.New(msg), statusCode: 401}
} else {
req := httplib.Post(Config().AuthUrl)
req.Param("auth_token", auth_token)
req.SetTimeout(time.Second*5, time.Second*10)
content, err := req.String()
content = strings.TrimSpace(content)
if strings.HasPrefix(content, "{") && strings.HasSuffix(content, "}") {
if err = json.Unmarshal([]byte(content), &jsonResult); err != nil {
log.Error(err)
return "", httpError{error: errors.New(err.Error() + content), statusCode: 401}
}
if jsonResult.Data != "ok" {
return "", httpError{error: errors.New(content), statusCode: 401}
}
} else {
if err != nil {
log.Error(err)
return "", err
}
if strings.TrimSpace(content) != "ok" {
return "", httpError{error: errors.New(content), statusCode: 401}
}
}
}
}
return store.DataStore.NewUpload(info)
}
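// initTus wires up the tus resumable-upload handler: it stores chunks under STORE_DIR/_big/<peer id>,
// rotates the tusd log at roughly 500 MB, installs the auth hook above, exposes a reader that can
// also serve files merged into a haystack, and on completed uploads deduplicates by md5, renames
// the finished file into the store, records it in LevelDB, notifies peers, and calls any
// callback_url supplied by the client.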
func (this *Server) initTus() {
var (
err error
fileLog *os.File
bigDir string
)
BIG_DIR := STORE_DIR + "/_big/" + Config().PeerId
os.MkdirAll(BIG_DIR, 0775)
os.MkdirAll(LOG_DIR, 0775)
store := filestore.FileStore{
Path: BIG_DIR,
}
if fileLog, err = os.OpenFile(LOG_DIR+"/tusd.log", os.O_CREATE|os.O_RDWR, 0666); err != nil {
log.Error(err)
panic("initTus")
}
go func() {
for {
if fi, err := fileLog.Stat(); err != nil {
log.Error(err)
} else {
if fi.Size() > 1024*1024*500 {
// rotate once the tusd log exceeds 500 MB: copy it aside and truncate
this.util.CopyFile(LOG_DIR+"/tusd.log", LOG_DIR+"/tusd.log.2")
fileLog.Seek(0, 0)
fileLog.Truncate(0)
fileLog.Seek(0, 2)
}
}
time.Sleep(time.Second * 30)
}
}()
l := slog.New(fileLog, "[tusd] ", slog.LstdFlags)
bigDir = CONST_BIG_UPLOAD_PATH_SUFFIX
if Config().SupportGroupManage {
bigDir = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
}
composer := tusd.NewStoreComposer()
// support raw tus upload and download
store.GetReaderExt = func(id string) (io.Reader, error) {
var (
offset int64
err error
length int
buffer []byte
fi *FileInfo
)
if fi, err = this.GetFileInfoFromLevelDB(id); err != nil {
log.Error(err)
return nil, err
} else {
fp := DOCKER_DIR + fi.Path + "/" + fi.ReName
if this.util.FileExists(fp) {
log.Info(fmt.Sprintf("download:%s", fp))
return os.Open(fp)
}
ps := strings.Split(fp, ",")
if len(ps) > 2 && this.util.FileExists(ps[0]) {
if length, err = strconv.Atoi(ps[2]); err != nil {
return nil, err
}
if offset, err = strconv.ParseInt(ps[1], 10, 64); err != nil {
return nil, err
}
if buffer, err = this.util.ReadFileByOffSet(ps[0], offset, length); err != nil {
return nil, err
}
if buffer[0] == '1' {
bufferReader := bytes.NewBuffer(buffer[1:])
return bufferReader, nil
} else {
msg := "data no sync"
log.Error(msg)
return nil, errors.New(msg)
}
}
return nil, errors.New(fmt.Sprintf("%s not found", fp))
}
}
store.UseIn(composer)
SetupPreHooks := func(composer *tusd.StoreComposer) {
composer.UseCore(hookDataStore{
DataStore: composer.Core,
})
}
SetupPreHooks(composer)
handler, err := tusd.NewHandler(tusd.Config{
Logger: l,
BasePath: bigDir,
StoreComposer: composer,
NotifyCompleteUploads: true,
RespectForwardedHeaders: true,
})
notify := func(handler *tusd.Handler) {
for {
select {
case info := <-handler.CompleteUploads:
log.Info("CompleteUploads", info)
name := ""
if v, ok := info.MetaData["filename"]; ok {
name = v
}
var err error
md5sum := ""
oldFullPath := BIG_DIR + "/" + info.ID + ".bin"
infoFullPath := BIG_DIR + "/" + info.ID + ".info"
if md5sum, err = this.util.GetFileSumByName(oldFullPath, Config().FileSumArithmetic); err != nil {
log.Error(err)
continue
}
ext := path.Ext(name)
filename := md5sum + ext
timeStamp := time.Now().Unix()
fpath := time.Now().Format("/20060102/15/04/")
newFullPath := STORE_DIR + "/" + Config().DefaultScene + fpath + Config().PeerId + "/" + filename
if fi, err := this.GetFileInfoFromLevelDB(md5sum); err != nil {
log.Error(err)
} else {
if fi.Md5 != "" {
if _, err := this.SaveFileInfoToLevelDB(info.ID, fi, this.ldb); err != nil {
log.Error(err)
}
log.Info(fmt.Sprintf("file is found md5:%s", fi.Md5))
log.Info("remove file:", oldFullPath)
log.Info("remove file:", infoFullPath)
os.Remove(oldFullPath)
os.Remove(infoFullPath)
continue
}
}
fpath = STORE_DIR_NAME + "/" + Config().DefaultScene + fpath + Config().PeerId
os.MkdirAll(DOCKER_DIR+fpath, 0775)
fileInfo := &FileInfo{
Name: name,
Path: fpath,
ReName: filename,
Size: info.Size,
TimeStamp: timeStamp,
Md5: md5sum,
Peers: []string{this.host},
OffSet: -1,
}
if err = os.Rename(oldFullPath, newFullPath); err != nil {
log.Error(err)
continue
}
log.Info(fileInfo)
os.Remove(infoFullPath)
if _, err = this.SaveFileInfoToLevelDB(info.ID, fileInfo, this.ldb); err != nil {
// associate the tus upload id with the file info
log.Error(err)
}
this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME)
go this.postFileToPeer(fileInfo)
callBack := func(info tusd.FileInfo, fileInfo *FileInfo) {
if callback_url, ok := info.MetaData["callback_url"]; ok {
req := httplib.Post(callback_url)
req.SetTimeout(time.Second*10, time.Second*10)
req.Param("info", server.util.JsonEncodePretty(fileInfo))
req.Param("id", info.ID)
if _, err := req.String(); err != nil {
log.Error(err)
}
}
}
go callBack(info, fileInfo)
}
}
}
go notify(handler)
if err != nil {
log.Error(err)
}
http.Handle(bigDir, http.StripPrefix(bigDir, handler))
}
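// FormatStatInfo loads stat.json into the in-memory stat map, converting JSON float64 counters
// back to int64; if the file does not exist it recomputes today's statistics via RepairStatByDate.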
func (this *Server) FormatStatInfo() {
var (
data []byte
err error
count int64
stat map[string]interface{}
)
if this.util.FileExists(CONST_STAT_FILE_NAME) {
if data, err = this.util.ReadBinFile(CONST_STAT_FILE_NAME); err != nil {
log.Error(err)
} else {
if err = json.Unmarshal(data, &stat); err != nil {
log.Error(err)
} else {
for k, v := range stat {
switch v.(type) {
case float64:
vv := strings.Split(fmt.Sprintf("%f", v), ".")[0]
if count, err = strconv.ParseInt(vv, 10, 64); err != nil {
log.Error(err)
} else {
this.statMap.Put(k, count)
}
default:
this.statMap.Put(k, v)
}
}
}
}
} else {
this.RepairStatByDate(this.util.GetToDay())
}
}
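// initComponent derives the advertised host from the configuration (or the public IP and listen
// port), normalizes the peer list to http URLs while dropping entries that point at this machine,
// loads the scene mappings, and on the initial (non-reload) run also loads the stat file and,
// if enabled, initializes the tus handler.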
func (this *Server) initComponent(isReload bool) {
var (
ip string
)
ip = this.util.GetPulicIP()
if Config().Host == "" {
if len(strings.Split(Config().Addr, ":")) == 2 {
server.host = fmt.Sprintf("http://%s:%s", ip, strings.Split(Config().Addr, ":")[1])
Config().Host = server.host
}
} else {
if strings.HasPrefix(Config().Host, "http") {
server.host = Config().Host
} else {
server.host = "http://" + Config().Host
}
}
ex, _ := regexp.Compile("\\d+\\.\\d+\\.\\d+\\.\\d+")
var peers []string
for _, peer := range Config().Peers {
if this.util.Contains(ip, ex.FindAllString(peer, -1)) ||
this.util.Contains("127.0.0.1", ex.FindAllString(peer, -1)) {
continue
}
if strings.HasPrefix(peer, "http") {
peers = append(peers, peer)
} else {
peers = append(peers, "http://"+peer)
}
}
Config().Peers = peers
if !isReload {
this.FormatStatInfo()
if Config().EnableTus {
this.initTus()
}
}
for _, s := range Config().Scenes {
kv := strings.Split(s, ":")
if len(kv) == 2 {
this.sceneMap.Put(kv[0], kv[1])
}
}
}
type HttpHandler struct {
}
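// ServeHTTP wraps the default mux with access logging, panic recovery (returning HTTP 500),
// and optional cross-origin headers.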
func (HttpHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
status_code := "200"
defer func(t time.Time) {
logStr := fmt.Sprintf("[Access] %s | %v | %s | %s | %s | %s |%s",
time.Now().Format("2006/01/02 - 15:04:05"),
res.Header(),
time.Since(t).String(),
server.util.GetClientIp(req),
req.Method,
status_code,
req.RequestURI,
)
logacc.Info(logStr)
}(time.Now())
defer func() {
if err := recover(); err != nil {
status_code = "500"
res.WriteHeader(500)
print(err)
buff := debug.Stack()
log.Error(err)
log.Error(string(buff))
}
}()
if Config().EnableCrossOrigin {
server.CrossOrigin(res, req)
}
http.DefaultServeMux.ServeHTTP(res, req)
}
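// Main starts the background maintenance goroutines (retrying failed peer syncs, cleanup/backup,
// cluster health checks, queue consumers and optional auto repair) and registers all HTTP routes,
// optionally under the configured group prefix, before starting the HTTP server.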
func (this *Server) Main() {
go func() {
for {
this.CheckFileAndSendToPeer(this.util.GetToDay(), CONST_Md5_ERROR_FILE_NAME, false)
//fmt.Println("CheckFileAndSendToPeer")
time.Sleep(time.Second * time.Duration(Config().RefreshInterval))
//this.util.RemoveEmptyDir(STORE_DIR)
}
}()
go this.CleanAndBackUp()
go this.CheckClusterStatus()
go this.LoadQueueSendToPeer()
go this.ConsumerPostToPeer()
go this.ConsumerLog()
go this.ConsumerDownLoad()
go this.RemoveDownloading()
//go this.LoadSearchDict()
if Config().EnableMigrate {
go this.RepairFileInfoFromFile()
}
if Config().AutoRepair {
go func() {
for {
time.Sleep(time.Minute * 3)
this.AutoRepair(false)
time.Sleep(time.Minute * 60)
}
}()
}
groupRoute := ""
if Config().SupportGroupManage {
groupRoute = "/" + Config().Group
}
uploadPage := "upload.html"
if groupRoute == "" {
http.HandleFunc(fmt.Sprintf("%s", "/"), this.Index)
http.HandleFunc(fmt.Sprintf("/%s", uploadPage), this.Index)
} else {
http.HandleFunc(fmt.Sprintf("%s", groupRoute), this.Index)
http.HandleFunc(fmt.Sprintf("%s/%s", groupRoute, uploadPage), this.Index)
}
http.HandleFunc(fmt.Sprintf("%s/check_files_exist", groupRoute), this.CheckFilesExist)
http.HandleFunc(fmt.Sprintf("%s/check_file_exist", groupRoute), this.CheckFileExist)
http.HandleFunc(fmt.Sprintf("%s/upload", groupRoute), this.Upload)
http.HandleFunc(fmt.Sprintf("%s/delete", groupRoute), this.RemoveFile)
http.HandleFunc(fmt.Sprintf("%s/get_file_info", groupRoute), this.GetFileInfo)
http.HandleFunc(fmt.Sprintf("%s/sync", groupRoute), this.Sync)
http.HandleFunc(fmt.Sprintf("%s/stat", groupRoute), this.Stat)
http.HandleFunc(fmt.Sprintf("%s/repair_stat", groupRoute), this.RepairStatWeb)
http.HandleFunc(fmt.Sprintf("%s/status", groupRoute), this.Status)
http.HandleFunc(fmt.Sprintf("%s/repair", groupRoute), this.Repair)
http.HandleFunc(fmt.Sprintf("%s/report", groupRoute), this.Report)
http.HandleFunc(fmt.Sprintf("%s/backup", groupRoute), this.BackUp)
http.HandleFunc(fmt.Sprintf("%s/search", groupRoute), this.Search)
http.HandleFunc(fmt.Sprintf("%s/list_dir", groupRoute), this.ListDir)
http.HandleFunc(fmt.Sprintf("%s/remove_empty_dir", groupRoute), this.RemoveEmptyDir)
http.HandleFunc(fmt.Sprintf("%s/repair_fileinfo", groupRoute), this.RepairFileInfo)
http.HandleFunc(fmt.Sprintf("%s/reload", groupRoute), this.Reload)
http.HandleFunc(fmt.Sprintf("%s/syncfile_info", groupRoute), this.SyncFileInfo)
http.HandleFunc(fmt.Sprintf("%s/get_md5s_by_date", groupRoute), this.GetMd5sForWeb)
http.HandleFunc(fmt.Sprintf("%s/receive_md5s", groupRoute), this.ReceiveMd5s)
http.HandleFunc(fmt.Sprintf("%s/gen_google_secret", groupRoute), this.GenGoogleSecret)
http.HandleFunc(fmt.Sprintf("%s/gen_google_code", groupRoute), this.GenGoogleCode)
http.HandleFunc("/"+Config().Group+"/", this.Download)
fmt.Println("Listen on " + Config().Addr)
err := http.ListenAndServe(Config().Addr, new(HttpHandler))
log.Error(err)
fmt.Println(err)
}
func main() {
server.Main()
}
|
OpenFile(lo
|
chunk-3023b548.f6b3d360.js
|
(window["webpackJsonp"]=window["webpackJsonp"]||[]).push([["chunk-3023b548"],{"07a6":function(t,e,r){t.exports=r.p+"static/img/login.6da46162.png"},"270d":function(t,e,r){"use strict";var n=r("ebe7"),a=r.n(n);a.a},a481:function(t,e,r){"use strict";var n=r("cb7c"),a=r("4bf8"),i=r("9def"),s=r("4588"),o=r("0390"),c=r("5f1b"),l=Math.max,u=Math.min,d=Math.floor,p=/\$([$&`']|\d\d?|<[^>]*>)/g,v=/\$([$&`']|\d\d?)/g,f=function(t){return void 0===t?t:String(t)};r("214f")("replace",2,(function(t,e,r,g){return[function(n,a){var i=t(this),s=void 0==n?void 0:n[e];return void 0!==s?s.call(n,i,a):r.call(String(i),n,a)},function(t,e){var a=g(r,t,this,e);if(a.done)return a.value;var d=n(t),p=String(this),v="function"===typeof e;v||(e=String(e));var b=d.global;if(b){var m=d.unicode;d.lastIndex=0}var w=[];while(1){var y=c(d,p);if(null===y)break;if(w.push(y),!b)break;var O=String(y[0]);""===O&&(d.lastIndex=o(p,i(d.lastIndex),m))}for(var k="",j=0,x=0;x<w.length;x++){y=w[x];for(var C=String(y[0]),$=l(u(s(y.index),p.length),0),P=[],_=1;_<y.length;_++)P.push(f(y[_]));var S=y.groups;if(v){var E=[C].concat(P,$,p);void 0!==S&&E.push(S);var D=String(e.apply(void 0,E))}else D=h(C,p,$,P,S,e);$>=j&&(k+=p.slice(j,$)+D,j=$+C.length)}return k+p.slice(j)}];function h(t,e,n,i,s,o){var c=n+t.length,l=i.length,u=v;return void 0!==s&&(s=a(s),u=p),r.call(o,u,(function(r,a){var o;switch(a.charAt(0)){case"$":return"$";case"&":return t;case"`":return e.slice(0,n);case"'":return e.slice(c);case"<":o=s[a.slice(1,-1)];break;default:var u=+a;if(0===u)return r;if(u>l){var p=d(u/10);return 0===p?r:p<=l?void 0===i[p-1]?a.charAt(1):i[p-1]+a.charAt(1):r}o=i[u-1]}return void 0===o?"":o}))}}))},d9c9:function(t,e,r){"use strict";r.r(e);var n=function(){var t=this,e=t.$createElement,r=t._self._c||e;return r("div",{staticClass:"log_wrap"},[r("div",{staticClass:"main"},[t._m(0),r("div",{staticClass:"formBox"},[r("h1",{staticClass:"title"},[t._v("千人计划提现审核系统")]),r("h6",{staticClass:"border"}),r("div",[r("a-icon",{staticClass:"icon",attrs:{type:"user"}}),r("input",{directives:[{name:"model",rawName:"v-model",value:t.username,expression:"username"}],staticClass:"input",attrs:{type:"text",placeholder:"请输入账号"},domProps:{value:t.username},on:{input:function(e){e.target.composing||(t.username=e.target.value)}}})],1),r("div",[r("a-icon",{staticClass:"icon",attrs:{type:"lock"}}),r("input",{directives:[{name:"model",rawName:"v-model",value:t.password,expression:"password"}],staticClass:"input",attrs:{type:"password",placeholder:"请输入密码"},domProps:{value:t.password},on:{keypress:function(e){return!e.type.indexOf("key")&&t._k(e.keyCode,"enter",13,e.key,"Enter")?null:t.login(e)},input:function(e){e.target.composing||(t.password=e.target.value)}}})],1),r("a-button",{staticClass:"loginBtn",attrs:{type:"primary",loading:t.btnloading},on:{click:t.login}},[t._v("立即登录")])],1)])])},a=[function(){var t=this,e=t.$createElement,n=t._self._c||e;return n("div",{staticClass:"img"},[n("img",{attrs:{src:r("07a6")}})])}],i=(r("8e6e"),r("ac6a"),r("456d"),r("a481"),r("ade3")),s=r("8ded"),o=r.n(s),c=r("2f62");function l(t,e){var r=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);e&&(n=n.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),r.push.apply(r,n)}return r}function u(t){for(var e=1;e<arguments.length;e++){var 
r=null!=arguments[e]?arguments[e]:{};e%2?l(Object(r),!0).forEach((function(e){Object(i["a"])(t,e,r[e])})):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):l(Object(r)).forEach((function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(r,e))}))}return t}var d={data:function(){return{username:"",password:"",btnloading:!1}},methods:u(u({},Object(c["b"])(["setToken"])),{},{login:function(){var t=this;this.username?this.password?(this.btnloading=!0,this.$api.index_api.login({name:this.username,password:this.password}).then((function(e){o.a.set("token",e.token),t.setToken(e.token),t.btnloading=!1,t.$router.replace("/select")})).catch((function(e){console.log(e),t.btnloading=!1}))):this.$message.error("请输入密码"):this.$message.error("请输入用户名")}})},p=d,v=(r("270d"),r("2877")),f=Object(v["a"])(p,n,a,!1,null,"d0d5a3c8",null);e["default"]=f.exports},ebe7:function(t,e,r){}}]);
|
||
Header.js
|
import ThemeToggle from './ThemeToggle';
export default function Header({ lights, setLights, callLogout }){
const submitLogout = e => {
e.preventDefault();
callLogout();
}
return(
<header>
<h1>
ToDo, or not ToDo, that is the Do
|
A simple ToDo Application to help you track your tasks
</h3>
<nav className="nav-header">
{
(!window.localStorage.getItem("token")) ? "" :
<button className="reg-btn" onClick={submitLogout} >Logout</button>
}
<ThemeToggle lights={lights} setLights={setLights} />
</nav>
</header>
)
}
|
</h1>
<h3>
|
source-element.js
|
import {registerHTMLClass} from '../shared/register-html-class.js';
import {stringAttribute} from '../shared/attributes.js';
import {HTMLElement} from './element.js';
const tagName = 'source';
/**
* @implements globalThis.HTMLSourceElement
*/
class
|
extends HTMLElement {
constructor(ownerDocument, localName = tagName) {
super(ownerDocument, localName);
}
/* c8 ignore start */
get src() { return stringAttribute.get(this, 'src'); }
set src(value) { stringAttribute.set(this, 'src', value); }
get srcset() { return stringAttribute.get(this, 'srcset'); }
set srcset(value) { stringAttribute.set(this, 'srcset', value); }
get sizes() { return stringAttribute.get(this, 'sizes'); }
set sizes(value) { stringAttribute.set(this, 'sizes', value); }
get type() { return stringAttribute.get(this, 'type'); }
set type(value) { stringAttribute.set(this, 'type', value); }
/* c8 ignore stop */
}
registerHTMLClass(tagName, HTMLSourceElement);
export {HTMLSourceElement};
|
HTMLSourceElement
|
ime_service.rs
|
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::fidl_helpers::clone_keyboard_event;
use crate::legacy_ime::ImeState;
use crate::legacy_ime::LegacyIme;
|
use fidl::endpoints::{ClientEnd, RequestStream, ServerEnd};
use fidl_fuchsia_ui_input as uii;
use fidl_fuchsia_ui_text as txt;
use fuchsia_syslog::{fx_log_err, fx_vlog};
use futures::lock::Mutex;
use futures::prelude::*;
use std::sync::{Arc, Weak};
pub struct ImeServiceState {
pub keyboard_visible: bool,
pub active_ime: Option<Weak<Mutex<ImeState>>>,
pub visibility_listeners: Vec<uii::ImeVisibilityServiceControlHandle>,
pub multiplexer: Option<TextFieldMultiplexer>,
/// `TextInputContext` is a service provided to input methods that want to edit text. Whenever
/// a new text field is focused, we provide a TextField interface to any connected `TextInputContext`s,
/// which are listed here.
pub text_input_context_clients: Vec<txt::TextInputContextControlHandle>,
}
/// The internal state of the IMEService, usually held behind an Arc<Mutex>
/// so it can be accessed from multiple places.
impl ImeServiceState {
pub fn update_keyboard_visibility(&mut self, visible: bool) {
self.keyboard_visible = visible;
self.visibility_listeners.retain(|listener| {
// drop listeners if they error on send
listener.send_on_keyboard_visibility_changed(visible).is_ok()
});
}
}
/// Serves several public FIDL services: `ImeService`, `ImeVisibilityService`, and
/// `TextInputContext`.
#[derive(Clone)]
pub struct ImeService {
state: Arc<Mutex<ImeServiceState>>,
}
impl ImeService {
pub fn new() -> ImeService {
ImeService {
state: Arc::new(Mutex::new(ImeServiceState {
keyboard_visible: false,
active_ime: None,
multiplexer: None,
visibility_listeners: Vec::new(),
text_input_context_clients: Vec::new(),
})),
}
}
/// Only updates the keyboard visibility if the IME passed in is the currently active one.
pub async fn update_keyboard_visibility_from_ime<'a>(
&'a self,
check_ime: &'a Arc<Mutex<ImeState>>,
visible: bool,
) {
let mut state = self.state.lock().await;
let active_ime_weak = match &state.active_ime {
Some(val) => val,
None => return,
};
let active_ime = match active_ime_weak.upgrade() {
Some(val) => val,
None => return,
};
if Arc::ptr_eq(check_ime, &active_ime) {
state.update_keyboard_visibility(visible);
}
}
pub async fn get_input_method_editor(
&mut self,
keyboard_type: uii::KeyboardType,
action: uii::InputMethodAction,
initial_state: uii::TextInputState,
client: ClientEnd<uii::InputMethodEditorClientMarker>,
editor: ServerEnd<uii::InputMethodEditorMarker>,
) {
let client_proxy = match client.into_proxy() {
Ok(v) => v,
Err(_) => return,
};
let ime = LegacyIme::new(keyboard_type, action, initial_state, client_proxy, self.clone());
let mut state = self.state.lock().await;
let editor_stream = match editor.into_stream() {
Ok(v) => v,
Err(e) => {
fx_log_err!("Failed to create stream: {}", e);
return;
}
};
let (txt_proxy, txt_request_stream) =
match fidl::endpoints::create_proxy_and_stream::<txt::TextFieldMarker>() {
Ok(v) => v,
Err(e) => {
fx_log_err!("Failed to create TextField proxy and stream: {}", e);
return;
}
};
state.active_ime = Some(ime.downgrade());
ime.bind_ime(editor_stream);
ime.bind_text_field(txt_request_stream);
let multiplexer = TextFieldMultiplexer::new(txt_proxy);
state.text_input_context_clients.retain(|listener| {
// drop listeners if they error on send
bind_new_text_field(&multiplexer, &listener).is_ok()
});
state.multiplexer = Some(multiplexer);
}
pub async fn show_keyboard(&self) {
self.state.lock().await.update_keyboard_visibility(true);
}
pub async fn hide_keyboard(&self) {
self.state.lock().await.update_keyboard_visibility(false);
}
/// This is called by the operating system when input from the physical keyboard comes in.
/// It also is called by legacy onscreen keyboards that just simulate physical keyboard input.
async fn inject_input(&mut self, mut event: uii::InputEvent) {
let keyboard_event = match &event {
uii::InputEvent::Keyboard(e) => clone_keyboard_event(e),
_ => return,
};
let mut state = self.state.lock().await;
let ime = {
let active_ime_weak = match state.active_ime {
Some(ref v) => v,
None => return, // no currently active IME
};
match LegacyIme::upgrade(active_ime_weak) {
Some(active_ime) => active_ime,
None => return, // IME no longer exists
}
};
// Send the legacy ime a keystroke event to forward to connected clients. Even if a v2 input
// method is connected, this ensures legacy text fields are able to still see key events;
// something not yet provided by the new `TextField` API.
ime.forward_event(clone_keyboard_event(&keyboard_event)).await;
// Send the key event to any listening `TextInputContext` clients. If at least one still
// exists, we assume it handled it and converted it into an edit sent via its handle to the
// `TextField` protocol.
state.text_input_context_clients.retain(|listener| {
// drop listeners if they error on send
listener.send_on_input_event(&mut event).is_ok()
});
// If no `TextInputContext` clients handled the input event, or if there are none connected,
// we allow the internal input method inside of `LegacyIme` to convert this key event into
// an edit.
if state.text_input_context_clients.len() == 0 {
ime.inject_input(keyboard_event).await;
}
}
pub fn bind_ime_service(&self, mut stream: uii::ImeServiceRequestStream) {
let mut self_clone = self.clone();
fuchsia_async::spawn(
async move {
while let Some(msg) = stream
.try_next()
.await
.context("error reading value from IME service request stream")?
{
self_clone
.handle_ime_service_msg(msg)
.await
.context("Handle IME service messages")?
}
Ok(())
}
.unwrap_or_else(|e: failure::Error| fx_log_err!("{:?}", e)),
);
}
pub async fn handle_ime_service_msg(
&mut self,
msg: uii::ImeServiceRequest,
) -> Result<(), Error> {
match msg {
uii::ImeServiceRequest::GetInputMethodEditor {
keyboard_type,
action,
initial_state,
client,
editor,
..
} => {
self.get_input_method_editor(keyboard_type, action, initial_state, client, editor)
.await;
}
uii::ImeServiceRequest::ShowKeyboard { .. } => {
self.show_keyboard().await;
}
uii::ImeServiceRequest::HideKeyboard { .. } => {
self.hide_keyboard().await;
}
uii::ImeServiceRequest::InjectInput { event, .. } => {
fx_vlog!(tag: "ime", 1, "InjectInput triggered: {:?}", event);
self.inject_input(event).await;
}
uii::ImeServiceRequest::DispatchKey { .. } => {
// Transitional: DispatchKey should be handled by keyboard/Service.
// See Service.spawn_ime_service() for handling DispatchKey.
// In future, Keyboard service will receive keys directly.
panic!("Should be handled by keyboard service");
}
}
Ok(())
}
pub fn bind_ime_visibility_service(&self, stream: uii::ImeVisibilityServiceRequestStream) {
let self_clone = self.clone();
fuchsia_async::spawn(
async move {
let control_handle = stream.control_handle();
let mut state = self_clone.state.lock().await;
if control_handle
.send_on_keyboard_visibility_changed(state.keyboard_visible)
.is_ok()
{
state.visibility_listeners.push(control_handle);
}
Ok(())
}
.unwrap_or_else(|e: failure::Error| fx_log_err!("{:?}", e)),
);
}
pub fn bind_text_input_context(&self, mut stream: txt::TextInputContextRequestStream) {
let self_clone = self.clone();
fuchsia_async::spawn(
async move {
let control_handle = stream.control_handle();
{
let mut state = self_clone.state.lock().await;
if let Some(multiplexer) = &state.multiplexer {
if let Err(e) = bind_new_text_field(multiplexer, &control_handle) {
fx_log_err!("Error when binding text field for newly connected TextInputContext: {}", e);
}
}
state.text_input_context_clients.push(control_handle)
}
while let Some(msg) = stream.try_next().await
.context("error reading value from text input context request stream")?
{
match msg {
txt::TextInputContextRequest::HideKeyboard { .. } => {
self_clone.hide_keyboard().await;
}
}
}
Ok(())
}
.unwrap_or_else(|e: failure::Error| fx_log_err!("{:?}", e)),
);
}
}
pub fn bind_new_text_field(
multiplexer: &TextFieldMultiplexer,
control_handle: &txt::TextInputContextControlHandle,
) -> Result<(), fidl::Error> {
let (client_end, request_stream) =
fidl::endpoints::create_request_stream::<txt::TextFieldMarker>()
.expect("Failed to create text field request stream");
multiplexer.add_request_stream(request_stream);
control_handle.send_on_focus(client_end)
}
#[cfg(test)]
mod test {
use super::*;
use crate::fidl_helpers::default_state;
use crate::legacy_ime::{HID_USAGE_KEY_ENTER, HID_USAGE_KEY_LEFT};
use fidl;
use fidl_fuchsia_ui_input as uii;
use fuchsia_async as fasync;
use pin_utils::pin_mut;
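// Awaits the next DidUpdateState message from the editor client stream and returns the new
// text input state together with the keyboard event that triggered it, if any.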
async fn get_state_update(
editor_stream: &mut uii::InputMethodEditorClientRequestStream,
) -> (uii::TextInputState, Option<uii::KeyboardEvent>) {
let msg = editor_stream
.try_next()
.await
.expect("expected working event stream")
.expect("ime should have sent message");
if let uii::InputMethodEditorClientRequest::DidUpdateState { state, event, .. } = msg {
let keyboard_event = event.map(|e| {
if let uii::InputEvent::Keyboard(keyboard_event) = *e {
keyboard_event
} else {
panic!("expected DidUpdateState to only send Keyboard events");
}
});
(state, keyboard_event)
} else {
panic!("request should be DidUpdateState");
}
}
fn async_service_test<T, F>(test_fn: T)
where
T: FnOnce(uii::ImeServiceProxy, uii::ImeVisibilityServiceProxy) -> F,
F: Future,
{
let mut executor = fasync::Executor::new()
.expect("Creating fuchsia_async executor for IME service tests failed");
let ime_service = ImeService::new();
let ime_service_proxy = {
let (service_proxy, ime_stream) =
fidl::endpoints::create_proxy_and_stream::<uii::ImeServiceMarker>().unwrap();
ime_service.bind_ime_service(ime_stream);
service_proxy
};
let visibility_service_proxy = {
let (service_proxy, ime_vis_stream) =
fidl::endpoints::create_proxy_and_stream::<uii::ImeVisibilityServiceMarker>()
.unwrap();
ime_service.bind_ime_visibility_service(ime_vis_stream);
service_proxy
};
let done = test_fn(ime_service_proxy, visibility_service_proxy);
pin_mut!(done);
// this will return a non-ready future if the tests stall
let res = executor.run_until_stalled(&mut done);
assert!(res.is_ready());
}
fn bind_ime_for_test(
ime_service: &uii::ImeServiceProxy,
) -> (uii::InputMethodEditorProxy, uii::InputMethodEditorClientRequestStream) {
let (ime_proxy, ime_server_end) =
fidl::endpoints::create_proxy::<uii::InputMethodEditorMarker>().unwrap();
let (editor_client_end, editor_request_stream) =
fidl::endpoints::create_request_stream().unwrap();
ime_service
.get_input_method_editor(
uii::KeyboardType::Text,
uii::InputMethodAction::Done,
&mut default_state(),
editor_client_end,
ime_server_end,
)
.unwrap();
(ime_proxy, editor_request_stream)
}
fn simulate_keypress(ime_service: &uii::ImeServiceProxy, code_point: u32, hid_usage: u32) {
ime_service
.inject_input(&mut uii::InputEvent::Keyboard(uii::KeyboardEvent {
event_time: 0,
device_id: 0,
phase: uii::KeyboardEventPhase::Pressed,
hid_usage: hid_usage,
code_point: code_point,
modifiers: 0,
}))
.unwrap();
ime_service
.inject_input(&mut uii::InputEvent::Keyboard(uii::KeyboardEvent {
event_time: 0,
device_id: 0,
phase: uii::KeyboardEventPhase::Released,
hid_usage: hid_usage,
code_point: code_point,
modifiers: 0,
}))
.unwrap();
}
#[test]
fn test_visibility_service_sends_updates() {
async_service_test(|ime_service, visibility_service| {
async move {
let mut ev_stream = visibility_service.take_event_stream();
// expect initial update with current status
let msg = ev_stream
.try_next()
.await
.expect("expected working event stream")
.expect("visibility service should have sent message");
let uii::ImeVisibilityServiceEvent::OnKeyboardVisibilityChanged { visible } = msg;
assert_eq!(visible, false);
// expect that asking for the keyboard to close again results in another message
ime_service.hide_keyboard().unwrap();
let msg = ev_stream
.try_next()
.await
.expect("expected working event stream")
.expect("visibility service should have sent message");
let uii::ImeVisibilityServiceEvent::OnKeyboardVisibilityChanged { visible } = msg;
assert_eq!(visible, false);
                // expect asking for keyboard to open results in another message
ime_service.show_keyboard().unwrap();
let msg = ev_stream
.try_next()
.await
.expect("expected working event stream")
.expect("visibility service should have sent message");
let uii::ImeVisibilityServiceEvent::OnKeyboardVisibilityChanged { visible } = msg;
assert_eq!(visible, true);
// expect asking for keyboard to close/open from IME works
let (ime, _editor_stream) = bind_ime_for_test(&ime_service);
ime.hide().unwrap();
let msg = ev_stream
.try_next()
.await
.expect("expected working event stream")
.expect("visibility service should have sent message");
let uii::ImeVisibilityServiceEvent::OnKeyboardVisibilityChanged { visible } = msg;
assert_eq!(visible, false);
ime.show().unwrap();
let msg = ev_stream
.try_next()
.await
.expect("expected working event stream")
.expect("visibility service should have sent message");
let uii::ImeVisibilityServiceEvent::OnKeyboardVisibilityChanged { visible } = msg;
assert_eq!(visible, true);
}
});
}
#[test]
fn test_inject_input_updates_ime() {
async_service_test(|ime_service, _visibility_service| {
async move {
                // bind an IME so we can observe state updates for injected input
let (_ime, mut editor_stream) = bind_ime_for_test(&ime_service);
// type 'a'
simulate_keypress(&ime_service, 'a'.into(), 0);
// get first message with keypress event but no state update
let (state, event) = get_state_update(&mut editor_stream).await;
let event = event.expect("expected event to be set");
assert_eq!(event.phase, uii::KeyboardEventPhase::Pressed);
assert_eq!(event.code_point, 97);
assert_eq!(state.text, "");
// get second message with state update
let (state, event) = get_state_update(&mut editor_stream).await;
assert!(event.is_none());
assert_eq!(state.text, "a");
assert_eq!(state.selection.base, 1);
assert_eq!(state.selection.extent, 1);
// get third message with keyrelease event but no state update
let (state, event) = get_state_update(&mut editor_stream).await;
let event = event.expect("expected event to be set");
assert_eq!(event.phase, uii::KeyboardEventPhase::Released);
assert_eq!(event.code_point, 97);
assert_eq!(state.text, "a");
// press left arrow
simulate_keypress(&ime_service, 0, HID_USAGE_KEY_LEFT);
// get first message with keypress event but no state update
let (state, event) = get_state_update(&mut editor_stream).await;
let event = event.expect("expected event to be set");
assert_eq!(event.phase, uii::KeyboardEventPhase::Pressed);
assert_eq!(event.code_point, 0);
assert_eq!(event.hid_usage, HID_USAGE_KEY_LEFT);
assert_eq!(state.text, "a");
// get second message with state update
let (state, event) = get_state_update(&mut editor_stream).await;
assert!(event.is_none());
assert_eq!(state.text, "a");
assert_eq!(state.selection.base, 0);
assert_eq!(state.selection.extent, 0);
                // get third message with keyrelease event but no state update
let (state, event) = get_state_update(&mut editor_stream).await;
let event = event.expect("expected event to be set");
assert_eq!(event.phase, uii::KeyboardEventPhase::Released);
assert_eq!(event.code_point, 0);
assert_eq!(event.hid_usage, HID_USAGE_KEY_LEFT);
assert_eq!(state.text, "a");
}
});
}
#[test]
fn test_inject_input_sends_action() {
async_service_test(|ime_service, _visibility_service| {
async move {
let (_ime, mut editor_stream) = bind_ime_for_test(&ime_service);
// send key events
simulate_keypress(&ime_service, 0, HID_USAGE_KEY_ENTER);
// get first message with keypress event
let (_state, event) = get_state_update(&mut editor_stream).await;
let event = event.expect("expected event to be set");
assert_eq!(event.phase, uii::KeyboardEventPhase::Pressed);
assert_eq!(event.code_point, 0);
assert_eq!(event.hid_usage, HID_USAGE_KEY_ENTER);
// get second message with onaction event
let msg = editor_stream
.try_next()
.await
.expect("expected working event stream")
.expect("ime should have sent message");
if let uii::InputMethodEditorClientRequest::OnAction { action, .. } = msg {
assert_eq!(action, uii::InputMethodAction::Done);
} else {
panic!("request should be OnAction");
}
}
})
}
}
|
use crate::multiplex::TextFieldMultiplexer;
use failure::{Error, ResultExt};
|
TestFAEModel.py
|
import os
import csv
import numpy as np
from FAE.FeatureAnalysis.Normalizer import Normalizer
from FAE.DataContainer.DataContainer import DataContainer
from FAE.FeatureAnalysis.Classifier import Classifier
from FAE.Func.Metric import EstimateMetirc
from FAE.FeatureAnalysis.FeatureSelector import FeatureSelector
from FAE.FeatureAnalysis.CrossValidation import CrossValidation
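# Rebuild the trained pipeline (normalizer, selected feature names and
# classifier) from the artifacts saved in model_folder.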
def LoadTrainInfo(model_folder):
train_info = {}
    ## Load normalization
normalizer = Normalizer()
normalization_path = ''
for sub_file in os.listdir(model_folder):
if sub_file.rfind('_normalization_training.csv') != -1:
normalization_path = os.path.join(model_folder, sub_file)
if not os.path.exists(normalization_path):
print('Check the normalization name : zero_center_normalization')
else:
normalizer.Load(normalization_path)
train_info['normalizer'] = normalizer
## Load selected features
selected_feature_path = os.path.join(model_folder, 'feature_select_info.csv')
selected_feature_list = []
with open(selected_feature_path, 'r', newline='') as f:
f_reader = csv.reader(f)
for index in f_reader:
if index[0] == 'selected_feature':
selected_feature_list = index[1:]
if selected_feature_list == []:
print('No selected features')
train_info['selected_features'] = selected_feature_list
## Load FAE model
classifier = Classifier()
classifier.Load(model_folder)
train_info['classifier'] = classifier
return train_info
def
|
(NewDataCsv, model_folder, result_save_path):
    '''
    :param NewDataCsv: New radiomics feature matrix csv file path
    :param model_folder: The trained model path
    :param result_save_path: Folder in which the prediction results are saved
    :return: classification result (metric dictionary)
    '''
train_info = LoadTrainInfo(model_folder)
new_data_container = DataContainer()
    # Normalization
new_data_container.Load(NewDataCsv)
feature_selector = FeatureSelector()
feature_selector.SelectFeatureByName(new_data_container, train_info['selected_features'], is_replace=True)
new_data_container = train_info['normalizer'].Transform(new_data_container)
# data_frame = new_data_container.GetFrame()
# data_frame = data_frame[train_info['selected_features']]
# new_data_container.SetFrame(data_frame)
# new_data_container.UpdateDataByFrame()
##Model
train_info['classifier'].SetDataContainer(new_data_container)
model = train_info['classifier'].GetModel()
predict = model.predict_proba(new_data_container.GetArray())[:, 1]
label = new_data_container.GetLabel()
case_name = new_data_container.GetCaseName()
np.save(os.path.join(result_save_path, 'test_predict.npy'), predict)
np.save(os.path.join(result_save_path, 'test_label.npy'), label)
test_result_info = [['CaseName', 'Pred', 'Label']]
for index in range(len(label)):
test_result_info.append([case_name[index], predict[index], label[index]])
with open(os.path.join(result_save_path, 'test_info.csv'), 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(test_result_info)
metric = EstimateMetirc(predict, label)
info = {}
info.update(metric)
cv = CrossValidation()
cv.SaveResult(info, result_save_path)
# print(metric)
return metric
if __name__ == '__main__':
TestNewData(r'D:\hospital\Huangli\smote\test_numeric_feature.csv',
r'D:\hospital\Huangli\smote\process-result\Norm0Center_PCC_ANOVA_5_LR',
r'D:\MyScript\demo')
|
TestNewData
|
DrawFlexes.ts
|
namespace Flexagonator {
// draw possible flexes & create buttons that understand the associated flexes
export function
|
(ctx: CanvasRenderingContext2D, regions: RegionForFlexes[], height: number): ScriptButtons {
const buttons = new ButtonsBuilder();
ctx.font = height + "px sans-serif";
ctx.fillStyle = "rgb(0, 0, 0)";
ctx.textAlign = "left";
ctx.textBaseline = "bottom";
    for (const region of regions) {
      addFlexes(ctx, region, height, buttons);
    }
return buttons.create();
}
// draw each flex and add a button that knows how to apply the flex
function addFlexes(ctx: CanvasRenderingContext2D, region: RegionForFlexes, h: number,
/*output*/ buttons: ButtonsBuilder) {
const spaceWidth = ctx.measureText(' ').width;
const pad = 3;
const y = region.corner.y + h;
let x = region.corner.x;
for (let flex of region.flexes) {
const metrics = ctx.measureText(flex);
const thisx = region.isOnLeft ? x - metrics.width : x;
const thisy = region.isOnTop ? y - h : y;
const thisflex = region.prefix + flex + region.postfix;
const thisWidth = metrics.width;
ctx.fillText(flex, thisx, thisy);
buttons.addFlexButton({ x: thisx - pad, y: thisy - h - pad, w: thisWidth + pad * 2, h: h + pad * 2 }, thisflex);
x = region.isOnLeft ? x - thisWidth - spaceWidth : x + thisWidth + spaceWidth;
}
}
}
|
drawPossibleFlexes
|
rest.py
|
# coding: utf-8
"""
Tradenity API
Tradenity eCommerce Rest API
Contact: [email protected]
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
# if not set certificate file, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
# must del headers['Content-Type'], or the correct
# Content-Type which generated by urllib3 will be
# overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
            # In Python 3, response.data is bytes;
            # decode it to a string.
if six.PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
|
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""Custom error messages for exception"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(
self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
|
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
|
spidevtest.py
|
#!/usr/bin/env python
#
# Bitbang'd SPI interface with an MCP3008 ADC device
# MCP3008 is 8-channel 10-bit analog to digital converter
# Connections are:
# CLK => SCLK
# DOUT => MISO
# DIN => MOSI
# CS => CE0
import time
import sys
import spidev
spi = spidev.SpiDev()
spi.open(0,0)
def buildReadCommand(channel):
|
def processAdcValue(result):
    '''Take in result as array of three bytes.
    Return the two lowest bits of the 2nd byte and
    all of the third byte'''
    # Combine the 10-bit reading: the low 2 bits of byte 2 are the high bits,
    # byte 3 holds the low 8 bits.
    return ((result[1] & 0x03) << 8) | result[2]
def readAdc(channel):
if ((channel > 7) or (channel < 0)):
return -1
r = spi.xfer2(buildReadCommand(channel))
return processAdcValue(r)
if __name__ == '__main__':
try:
while True:
val = readAdc(0)
            print("ADC Result: " + str(val))
time.sleep(5)
except KeyboardInterrupt:
spi.close()
sys.exit(0)
|
    startBit = 0x01
    singleEnded = 0x08
    # Return a python list of 3 bytes:
    # first byte is the start bit, second byte carries the single-ended flag
    # plus the channel number in its high nibble, third byte is 0
    # (a placeholder so three bytes are clocked back from the ADC).
    return [startBit, (singleEnded | channel) << 4, 0]
|
graphqlizer.go
|
package graphqlizer
import (
"bytes"
"encoding/json"
"reflect"
"strconv"
"text/template"
"github.com/Masterminds/sprig"
"github.com/kyma-incubator/compass/components/director/pkg/graphql"
"github.com/pkg/errors"
|
// Graphqlizer is responsible for converting Go objects to input arguments in graphql format
type Graphqlizer struct{}
func (g *Graphqlizer) ApplicationRegisterInputToGQL(in graphql.ApplicationRegisterInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{.Name}}",
{{- if .ProviderName }}
providerName: "{{ .ProviderName }}",
{{- end }}
{{- if .Description }}
description: "{{ .Description }}",
{{- end }}
{{- if .Labels }}
labels: {{ LabelsToGQL .Labels}},
{{- end }}
{{- if .Webhooks }}
webhooks: [
{{- range $i, $e := .Webhooks }}
{{- if $i}}, {{- end}} {{ WebhookInputToGQL $e }}
{{- end }} ],
{{- end}}
{{- if .HealthCheckURL }}
healthCheckURL: "{{ .HealthCheckURL }}",
{{- end }}
{{- if .Packages }}
packages: [
{{- range $i, $e := .Packages }}
{{- if $i}}, {{- end}} {{- PackageCreateInputToGQL $e }}
{{- end }} ],
{{- end }}
{{- if .IntegrationSystemID }}
integrationSystemID: "{{ .IntegrationSystemID }}",
{{- end }}
{{- if .StatusCondition }}
statusCondition: {{ .StatusCondition }}
{{- end }}
}`)
}
func (g *Graphqlizer) ApplicationUpdateInputToGQL(in graphql.ApplicationUpdateInput) (string, error) {
return g.genericToGQL(in, `{
{{- if .ProviderName }}
providerName: "{{ .ProviderName }}",
{{- end }}
{{- if .Description }}
description: "{{.Description}}",
{{- end }}
{{- if .HealthCheckURL }}
healthCheckURL: "{{ .HealthCheckURL }}",
{{- end }}
{{- if .IntegrationSystemID }}
integrationSystemID: "{{ .IntegrationSystemID }}",
{{- end }}
{{- if .StatusCondition }}
statusCondition: {{ .StatusCondition }}
{{- end }}
}`)
}
func (g *Graphqlizer) ApplicationTemplateInputToGQL(in graphql.ApplicationTemplateInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{.Name}}",
{{- if .Description }}
description: "{{.Description}}",
{{- end }}
applicationInput: {{ ApplicationRegisterInputToGQL .ApplicationInput}},
{{- if .Placeholders }}
placeholders: [
{{- range $i, $e := .Placeholders }}
{{- if $i}}, {{- end}} {{ PlaceholderDefinitionInputToGQL $e }}
{{- end }} ],
{{- end }}
accessLevel: {{.AccessLevel}},
}`)
}
func (g *Graphqlizer) DocumentInputToGQL(in *graphql.DocumentInput) (string, error) {
return g.genericToGQL(in, `{
title: "{{.Title}}",
displayName: "{{.DisplayName}}",
description: "{{.Description}}",
format: {{.Format}},
{{- if .Kind }}
kind: "{{.Kind}}",
{{- end}}
{{- if .Data }}
data: "{{.Data}}",
{{- end}}
{{- if .FetchRequest }}
fetchRequest: {{- FetchRequesstInputToGQL .FetchRequest }},
{{- end}}
}`)
}
func (g *Graphqlizer) FetchRequestInputToGQL(in *graphql.FetchRequestInput) (string, error) {
return g.genericToGQL(in, `{
url: "{{.URL}}",
{{- if .Auth }}
auth: {{- AuthInputToGQL .Auth }},
{{- end }}
{{- if .Mode }}
mode: {{.Mode}},
{{- end}}
{{- if .Filter}}
filter: "{{.Filter}}",
{{- end}}
}`)
}
func (g *Graphqlizer) CredentialRequestAuthInputToGQL(in *graphql.CredentialRequestAuthInput) (string, error) {
return g.genericToGQL(in, `{
{{- if .Csrf }}
csrf: {{ CSRFTokenCredentialRequestAuthInputToGQL .Csrf }},
{{- end }}
}`)
}
func (g *Graphqlizer) CredentialDataInputToGQL(in *graphql.CredentialDataInput) (string, error) {
return g.genericToGQL(in, ` {
{{- if .Basic }}
basic: {
username: "{{ .Basic.Username }}",
password: "{{ .Basic.Password }}",
},
{{- end }}
{{- if .Oauth }}
oauth: {
clientId: "{{ .Oauth.ClientID }}",
clientSecret: "{{ .Oauth.ClientSecret }}",
url: "{{ .Oauth.URL }}",
},
{{- end }}
}`)
}
func (g *Graphqlizer) CSRFTokenCredentialRequestAuthInputToGQL(in *graphql.CSRFTokenCredentialRequestAuthInput) (string, error) {
in.AdditionalHeadersSerialized = quoteHTTPHeadersSerialized(in.AdditionalHeadersSerialized)
in.AdditionalQueryParamsSerialized = quoteQueryParamsSerialized(in.AdditionalQueryParamsSerialized)
return g.genericToGQL(in, `{
tokenEndpointURL: "{{ .TokenEndpointURL }}",
{{- if .Credential }}
credential: {{ CredentialDataInputToGQL .Credential }},
{{- end }}
{{- if .AdditionalHeaders }}
additionalHeaders: {{ HTTPHeadersToGQL .AdditionalHeaders }},
{{- end }}
{{- if .AdditionalHeadersSerialized }}
additionalHeadersSerialized: {{ .AdditionalHeadersSerialized }},
{{- end }}
{{- if .AdditionalQueryParams }}
additionalQueryParams: {{ QueryParamsToGQL .AdditionalQueryParams }},
{{- end }}
{{- if .AdditionalQueryParamsSerialized }}
additionalQueryParamsSerialized: {{ .AdditionalQueryParamsSerialized }},
{{- end }}
}`)
}
func (g *Graphqlizer) AuthInputToGQL(in *graphql.AuthInput) (string, error) {
in.AdditionalHeadersSerialized = quoteHTTPHeadersSerialized(in.AdditionalHeadersSerialized)
in.AdditionalQueryParamsSerialized = quoteQueryParamsSerialized(in.AdditionalQueryParamsSerialized)
return g.genericToGQL(in, `{
{{- if .Credential }}
credential: {{ CredentialDataInputToGQL .Credential }},
{{- end }}
{{- if .AdditionalHeaders }}
additionalHeaders: {{ HTTPHeadersToGQL .AdditionalHeaders }},
{{- end }}
{{- if .AdditionalHeadersSerialized }}
additionalHeadersSerialized: {{ .AdditionalHeadersSerialized }},
{{- end }}
{{- if .AdditionalQueryParams }}
additionalQueryParams: {{ QueryParamsToGQL .AdditionalQueryParams}},
{{- end }}
{{- if .AdditionalQueryParamsSerialized }}
additionalQueryParamsSerialized: {{ .AdditionalQueryParamsSerialized }},
{{- end }}
{{- if .RequestAuth }}
requestAuth: {{ CredentialRequestAuthInputToGQL .RequestAuth }},
{{- end }}
}`)
}
func (g *Graphqlizer) LabelsToGQL(in graphql.Labels) (string, error) {
return g.marshal(in), nil
}
func (g *Graphqlizer) HTTPHeadersToGQL(in graphql.HttpHeaders) (string, error) {
return g.genericToGQL(in, `{
{{- range $k,$v := . }}
{{$k}}: [
{{- range $i,$j := $v }}
{{- if $i}},{{- end}}"{{$j}}"
{{- end }} ],
{{- end}}
}`)
}
func (g *Graphqlizer) QueryParamsToGQL(in graphql.QueryParams) (string, error) {
return g.genericToGQL(in, `{
{{- range $k,$v := . }}
{{$k}}: [
{{- range $i,$j := $v }}
{{- if $i}},{{- end}}"{{$j}}"
{{- end }} ],
{{- end}}
}`)
}
func (g *Graphqlizer) WebhookInputToGQL(in *graphql.WebhookInput) (string, error) {
return g.genericToGQL(in, `{
type: {{.Type}},
url: "{{.URL }}",
{{- if .Auth }}
auth: {{- AuthInputToGQL .Auth }},
{{- end }}
}`)
}
func (g *Graphqlizer) APIDefinitionInputToGQL(in graphql.APIDefinitionInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{ .Name}}",
{{- if .Description }}
description: "{{.Description}}",
{{- end}}
targetURL: "{{.TargetURL}}",
{{- if .Group }}
group: "{{.Group}}",
{{- end }}
{{- if .Spec }}
spec: {{- ApiSpecInputToGQL .Spec }},
{{- end }}
{{- if .Version }}
version: {{- VersionInputToGQL .Version }},
{{- end}}
}`)
}
func (g *Graphqlizer) EventDefinitionInputToGQL(in graphql.EventDefinitionInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{.Name}}",
{{- if .Description }}
description: "{{.Description}}",
{{- end }}
{{- if .Spec }}
spec: {{ EventAPISpecInputToGQL .Spec }},
{{- end }}
{{- if .Group }}
group: "{{.Group}}",
{{- end }}
{{- if .Version }}
version: {{- VersionInputToGQL .Version }},
{{- end}}
}`)
}
func (g *Graphqlizer) EventAPISpecInputToGQL(in graphql.EventSpecInput) (string, error) {
in.Data = quoteCLOB(in.Data)
return g.genericToGQL(in, `{
{{- if .Data }}
data: {{.Data}},
{{- end }}
type: {{.Type}},
{{- if .FetchRequest }}
fetchRequest: {{- FetchRequesstInputToGQL .FetchRequest }},
{{- end }}
format: {{.Format}},
}`)
}
func (g *Graphqlizer) ApiSpecInputToGQL(in graphql.APISpecInput) (string, error) {
in.Data = quoteCLOB(in.Data)
return g.genericToGQL(in, `{
{{- if .Data}}
data: {{.Data}},
{{- end}}
type: {{.Type}},
format: {{.Format}},
{{- if .FetchRequest }}
fetchRequest: {{- FetchRequesstInputToGQL .FetchRequest }},
{{- end }}
}`)
}
func (g *Graphqlizer) VersionInputToGQL(in graphql.VersionInput) (string, error) {
return g.genericToGQL(in, `{
value: "{{.Value}}",
{{- if .Deprecated }}
deprecated: {{.Deprecated}},
{{- end}}
{{- if .DeprecatedSince }}
deprecatedSince: "{{.DeprecatedSince}}",
{{- end}}
{{- if .ForRemoval }}
forRemoval: {{.ForRemoval }},
{{- end }}
}`)
}
func (g *Graphqlizer) RuntimeInputToGQL(in graphql.RuntimeInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{.Name}}",
{{- if .Description }}
description: "{{.Description}}",
{{- end }}
{{- if .Labels }}
labels: {{ LabelsToGQL .Labels}},
{{- end }}
{{- if .StatusCondition }}
statusCondition: {{ .StatusCondition }},
{{- end }}
}`)
}
func (g *Graphqlizer) LabelDefinitionInputToGQL(in graphql.LabelDefinitionInput) (string, error) {
return g.genericToGQL(in, `{
key: "{{.Key}}",
{{- if .Schema }}
schema: {{.Schema}},
{{- end }}
}`)
}
func (g *Graphqlizer) LabelFilterToGQL(in graphql.LabelFilter) (string, error) {
return g.genericToGQL(in, `{
key: "{{.Key}}",
{{- if .Query }}
query: "{{.Query}}",
{{- end }}
}`)
}
func (g *Graphqlizer) IntegrationSystemInputToGQL(in graphql.IntegrationSystemInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{.Name}}",
{{- if .Description }}
description: "{{.Description}}",
{{- end }}
}`)
}
func (g *Graphqlizer) PlaceholderDefinitionInputToGQL(in graphql.PlaceholderDefinitionInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{.Name}}",
{{- if .Description }}
description: "{{.Description}}",
{{- end }}
}`)
}
func (g *Graphqlizer) TemplateValueInputToGQL(in graphql.TemplateValueInput) (string, error) {
return g.genericToGQL(in, `{
placeholder: "{{.Placeholder}}"
value: "{{.Value}}"
}`)
}
func (g *Graphqlizer) ApplicationFromTemplateInputToGQL(in graphql.ApplicationFromTemplateInput) (string, error) {
return g.genericToGQL(in, `{
templateName: "{{.TemplateName}}"
{{- if .Values }}
values: [
{{- range $i, $e := .Values }}
{{- if $i}}, {{- end}} {{ TemplateValueInput $e }}
{{- end }} ],
{{- end }},
}`)
}
func (g *Graphqlizer) PackageCreateInputToGQL(in graphql.PackageCreateInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{ .Name }}"
{{- if .Description }}
description: "{{ .Description }}"
{{- end }}
{{- if .InstanceAuthRequestInputSchema }}
instanceAuthRequestInputSchema: {{ .InstanceAuthRequestInputSchema }}
{{- end }}
{{- if .DefaultInstanceAuth }}
defaultInstanceAuth: {{- AuthInputToGQL .DefaultInstanceAuth }}
{{- end }}
{{- if .APIDefinitions }}
apiDefinitions: [
{{- range $i, $e := .APIDefinitions }}
{{- if $i}}, {{- end}} {{ APIDefinitionInputToGQL $e }}
{{- end }}],
{{- end }}
{{- if .EventDefinitions }}
eventDefinitions: [
{{- range $i, $e := .EventDefinitions }}
{{- if $i}}, {{- end}} {{ EventDefinitionInputToGQL $e }}
{{- end }}],
{{- end }}
{{- if .Documents }}
documents: [
{{- range $i, $e := .Documents }}
{{- if $i}}, {{- end}} {{- DocumentInputToGQL $e }}
{{- end }} ],
{{- end }}
}`)
}
func (g *Graphqlizer) PackageUpdateInputToGQL(in graphql.PackageUpdateInput) (string, error) {
return g.genericToGQL(in, `{
name: "{{ .Name }}"
{{- if .Description }}
description: "{{ .Description }}"
{{- end }}
{{- if .InstanceAuthRequestInputSchema }}
instanceAuthRequestInputSchema: {{ .InstanceAuthRequestInputSchema }}
{{- end }}
{{- if .DefaultInstanceAuth }}
defaultInstanceAuth: {{- AuthInputToGQL .DefaultInstanceAuth }}
{{- end }}
}`)
}
func (g *Graphqlizer) PackageInstanceAuthStatusInputToGQL(in graphql.PackageInstanceAuthStatusInput) (string, error) {
return g.genericToGQL(in, `{
condition: {{ .Condition }}
{{- if .Message }}
message: "{{ .Message }}"
{{- end }}
{{- if .Reason }}
reason: "{{ .Reason }}"
{{- end }}
}`)
}
func (g *Graphqlizer) PackageInstanceAuthRequestInputToGQL(in graphql.PackageInstanceAuthRequestInput) (string, error) {
return g.genericToGQL(in, `{
{{- if .Context }}
context: {{ .Context }}
{{- end }}
{{- if .InputParams }}
inputParams: {{ .InputParams }}
{{- end }}
}`)
}
func (g *Graphqlizer) PackageInstanceAuthSetInputToGQL(in graphql.PackageInstanceAuthSetInput) (string, error) {
return g.genericToGQL(in, `{
{{- if .Auth }}
auth: {{- AuthInputToGQL .Auth}}
{{- end }}
{{- if .Status }}
status: {{- PackageInstanceAuthStatusInputToGQL .Status }}
{{- end }}
}`)
}
func (g *Graphqlizer) LabelSelectorInputToGQL(in graphql.LabelSelectorInput) (string, error) {
return g.genericToGQL(in, `{
key: "{{ .Key }}"
value: "{{ .Value }}"
}`)
}
func (g *Graphqlizer) AutomaticScenarioAssignmentSetInputToGQL(in graphql.AutomaticScenarioAssignmentSetInput) (string, error) {
return g.genericToGQL(in, `{
scenarioName: "{{ .ScenarioName }}"
selector: {{- LabelSelectorInputToGQL .Selector }}
}`)
}
func (g *Graphqlizer) marshal(obj interface{}) string {
var out string
val := reflect.ValueOf(obj)
switch val.Kind() {
case reflect.Map:
s, err := g.genericToGQL(obj, `{ {{- range $k, $v := . }}{{ $k }}:{{ marshal $v }},{{ end -}} }`)
if err != nil {
return ""
}
out = s
case reflect.Slice, reflect.Array:
s, err := g.genericToGQL(obj, `[{{ range $i, $e := . }}{{ if $i }},{{ end }}{{ marshal $e }}{{ end }}]`)
if err != nil {
return ""
}
out = s
default:
marshalled, err := json.Marshal(obj)
if err != nil {
return ""
}
out = string(marshalled)
}
return out
}
func (g *Graphqlizer) genericToGQL(obj interface{}, tmpl string) (string, error) {
fm := sprig.TxtFuncMap()
fm["marshal"] = g.marshal
fm["ApplicationRegisterInputToGQL"] = g.ApplicationRegisterInputToGQL
fm["DocumentInputToGQL"] = g.DocumentInputToGQL
fm["FetchRequesstInputToGQL"] = g.FetchRequestInputToGQL
fm["AuthInputToGQL"] = g.AuthInputToGQL
fm["LabelsToGQL"] = g.LabelsToGQL
fm["WebhookInputToGQL"] = g.WebhookInputToGQL
fm["APIDefinitionInputToGQL"] = g.APIDefinitionInputToGQL
fm["EventDefinitionInputToGQL"] = g.EventDefinitionInputToGQL
fm["ApiSpecInputToGQL"] = g.ApiSpecInputToGQL
fm["VersionInputToGQL"] = g.VersionInputToGQL
fm["HTTPHeadersToGQL"] = g.HTTPHeadersToGQL
fm["QueryParamsToGQL"] = g.QueryParamsToGQL
fm["EventAPISpecInputToGQL"] = g.EventAPISpecInputToGQL
fm["CredentialDataInputToGQL"] = g.CredentialDataInputToGQL
fm["CSRFTokenCredentialRequestAuthInputToGQL"] = g.CSRFTokenCredentialRequestAuthInputToGQL
fm["CredentialRequestAuthInputToGQL"] = g.CredentialRequestAuthInputToGQL
fm["PlaceholderDefinitionInputToGQL"] = g.PlaceholderDefinitionInputToGQL
fm["TemplateValueInput"] = g.TemplateValueInputToGQL
fm["PackageInstanceAuthStatusInputToGQL"] = g.PackageInstanceAuthStatusInputToGQL
fm["PackageCreateInputToGQL"] = g.PackageCreateInputToGQL
fm["LabelSelectorInputToGQL"] = g.LabelSelectorInputToGQL
t, err := template.New("tmpl").Funcs(fm).Parse(tmpl)
if err != nil {
return "", errors.Wrapf(err, "while parsing template")
}
var b bytes.Buffer
if err := t.Execute(&b, obj); err != nil {
return "", errors.Wrap(err, "while executing template")
}
return b.String(), nil
}
func quoteCLOB(in *graphql.CLOB) *graphql.CLOB {
if in == nil {
return nil
}
quoted := strconv.Quote(string(*in))
	return (*graphql.CLOB)(&quoted)
}
func quoteHTTPHeadersSerialized(in *graphql.HttpHeadersSerialized) *graphql.HttpHeadersSerialized {
if in == nil {
return nil
}
quoted := strconv.Quote(string(*in))
	return (*graphql.HttpHeadersSerialized)(&quoted)
}
func quoteQueryParamsSerialized(in *graphql.QueryParamsSerialized) *graphql.QueryParamsSerialized {
if in == nil {
return nil
}
quoted := strconv.Quote(string(*in))
	return (*graphql.QueryParamsSerialized)(&quoted)
}
|
)
|
vmops.py
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import collections
import copy
import os
import time
import decorator
from oslo.config import cfg
from oslo.vmware import exceptions as vexc
from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmware_images
CONF = cfg.CONF
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
RESIZE_TOTAL_STEPS = 4
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VirtualMachineInstanceConfigInfo(object):
"""Parameters needed to create and configure a new instance."""
def __init__(self, instance, instance_name, image_info,
datastore, dc_info, image_cache):
# Some methods called during spawn take the instance parameter purely
# for logging purposes.
# TODO(vui) Clean them up, so we no longer need to keep this variable
self.instance = instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
self.instance_name = instance_name or instance.uuid
self.ii = image_info
self.root_gb = instance.root_gb
self.datastore = datastore
self.dc_info = dc_info
self._image_cache = image_cache
@property
def cache_image_folder(self):
if self.ii.image_id is None:
return
return self._image_cache.get_image_cache_folder(
self.datastore, self.ii.image_id)
@property
def cache_image_path(self):
if self.ii.image_id is None:
return
cached_image_file_name = "%s.%s" % (self.ii.image_id,
self.ii.file_type)
return self.cache_image_folder.join(cached_image_file_name)
# Note(vui): See https://bugs.launchpad.net/nova/+bug/1363349
# for cases where mocking time.sleep() can have unintended effects on code
# not under test. For now, unblock the affected test cases by providing
# a wrapper function to work around needing to mock time.sleep()
def _time_sleep_wrapper(delay):
time.sleep(delay)
@decorator.decorator
def retry_if_task_in_progress(f, *args, **kwargs):
retries = max(CONF.vmware.api_retry_count, 1)
delay = 1
for attempt in range(1, retries + 1):
if attempt != 1:
_time_sleep_wrapper(delay)
delay = min(2 * delay, 60)
try:
f(*args, **kwargs)
return
except error_util.TaskInProgress:
pass
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session, virtapi, volumeops, cluster=None,
datastore_regex=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
self._cluster = cluster
self._datastore_regex = datastore_regex
# Ensure that the base folder is unique per compute node
if CONF.remove_unused_base_images:
self._base_folder = '%s%s' % (CONF.my_ip,
CONF.image_cache_subdirectory_name)
else:
            # With aging disabled, keep the plain folder name for backward compatibility
self._base_folder = CONF.image_cache_subdirectory_name
self._tmp_folder = 'vmware_temp'
self._default_root_device = 'vda'
self._rescue_suffix = '-rescue'
self._migrate_suffix = '-orig'
self._datastore_dc_mapping = {}
self._datastore_browser_mapping = {}
self._imagecache = imagecache.ImageCacheManager(self._session,
self._base_folder)
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session._get_vim().service_content
LOG.debug("Extending root virtual disk to %s", requested_size)
vmdk_extend_task = self._session._call_method(
self._session._get_vim(),
"ExtendVirtualDisk_Task",
service_content.virtualDiskManager,
name=name,
datacenter=dc_ref,
newCapacityKb=requested_size,
eagerZero=False)
try:
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Extending virtual disk failed with error: %s'),
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
ds_path = ds_util.DatastorePath.parse(file)
self._delete_datastore_file(instance, ds_path, dc_ref)
LOG.debug("Extended root virtual disk")
def _delete_datastore_file(self, instance, datastore_path, dc_ref):
try:
ds_util.file_delete(self._session, datastore_path, dc_ref)
except (vexc.CannotDeleteFileException,
vexc.FileFaultException,
vexc.FileLockedException,
vexc.FileNotFoundException):
LOG.debug("Unable to delete %(ds)s. There may be more than "
"one process or thread trying to delete the file",
{'ds': datastore_path},
exc_info=True)
def _extend_if_required(self, dc_info, image_info, instance,
root_vmdk_path):
"""Increase the size of the root vmdk if necessary."""
if instance.root_gb > image_info.file_size_in_gb:
size_in_kb = instance.root_gb * units.Mi
self._extend_virtual_disk(instance, size_in_kb,
root_vmdk_path, dc_info.ref)
def _configure_config_drive(self, instance, vm_ref, dc_info, datastore,
injected_files, admin_password):
session_vim = self._session._get_vim()
cookies = session_vim.client.options.transport.cookiejar
uploaded_iso_path = self._create_config_drive(instance,
injected_files,
admin_password,
datastore.name,
dc_info.name,
instance['uuid'],
cookies)
uploaded_iso_path = datastore.build_path(uploaded_iso_path)
self._attach_cdrom_to_vm(
vm_ref, instance,
datastore.ref,
str(uploaded_iso_path))
def build_virtual_machine(self, instance, instance_name, image_info,
dc_info, datastore, network_info):
node_mo_id = vm_util.get_mo_id_from_instance(instance)
res_pool_ref = vm_util.get_res_pool_ref(self._session,
self._cluster, node_mo_id)
vif_infos = vmwarevif.get_vif_info(self._session,
self._cluster,
utils.is_neutron(),
image_info.vif_model,
network_info)
allocations = self._get_cpu_allocations(instance.instance_type_id)
# Get the create vm config spec
client_factory = self._session._get_vim().client.factory
config_spec = vm_util.get_vm_create_spec(client_factory,
instance,
instance_name,
datastore.name,
vif_infos,
image_info.os_type,
allocations=allocations)
# Create the VM
vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
config_spec, res_pool_ref)
return vm_ref
def _get_cpu_allocations(self, instance_type_id):
# Read flavors for allocations
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance_type_id)
allocations = {}
for (key, type) in (('cpu_limit', int),
('cpu_reservation', int),
('cpu_shares_level', str),
('cpu_shares_share', int)):
value = flavor.extra_specs.get('quota:' + key)
if value:
allocations[key] = type(value)
return allocations
def _fetch_image_as_file(self, context, vi, image_ds_loc):
"""Download image as an individual file to host via HTTP PUT."""
session = self._session
session_vim = session._get_vim()
cookies = session_vim.client.options.transport.cookiejar
LOG.debug("Downloading image file data %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
vmware_images.fetch_image(
context,
vi.instance,
session._host,
vi.dc_info.name,
vi.datastore.name,
image_ds_loc.rel_path,
cookies=cookies)
def _prepare_sparse_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, "tmp-sparse.vmdk")
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_flat_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(tmp_image_ds_loc),
vi.ii.file_size_in_kb)
flat_vmdk_name = vi.cache_image_path.basename.replace('.vmdk',
'-flat.vmdk')
flat_vmdk_ds_loc = tmp_dir_loc.join(vi.ii.image_id, flat_vmdk_name)
self._delete_datastore_file(vi.instance, str(flat_vmdk_ds_loc),
vi.dc_info.ref)
return tmp_dir_loc, flat_vmdk_ds_loc
def _prepare_iso_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _move_to_cache(self, dc_ref, src_folder_ds_path, dst_folder_ds_path):
try:
ds_util.file_move(self._session, dc_ref,
src_folder_ds_path, dst_folder_ds_path)
except vexc.FileAlreadyExistsException:
# Folder move has failed. This may be due to the fact that a
# process or thread has already completed the operation.
# Since image caching is synchronized, this can only happen
# due to action external to the process.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
"can lead to unexpected results."),
dst_folder_ds_path)
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
tmp_dir_loc = tmp_image_ds_loc.parent.parent
converted_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
# converts fetched image to preallocated disk
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(tmp_image_ds_loc),
str(converted_image_ds_loc))
self._delete_datastore_file(vi.instance, str(tmp_image_ds_loc),
vi.dc_info.ref)
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_flat_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_iso_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _get_vm_config_info(self, instance, image_info, instance_name=None):
"""Captures all relevant information from the spawn parameters."""
if (instance.root_gb != 0 and
image_info.file_size_in_gb > instance.root_gb):
reason = _("Image disk size greater than requested disk size")
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
datastore = ds_util.get_datastore(
self._session, self._cluster, self._datastore_regex)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
return VirtualMachineInstanceConfigInfo(instance,
instance_name,
image_info,
datastore,
dc_info,
self._imagecache)
def _get_image_callbacks(self, vi):
disk_type = vi.ii.disk_type
image_fetch = self._fetch_image_as_file
if vi.ii.is_iso:
image_prepare = self._prepare_iso_image
image_cache = self._cache_iso_image
elif disk_type == constants.DISK_TYPE_SPARSE:
image_prepare = self._prepare_sparse_image
image_cache = self._cache_sparse_image
elif disk_type in constants.SUPPORTED_FLAT_VARIANTS:
image_prepare = self._prepare_flat_image
image_cache = self._cache_flat_image
else:
reason = _("disk type '%s' not supported") % disk_type
raise exception.InvalidDiskInfo(reason=reason)
return image_prepare, image_fetch, image_cache
def _fetch_image_if_missing(self, context, vi):
image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi)
LOG.debug("Processing image %s", vi.ii.image_id)
with lockutils.lock(str(vi.cache_image_path),
lock_file_prefix='nova-vmware-fetch_image'):
self.check_cache_folder(vi.datastore.name, vi.datastore.ref)
ds_browser = self._get_ds_browser(vi.datastore.ref)
if not ds_util.file_exists(self._session, ds_browser,
vi.cache_image_folder,
vi.cache_image_path.basename):
LOG.debug("Preparing fetch location")
tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
LOG.debug("Fetch image to %s", tmp_image_ds_loc)
image_fetch(context, vi, tmp_image_ds_loc)
LOG.debug("Caching image")
image_cache(vi, tmp_image_ds_loc)
LOG.debug("Cleaning up location %s", str(tmp_dir_loc))
self._delete_datastore_file(vi.instance, str(tmp_dir_loc),
vi.dc_info.ref)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None,
instance_name=None, power_on=True):
client_factory = self._session._get_vim().client.factory
image_info = vmware_images.VMwareImage.from_image(instance.image_ref,
image_meta)
vi = self._get_vm_config_info(instance, image_info, instance_name)
# Creates the virtual machine. The virtual machine reference returned
# is unique within Virtual Center.
vm_ref = self.build_virtual_machine(instance,
vi.instance_name,
image_info,
vi.dc_info,
vi.datastore,
network_info)
        # Cache the vm_ref to save a remote call to the VC. The cache is keyed
        # by instance_name, which covers all use cases including rescue and resize.
vm_util.vm_ref_cache_update(vi.instance_name, vm_ref)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc_enabled:
self._get_and_set_vnc_config(client_factory, instance)
block_device_mapping = []
if block_device_info is not None:
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
# NOTE(mdbooth): the logic here is that we ignore the image if there
# are block device mappings. This behaviour is incorrect, and a bug in
# the driver. We should be able to accept an image and block device
# mappings.
if len(block_device_mapping) > 0:
msg = "Block device information present: %s" % block_device_info
# NOTE(mriedem): block_device_info can contain an auth_password
# so we have to scrub the message before logging it.
LOG.debug(logging.mask_password(msg), instance=instance)
for root_disk in block_device_mapping:
connection_info = root_disk['connection_info']
# TODO(hartsocks): instance is unnecessary, remove it
# we still use instance in many locations for no other purpose
# than logging, can we simplify this?
self._volumeops.attach_root_volume(connection_info, instance,
self._default_root_device,
vi.datastore.ref)
else:
self._imagecache.enlist_image(
image_info.image_id, vi.datastore, vi.dc_info.ref)
self._fetch_image_if_missing(context, vi)
if image_info.is_iso:
self._use_iso_image(vm_ref, vi)
elif image_info.linked_clone:
self._use_disk_image_as_linked_clone(vm_ref, vi)
else:
self._use_disk_image_as_full_clone(vm_ref, vi)
if configdrive.required_by(instance):
self._configure_config_drive(
instance, vm_ref, vi.dc_info, vi.datastore,
injected_files, admin_password)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
try:
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
vmware_images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host,
data_center_name=dc_name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_iso_path)
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
client_factory = self._session._get_vim().client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
client_factory,
devices,
'ide')
cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
client_factory, datastore, file_path,
controller_key, unit_number)
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug("Reconfiguring VM instance to attach cdrom %s",
file_path, instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
LOG.debug("Reconfigured VM instance to attach cdrom %s",
file_path, instance=instance)
def _create_vm_snapshot(self, instance, vm_ref):
LOG.debug("Creating Snapshot of the VM instance", instance=instance)
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.uuid,
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug("Created Snapshot of the VM instance", instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
snapshot = task_info.result
return snapshot
@retry_if_task_in_progress
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
self._session._get_vim(),
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug("Deleted Snapshot of the VM instance", instance=instance)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
        3. Call CopyVirtualDisk which coalesces the disk chain to form a single
           vmdk, that is, a .vmdk metadata file and a -flat.vmdk disk data file.
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
service_content = self._session._get_vim().service_content
def _get_vm_and_vmdk_attribs():
# Get the vmdk file name that the VM is pointing to
hw_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_file_path_before_snapshot, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hw_devices, uuid=instance.uuid)
if not vmdk_file_path_before_snapshot:
LOG.debug("No root disk defined. Unable to snapshot.")
raise error_util.NoRootDiskDefined()
datastore_name = ds_util.DatastorePath.parse(
vmdk_file_path_before_snapshot).datastore
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
return (vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type)
(vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type) = _get_vm_and_vmdk_attribs()
snapshot = self._create_vm_snapshot(instance, vm_ref)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
# snapshot was taken
ds_ref_ret = self._session._call_method(
vim_util, "get_dynamic_property", vm_ref, "VirtualMachine",
"datastore")
if ds_ref_ret is None:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
self.check_temp_folder(datastore_name, ds_ref)
return ds_ref
ds_ref = _check_if_tmp_folder_exists()
        # Generate a random vmdk file name to which the coalesced vmdk content
        # will be copied. A random name is chosen so that we don't have
        # name clashes.
random_name = uuidutils.generate_uuid()
dest_vmdk_file_path = ds_util.DatastorePath(
datastore_name, self._tmp_folder, "%s.vmdk" % random_name)
dest_vmdk_data_file_path = ds_util.DatastorePath(
datastore_name, self._tmp_folder, "%s-flat.vmdk" % random_name)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
def _copy_vmdk_content():
# Consolidate the snapshotted disk to a temporary vmdk.
LOG.debug('Copying snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
copy_disk_task = self._session._call_method(
self._session._get_vim(),
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_info.ref,
destName=str(dest_vmdk_file_path),
destDatacenter=dc_info.ref,
force=False)
self._session._wait_for_task(copy_disk_task)
LOG.debug('Copied snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
_copy_vmdk_content()
self._delete_vm_snapshot(instance, vm_ref, snapshot)
cookies = self._session._get_vim().client.options.transport.cookiejar
def _upload_vmdk_to_image_repository():
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug("Uploading image %s", image_id,
instance=instance)
vmware_images.upload_image(
context,
image_id,
instance,
os_type=os_type,
disk_type=constants.DEFAULT_DISK_TYPE,
adapter_type=adapter_type,
image_version=1,
host=self._session._host,
data_center_name=dc_info.name,
datastore_name=datastore_name,
cookies=cookies,
file_path="%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
LOG.debug("Uploaded image %s", image_id,
instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
"""Delete temporary vmdk files generated in image handling
operations.
"""
# The data file is the one occupying space, and likelier to see
# deletion problems, so prioritize its deletion first. In the
# unlikely event that its deletion fails, the small descriptor file
# is retained too by design since it makes little sense to remove
# it when the data disk it refers to still lingers.
for f in dest_vmdk_data_file_path, dest_vmdk_file_path:
self._delete_datastore_file(instance, f, dc_info.ref)
_clean_temp_data()
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session, props)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
        # If the latest VMware tools are installed in the VM and the tools are
        # running, then only do a guest reboot. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning"):
LOG.debug("Rebooting guest OS of VM", instance=instance)
self._session._call_method(self._session._get_vim(), "RebootGuest",
vm_ref)
LOG.debug("Rebooted guest OS of VM", instance=instance)
else:
LOG.debug("Doing hard reboot of VM", instance=instance)
reset_task = self._session._call_method(self._session._get_vim(),
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug("Did hard reboot of VM", instance=instance)
def _destroy_instance(self, instance, destroy_disks=True,
instance_name=None):
# Destroy a VM instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
if instance_name is None:
instance_name = instance['uuid']
try:
vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
if vm_ref is None:
LOG.warning(_('Instance does not exist on backend'),
instance=instance)
return
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, props)
pwr_state = query['runtime.powerState']
vm_config_pathname = query['config.files.vmPathName']
vm_ds_path = None
if vm_config_pathname:
vm_ds_path = ds_util.DatastorePath.parse(vm_config_pathname)
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
vm_util.power_off_instance(self._session, instance, vm_ref)
# Un-register the VM
try:
LOG.debug("Unregistering the VM", instance=instance)
self._session._call_method(self._session._get_vim(),
"UnregisterVM", vm_ref)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
"exception while un-registering the VM: %s"),
excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and vm_ds_path:
try:
dir_ds_compliant_path = vm_ds_path.parent
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug("Deleted contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
except Exception:
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents from "
"the disk"), exc_info=True)
except Exception as exc:
LOG.exception(exc, instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance_name)
def destroy(self, instance, destroy_disks=True):
"""Destroy a VM instance.
Steps followed for each VM are:
1. Power off, if it is in poweredOn state.
2. Un-register.
3. Delete the contents of the folder holding the VM related data.
"""
# If there is a rescue VM then we need to destroy that one too.
LOG.debug("Destroying instance", instance=instance)
if instance['vm_state'] == vm_states.RESCUED:
LOG.debug("Rescue VM configured", instance=instance)
try:
self.unrescue(instance, power_on=False)
LOG.debug("Rescue VM destroyed", instance=instance)
except Exception:
rescue_name = instance['uuid'] + self._rescue_suffix
self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=rescue_name)
        # NOTE(arnaud): Destroy the uuid-orig and uuid VMs iff the destroy is
        # not triggered by the revert resize api call. This prevents the
        # uuid-orig VM from being deleted, so that it can be associated again
        # later.
if instance.task_state != task_states.RESIZE_REVERTING:
            # When VM deletion is triggered in the middle of a VM resize,
            # before the VM reaches the RESIZED state, the uuid-orig VM needs
            # to be deleted to avoid a VM leak. The _destroy_instance method
            # checks whether the vm_ref exists before attempting deletion.
resize_orig_vmname = instance['uuid'] + self._migrate_suffix
vm_orig_ref = vm_util.get_vm_ref_from_name(self._session,
resize_orig_vmname)
if vm_orig_ref:
self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=resize_orig_vmname)
self._destroy_instance(instance, destroy_disks=destroy_disks)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug("Suspending the VM", instance=instance)
suspend_task = self._session._call_method(self._session._get_vim(),
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Suspended the VM", instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance.
- shutdown the instance VM.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
self.power_off(instance)
r_instance = copy.deepcopy(instance)
instance_name = r_instance.uuid + self._rescue_suffix
self.spawn(context, r_instance, image_meta,
None, None, network_info,
instance_name=instance_name,
power_on=False)
# Attach vmdk to the rescue VM
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_path, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance.uuid)
rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
self._volumeops.attach_disk_to_vm(
rescue_vm_ref, r_instance,
adapter_type, disk_type, vmdk_path)
vm_util.power_on_instance(self._session, r_instance,
vm_ref=rescue_vm_ref)
def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
# Get the original vmdk_path
vm_ref = vm_util.get_vm_ref(self._session, instance)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_path, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance.uuid)
r_instance = copy.deepcopy(instance)
instance_name = r_instance.uuid + self._rescue_suffix
# detach the original instance disk from the rescue disk
vm_rescue_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_rescue_ref,
"VirtualMachine", "config.hardware.device")
device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path)
vm_util.power_off_instance(self._session, r_instance, vm_rescue_ref)
self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device)
self._destroy_instance(r_instance, instance_name=instance_name)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
vm_util.power_off_instance(self._session, instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
def _get_orig_vm_name_label(self, instance):
return instance.uuid + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
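        # A worked example (the step count is illustrative; RESIZE_TOTAL_STEPS
        # is defined elsewhere in this module): with 4 total steps, finishing
        # step 2 reports round(float(2) / 4 * 100) == 50 percent.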
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
instance.save()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# Checks if the migration needs a disk resize down.
if flavor['root_gb'] < instance['root_gb']:
reason = _("Unable to shrink disk.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Read the host_ref for the destination. If this is None then the
# VC will decide on placement
host_ref = self._get_host_ref_from_name(dest)
# 1. Power off the instance
self.power_off(instance)
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Disassociate the linked vsphere VM from the instance
vm_util.disassociate_vmref_from_instance(self._session, instance,
vm_ref,
suffix=self._migrate_suffix)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
ds_ref = ds_util.get_datastore(
self._session, self._cluster,
datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(ds_ref)
# 3. Clone the VM for instance
vm_util.clone_vmref_for_instance(self._session, instance, vm_ref,
host_ref, ds_ref, dc_info.vmFolder)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
        # Destroy the original VM. The vm_ref needs to be searched using
        # instance.uuid + self._migrate_suffix as the identifier. The VM will
        # not be found when searching by instanceUuid; instead it is found via
        # the uuid buried in the extraConfig.
vm_ref = vm_util.search_vm_ref_by_identifier(self._session,
instance.uuid + self._migrate_suffix)
if vm_ref is None:
LOG.debug("instance not present", instance=instance)
return
try:
LOG.debug("Destroying the VM", instance=instance)
destroy_task = self._session._call_method(
self._session._get_vim(),
"Destroy_Task", vm_ref)
self._session._wait_for_task(destroy_task)
LOG.debug("Destroyed the VM", instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s"), excep)
def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
vm_util.associate_vmref_for_instance(self._session, instance,
suffix=self._migrate_suffix)
if power_on:
vm_util.power_on_instance(self._session, instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
if resize_instance:
client_factory = self._session._get_vim().client.factory
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
instance)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
# Resize the disk (if larger)
old_root_gb = instance.system_metadata['old_instance_type_root_gb']
if instance['root_gb'] > int(old_root_gb):
root_disk_in_kb = instance['root_gb'] * units.Mi
vmdk_path = vm_util.get_vmdk_path(self._session, vm_ref,
instance)
data_store_ref = ds_util.get_datastore(self._session,
self._cluster, datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(data_store_ref)
self._extend_virtual_disk(instance, root_disk_in_kb, vmdk_path,
dc_info.ref)
# TODO(ericwb): add extend for ephemeral disk
# 4. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug("Migrating VM to host %s", dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session._get_vim(),
"MigrateVM_Task", vm_ref,
host=host_ref,
priority="defaultPriority")
self._session._wait_for_task(vm_migrate_task)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug("Migrated VM to host %s", dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, vm_props)
max_mem = int(query['summary.config.memorySizeMB']) * 1024
return {'state': VMWARE_POWER_STATES[query['runtime.powerState']],
'max_mem': max_mem,
'mem': max_mem,
'num_cpu': int(query['summary.config.numCpu']),
'cpu_time': 0}
def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
vm_props)
data = {}
        # All of the values received are objects. Convert them to dictionaries.
for value in query.values():
prop_dict = vim_util.object_to_dict(value, list_depth=1)
data.update(prop_dict)
return data
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
        # Add a namespace to all of the diagnostics
return dict([('vmware:' + k, v) for k, v in data.items()])
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
state = data.get('powerState')
if state:
state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]]
uptime = data.get('uptimeSeconds', 0)
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=state,
driver='vmwareapi',
config_drive=config_drive,
hypervisor_os='esxi',
uptime=uptime)
diags.memory_details.maximum = data.get('memorySizeMB', 0)
diags.memory_details.used = data.get('guestMemoryUsage', 0)
# TODO(garyk): add in cpu, nic and disk stats
return diags
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
@staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
|
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
def _set_machine_id(self, client_factory, instance, network_info):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
@utils.synchronized('vmware.get_and_set_vnc_port')
def _get_and_set_vnc_config(self, client_factory, instance):
"""Set the vnc configuration of the VM."""
port = vm_util.get_vnc_port(self._session)
vm_ref = vm_util.get_vm_ref(self._session, instance)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref.value)
if not ds_browser:
ds_browser = self._session._call_method(
vim_util, "get_dynamic_property", ds_ref, "Datastore",
"browser")
self._datastore_browser_mapping[ds_ref.value] = ds_browser
return ds_browser
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
host_objs = self._session._call_method(vim_util, "get_objects",
"HostSystem", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, host_objs)
for host in host_objs:
if hasattr(host, 'propSet'):
if host.propSet[0].val == host_name:
return host.obj
return None
def _get_vmfolder_ref(self):
"""Get the Vm folder ref from the datacenter."""
dc_objs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["vmFolder"])
vm_util._cancel_retrieve_if_necessary(self._session, dc_objs)
# There is only one default datacenter in a standalone ESX host
vm_folder_ref = dc_objs.objects[0].propSet[0].val
return vm_folder_ref
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
Currently there are two folder that are required on the datastore
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
This method is aimed to be used for the management of those
folders to ensure that they are created if they are missing.
The ds_util method mkdir will be used to check if the folder
exists. If this throws and exception 'FileAlreadyExistsException'
then the folder already exists on the datastore.
"""
path = ds_util.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug("Folder %s created.", path)
except vexc.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def _check_if_folder_file_exists(self, ds_browser, ds_ref, ds_name,
folder_name, file_name):
# Ensure that the cache folder exists
self.check_cache_folder(ds_name, ds_ref)
# Check if the file exists or not.
folder_ds_path = ds_util.DatastorePath(ds_name, folder_name)
return ds_util.file_exists(
self._session, ds_browser, folder_ds_path, file_name)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
client_factory = self._session._get_vim().client.factory
self._set_machine_id(client_factory, instance, network_info)
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug("Image aging disabled. Aging will not be done.")
return
datastores = ds_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
dc_info = self.get_datacenter_ref_and_name(ds.ref)
datastores_info.append((ds, dc_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
"""Returns list of valid vms from RetrieveResult object."""
lst_vm_names = []
while retrieve_result:
token = vm_util._get_token(retrieve_result)
for vm in retrieve_result.objects:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
# Ignoring the orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
if token:
retrieve_result = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return lst_vm_names
def instance_exists(self, instance):
try:
vm_util.get_vm_ref(self._session, instance)
return True
except exception.InstanceNotFound:
return False
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
vif_model = image_meta.get("hw_vif_model",
constants.DEFAULT_VIF_MODEL)
vif_model = vm_util.convert_vif_model(vif_model)
vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
vif_model, utils.is_neutron(), vif)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_attach_port_index(self._session, vm_ref)
client_factory = self._session._get_vim().client.factory
attach_config_spec = vm_util.get_network_attach_config_spec(
client_factory, vif_info, port_index)
LOG.debug("Reconfiguring VM to attach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
attach_config_spec)
except Exception as e:
LOG.error(_LE('Attaching network adapter failed. Exception: '
' %s'),
e, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance['uuid'])
LOG.debug("Reconfigured VM to attach interface", instance=instance)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_vm_detach_port_index(self._session,
vm_ref,
vif['id'])
if port_index is None:
msg = _("No device with interface-id %s exists on "
"VM") % vif['id']
raise exception.NotFound(msg)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
device = vmwarevif.get_network_device(hardware_devices,
vif['address'])
if device is None:
msg = _("No device with MAC address %s exists on the "
"VM") % vif['address']
raise exception.NotFound(msg)
client_factory = self._session._get_vim().client.factory
detach_config_spec = vm_util.get_network_detach_config_spec(
client_factory, device, port_index)
LOG.debug("Reconfiguring VM to detach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
detach_config_spec)
except Exception as e:
LOG.error(_LE('Detaching network adapter failed. Exception: '
'%s'),
e, instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance['uuid'])
LOG.debug("Reconfigured VM to detach interface", instance=instance)
def _use_disk_image_as_full_clone(self, vm_ref, vi):
"""Uses cached image disk by copying it into the VM directory."""
instance_folder = vi.instance_name
root_disk_name = "%s.vmdk" % vi.instance_name
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(root_disk_ds_loc))
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(root_disk_ds_loc))
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, False)
def _sized_image_exists(self, sized_disk_ds_loc, ds_ref):
ds_browser = self._get_ds_browser(ds_ref)
return ds_util.file_exists(
self._session, ds_browser, sized_disk_ds_loc.parent,
sized_disk_ds_loc.basename)
def _use_disk_image_as_linked_clone(self, vm_ref, vi):
"""Uses cached image as parent of a COW child in the VM directory."""
sized_image_disk_name = "%s.vmdk" % vi.ii.image_id
if vi.root_gb > 0:
sized_image_disk_name = "%s.%s.vmdk" % (vi.ii.image_id, vi.root_gb)
sized_disk_ds_loc = vi.cache_image_folder.join(sized_image_disk_name)
# Ensure only a single thread extends the image at once.
# We do this by taking a lock on the name of the extended
# image. This allows multiple threads to create resized
# copies simultaneously, as long as they are different
# sizes. Threads attempting to create the same resized copy
# will be serialized, with only the first actually creating
# the copy.
#
# Note that the object is in a per-nova cache directory,
# so inter-nova locking is not a concern. Consequently we
# can safely use simple thread locks.
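        # For illustration (the path below is hypothetical): two instances
        # resized to 20 GB from the same image both derive a name like
        # "[ds1] vmware_base/<image-id>.20.vmdk" and therefore serialize on the
        # same lock, while a 40 GB resize of that image uses a different name
        # and can proceed in parallel.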
with lockutils.lock(str(sized_disk_ds_loc),
lock_file_prefix='nova-vmware-image'):
if not self._sized_image_exists(sized_disk_ds_loc,
vi.datastore.ref):
LOG.debug("Copying root disk of size %sGb", vi.root_gb)
try:
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(sized_disk_ds_loc))
except Exception as e:
LOG.warning(_("Root disk file creation "
"failed - %s"), e)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy cached '
'image %(source)s to '
'%(dest)s for resize: '
'%(error)s'),
{'source': vi.cache_image_path,
'dest': sized_disk_ds_loc,
'error': e.message})
try:
ds_util.file_delete(self._session,
sized_disk_ds_loc,
vi.dc_info.ref)
except vexc.FileNotFoundException:
# File was never created: cleanup not
# required
pass
# Resize the copy to the appropriate size. No need
# for cleanup up here, as _extend_virtual_disk
# already does it
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(sized_disk_ds_loc))
# Associate the sized image disk to the VM by attaching to the VM a
# COW child of said disk.
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(sized_disk_ds_loc),
vi.root_gb * units.Mi, vi.ii.linked_clone)
def _use_iso_image(self, vm_ref, vi):
"""Uses cached image as a bootable virtual cdrom."""
self._attach_cdrom_to_vm(
vm_ref, vi.instance, vi.datastore.ref,
str(vi.cache_image_path))
# Optionally create and attach blank disk
if vi.root_gb > 0:
instance_folder = vi.instance_name
root_disk_name = "%s.vmdk" % vi.instance_name
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
# It is pointless to COW a blank disk
linked_clone = False
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi)
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, linked_clone)
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
dc_ref = dco.obj
ds_refs = []
prop_dict = vm_util.propset_dict(dco.propSet)
name = prop_dict.get('name')
vmFolder = prop_dict.get('vmFolder')
datastore_refs = prop_dict.get('datastore')
if datastore_refs:
datastore_refs = datastore_refs.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
else:
LOG.debug("Datacenter %s doesn't have any datastore "
"associated with it, ignoring it", name)
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
if token:
dcs = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
root_res_pool = self._session._call_method(
vim_util, "get_dynamic_property", self._cluster,
'ClusterComputeResource', 'resourcePool')
if root_res_pool:
vms = self._session._call_method(
vim_util, 'get_inner_objects', root_res_pool, 'vm',
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using vCenter logic."""
# vCenter does not run virtual machines and does not run
# a VNC proxy. Instead, you need to tell OpenStack to talk
# directly to the ESX host running the VM you are attempting
# to connect to via VNC.
vnc_console = self._get_vnc_console_connection(instance)
host_name = vm_util.get_host_name_for_vm(
self._session,
instance)
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
{'uuid': instance.name, 'host_name': host_name},
instance=instance)
return ctype.ConsoleVNC(**vnc_console)
|
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
|
index.js
|
/*!
* bytes
* Copyright(c) 2012-2014 TJ Holowaychuk
* Copyright(c) 2015 Jed Watson
* MIT Licensed
*/
'use strict';
/**
* Module exports.
* @public
*/
module.exports = bytes;
module.exports.format = format;
module.exports.parse = parse;
/**
* Module variables.
* @private
*/
var formatThousandsRegExp = /\B(?=(\d{3})+(?!\d))/g;
var formatDecimalsRegExp = /(?:\.0*|(\.[^0]+)0+)$/;
var map = {
b: 1,
kb: 1 << 10,
mb: 1 << 20,
gb: 1 << 30,
tb: ((1 << 30) * 1024)
};
// TODO: use is-finite module?
var numberIsFinite = Number.isFinite || function (v) { return typeof v === 'number' && isFinite(v); };
var parseRegExp = /^((-|\+)?(\d+(?:\.\d+)?)) *(kb|mb|gb|tb)$/i;
/**
 * Convert the given value in bytes into a string, or parse a string into an integer in bytes.
*
* @param {string|number} value
* @param {{
* case: [string],
* decimalPlaces: [number]
* fixedDecimals: [boolean]
* thousandsSeparator: [string]
* }} [options] bytes options.
*
* @returns {string|number|null}
*/
function bytes(value, options) {
if (typeof value === 'string') {
return parse(value);
}
if (typeof value === 'number') {
return format(value, options);
}
return null;
}
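// Usage sketch (illustrative values, not taken from this module's tests):
//   bytes(1024)                               // => '1kB'
//   bytes('1kB')                              // => 1024
//   bytes(1000, { thousandsSeparator: ',' })  // => '1,000B'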
/**
* Format the given value in bytes into a string.
*
* If the value is negative, it is kept as such. If it is a float,
* it is rounded.
*
* @param {number} value
* @param {object} [options]
* @param {number} [options.decimalPlaces=2]
* @param {number} [options.fixedDecimals=false]
* @param {string} [options.thousandsSeparator=]
*
* @returns {string|null}
* @public
*/
function format(value, options) {
if (!numberIsFinite(value)) {
return null;
}
var mag = Math.abs(value);
var thousandsSeparator = (options && options.thousandsSeparator) || '';
var decimalPlaces = (options && options.decimalPlaces !== undefined) ? options.decimalPlaces : 2;
var fixedDecimals = Boolean(options && options.fixedDecimals);
var unit = 'B';
if (mag >= map.tb) {
unit = 'TB';
} else if (mag >= map.gb) {
unit = 'GB';
} else if (mag >= map.mb) {
unit = 'MB';
} else if (mag >= map.kb) {
unit = 'kB';
}
var val = value / map[unit.toLowerCase()];
var str = val.toFixed(decimalPlaces);
if (!fixedDecimals) {
str = str.replace(formatDecimalsRegExp, '$1');
}
if (thousandsSeparator) {
str = str.replace(formatThousandsRegExp, thousandsSeparator);
}
return str + unit;
}
/**
* Parse the string value into an integer in bytes.
*
* If no unit is given, it is assumed the value is in bytes.
*
* @param {number|string} val
*
* @returns {number|null}
* @public
*/
|
return val;
}
if (typeof val !== 'string') {
return null;
}
// Test if the string passed is valid
var results = parseRegExp.exec(val);
var floatValue;
var unit = 'b';
if (!results) {
// Nothing could be extracted from the given string
floatValue = parseInt(val, 10);
    unit = 'b';
} else {
// Retrieve the value and the unit
floatValue = parseFloat(results[1]);
unit = results[4].toLowerCase();
}
return Math.floor(map[unit] * floatValue);
}
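// Rough behaviour of parse (illustrative, not taken from this module's tests):
//   parse('5mb')   // => 5242880
//   parse('50')    // => 50  (no unit given, so the value is taken as bytes)
//   parse('bogus') // => NaN (no leading digits to extract)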
|
function parse(val) {
if (typeof val === 'number' && !isNaN(val)) {
|
extension_apiserver_test.go
|
package integration
import (
"testing"
"time"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilwait "k8s.io/apimachinery/pkg/util/wait"
kapi "k8s.io/kubernetes/pkg/apis/core"
testutil "github.com/openshift/origin/test/util"
testserver "github.com/openshift/origin/test/util/server"
)
func
|
(t *testing.T) {
masterConfig, clusterAdminKubeConfig, err := testserver.StartTestMaster()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer testserver.CleanupMasterEtcd(t, masterConfig)
clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
var configmap *kapi.ConfigMap
err = utilwait.PollImmediate(50*time.Millisecond, 10*time.Second, func() (bool, error) {
configmap, err = clusterAdminKubeClient.Core().ConfigMaps(metav1.NamespaceSystem).Get("extension-apiserver-authentication", metav1.GetOptions{})
if err == nil {
return true, nil
}
if kapierrors.IsNotFound(err) {
return false, nil
}
return false, err
})
if err != nil {
t.Fatal(err)
}
if _, ok := configmap.Data["client-ca-file"]; !ok {
t.Fatal("missing client-ca-file")
}
}
|
TestExtensionAPIServerConfigMap
|
file_info_query.go
|
package hedera
import (
"time"
"github.com/hashgraph/hedera-protobufs-go/services"
)
type FileInfoQuery struct {
Query
fileID *FileID
}
func NewFileInfoQuery() *FileInfoQuery {
header := services.QueryHeader{}
return &FileInfoQuery{
Query: _NewQuery(true, &header),
}
}
func (query *FileInfoQuery) SetFileID(fileID FileID) *FileInfoQuery {
query.fileID = &fileID
return query
}
func (query *FileInfoQuery) GetFileID() FileID {
if query.fileID == nil {
return FileID{}
}
return *query.fileID
}
func (query *FileInfoQuery) _ValidateNetworkOnIDs(client *Client) error {
if client == nil || !client.autoValidateChecksums {
return nil
}
if query.fileID != nil {
if err := query.fileID.ValidateChecksum(client); err != nil {
return err
}
}
return nil
}
func (query *FileInfoQuery) _Build() *services.Query_FileGetInfo {
body := &services.FileGetInfoQuery{
Header: &services.QueryHeader{},
}
if query.fileID != nil {
body.FileID = query.fileID._ToProtobuf()
}
return &services.Query_FileGetInfo{
FileGetInfo: body,
}
}
func (query *FileInfoQuery) GetCost(client *Client) (Hbar, error) {
if client == nil || client.operator == nil {
return Hbar{}, errNoClientProvided
}
var err error
if len(query.Query.GetNodeAccountIDs()) == 0 {
nodeAccountIDs, err := client.network._GetNodeAccountIDsForExecute()
if err != nil {
return Hbar{}, err
}
query.SetNodeAccountIDs(nodeAccountIDs)
}
err = query._ValidateNetworkOnIDs(client)
if err != nil {
return Hbar{}, err
}
for range query.nodeAccountIDs {
paymentTransaction, err := _QueryMakePaymentTransaction(TransactionID{}, AccountID{}, client.operator, Hbar{})
if err != nil {
return Hbar{}, err
}
query.paymentTransactions = append(query.paymentTransactions, paymentTransaction)
}
pb := query._Build()
pb.FileGetInfo.Header = query.pbHeader
query.pb = &services.Query{
Query: pb,
}
resp, err := _Execute(
client,
_Request{
query: &query.Query,
},
_FileInfoQueryShouldRetry,
_CostQueryMakeRequest,
_QueryAdvanceRequest,
_QueryGetNodeAccountID,
_FileInfoQueryGetMethod,
_FileInfoQueryMapStatusError,
_QueryMapResponse,
)
if err != nil {
return Hbar{}, err
}
cost := int64(resp.query.GetFileGetInfo().Header.Cost)
if cost < 25 {
return HbarFromTinybar(25), nil
}
return HbarFromTinybar(cost), nil
}
func _FileInfoQueryShouldRetry(_ _Request, response _Response) _ExecutionState {
return _QueryShouldRetry(Status(response.query.GetFileGetInfo().Header.NodeTransactionPrecheckCode))
}
func _FileInfoQueryMapStatusError(_ _Request, response _Response) error {
return ErrHederaPreCheckStatus{
Status: Status(response.query.GetFileGetInfo().Header.NodeTransactionPrecheckCode),
}
}
func
|
(_ _Request, channel *_Channel) _Method {
return _Method{
query: channel._GetFile().GetFileInfo,
}
}
func (query *FileInfoQuery) Execute(client *Client) (FileInfo, error) {
if client == nil || client.operator == nil {
return FileInfo{}, errNoClientProvided
}
var err error
if len(query.Query.GetNodeAccountIDs()) == 0 {
nodeAccountIDs, err := client.network._GetNodeAccountIDsForExecute()
if err != nil {
return FileInfo{}, err
}
query.SetNodeAccountIDs(nodeAccountIDs)
}
err = query._ValidateNetworkOnIDs(client)
if err != nil {
return FileInfo{}, err
}
query.paymentTransactionID = TransactionIDGenerate(client.operator.accountID)
var cost Hbar
if query.queryPayment.tinybar != 0 {
cost = query.queryPayment
} else {
if query.maxQueryPayment.tinybar == 0 {
cost = client.maxQueryPayment
} else {
cost = query.maxQueryPayment
}
actualCost, err := query.GetCost(client)
if err != nil {
return FileInfo{}, err
}
if cost.tinybar < actualCost.tinybar {
return FileInfo{}, ErrMaxQueryPaymentExceeded{
QueryCost: actualCost,
MaxQueryPayment: cost,
query: "FileInfoQuery",
}
}
cost = actualCost
}
query.nextPaymentTransactionIndex = 0
query.paymentTransactions = make([]*services.Transaction, 0)
err = _QueryGeneratePayments(&query.Query, client, cost)
if err != nil {
return FileInfo{}, err
}
pb := query._Build()
pb.FileGetInfo.Header = query.pbHeader
query.pb = &services.Query{
Query: pb,
}
resp, err := _Execute(
client,
_Request{
query: &query.Query,
},
_FileInfoQueryShouldRetry,
_QueryMakeRequest,
_QueryAdvanceRequest,
_QueryGetNodeAccountID,
_FileInfoQueryGetMethod,
_FileInfoQueryMapStatusError,
_QueryMapResponse,
)
if err != nil {
return FileInfo{}, err
}
info, err := _FileInfoFromProtobuf(resp.query.GetFileGetInfo().FileInfo)
if err != nil {
return FileInfo{}, err
}
return info, nil
}
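// Rough usage sketch (the client construction and the file ID below are
// hypothetical, not part of this file):
//
//	info, err := NewFileInfoQuery().
//		SetFileID(FileID{File: 150}).
//		SetMaxQueryPayment(NewHbar(1)).
//		Execute(client)
//	if err != nil {
//		// handle precheck or payment errors
//	}
//	_ = info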
// SetMaxQueryPayment sets the maximum payment allowed for this Query.
func (query *FileInfoQuery) SetMaxQueryPayment(maxPayment Hbar) *FileInfoQuery {
query.Query.SetMaxQueryPayment(maxPayment)
return query
}
// SetQueryPayment sets the payment amount for this Query.
func (query *FileInfoQuery) SetQueryPayment(paymentAmount Hbar) *FileInfoQuery {
query.Query.SetQueryPayment(paymentAmount)
return query
}
func (query *FileInfoQuery) SetNodeAccountIDs(accountID []AccountID) *FileInfoQuery {
query.Query.SetNodeAccountIDs(accountID)
return query
}
func (query *FileInfoQuery) GetNodeAccountIDs() []AccountID {
return query.Query.GetNodeAccountIDs()
}
func (query *FileInfoQuery) SetMaxRetry(count int) *FileInfoQuery {
query.Query.SetMaxRetry(count)
return query
}
func (query *FileInfoQuery) SetMaxBackoff(max time.Duration) *FileInfoQuery {
if max.Nanoseconds() < 0 {
panic("maxBackoff must be a positive duration")
} else if max.Nanoseconds() < query.minBackoff.Nanoseconds() {
panic("maxBackoff must be greater than or equal to minBackoff")
}
query.maxBackoff = &max
return query
}
func (query *FileInfoQuery) GetMaxBackoff() time.Duration {
if query.maxBackoff != nil {
return *query.maxBackoff
}
return 8 * time.Second
}
func (query *FileInfoQuery) SetMinBackoff(min time.Duration) *FileInfoQuery {
if min.Nanoseconds() < 0 {
panic("minBackoff must be a positive duration")
} else if query.maxBackoff.Nanoseconds() < min.Nanoseconds() {
panic("minBackoff must be less than or equal to maxBackoff")
}
query.minBackoff = &min
return query
}
func (query *FileInfoQuery) GetMinBackoff() time.Duration {
if query.minBackoff != nil {
return *query.minBackoff
}
return 250 * time.Millisecond
}
|
_FileInfoQueryGetMethod
|
dump_command_args.py
|
# python3.7
"""Dumps available arguments of all commands (configurations).
This file parses the arguments of all commands provided in `configs/` and dump
the results as a json file. Each parsed argument includes the name, argument
type, default value, and the help message (description). The dumped file looks
like
{
"command_1": {
"type": "object",
"properties": {
"arg_group_1": {
"type": "object",
"properties": {
"arg_1": {
"is_recommended": # true / false
"type": # int / float / bool / str / json-string /
# index-string
"default":
"description":
},
"arg_2": {
"is_recommended":
"type":
"default":
"description":
}
}
},
"arg_group_2": {
"type": "object",
"properties": {
"arg_3": {
"is_recommended":
"type":
"default":
"description":
},
"arg_4": {
"is_recommended":
"type":
"default":
"description":
}
}
}
}
},
"command_2": {
"type": "object",
"properties: {
"arg_group_1": {
"type": "object",
"properties": {
"arg_1": {
"is_recommended":
"type":
"default":
"description":
}
}
}
}
}
}
"""
import sys
import json
from configs import CONFIG_POOL
def parse_args_from_config(config):
"""Parses available arguments from a configuration class.
Args:
config: The configuration class to parse arguments from, which is
defined in `configs/`. This class is supposed to derive from
`BaseConfig` defined in `configs/base_config.py`.
"""
recommended_opts = config.get_recommended_options()
args = dict()
for opt_group, opts in config.get_options().items():
args[opt_group] = dict(
type='object',
properties=dict()
)
for opt in opts:
arg = config.inspect_option(opt)
args[opt_group]['properties'][arg.name] = dict(
is_recommended=arg.name in recommended_opts,
type=arg.type,
default=arg.default,
|
description=arg.help
)
return args
def dump(configs, save_path):
"""Dumps available arguments from given configurations to target file.
Args:
configs: A list of configurations, each of which should be a
class derived from `BaseConfig` defined in `configs/base_config.py`.
save_path: The path to save the dumped results.
"""
args = dict()
for config in configs:
args[config.name] = dict(type='object',
properties=parse_args_from_config(config))
with open(save_path, 'w') as f:
json.dump(args, f, indent=4)
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit(f'Usage: python {sys.argv[0]} SAVE_PATH')
dump(CONFIG_POOL, sys.argv[1])
| |
methods-gen-return.js
|
// Copyright (C) 2013 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
description: >
`return` is a valid statement within generator function bodies.
features: [generators]
es6id: 14.4
---*/
var result;
class
|
{
*g1() { return; }
*g2() { return 1; }
}
result = A.prototype.g1().next();
assert.sameValue(result.value, undefined);
assert.sameValue(result.done, true);
result = A.prototype.g2().next();
assert.sameValue(result.value, 1);
assert.sameValue(result.done, true);
|
A
|
R0914.py
|
##Patterns: R0914: { "max-locals": "3" }
##Warn: R0914
def
|
(thing):
a = 3
b = 3
c = 3
d = 3
e = 3
f = 3
g = 3
h = 3
i = 3
j = 3
k = 3
l = 3
m = 3
n = 3
o = 3
p = 3
q = 3
r = 3
s = 3
t = 3
|
doEverything
|
ToonChatGarbler.py
|
import string
import random
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from otp.chat import ChatGarbler
class ToonChatGarbler(ChatGarbler.ChatGarbler):
animalSounds = {'dog': TTLocalizer.ChatGarblerDog,
'cat': TTLocalizer.ChatGarblerCat,
'mouse': TTLocalizer.ChatGarblerMouse,
'horse': TTLocalizer.ChatGarblerHorse,
'rabbit': TTLocalizer.ChatGarblerRabbit,
'duck': TTLocalizer.ChatGarblerDuck,
'monkey': TTLocalizer.ChatGarblerMonkey,
'bear': TTLocalizer.ChatGarblerBear,
'pig': TTLocalizer.ChatGarblerPig,
'deer': TTLocalizer.ChatGarblerDeer,
'default': OTPLocalizer.ChatGarblerDefault}
def garble(self, toon, message):
newMessage = ''
animalType = toon.getStyle().getType()
if animalType in ToonChatGarbler.animalSounds:
wordlist = ToonChatGarbler.animalSounds[animalType]
else:
wordlist = ToonChatGarbler.animalSounds['default']
numWords = random.randint(1, 7)
for i in xrange(1, numWords + 1):
wordIndex = random.randint(0, len(wordlist) - 1)
newMessage = newMessage + wordlist[wordIndex]
if i < numWords:
newMessage = newMessage + ' '
return newMessage
def garbleSingle(self, toon, message):
newMessage = ''
animalType = toon.getStyle().getType()
if animalType in ToonChatGarbler.animalSounds:
wordlist = ToonChatGarbler.animalSounds[animalType]
else:
wordlist = ToonChatGarbler.animalSounds['default']
numWords = 1
for i in xrange(1, numWords + 1):
wordIndex = random.randint(0, len(wordlist) - 1)
newMessage = newMessage + wordlist[wordIndex]
|
newMessage = newMessage + ' '
return newMessage
|
if i < numWords:
|
bit_mask.rs
|
use crate::consts::{constant, Constant};
use clippy_utils::diagnostics::{span_lint, span_lint_and_then};
use clippy_utils::sugg::Sugg;
use if_chain::if_chain;
use rustc_ast::ast::LitKind;
use rustc_errors::Applicability;
use rustc_hir::{BinOpKind, Expr, ExprKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::source_map::Span;
declare_clippy_lint! {
/// **What it does:** Checks for incompatible bit masks in comparisons.
///
/// The formula for detecting if an expression of the type `_ <bit_op> m
/// <cmp_op> c` (where `<bit_op>` is one of {`&`, `|`} and `<cmp_op>` is one of
    /// {`==`, `!=`, `<`, `>=`, `>`, `<=`}) can be determined from the following
/// table:
///
/// |Comparison |Bit Op|Example |is always|Formula |
/// |------------|------|------------|---------|----------------------|
/// |`==` or `!=`| `&` |`x & 2 == 3`|`false` |`c & m != c` |
/// |`<` or `>=`| `&` |`x & 2 < 3` |`true` |`m < c` |
/// |`>` or `<=`| `&` |`x & 1 > 1` |`false` |`m <= c` |
/// |`==` or `!=`| `|` |`x | 1 == 0`|`false` |`c | m != c` |
/// |`<` or `>=`| `|` |`x | 1 < 1` |`false` |`m >= c` |
/// |`<=` or `>` | `|` |`x | 1 > 0` |`true` |`m > c` |
///
/// **Why is this bad?** If the bits that the comparison cares about are always
/// set to zero or one by the bit mask, the comparison is constant `true` or
/// `false` (depending on mask, compared value, and operators).
///
/// So the code is actively misleading, and the only reason someone would write
/// this intentionally is to win an underhanded Rust contest or create a
/// test-case for this lint.
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// # let x = 1;
/// if (x & 1 == 2) { }
/// ```
pub BAD_BIT_MASK,
correctness,
"expressions of the form `_ & mask == select` that will only ever return `true` or `false`"
}
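// Worked instance of the BAD_BIT_MASK table above (illustrative, not part of
// the lint's source): for `x & 2 == 3` the mask is `m = 2` and the compared
// value is `c = 3`; since `c & m` is `2`, which differs from `c`, bit 0 of `3`
// can never survive `& 2`, so the comparison is always `false` and the lint
// fires.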
declare_clippy_lint! {
/// **What it does:** Checks for bit masks in comparisons which can be removed
/// without changing the outcome. The basic structure can be seen in the
/// following table:
///
/// |Comparison| Bit Op |Example |equals |
/// |----------|---------|-----------|-------|
/// |`>` / `<=`|`|` / `^`|`x | 2 > 3`|`x > 3`|
/// |`<` / `>=`|`|` / `^`|`x ^ 1 < 4`|`x < 4`|
///
/// **Why is this bad?** Not equally evil as [`bad_bit_mask`](#bad_bit_mask),
/// but still a bit misleading, because the bit mask is ineffective.
///
/// **Known problems:** False negatives: This lint will only match instances
/// where we have figured out the math (which is for a power-of-two compared
/// value). This means things like `x | 1 >= 7` (which would be better written
/// as `x >= 6`) will not be reported (but bit masks like this are fairly
/// uncommon).
///
/// **Example:**
/// ```rust
/// # let x = 1;
/// if (x | 1 > 3) { }
/// ```
pub INEFFECTIVE_BIT_MASK,
correctness,
"expressions where a bit mask will be rendered useless by a comparison, e.g., `(x | 1) > 2`"
}
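// Illustration of the INEFFECTIVE_BIT_MASK table above (not part of the lint's
// source): in `x | 2 > 3` the compared value `3` is one below a power of two
// and the mask `2` only sets bits inside that boundary, so the OR can never
// move a value across `> 3`; the expression is equivalent to `x > 3` and the
// mask is ineffective.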
declare_clippy_lint! {
/// **What it does:** Checks for bit masks that can be replaced by a call
/// to `trailing_zeros`
///
    /// **Why is this bad?** `x.trailing_zeros() >= 4` is much clearer than `x & 15
    /// == 0`
///
/// **Known problems:** llvm generates better code for `x & 15 == 0` on x86
///
/// **Example:**
/// ```rust
/// # let x = 1;
/// if x & 0b1111 == 0 { }
/// ```
pub VERBOSE_BIT_MASK,
pedantic,
"expressions where a bit mask is less readable than the corresponding method call"
}
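// Example of the equivalence VERBOSE_BIT_MASK relies on (illustrative):
// `x & 0b1111 == 0` says the four lowest bits are clear, which is exactly
// `x.trailing_zeros() >= 4`; the suggestion built below uses `n.count_ones()`
// to pick that bound.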
#[derive(Copy, Clone)]
pub struct BitMask {
verbose_bit_mask_threshold: u64,
}
impl BitMask {
#[must_use]
pub fn new(verbose_bit_mask_threshold: u64) -> Self {
Self {
verbose_bit_mask_threshold,
}
}
}
impl_lint_pass!(BitMask => [BAD_BIT_MASK, INEFFECTIVE_BIT_MASK, VERBOSE_BIT_MASK]);
impl<'tcx> LateLintPass<'tcx> for BitMask {
fn
|
(&mut self, cx: &LateContext<'tcx>, e: &'tcx Expr<'_>) {
if let ExprKind::Binary(cmp, left, right) = &e.kind {
if cmp.node.is_comparison() {
if let Some(cmp_opt) = fetch_int_literal(cx, right) {
check_compare(cx, left, cmp.node, cmp_opt, e.span)
} else if let Some(cmp_val) = fetch_int_literal(cx, left) {
check_compare(cx, right, invert_cmp(cmp.node), cmp_val, e.span)
}
}
}
if_chain! {
if let ExprKind::Binary(op, left, right) = &e.kind;
if BinOpKind::Eq == op.node;
if let ExprKind::Binary(op1, left1, right1) = &left.kind;
if BinOpKind::BitAnd == op1.node;
if let ExprKind::Lit(lit) = &right1.kind;
if let LitKind::Int(n, _) = lit.node;
if let ExprKind::Lit(lit1) = &right.kind;
if let LitKind::Int(0, _) = lit1.node;
if n.leading_zeros() == n.count_zeros();
if n > u128::from(self.verbose_bit_mask_threshold);
then {
span_lint_and_then(cx,
VERBOSE_BIT_MASK,
e.span,
"bit mask could be simplified with a call to `trailing_zeros`",
|diag| {
let sugg = Sugg::hir(cx, left1, "...").maybe_par();
diag.span_suggestion(
e.span,
"try",
format!("{}.trailing_zeros() >= {}", sugg, n.count_ones()),
Applicability::MaybeIncorrect,
);
});
}
}
}
}
#[must_use]
fn invert_cmp(cmp: BinOpKind) -> BinOpKind {
match cmp {
BinOpKind::Eq => BinOpKind::Eq,
BinOpKind::Ne => BinOpKind::Ne,
BinOpKind::Lt => BinOpKind::Gt,
BinOpKind::Gt => BinOpKind::Lt,
BinOpKind::Le => BinOpKind::Ge,
BinOpKind::Ge => BinOpKind::Le,
_ => BinOpKind::Or, // Dummy
}
}
fn check_compare(cx: &LateContext<'_>, bit_op: &Expr<'_>, cmp_op: BinOpKind, cmp_value: u128, span: Span) {
if let ExprKind::Binary(op, left, right) = &bit_op.kind {
if op.node != BinOpKind::BitAnd && op.node != BinOpKind::BitOr {
return;
}
fetch_int_literal(cx, right)
.or_else(|| fetch_int_literal(cx, left))
.map_or((), |mask| check_bit_mask(cx, op.node, cmp_op, mask, cmp_value, span))
}
}
#[allow(clippy::too_many_lines)]
fn check_bit_mask(
cx: &LateContext<'_>,
bit_op: BinOpKind,
cmp_op: BinOpKind,
mask_value: u128,
cmp_value: u128,
span: Span,
) {
match cmp_op {
BinOpKind::Eq | BinOpKind::Ne => match bit_op {
BinOpKind::BitAnd => {
if mask_value & cmp_value != cmp_value {
if cmp_value != 0 {
span_lint(
cx,
BAD_BIT_MASK,
span,
&format!(
"incompatible bit mask: `_ & {}` can never be equal to `{}`",
mask_value, cmp_value
),
);
}
} else if mask_value == 0 {
span_lint(cx, BAD_BIT_MASK, span, "&-masking with zero");
}
},
BinOpKind::BitOr => {
if mask_value | cmp_value != cmp_value {
span_lint(
cx,
BAD_BIT_MASK,
span,
&format!(
"incompatible bit mask: `_ | {}` can never be equal to `{}`",
mask_value, cmp_value
),
);
}
},
_ => (),
},
BinOpKind::Lt | BinOpKind::Ge => match bit_op {
BinOpKind::BitAnd => {
if mask_value < cmp_value {
span_lint(
cx,
BAD_BIT_MASK,
span,
&format!(
"incompatible bit mask: `_ & {}` will always be lower than `{}`",
mask_value, cmp_value
),
);
} else if mask_value == 0 {
span_lint(cx, BAD_BIT_MASK, span, "&-masking with zero");
}
},
BinOpKind::BitOr => {
if mask_value >= cmp_value {
span_lint(
cx,
BAD_BIT_MASK,
span,
&format!(
"incompatible bit mask: `_ | {}` will never be lower than `{}`",
mask_value, cmp_value
),
);
} else {
check_ineffective_lt(cx, span, mask_value, cmp_value, "|");
}
},
BinOpKind::BitXor => check_ineffective_lt(cx, span, mask_value, cmp_value, "^"),
_ => (),
},
BinOpKind::Le | BinOpKind::Gt => match bit_op {
BinOpKind::BitAnd => {
if mask_value <= cmp_value {
span_lint(
cx,
BAD_BIT_MASK,
span,
&format!(
"incompatible bit mask: `_ & {}` will never be higher than `{}`",
mask_value, cmp_value
),
);
} else if mask_value == 0 {
span_lint(cx, BAD_BIT_MASK, span, "&-masking with zero");
}
},
BinOpKind::BitOr => {
if mask_value > cmp_value {
span_lint(
cx,
BAD_BIT_MASK,
span,
&format!(
"incompatible bit mask: `_ | {}` will always be higher than `{}`",
mask_value, cmp_value
),
);
} else {
check_ineffective_gt(cx, span, mask_value, cmp_value, "|");
}
},
BinOpKind::BitXor => check_ineffective_gt(cx, span, mask_value, cmp_value, "^"),
_ => (),
},
_ => (),
}
}
fn check_ineffective_lt(cx: &LateContext<'_>, span: Span, m: u128, c: u128, op: &str) {
if c.is_power_of_two() && m < c {
span_lint(
cx,
INEFFECTIVE_BIT_MASK,
span,
&format!(
"ineffective bit mask: `x {} {}` compared to `{}`, is the same as x compared directly",
op, m, c
),
);
}
}
fn check_ineffective_gt(cx: &LateContext<'_>, span: Span, m: u128, c: u128, op: &str) {
if (c + 1).is_power_of_two() && m <= c {
span_lint(
cx,
INEFFECTIVE_BIT_MASK,
span,
&format!(
"ineffective bit mask: `x {} {}` compared to `{}`, is the same as x compared directly",
op, m, c
),
);
}
}
fn fetch_int_literal(cx: &LateContext<'_>, lit: &Expr<'_>) -> Option<u128> {
match constant(cx, cx.typeck_results(), lit)?.0 {
Constant::Int(n) => Some(n),
_ => None,
}
}
|
check_expr
|
gu.js
|
(function(d){d['gu']=Object.assign(d['gu']||{},{a:"ફાઇલ અપલોડ ન થઇ શકી",b:"Image toolbar",c:"Table toolbar",d:"ઘાટુ - બોલ્ડ્",e:"ત્રાંસુ - ઇટલિક્",f:" વિચાર ટાંકો",g:"Insert image or file",h:"Choose heading",i:"Heading",j:"image widget",k:"Full size image",l:"Side image",m:"Left aligned image",n:"Centered image",o:"Right aligned image",p:"Insert image",q:"Increase indent",r:"Decrease indent",s:"Numbered List",t:"Bulleted List",u:"Insert table",v:"Header column",w:"Insert column left",x:"Insert column right",y:"Delete column",z:"Column",aa:"Header row",ab:"Insert row below",ac:"Insert row above",ad:"Delete row",ae:"Row",af:"Merge cell up",ag:"Merge cell right",ah:"Merge cell down",ai:"Merge cell left",aj:"Split cell vertically",ak:"Split cell horizontally",al:"Merge cells",am:"Enter image caption",an:"Upload failed",ao:"Insert media",ap:"The URL must not be empty.",aq:"This media URL is not supported.",ar:"media widget",as:"Link",at:"Widget toolbar",au:"Upload in progress",av:"Open in a new tab",aw:"Downloadable",ax:"Unlink",ay:"Edit link",az:"Open link in new tab",ba:"This link has no URL",bb:"Save",bc:"Cancel",bd:"Paste the media URL in the input.",be:"Tip: Paste the URL into the content to embed faster.",bf:"Media URL",bg:"Link URL",bh:"Dropdown toolbar",bi:"%0 of %1",bj:"Previous",bk:"Next",bl:"Undo",bm:"Redo",bn:"Editor toolbar",bo:"Show more items",bp:"Change image text alternative",bq:"Paragraph",br:"Heading 1",bs:"Heading 2",bt:"Heading 3",bu:"Heading 4",bv:"Heading 5",bw:"Heading 6",bx:"Rich Text Editor, %0",by:"Could not obtain resized image URL.",bz:"Selecting resized image failed",ca:"Could not insert image at the current position.",cb:"Inserting image failed",cc:"Text alternative"})})(window.CKEDITOR_TRANSLATIONS||(window.CKEDITOR_TRANSLATIONS={}));
|
||
mjibson-demo.go
|
// +build main
package main
import (
"fmt"
"os"
"github.com/mjibson/go-dsp/fft"
)
import "github.com/strickyak/canvas"
func
|
(x []float64, filename string) {
n := len(x)
c := canvas.NewCanvasWithScale(512, 512, 0, float64(n), -5.0, 5.0)
for i, e := range x {
c.SSet(float64(i), e, canvas.Green)
}
f, err := os.Create(filename)
if err != nil {
panic(err)
}
c.WritePng(f)
err = f.Close()
if err != nil {
panic(err)
}
}
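// plotComplex plots the real part of each sample in green and the imaginary
// part in blue, then writes the canvas out as a PNG file.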
func plotComplex(vec []complex128, filename string) {
n := len(vec)
c := canvas.NewCanvasWithScale(512, 512, 0, float64(n), -5.0, 5.0)
//var px, py int
for i, e := range vec {
//x, y := c.UnScale(real(e), imag(e))
//if i > 0 {
//c.PaintTriangle((i-1), py, (i-1), py, (i), y, canvas.Blue)
//c.PaintTriangle((i-1), px, (i-1), px, (i), x, canvas.Green)
//}
c.SSet(float64(i), imag(e), canvas.Blue)
c.SSet(float64(i), real(e), canvas.Green)
//px, py = x, y
}
f, err := os.Create(filename)
if err != nil {
panic(err)
}
c.WritePng(f)
err = f.Close()
if err != nil {
panic(err)
}
}
func plotDual(rex, imx []float64, filename string) {
n := len(rex)
c := canvas.NewCanvasWithScale(512, 512, 0, float64(n), -5.0, 5.0)
for i, e := range imx {
c.SSet(float64(i), e, canvas.Blue)
}
for i, e := range rex {
c.SSet(float64(i), e, canvas.Green)
}
f, err := os.Create(filename)
if err != nil {
panic(err)
}
c.WritePng(f)
err = f.Close()
if err != nil {
panic(err)
}
}
func main() {
x := make([]float64, 64)
for i := 0; i < 32; i++ {
x[i] = 1.0
}
plotReal(x, "/tmp/fft1.png")
y := fft.FFTReal(x)
fmt.Println(y)
plotComplex(y, "/tmp/fft2.png")
z := fft.IFFT(y)
plotComplex(z, "/tmp/fft3.png")
}
|
plotReal
|
b.rs
|
#![allow(non_snake_case)]
#![allow(unused_variables)]
#![allow(dead_code)]
fn main()
|
{
let S: Vec<char> = {
let mut line: String = String::new();
std::io::stdin().read_line(&mut line).unwrap();
line.trim().chars().collect()
};
let N = S.len();
let ans = (0..N).filter(|&i| S[i] != S[N - i - 1]).count() / 2;
println!("{}", ans);
}
|
|
cl_display_test_pattern.py
|
#! /usr/bin/env python3
# Copyright (c) 2021 Dave McCoy ([email protected])
#
# NAME is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NAME is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAME; If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import argparse
import warnings
import time
from pynq import PL
from pynq import Overlay
from pynq.lib.video import *
from pynq.lib.video.hierarchies import *
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from drivers.video_mixer import VideoMixer
from drivers.dynamic_clock import DynamicClock
from drivers.timing_controller import TimingController
from drivers.test_pattern_generator import TestPatternGenerator
from drivers.test_pattern_generator import TestPatternID
from drivers.axi_graphics import AXIGraphics
NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
def main(argv):
#Parse out the commandline arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
parser.add_argument("-t", "--test",
nargs=1,
default=["something"])
parser.add_argument("-d", "--debug",
action="store_true",
help="Enable Debug Messages")
args = parser.parse_args()
print ("Running Script: %s" % NAME)
if args.debug:
print ("test: %s" % str(args.test[0]))
BF = os.path.join("./data/system_wrapper.bit")
if not os.path.exists(BF):
print ("%s Doesn't exist Exiting!!" % BF)
return
else:
print ("Found bit file!")
ol = Overlay(BF)
ol.download()
print ("Bitfile downloaded!")
print ("Starting Video")
print ("Configuring Timing Controller")
tc = TimingController("video/timing_generator")
tc.reset()
while (not tc.is_reset_done()):
print (".")
WIDTH = tc.get_generator_width()
HEIGHT = tc.get_generator_height()
print ("Image Size (Retrieved from Timing Controller): %d x %d" % (WIDTH, HEIGHT))
print ("Configuring Test Pattern Generator")
tpg = TestPatternGenerator("video/v_tpg_0", debug = args.debug)
tpg.set_image_size(WIDTH, HEIGHT)
tpg.set_color_format_to_rgb()
#tpg.set_color_bar_test_pattern()
#tpg.set_test_pattern(TestPatternID.SOLID_RED)
#tpg.set_test_pattern(TestPatternID.SOLID_WHITE)
tpg.set_test_pattern(TestPatternID.SOLID_BLACK)
#tpg.set_test_pattern(TestPatternID.COLOR_BARS)
tpg.start()
SUB_WIN_WIDTH = 640
SUB_WIN_HEIGHT = 480
#SUB_WIN_WIDTH = 16
#SUB_WIN_HEIGHT = 4
#SUB_WIN_WIDTH = WIDTH
#SUB_WIN_HEIGHT = HEIGHT
print ("Configuring Video Mixer")
vm = VideoMixer("video/v_mix_0", WIDTH, HEIGHT)
#vm.configure_layer(0, 0, 0, WIDTH, HEIGHT)
vm.configure_layer(1, 0, 0, SUB_WIN_WIDTH, SUB_WIN_HEIGHT)
vm.enable_layer(1, True)
vm.configure_master_layer(WIDTH, HEIGHT)
#vm.enable_overlay_layer(True)
if args.debug: print ("Video Mixer Control Register Before Enable: 0x%08X" % vm.get_control())
vm.start()
if args.debug: print ("Video Mixer Control Register After Enable: 0x%08X" % vm.get_control())
print ("Enable Master Layer (Test Pattern) Output")
vm.enable_master_layer(True)
if args.debug: print ("VM Settings:")
if args.debug: print (" x pos: %d" % vm.get_layer_x(0))
if args.debug: print (" y pos: %d" % vm.get_layer_y(0))
if args.debug: print (" width: %d" % vm.get_layer_width(0))
if args.debug: print (" height: %d" % vm.get_layer_height(0))
if args.debug: print (" scale: %d" % vm.get_layer_scale(0))
if args.debug: print (" alpha: %d" % vm.get_layer_alpha(0))
if args.debug: print (" stride: %d" % vm.get_layer_stride(0))
if args.debug:
|
if args.debug: print (" p2 Addr: %d" % vm.get_layer_plane2_addr(0))
if args.debug: print ("Layer Settings: 0x%08X" % vm.get_layer_enable_reg())
g0 = ol.low_speed.axi_gpio_0
g0.write(0x0C, 0x01)
tc.enable(gen_enable = True, use_gen_src = True)
if args.debug: print ("TC Control Register: 0x%08X" % tc.get_control_reg())
print ("Interfacing with AXI Graphics Controller: \n")
print ("%s" % str(ol.ip_dict["video/axi_graphics_0"]))
ag = AXIGraphics("video/axi_graphics_0", debug = args.debug)
#print ("AXI Graphics Started: %s" % str(ol.ip_dict["video/axi_graphics"]))
#time.sleep(0.1)
#time.sleep(5)
print ("AXI Graphics Control Register: 0x%08X" % ag.get_control())
#print ("AXI Graphics: 0x%08X" % ag.get_version())
ag.set_width(SUB_WIN_WIDTH)
ag.set_height(SUB_WIN_HEIGHT)
print ("Size: %d x %d" % (ag.get_width(), ag.get_height()))
#ag.set_mode(0) # Black
#ag.set_mode(1) # White
#ag.set_mode(2) # Red
#ag.set_mode(3) # Green
#ag.set_mode(4) # Blue
ag.set_mode(5) # Color Bars
#ag.set_mode(6) # Block
#ag.set_mode(7) # Ramp
ag.set_alpha(0xFF)
ag.set_ref0_xy(100, 100)
ag.set_ref1_xy(200, 200)
ag.set_interval(100)
ag.enable_rgba_format(True)
#ag.enable_rgba_format(False)
ag.enable(True)
print ("AXI Graphics Control Register: 0x%08X" % ag.get_control())
#print ("Sleeping for 5 seconds")
#time.sleep(5)
if __name__ == "__main__":
main(sys.argv)
|
print (" p1 Addr: %d" % vm.get_layer_plane1_addr(0))
|
mod.rs
|
#[cfg(test)]
mod tests;
use crate::{
builtins::{
object::{internal_methods_trait::ObjectInternalMethods, Object},
property::Property,
value::{to_value, ResultValue, Value, ValueData},
},
exec::Interpreter,
syntax::ast::expr::Expr,
};
use gc::{custom_trace, Gc};
use gc_derive::{Finalize, Trace};
use std::fmt::{self, Debug};
/// fn(this, arguments, ctx)
pub type NativeFunctionData = fn(&Value, &[Value], &mut Interpreter) -> ResultValue;
/// A Javascript function
/// A member of the Object type that may be invoked as a subroutine
/// <https://tc39.github.io/ecma262/#sec-terms-and-definitions-function>
/// In our implementation, Function extends Object by holding an object field which contains some extra data
#[derive(Trace, Finalize, Debug, Clone)]
pub enum Function {
/// A native javascript function
NativeFunc(NativeFunction),
/// A regular javascript function
RegularFunc(RegularFunction),
}
/// Represents a regular javascript function in memory
#[derive(Trace, Finalize, Debug, Clone)]
pub struct RegularFunction {
/// The fields associated with the function
pub object: Object,
/// This function's expression
pub expr: Expr,
/// The argument declarations of the function
pub args: Vec<Expr>,
}
impl RegularFunction {
/// Make a new regular function
#[allow(clippy::cast_possible_wrap)]
pub fn new(expr: Expr, args: Vec<Expr>) -> Self {
|
object.properties.insert(
"arguments".to_string(),
Property::default().value(Gc::new(ValueData::Integer(args.len() as i32))),
);
Self { object, expr, args }
}
}
#[derive(Finalize, Clone)]
/// Represents a native javascript function in memory
pub struct NativeFunction {
/// The fields associated with the function
pub object: Object,
/// The callable function data
pub data: NativeFunctionData,
}
impl NativeFunction {
/// Make a new native function with the given function data
pub fn new(data: NativeFunctionData) -> Self {
let object = Object::default();
Self { object, data }
}
}
impl Debug for NativeFunction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{{")?;
for (key, val) in self.object.properties.iter() {
write!(
f,
"{}: {}",
key,
val.value
.as_ref()
.unwrap_or(&Gc::new(ValueData::Undefined))
.clone()
)?;
}
write!(f, "}}")
}
}
unsafe impl gc::Trace for NativeFunction {
custom_trace!(this, mark(&this.object));
}
/// Create a new `Function` object
pub fn _create() -> Value {
let function: Object = Object::default();
to_value(function)
}
/// Initialise the global object with the `Function` object
pub fn init(global: &Value) {
let global_ptr = global;
global_ptr.set_field_slice("Function", _create());
}
/// Arguments
/// https://tc39.es/ecma262/#sec-createunmappedargumentsobject
pub fn create_unmapped_arguments_object(arguments_list: Vec<Value>) -> Value {
let len = arguments_list.len();
let mut obj = Object::default();
obj.set_internal_slot("ParameterMap", Gc::new(ValueData::Undefined));
// Set length
let mut length = Property::default();
length = length.writable(true).value(to_value(len));
// Define length as a property
obj.define_own_property("length".to_string(), length);
let mut index: usize = 0;
while index < len {
let val = arguments_list.get(index).expect("Could not get argument");
let mut prop = Property::default();
prop = prop
.value(val.clone())
.enumerable(true)
.writable(true)
.configurable(true);
obj.properties.insert(index.to_string(), prop);
index += 1;
}
to_value(obj)
}
|
let mut object = Object::default();
|
base.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::SyntaxExtension::*;
use ast::{self, Attribute, Name, PatKind, MetaItem};
use attr::HasAttrs;
use codemap::{self, CodeMap, Spanned, respan};
use syntax_pos::{Span, MultiSpan, DUMMY_SP};
use edition::Edition;
use errors::{DiagnosticBuilder, DiagnosticId};
use ext::expand::{self, Expansion, Invocation};
use ext::hygiene::{self, Mark, SyntaxContext};
use fold::{self, Folder};
use parse::{self, parser, DirectoryOwnership};
use parse::token;
use ptr::P;
use symbol::{keywords, Ident, Symbol};
use util::small_vector::SmallVector;
use std::collections::HashMap;
use std::iter;
use std::path::PathBuf;
use std::rc::Rc;
use rustc_data_structures::sync::{self, Lrc};
use std::default::Default;
use tokenstream::{self, TokenStream};
#[derive(Debug,Clone)]
pub enum Annotatable {
Item(P<ast::Item>),
TraitItem(P<ast::TraitItem>),
ImplItem(P<ast::ImplItem>),
ForeignItem(P<ast::ForeignItem>),
Stmt(P<ast::Stmt>),
Expr(P<ast::Expr>),
}
impl HasAttrs for Annotatable {
fn attrs(&self) -> &[Attribute] {
match *self {
Annotatable::Item(ref item) => &item.attrs,
Annotatable::TraitItem(ref trait_item) => &trait_item.attrs,
Annotatable::ImplItem(ref impl_item) => &impl_item.attrs,
Annotatable::ForeignItem(ref foreign_item) => &foreign_item.attrs,
Annotatable::Stmt(ref stmt) => stmt.attrs(),
Annotatable::Expr(ref expr) => &expr.attrs,
}
}
fn map_attrs<F: FnOnce(Vec<Attribute>) -> Vec<Attribute>>(self, f: F) -> Self {
match self {
Annotatable::Item(item) => Annotatable::Item(item.map_attrs(f)),
Annotatable::TraitItem(trait_item) => Annotatable::TraitItem(trait_item.map_attrs(f)),
Annotatable::ImplItem(impl_item) => Annotatable::ImplItem(impl_item.map_attrs(f)),
Annotatable::ForeignItem(foreign_item) =>
Annotatable::ForeignItem(foreign_item.map_attrs(f)),
Annotatable::Stmt(stmt) => Annotatable::Stmt(stmt.map_attrs(f)),
Annotatable::Expr(expr) => Annotatable::Expr(expr.map_attrs(f)),
}
}
}
impl Annotatable {
pub fn span(&self) -> Span {
match *self {
Annotatable::Item(ref item) => item.span,
Annotatable::TraitItem(ref trait_item) => trait_item.span,
Annotatable::ImplItem(ref impl_item) => impl_item.span,
Annotatable::ForeignItem(ref foreign_item) => foreign_item.span,
Annotatable::Stmt(ref stmt) => stmt.span,
Annotatable::Expr(ref expr) => expr.span,
}
}
pub fn expect_item(self) -> P<ast::Item> {
match self {
Annotatable::Item(i) => i,
_ => panic!("expected Item")
}
}
pub fn map_item_or<F, G>(self, mut f: F, mut or: G) -> Annotatable
where F: FnMut(P<ast::Item>) -> P<ast::Item>,
G: FnMut(Annotatable) -> Annotatable
{
match self {
Annotatable::Item(i) => Annotatable::Item(f(i)),
_ => or(self)
}
}
pub fn expect_trait_item(self) -> ast::TraitItem {
match self {
Annotatable::TraitItem(i) => i.into_inner(),
_ => panic!("expected Item")
}
}
pub fn expect_impl_item(self) -> ast::ImplItem {
match self {
Annotatable::ImplItem(i) => i.into_inner(),
_ => panic!("expected Item")
}
}
pub fn expect_foreign_item(self) -> ast::ForeignItem {
match self {
Annotatable::ForeignItem(i) => i.into_inner(),
_ => panic!("expected foreign item")
}
}
pub fn expect_stmt(self) -> ast::Stmt {
match self {
Annotatable::Stmt(stmt) => stmt.into_inner(),
_ => panic!("expected statement"),
}
}
pub fn expect_expr(self) -> P<ast::Expr> {
match self {
Annotatable::Expr(expr) => expr,
_ => panic!("expected expression"),
}
}
pub fn derive_allowed(&self) -> bool {
match *self {
Annotatable::Item(ref item) => match item.node {
ast::ItemKind::Struct(..) |
ast::ItemKind::Enum(..) |
ast::ItemKind::Union(..) => true,
_ => false,
},
_ => false,
}
}
}
// A more flexible ItemDecorator.
pub trait MultiItemDecorator {
fn expand(&self,
ecx: &mut ExtCtxt,
sp: Span,
meta_item: &ast::MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable));
}
impl<F> MultiItemDecorator for F
where F : Fn(&mut ExtCtxt, Span, &ast::MetaItem, &Annotatable, &mut FnMut(Annotatable))
{
fn expand(&self,
ecx: &mut ExtCtxt,
sp: Span,
meta_item: &ast::MetaItem,
item: &Annotatable,
push: &mut FnMut(Annotatable)) {
(*self)(ecx, sp, meta_item, item, push)
}
}
// `meta_item` is the annotation, and `item` is the item being modified.
// FIXME Decorators should follow the same pattern too.
pub trait MultiItemModifier {
fn expand(&self,
ecx: &mut ExtCtxt,
span: Span,
meta_item: &ast::MetaItem,
item: Annotatable)
-> Vec<Annotatable>;
}
impl<F, T> MultiItemModifier for F
where F: Fn(&mut ExtCtxt, Span, &ast::MetaItem, Annotatable) -> T,
T: Into<Vec<Annotatable>>,
{
fn expand(&self,
ecx: &mut ExtCtxt,
span: Span,
meta_item: &ast::MetaItem,
item: Annotatable)
-> Vec<Annotatable> {
(*self)(ecx, span, meta_item, item).into()
}
}
impl Into<Vec<Annotatable>> for Annotatable {
fn into(self) -> Vec<Annotatable> {
vec![self]
}
}
pub trait ProcMacro {
fn expand<'cx>(&self,
ecx: &'cx mut ExtCtxt,
span: Span,
ts: TokenStream)
-> TokenStream;
}
impl<F> ProcMacro for F
where F: Fn(TokenStream) -> TokenStream
{
fn expand<'cx>(&self,
_ecx: &'cx mut ExtCtxt,
_span: Span,
ts: TokenStream)
-> TokenStream {
// FIXME setup implicit context in TLS before calling self.
(*self)(ts)
}
}
pub trait AttrProcMacro {
fn expand<'cx>(&self,
ecx: &'cx mut ExtCtxt,
span: Span,
annotation: TokenStream,
annotated: TokenStream)
-> TokenStream;
}
impl<F> AttrProcMacro for F
where F: Fn(TokenStream, TokenStream) -> TokenStream
{
fn expand<'cx>(&self,
_ecx: &'cx mut ExtCtxt,
_span: Span,
annotation: TokenStream,
annotated: TokenStream)
-> TokenStream {
// FIXME setup implicit context in TLS before calling self.
(*self)(annotation, annotated)
}
}
/// Represents a thing that maps token trees to Macro Results
pub trait TTMacroExpander {
fn expand<'cx>(&self, ecx: &'cx mut ExtCtxt, span: Span, input: TokenStream)
-> Box<MacResult+'cx>;
}
pub type MacroExpanderFn =
for<'cx> fn(&'cx mut ExtCtxt, Span, &[tokenstream::TokenTree])
-> Box<MacResult+'cx>;
impl<F> TTMacroExpander for F
where F: for<'cx> Fn(&'cx mut ExtCtxt, Span, &[tokenstream::TokenTree]) -> Box<MacResult+'cx>
{
fn expand<'cx>(&self, ecx: &'cx mut ExtCtxt, span: Span, input: TokenStream)
-> Box<MacResult+'cx> {
struct AvoidInterpolatedIdents;
impl Folder for AvoidInterpolatedIdents {
fn fold_tt(&mut self, tt: tokenstream::TokenTree) -> tokenstream::TokenTree {
if let tokenstream::TokenTree::Token(_, token::Interpolated(ref nt)) = tt {
if let token::NtIdent(ident, is_raw) = nt.0 {
return tokenstream::TokenTree::Token(ident.span,
token::Ident(ident, is_raw));
}
}
fold::noop_fold_tt(tt, self)
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
fold::noop_fold_mac(mac, self)
}
}
let input: Vec<_> =
input.trees().map(|tt| AvoidInterpolatedIdents.fold_tt(tt)).collect();
(*self)(ecx, span, &input)
}
}
pub trait IdentMacroExpander {
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: Span,
ident: ast::Ident,
token_tree: Vec<tokenstream::TokenTree>)
-> Box<MacResult+'cx>;
}
pub type IdentMacroExpanderFn =
for<'cx> fn(&'cx mut ExtCtxt, Span, ast::Ident, Vec<tokenstream::TokenTree>)
-> Box<MacResult+'cx>;
impl<F> IdentMacroExpander for F
where F : for<'cx> Fn(&'cx mut ExtCtxt, Span, ast::Ident,
Vec<tokenstream::TokenTree>) -> Box<MacResult+'cx>
{
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: Span,
ident: ast::Ident,
token_tree: Vec<tokenstream::TokenTree>)
-> Box<MacResult+'cx>
{
(*self)(cx, sp, ident, token_tree)
}
}
// Use a macro because forwarding to a simple function has type system issues
macro_rules! make_stmts_default {
($me:expr) => {
$me.make_expr().map(|e| SmallVector::one(ast::Stmt {
id: ast::DUMMY_NODE_ID,
span: e.span,
node: ast::StmtKind::Expr(e),
}))
}
}
/// The result of a macro expansion. The return values of the various
/// methods are spliced into the AST at the callsite of the macro.
pub trait MacResult {
/// Create an expression.
fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
None
}
/// Create zero or more items.
fn make_items(self: Box<Self>) -> Option<SmallVector<P<ast::Item>>> {
None
}
/// Create zero or more impl items.
fn make_impl_items(self: Box<Self>) -> Option<SmallVector<ast::ImplItem>> {
None
}
/// Create zero or more trait items.
fn make_trait_items(self: Box<Self>) -> Option<SmallVector<ast::TraitItem>> {
None
}
/// Create zero or more items in an `extern {}` block
fn make_foreign_items(self: Box<Self>) -> Option<SmallVector<ast::ForeignItem>> { None }
/// Create a pattern.
fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> {
None
}
/// Create zero or more statements.
///
/// By default this attempts to create an expression statement,
/// returning None if that fails.
fn make_stmts(self: Box<Self>) -> Option<SmallVector<ast::Stmt>> {
make_stmts_default!(self)
}
fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
None
}
}
macro_rules! make_MacEager {
( $( $fld:ident: $t:ty, )* ) => {
/// `MacResult` implementation for the common case where you've already
/// built each form of AST that you might return.
#[derive(Default)]
pub struct MacEager {
$(
pub $fld: Option<$t>,
)*
}
impl MacEager {
$(
pub fn $fld(v: $t) -> Box<MacResult> {
Box::new(MacEager {
$fld: Some(v),
..Default::default()
})
}
)*
}
}
}
make_MacEager! {
expr: P<ast::Expr>,
pat: P<ast::Pat>,
items: SmallVector<P<ast::Item>>,
impl_items: SmallVector<ast::ImplItem>,
trait_items: SmallVector<ast::TraitItem>,
foreign_items: SmallVector<ast::ForeignItem>,
stmts: SmallVector<ast::Stmt>,
ty: P<ast::Ty>,
}
impl MacResult for MacEager {
fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
self.expr
}
fn make_items(self: Box<Self>) -> Option<SmallVector<P<ast::Item>>> {
self.items
}
fn make_impl_items(self: Box<Self>) -> Option<SmallVector<ast::ImplItem>> {
self.impl_items
}
fn make_trait_items(self: Box<Self>) -> Option<SmallVector<ast::TraitItem>> {
self.trait_items
}
fn make_foreign_items(self: Box<Self>) -> Option<SmallVector<ast::ForeignItem>> {
self.foreign_items
}
fn make_stmts(self: Box<Self>) -> Option<SmallVector<ast::Stmt>> {
match self.stmts.as_ref().map_or(0, |s| s.len()) {
0 => make_stmts_default!(self),
_ => self.stmts,
}
}
fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> {
if let Some(p) = self.pat {
return Some(p);
}
if let Some(e) = self.expr {
if let ast::ExprKind::Lit(_) = e.node {
return Some(P(ast::Pat {
id: ast::DUMMY_NODE_ID,
span: e.span,
node: PatKind::Lit(e),
}));
}
}
None
}
fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
self.ty
}
}
/// Fill-in macro expansion result, to allow compilation to continue
/// after hitting errors.
#[derive(Copy, Clone)]
pub struct DummyResult {
expr_only: bool,
span: Span
}
impl DummyResult {
/// Create a default MacResult that can be anything.
///
/// Use this as a return value after hitting any errors and
/// calling `span_err`.
pub fn any(sp: Span) -> Box<MacResult+'static> {
Box::new(DummyResult { expr_only: false, span: sp })
}
/// Create a default MacResult that can only be an expression.
///
/// Use this for macros that must expand to an expression, so even
/// if an error is encountered internally, the user will receive
/// an error that they also used it in the wrong place.
pub fn expr(sp: Span) -> Box<MacResult+'static> {
Box::new(DummyResult { expr_only: true, span: sp })
}
/// A plain dummy expression.
pub fn raw_expr(sp: Span) -> P<ast::Expr> {
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::Lit(P(codemap::respan(sp, ast::LitKind::Bool(false)))),
span: sp,
attrs: ast::ThinVec::new(),
})
}
/// A plain dummy pattern.
pub fn raw_pat(sp: Span) -> ast::Pat {
ast::Pat {
id: ast::DUMMY_NODE_ID,
node: PatKind::Wild,
span: sp,
}
}
pub fn raw_ty(sp: Span) -> P<ast::Ty> {
P(ast::Ty {
id: ast::DUMMY_NODE_ID,
node: ast::TyKind::Infer,
span: sp
})
}
}
impl MacResult for DummyResult {
fn make_expr(self: Box<DummyResult>) -> Option<P<ast::Expr>> {
Some(DummyResult::raw_expr(self.span))
}
fn make_pat(self: Box<DummyResult>) -> Option<P<ast::Pat>> {
Some(P(DummyResult::raw_pat(self.span)))
}
fn make_items(self: Box<DummyResult>) -> Option<SmallVector<P<ast::Item>>> {
// this code needs a comment... why not always just return the Some() ?
if self.expr_only {
None
} else {
Some(SmallVector::new())
}
}
fn make_impl_items(self: Box<DummyResult>) -> Option<SmallVector<ast::ImplItem>> {
if self.expr_only {
None
} else {
Some(SmallVector::new())
}
}
fn make_trait_items(self: Box<DummyResult>) -> Option<SmallVector<ast::TraitItem>> {
if self.expr_only {
None
} else {
Some(SmallVector::new())
}
}
fn make_foreign_items(self: Box<Self>) -> Option<SmallVector<ast::ForeignItem>> {
if self.expr_only {
None
} else {
Some(SmallVector::new())
}
}
fn make_stmts(self: Box<DummyResult>) -> Option<SmallVector<ast::Stmt>> {
Some(SmallVector::one(ast::Stmt {
id: ast::DUMMY_NODE_ID,
node: ast::StmtKind::Expr(DummyResult::raw_expr(self.span)),
span: self.span,
}))
}
fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> {
Some(DummyResult::raw_ty(self.span))
}
}
pub type BuiltinDeriveFn =
for<'cx> fn(&'cx mut ExtCtxt, Span, &MetaItem, &Annotatable, &mut FnMut(Annotatable));
/// Represents different kinds of macro invocations that can be resolved.
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum MacroKind {
/// A bang macro - foo!()
Bang,
/// An attribute macro - #[foo]
Attr,
/// A derive attribute macro - #[derive(Foo)]
Derive,
}
/// An enum representing the different kinds of syntax extensions.
pub enum SyntaxExtension {
/// A syntax extension that is attached to an item and creates new items
/// based upon it.
///
/// `#[derive(...)]` is a `MultiItemDecorator`.
///
/// Prefer ProcMacro or MultiModifier since they are more flexible.
MultiDecorator(Box<MultiItemDecorator + sync::Sync + sync::Send>),
/// A syntax extension that is attached to an item and modifies it
/// in-place. Also allows decoration, i.e., creating new items.
MultiModifier(Box<MultiItemModifier + sync::Sync + sync::Send>),
/// A function-like procedural macro. TokenStream -> TokenStream.
ProcMacro(Box<ProcMacro + sync::Sync + sync::Send>, Edition),
/// An attribute-like procedural macro. TokenStream, TokenStream -> TokenStream.
    /// The first TokenStream is the attribute, the second is the annotated item.
/// Allows modification of the input items and adding new items, similar to
/// MultiModifier, but uses TokenStreams, rather than AST nodes.
AttrProcMacro(Box<AttrProcMacro + sync::Sync + sync::Send>, Edition),
/// A normal, function-like syntax extension.
///
/// `bytes!` is a `NormalTT`.
NormalTT {
expander: Box<TTMacroExpander + sync::Sync + sync::Send>,
def_info: Option<(ast::NodeId, Span)>,
/// Whether the contents of the macro can
/// directly use `#[unstable]` things (true == yes).
allow_internal_unstable: bool,
/// Whether the contents of the macro can use `unsafe`
/// without triggering the `unsafe_code` lint.
allow_internal_unsafe: bool,
/// The macro's feature name if it is unstable, and the stability feature
unstable_feature: Option<(Symbol, u32)>,
/// Edition of the crate in which the macro is defined
edition: Edition,
},
/// A function-like syntax extension that has an extra ident before
/// the block.
///
IdentTT(Box<IdentMacroExpander + sync::Sync + sync::Send>, Option<Span>, bool),
/// An attribute-like procedural macro. TokenStream -> TokenStream.
/// The input is the annotated item.
/// Allows generating code to implement a Trait for a given struct
/// or enum item.
ProcMacroDerive(Box<MultiItemModifier + sync::Sync + sync::Send>,
Vec<Symbol> /* inert attribute names */, Edition),
/// An attribute-like procedural macro that derives a builtin trait.
BuiltinDerive(BuiltinDeriveFn),
/// A declarative macro, e.g. `macro m() {}`.
///
/// The second element is the definition site span.
DeclMacro(Box<TTMacroExpander + sync::Sync + sync::Send>, Option<(ast::NodeId, Span)>, Edition),
}
impl SyntaxExtension {
/// Return which kind of macro calls this syntax extension.
pub fn kind(&self) -> MacroKind {
match *self {
SyntaxExtension::DeclMacro(..) |
SyntaxExtension::NormalTT { .. } |
SyntaxExtension::IdentTT(..) |
SyntaxExtension::ProcMacro(..) =>
MacroKind::Bang,
SyntaxExtension::MultiDecorator(..) |
SyntaxExtension::MultiModifier(..) |
SyntaxExtension::AttrProcMacro(..) =>
MacroKind::Attr,
SyntaxExtension::ProcMacroDerive(..) |
SyntaxExtension::BuiltinDerive(..) =>
MacroKind::Derive,
}
}
pub fn is_modern(&self) -> bool {
match *self {
SyntaxExtension::DeclMacro(..) |
SyntaxExtension::ProcMacro(..) |
SyntaxExtension::AttrProcMacro(..) |
SyntaxExtension::ProcMacroDerive(..) => true,
_ => false,
}
}
pub fn edition(&self) -> Edition {
match *self {
SyntaxExtension::NormalTT { edition, .. } |
SyntaxExtension::DeclMacro(.., edition) |
SyntaxExtension::ProcMacro(.., edition) |
SyntaxExtension::AttrProcMacro(.., edition) |
SyntaxExtension::ProcMacroDerive(.., edition) => edition,
// Unstable legacy stuff
SyntaxExtension::IdentTT(..) |
SyntaxExtension::MultiDecorator(..) |
SyntaxExtension::MultiModifier(..) |
SyntaxExtension::BuiltinDerive(..) => hygiene::default_edition(),
}
}
}
pub type NamedSyntaxExtension = (Name, SyntaxExtension);
pub trait Resolver {
fn next_node_id(&mut self) -> ast::NodeId;
fn get_module_scope(&mut self, id: ast::NodeId) -> Mark;
fn eliminate_crate_var(&mut self, item: P<ast::Item>) -> P<ast::Item>;
fn is_whitelisted_legacy_custom_derive(&self, name: Name) -> bool;
fn visit_expansion(&mut self, mark: Mark, expansion: &Expansion, derives: &[Mark]);
fn add_builtin(&mut self, ident: ast::Ident, ext: Lrc<SyntaxExtension>);
fn resolve_imports(&mut self);
// Resolves attribute and derive legacy macros from `#![plugin(..)]`.
fn find_legacy_attr_invoc(&mut self, attrs: &mut Vec<Attribute>, allow_derive: bool)
-> Option<Attribute>;
fn resolve_invoc(&mut self, invoc: &mut Invocation, scope: Mark, force: bool)
-> Result<Option<Lrc<SyntaxExtension>>, Determinacy>;
fn resolve_macro(&mut self, scope: Mark, path: &ast::Path, kind: MacroKind, force: bool)
-> Result<Lrc<SyntaxExtension>, Determinacy>;
fn check_unused_macros(&self);
}
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Determinacy {
Determined,
Undetermined,
}
pub struct DummyResolver;
impl Resolver for DummyResolver {
fn next_node_id(&mut self) -> ast::NodeId { ast::DUMMY_NODE_ID }
fn get_module_scope(&mut self, _id: ast::NodeId) -> Mark { Mark::root() }
fn eliminate_crate_var(&mut self, item: P<ast::Item>) -> P<ast::Item> { item }
fn is_whitelisted_legacy_custom_derive(&self, _name: Name) -> bool { false }
fn visit_expansion(&mut self, _invoc: Mark, _expansion: &Expansion, _derives: &[Mark]) {}
fn add_builtin(&mut self, _ident: ast::Ident, _ext: Lrc<SyntaxExtension>) {}
fn resolve_imports(&mut self)
|
fn find_legacy_attr_invoc(&mut self, _attrs: &mut Vec<Attribute>, _allow_derive: bool)
-> Option<Attribute> { None }
fn resolve_invoc(&mut self, _invoc: &mut Invocation, _scope: Mark, _force: bool)
-> Result<Option<Lrc<SyntaxExtension>>, Determinacy> {
Err(Determinacy::Determined)
}
fn resolve_macro(&mut self, _scope: Mark, _path: &ast::Path, _kind: MacroKind,
_force: bool) -> Result<Lrc<SyntaxExtension>, Determinacy> {
Err(Determinacy::Determined)
}
fn check_unused_macros(&self) {}
}
#[derive(Clone)]
pub struct ModuleData {
pub mod_path: Vec<ast::Ident>,
pub directory: PathBuf,
}
#[derive(Clone)]
pub struct ExpansionData {
pub mark: Mark,
pub depth: usize,
pub module: Rc<ModuleData>,
pub directory_ownership: DirectoryOwnership,
pub crate_span: Option<Span>,
}
/// One of these is made during expansion and incrementally updated as we go;
/// when a macro expansion occurs, the resulting nodes have the `backtrace()
/// -> expn_info` of their expansion context stored into their span.
pub struct ExtCtxt<'a> {
pub parse_sess: &'a parse::ParseSess,
pub ecfg: expand::ExpansionConfig<'a>,
pub root_path: PathBuf,
pub resolver: &'a mut Resolver,
pub resolve_err_count: usize,
pub current_expansion: ExpansionData,
pub expansions: HashMap<Span, Vec<String>>,
}
impl<'a> ExtCtxt<'a> {
pub fn new(parse_sess: &'a parse::ParseSess,
ecfg: expand::ExpansionConfig<'a>,
resolver: &'a mut Resolver)
-> ExtCtxt<'a> {
ExtCtxt {
parse_sess,
ecfg,
root_path: PathBuf::new(),
resolver,
resolve_err_count: 0,
current_expansion: ExpansionData {
mark: Mark::root(),
depth: 0,
module: Rc::new(ModuleData { mod_path: Vec::new(), directory: PathBuf::new() }),
directory_ownership: DirectoryOwnership::Owned { relative: None },
crate_span: None,
},
expansions: HashMap::new(),
}
}
/// Returns a `Folder` for deeply expanding all macros in an AST node.
pub fn expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> {
expand::MacroExpander::new(self, false)
}
/// Returns a `Folder` that deeply expands all macros and assigns all node ids in an AST node.
/// Once node ids are assigned, the node may not be expanded, removed, or otherwise modified.
pub fn monotonic_expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> {
expand::MacroExpander::new(self, true)
}
pub fn new_parser_from_tts(&self, tts: &[tokenstream::TokenTree]) -> parser::Parser<'a> {
parse::stream_to_parser(self.parse_sess, tts.iter().cloned().collect())
}
pub fn codemap(&self) -> &'a CodeMap { self.parse_sess.codemap() }
pub fn parse_sess(&self) -> &'a parse::ParseSess { self.parse_sess }
pub fn cfg(&self) -> &ast::CrateConfig { &self.parse_sess.config }
pub fn call_site(&self) -> Span {
match self.current_expansion.mark.expn_info() {
Some(expn_info) => expn_info.call_site,
None => DUMMY_SP,
}
}
pub fn backtrace(&self) -> SyntaxContext {
SyntaxContext::empty().apply_mark(self.current_expansion.mark)
}
/// Returns span for the macro which originally caused the current expansion to happen.
///
/// Stops backtracing at include! boundary.
pub fn expansion_cause(&self) -> Option<Span> {
let mut ctxt = self.backtrace();
let mut last_macro = None;
loop {
if ctxt.outer().expn_info().map_or(None, |info| {
if info.callee.name() == "include" {
// Stop going up the backtrace once include! is encountered
return None;
}
ctxt = info.call_site.ctxt();
last_macro = Some(info.call_site);
Some(())
}).is_none() {
break
}
}
last_macro
}
pub fn struct_span_warn<S: Into<MultiSpan>>(&self,
sp: S,
msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.struct_span_warn(sp, msg)
}
pub fn struct_span_err<S: Into<MultiSpan>>(&self,
sp: S,
msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.struct_span_err(sp, msg)
}
pub fn struct_span_fatal<S: Into<MultiSpan>>(&self,
sp: S,
msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.struct_span_fatal(sp, msg)
}
/// Emit `msg` attached to `sp`, and stop compilation immediately.
///
/// `span_err` should be strongly preferred where-ever possible:
/// this should *only* be used when:
///
/// - continuing has a high risk of flow-on errors (e.g. errors in
/// declaring a macro would cause all uses of that macro to
/// complain about "undefined macro"), or
/// - there is literally nothing else that can be done (however,
/// in most cases one can construct a dummy expression/item to
/// substitute; we never hit resolve/type-checking so the dummy
/// value doesn't have to match anything)
pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
self.parse_sess.span_diagnostic.span_fatal(sp, msg).raise();
}
/// Emit `msg` attached to `sp`, without immediately stopping
/// compilation.
///
/// Compilation will be stopped in the near future (at the end of
/// the macro expansion phase).
pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.parse_sess.span_diagnostic.span_err(sp, msg);
}
pub fn span_err_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: DiagnosticId) {
self.parse_sess.span_diagnostic.span_err_with_code(sp, msg, code);
}
pub fn mut_span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str)
-> DiagnosticBuilder<'a> {
self.parse_sess.span_diagnostic.mut_span_err(sp, msg)
}
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.parse_sess.span_diagnostic.span_warn(sp, msg);
}
pub fn span_unimpl<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
self.parse_sess.span_diagnostic.span_unimpl(sp, msg);
}
pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
self.parse_sess.span_diagnostic.span_bug(sp, msg);
}
pub fn trace_macros_diag(&mut self) {
for (sp, notes) in self.expansions.iter() {
let mut db = self.parse_sess.span_diagnostic.span_note_diag(*sp, "trace_macro");
for note in notes {
db.note(note);
}
db.emit();
}
// Fixme: does this result in errors?
self.expansions.clear();
}
pub fn bug(&self, msg: &str) -> ! {
self.parse_sess.span_diagnostic.bug(msg);
}
pub fn trace_macros(&self) -> bool {
self.ecfg.trace_mac
}
pub fn set_trace_macros(&mut self, x: bool) {
self.ecfg.trace_mac = x
}
pub fn ident_of(&self, st: &str) -> ast::Ident {
ast::Ident::from_str(st)
}
pub fn std_path(&self, components: &[&str]) -> Vec<ast::Ident> {
let def_site = DUMMY_SP.apply_mark(self.current_expansion.mark);
iter::once(Ident::new(keywords::DollarCrate.name(), def_site))
.chain(components.iter().map(|s| self.ident_of(s)))
.collect()
}
pub fn name_of(&self, st: &str) -> ast::Name {
Symbol::intern(st)
}
pub fn check_unused_macros(&self) {
self.resolver.check_unused_macros();
}
}
/// Extract a string literal from the macro expanded version of `expr`,
/// emitting `err_msg` if `expr` is not a string literal. This does not stop
/// compilation on error, merely emits a non-fatal error and returns None.
pub fn expr_to_spanned_string(cx: &mut ExtCtxt, expr: P<ast::Expr>, err_msg: &str)
-> Option<Spanned<(Symbol, ast::StrStyle)>> {
// Update `expr.span`'s ctxt now in case expr is an `include!` macro invocation.
let expr = expr.map(|mut expr| {
expr.span = expr.span.apply_mark(cx.current_expansion.mark);
expr
});
// we want to be able to handle e.g. concat("foo", "bar")
let expr = cx.expander().fold_expr(expr);
match expr.node {
ast::ExprKind::Lit(ref l) => match l.node {
ast::LitKind::Str(s, style) => return Some(respan(expr.span, (s, style))),
_ => cx.span_err(l.span, err_msg)
},
_ => cx.span_err(expr.span, err_msg)
}
None
}
pub fn expr_to_string(cx: &mut ExtCtxt, expr: P<ast::Expr>, err_msg: &str)
-> Option<(Symbol, ast::StrStyle)> {
expr_to_spanned_string(cx, expr, err_msg).map(|s| s.node)
}
/// Non-fatally assert that `tts` is empty. Note that this function
/// returns even when `tts` is non-empty, macros that *need* to stop
/// compilation should call
/// `cx.parse_sess.span_diagnostic.abort_if_errors()` (this should be
/// done as rarely as possible).
pub fn check_zero_tts(cx: &ExtCtxt,
sp: Span,
tts: &[tokenstream::TokenTree],
name: &str) {
if !tts.is_empty() {
cx.span_err(sp, &format!("{} takes no arguments", name));
}
}
/// Interpreting `tts` as a comma-separated sequence of expressions,
/// expect exactly one string literal, or emit an error and return None.
pub fn get_single_str_from_tts(cx: &mut ExtCtxt,
sp: Span,
tts: &[tokenstream::TokenTree],
name: &str)
-> Option<String> {
let mut p = cx.new_parser_from_tts(tts);
if p.token == token::Eof {
cx.span_err(sp, &format!("{} takes 1 argument", name));
return None
}
let ret = panictry!(p.parse_expr());
let _ = p.eat(&token::Comma);
if p.token != token::Eof {
cx.span_err(sp, &format!("{} takes 1 argument", name));
}
expr_to_string(cx, ret, "argument must be a string literal").map(|(s, _)| {
s.to_string()
})
}
/// Extract comma-separated expressions from `tts`. If there is a
/// parsing error, emit a non-fatal error and return None.
pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
sp: Span,
tts: &[tokenstream::TokenTree]) -> Option<Vec<P<ast::Expr>>> {
let mut p = cx.new_parser_from_tts(tts);
let mut es = Vec::new();
while p.token != token::Eof {
es.push(cx.expander().fold_expr(panictry!(p.parse_expr())));
if p.eat(&token::Comma) {
continue;
}
if p.token != token::Eof {
cx.span_err(sp, "expected token: `,`");
return None;
}
}
Some(es)
}
|
{}
|
TransactionsRepository.ts
|
import { EntityRepository, Repository } from 'typeorm';
import Transaction from '../models/Transaction';
interface Balance {
income: number;
outcome: number;
total: number;
}
@EntityRepository(Transaction)
class TransactionsRepository extends Repository<Transaction> {
public async getBalance(): Promise<Balance> {
const transactions = await this.find();
const { income, outcome } = transactions.reduce(
(accumulator: Balance, transaction: Transaction) => {
switch (transaction.type) {
case 'income':
accumulator.income += Number(transaction.value);
break;
case 'outcome':
accumulator.outcome += Number(transaction.value);
break;
default:
break;
}
return accumulator;
},
{
income: 0,
outcome: 0,
total: 0,
},
);
const total = income - outcome;
return { income, outcome, total };
}
}
export default TransactionsRepository;
| ||
index.d.ts
|
export { Types16 as default } from "../";
| ||
mock_test.go
|
package mocknet
import (
"bytes"
"context"
"errors"
"io"
"math"
"math/rand"
"sync"
"testing"
"time"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-testing/ci"
tnet "github.com/libp2p/go-libp2p-testing/net"
"github.com/libp2p/go-libp2p-testing/race"
)
func TestNetworkSetup(t *testing.T) {
ctx := context.Background()
id1 := tnet.RandIdentityOrFatal(t)
id2 := tnet.RandIdentityOrFatal(t)
id3 := tnet.RandIdentityOrFatal(t)
mn := New(ctx)
// peers := []peer.ID{p1, p2, p3}
// add peers to mock net
a1 := tnet.RandLocalTCPAddress()
a2 := tnet.RandLocalTCPAddress()
a3 := tnet.RandLocalTCPAddress()
h1, err := mn.AddPeer(id1.PrivateKey(), a1)
if err != nil {
t.Fatal(err)
}
p1 := h1.ID()
h2, err := mn.AddPeer(id2.PrivateKey(), a2)
if err != nil {
t.Fatal(err)
}
p2 := h2.ID()
h3, err := mn.AddPeer(id3.PrivateKey(), a3)
if err != nil {
t.Fatal(err)
}
p3 := h3.ID()
// check peers and net
if mn.Host(p1) != h1 {
t.Error("host for p1.ID != h1")
}
if mn.Host(p2) != h2 {
t.Error("host for p2.ID != h2")
}
if mn.Host(p3) != h3 {
t.Error("host for p3.ID != h3")
}
n1 := h1.Network()
if mn.Net(p1) != n1 {
t.Error("net for p1.ID != n1")
}
n2 := h2.Network()
if mn.Net(p2) != n2 {
t.Error("net for p2.ID != n1")
}
n3 := h3.Network()
if mn.Net(p3) != n3 {
t.Error("net for p3.ID != n1")
}
// link p1<-->p2, p1<-->p1, p2<-->p3, p3<-->p2
l12, err := mn.LinkPeers(p1, p2)
if err != nil {
t.Fatal(err)
}
if !(l12.Networks()[0] == n1 && l12.Networks()[1] == n2) &&
!(l12.Networks()[0] == n2 && l12.Networks()[1] == n1) {
t.Error("l12 networks incorrect")
}
l11, err := mn.LinkPeers(p1, p1)
if err != nil {
t.Fatal(err)
}
if !(l11.Networks()[0] == n1 && l11.Networks()[1] == n1) {
t.Error("l11 networks incorrect")
}
l23, err := mn.LinkPeers(p2, p3)
if err != nil {
t.Fatal(err)
}
if !(l23.Networks()[0] == n2 && l23.Networks()[1] == n3) &&
!(l23.Networks()[0] == n3 && l23.Networks()[1] == n2) {
t.Error("l23 networks incorrect")
}
l32, err := mn.LinkPeers(p3, p2)
if err != nil {
t.Fatal(err)
}
if !(l32.Networks()[0] == n2 && l32.Networks()[1] == n3) &&
!(l32.Networks()[0] == n3 && l32.Networks()[1] == n2) {
t.Error("l32 networks incorrect")
}
// check things
links12 := mn.LinksBetweenPeers(p1, p2)
if len(links12) != 1 {
t.Errorf("should be 1 link bt. p1 and p2 (found %d)", len(links12))
}
if links12[0] != l12 {
t.Error("links 1-2 should be l12.")
}
links11 := mn.LinksBetweenPeers(p1, p1)
if len(links11) != 1 {
t.Errorf("should be 1 link bt. p1 and p1 (found %d)", len(links11))
}
if links11[0] != l11 {
t.Error("links 1-1 should be l11.")
}
links23 := mn.LinksBetweenPeers(p2, p3)
if len(links23) != 2 {
t.Errorf("should be 2 link bt. p2 and p3 (found %d)", len(links23))
}
if !((links23[0] == l23 && links23[1] == l32) ||
(links23[0] == l32 && links23[1] == l23)) {
t.Error("links 2-3 should be l23 and l32.")
}
// unlinking
if err := mn.UnlinkPeers(p2, p1); err != nil {
t.Error(err)
}
// check only one link affected:
links12 = mn.LinksBetweenPeers(p1, p2)
if len(links12) != 0 {
t.Error("should be 0 now...", len(links12))
}
links11 = mn.LinksBetweenPeers(p1, p1)
if len(links11) != 1 {
t.Errorf("should be 1 link bt. p1 and p1 (found %d)", len(links11))
}
if links11[0] != l11 {
t.Error("links 1-1 should be l11.")
}
links23 = mn.LinksBetweenPeers(p2, p3)
if len(links23) != 2 {
t.Errorf("should be 2 link bt. p2 and p3 (found %d)", len(links23))
}
if !((links23[0] == l23 && links23[1] == l32) ||
(links23[0] == l32 && links23[1] == l23)) {
t.Error("links 2-3 should be l23 and l32.")
}
// check connecting
// first, no conns
if len(n2.Conns()) > 0 || len(n3.Conns()) > 0 {
t.Errorf("should have 0 conn. Got: (%d, %d)", len(n2.Conns()), len(n3.Conns()))
}
// connect p2->p3
if _, err := n2.DialPeer(ctx, p3); err != nil {
t.Error(err)
}
	// should immediately have a conn on the initiator
	if len(n2.Conns()) != 1 {
		t.Errorf("should have 1 conn on initiator. Got: %d", len(n2.Conns()))
}
	// wait for the receiver to see the conn.
for i := 0; i < 10 && len(n3.Conns()) == 0; i++ {
time.Sleep(time.Duration(10*i) * time.Millisecond)
}
if len(n3.Conns()) != 1 {
t.Errorf("should have 1 conn on reciever. Got: %d", len(n3.Conns()))
}
// p := PrinterTo(os.Stdout)
// p.NetworkConns(n1)
// p.NetworkConns(n2)
// p.NetworkConns(n3)
// can create a stream 2->3, 3->2,
if _, err := n2.NewStream(ctx, p3); err != nil {
t.Error(err)
}
if _, err := n3.NewStream(ctx, p2); err != nil {
t.Error(err)
}
// but not 1->2 nor 2->2 (not linked), nor 1->1 (not connected)
if _, err := n1.NewStream(ctx, p2); err == nil {
t.Error("should not be able to connect")
}
if _, err := n2.NewStream(ctx, p2); err == nil {
t.Error("should not be able to connect")
}
if _, err := n1.NewStream(ctx, p1); err == nil {
t.Error("should not be able to connect")
}
// connect p1->p1 (should fail)
if _, err := n1.DialPeer(ctx, p1); err == nil {
t.Error("p1 shouldn't be able to dial self")
}
// and a stream too
if _, err := n1.NewStream(ctx, p1); err == nil {
t.Error("p1 shouldn't be able to dial self")
}
// connect p1->p2
if _, err := n1.DialPeer(ctx, p2); err == nil {
t.Error("p1 should not be able to dial p2, not connected...")
}
// connect p3->p1
if _, err := n3.DialPeer(ctx, p1); err == nil {
t.Error("p3 should not be able to dial p1, not connected...")
}
// relink p1->p2
l12, err = mn.LinkPeers(p1, p2)
if err != nil {
t.Fatal(err)
}
if !(l12.Networks()[0] == n1 && l12.Networks()[1] == n2) &&
!(l12.Networks()[0] == n2 && l12.Networks()[1] == n1) {
t.Error("l12 networks incorrect")
}
// should now be able to connect
// connect p1->p2
if _, err := n1.DialPeer(ctx, p2); err != nil {
t.Error(err)
}
// and a stream should work now too :)
if _, err := n2.NewStream(ctx, p3); err != nil {
t.Error(err)
}
}
func TestStreams(t *testing.T) {
ctx := context.Background()
mn, err := FullMeshConnected(context.Background(), 3)
if err != nil {
t.Fatal(err)
}
handler := func(s network.Stream) {
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if !bytes.Equal(b, []byte("beep")) {
panic("bytes mismatch")
}
if _, err := s.Write([]byte("boop")); err != nil {
panic(err)
}
s.Close()
}
hosts := mn.Hosts()
for _, h := range mn.Hosts() {
h.SetStreamHandler(protocol.TestingID, handler)
}
s, err := hosts[0].NewStream(ctx, hosts[1].ID(), protocol.TestingID)
if err != nil {
t.Fatal(err)
}
if _, err := s.Write([]byte("beep")); err != nil {
panic(err)
}
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if !bytes.Equal(b, []byte("boop")) {
panic("bytes mismatch 2")
}
}
func performPing(t *testing.T, st string, n int, s network.Stream) error {
t.Helper()
defer s.Close()
for i := 0; i < n; i++ {
b := make([]byte, 4+len(st))
if _, err := s.Write([]byte("ping" + st)); err != nil {
return err
}
if _, err := io.ReadFull(s, b); err != nil {
return err
}
if !bytes.Equal(b, []byte("pong"+st)) {
return errors.New("bytes mismatch")
}
}
return nil
}
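// makePonger returns a stream handler that replies "pong"+st for every
// "ping"+st it reads, reporting any errors on errs.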
func makePonger(t *testing.T, st string, errs chan<- error) func(network.Stream) {
t.Helper()
return func(s network.Stream) {
go func() {
defer s.Close()
for {
b := make([]byte, 4+len(st))
if _, err := io.ReadFull(s, b); err != nil {
if err == io.EOF {
return
}
errs <- err
}
if !bytes.Equal(b, []byte("ping"+st)) {
errs <- errors.New("bytes mismatch")
}
if _, err := s.Write([]byte("pong" + st)); err != nil {
errs <- err
}
}
}()
}
}
func TestStreamsStress(t *testing.T) {
ctx := context.Background()
nnodes := 100
if race.WithRace() {
nnodes = 30
}
mn, err := FullMeshConnected(context.Background(), nnodes)
if err != nil {
t.Fatal(err)
}
errs := make(chan error)
hosts := mn.Hosts()
for _, h := range hosts {
ponger := makePonger(t, "pingpong", errs)
h.SetStreamHandler(protocol.TestingID, ponger)
}
var wg sync.WaitGroup
for i := 0; i < 1000; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
var from, to int
for from == to {
from = rand.Intn(len(hosts))
to = rand.Intn(len(hosts))
}
s, err := hosts[from].NewStream(ctx, hosts[to].ID(), protocol.TestingID)
if err != nil {
log.Debugf("%d (%s) %d (%s)", from, hosts[from], to, hosts[to])
panic(err)
}
log.Infof("%d start pinging", i)
errs <- performPing(t, "pingpong", rand.Intn(100), s)
log.Infof("%d done pinging", i)
}(i)
}
go func() {
wg.Wait()
close(errs)
}()
for err := range errs {
if err == nil {
continue
}
t.Fatal(err)
}
}
func TestAdding(t *testing.T) {
mn := New(context.Background())
var peers []peer.ID
for i := 0; i < 3; i++ {
id := tnet.RandIdentityOrFatal(t)
a := tnet.RandLocalTCPAddress()
h, err := mn.AddPeer(id.PrivateKey(), a)
if err != nil {
t.Fatal(err)
}
peers = append(peers, h.ID())
}
p1 := peers[0]
p2 := peers[1]
// link them
for _, p1 := range peers {
for _, p2 := range peers {
if _, err := mn.LinkPeers(p1, p2); err != nil {
t.Error(err)
}
}
}
// set the new stream handler on p2
h2 := mn.Host(p2)
if h2 == nil {
t.Fatalf("no host for %s", p2)
}
h2.SetStreamHandler(protocol.TestingID, func(s network.Stream) {
defer s.Close()
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if string(b) != "beep" {
panic("did not beep!")
}
if _, err := s.Write([]byte("boop")); err != nil {
panic(err)
}
})
// connect p1 to p2
if _, err := mn.ConnectPeers(p1, p2); err != nil {
t.Fatal(err)
}
// talk to p2
h1 := mn.Host(p1)
if h1 == nil {
t.Fatalf("no network for %s", p1)
}
ctx := context.Background()
s, err := h1.NewStream(ctx, p2, protocol.TestingID)
if err != nil {
t.Fatal(err)
}
if _, err := s.Write([]byte("beep")); err != nil {
t.Error(err)
}
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
t.Error(err)
}
if !bytes.Equal(b, []byte("boop")) {
t.Error("bytes mismatch 2")
}
}
func TestRateLimiting(t *testing.T) {
if ci.IsRunning() {
t.Skip("buggy in CI")
}
rl := NewRateLimiter(10)
if !within(rl.Limit(10), time.Duration(float32(time.Second)), time.Millisecond) {
t.Fatal()
}
if !within(rl.Limit(10), time.Duration(float32(time.Second*2)), time.Millisecond) {
t.Fatal()
}
if !within(rl.Limit(10), time.Duration(float32(time.Second*3)), time.Millisecond) {
t.Fatal()
}
if within(rl.Limit(10), time.Duration(float32(time.Second*3)), time.Millisecond) {
t.Fatal()
}
rl.UpdateBandwidth(50)
if !within(rl.Limit(75), time.Duration(float32(time.Second)*1.5), time.Millisecond) {
t.Fatal()
}
if within(rl.Limit(75), time.Duration(float32(time.Second)*1.5), time.Millisecond) {
t.Fatal()
}
rl.UpdateBandwidth(100)
if !within(rl.Limit(1), time.Duration(time.Millisecond*10), time.Millisecond) {
t.Fatal()
}
if within(rl.Limit(1), time.Duration(time.Millisecond*10), time.Millisecond) {
t.Fatal()
}
}
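// within reports whether t1 and t2 differ by less than tolerance.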
func within(t1 time.Duration, t2 time.Duration, tolerance time.Duration) bool {
return math.Abs(float64(t1)-float64(t2)) < float64(tolerance)
}
func TestLimitedStreams(t *testing.T) {
mn, err := FullMeshConnected(context.Background(), 2)
if err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
messages := 4
messageSize := 500
handler := func(s network.Stream) {
b := make([]byte, messageSize)
for i := 0; i < messages; i++ {
if _, err := io.ReadFull(s, b); err != nil {
log.Fatal(err)
}
if !bytes.Equal(b[:4], []byte("ping")) {
log.Fatal("bytes mismatch")
}
wg.Done()
}
s.Close()
}
hosts := mn.Hosts()
for _, h := range mn.Hosts() {
h.SetStreamHandler(protocol.TestingID, handler)
}
peers := mn.Peers()
links := mn.LinksBetweenPeers(peers[0], peers[1])
// 1000 byte per second bandwidth
bps := float64(1000)
opts := links[0].Options()
opts.Bandwidth = bps
for _, link := range links {
link.SetOptions(opts)
}
ctx := context.Background()
s, err := hosts[0].NewStream(ctx, hosts[1].ID(), protocol.TestingID)
if err != nil {
t.Fatal(err)
}
filler := make([]byte, messageSize-4)
data := append([]byte("ping"), filler...)
before := time.Now()
for i := 0; i < messages; i++ {
wg.Add(1)
if _, err := s.Write(data); err != nil {
panic(err)
}
}
wg.Wait()
if !within(time.Since(before), time.Second*2, time.Second) {
t.Fatal("Expected 2ish seconds but got ", time.Since(before))
}
}
func TestFuzzManyPeers(t *testing.T) {
peerCount := 500
if race.WithRace() {
peerCount = 100
}
for i := 0; i < peerCount; i++ {
ctx, cancel := context.WithCancel(context.Background())
_, err := FullMeshConnected(ctx, 2)
cancel()
if err != nil {
t.Fatal(err)
}
}
}
func
|
(t *testing.T) {
latency := time.Millisecond * 500
mn, err := WithNPeers(context.Background(), 2)
if err != nil {
t.Fatal(err)
}
// configure the Mocknet with some latency and link/connect its peers
mn.SetLinkDefaults(LinkOptions{Latency: latency})
mn.LinkAll()
mn.ConnectAllButSelf()
msg := []byte("ping")
mln := len(msg)
var wg sync.WaitGroup
// we'll write once to a single stream
wg.Add(1)
handler := func(s network.Stream) {
b := make([]byte, mln)
if _, err := io.ReadFull(s, b); err != nil {
t.Fatal(err)
}
wg.Done()
s.Close()
}
mn.Hosts()[0].SetStreamHandler(protocol.TestingID, handler)
mn.Hosts()[1].SetStreamHandler(protocol.TestingID, handler)
s, err := mn.Hosts()[0].NewStream(context.Background(), mn.Hosts()[1].ID(), protocol.TestingID)
if err != nil {
t.Fatal(err)
}
// writing to the stream will be subject to our configured latency
checkpoint := time.Now()
if _, err := s.Write(msg); err != nil {
t.Fatal(err)
}
wg.Wait()
delta := time.Since(checkpoint)
tolerance := time.Second
if !within(delta, latency, tolerance) {
t.Fatalf("Expected write to take ~%s (+/- %s), but took %s", latency.String(), tolerance.String(), delta.String())
}
}
|
TestStreamsWithLatency
|
pHTemp.js
|
const PHM = 1;
const PHB = 0;
const TEMPM = 1;
const TEMPB = 0;
//console.log('starting');
var i2c = require('i2c');
var _messenger = require("./messenger.js");
var messenger = new _messenger.client({});
var device;
exports.init = function(pHTempAddr) {
	device = new i2c(pHTempAddr, {device: '/dev/i2c-1'});
	//console.log('initiate i2c');
};
var pHValue;
var tempValue;
var pHResult = [];
var tempResult = [];
var pHByte;
var tempByte;
messenger.on('pHTemp', function(command) {
//console.log('received command: ' + command);
pHTemp();
//send results
if (command == 0x67) {
//console.log('sending Bytes');
messenger.emit('pH Value', pHValue);
} else if (command == 0x68) {
//console.log('sending Bytes');
messenger.emit('Temp Value', tempValue);
} else if (command == 0x69) {
//console.log('sending Bytes');
if (pHValue) {
messenger.emit('pH Value', pHValue);
messenger.emit('Temp Value', tempValue);
} else {
messenger.emit('pH Value', -1);
			messenger.emit('Temp Value', -1);
		}
	}
});
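// Combine the high and low ADC result bytes into a single 16-bit value.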
function unBitShift(result0, result1) {
return (result0 << 8) | result1;
};
function
|
() {
/*
if(!adc.busy) {
//A0 for pH
adc.readADCSingleEnded(0, 4096, 250, function(err, data1, data2) {
if(err) {
var errorMessage = 'error reading: ' + err;
messenger.emit('miscError' + errorMessage);
return;
};
pHByte = unBitShift(data1, data2);
pHValue = (PHM * pHByte) + PHB;
});
//A2 for temperature
adc.readADCSingleEnded(2, 4096, 250, function(err, data1, data2) {
if(err) {
var errorMessage = 'error reading: ' + err;
messenger.emit('miscError' + errorMessage);
return;
};
tempByte = unBitShift(data1, data2);
tempValue = (TEMPM * tempByte) + TEMPB;
});
};
*/
device.readBytes(0x69, 4, function(err, res) {
if (err){
var errorMessage = "error reading" + err;
messenger.emit('miscError: '+ errorMessage);
return;
};
pHResult[0] = res[0];
pHResult[1] = res[1];
// console.log('pH Result: ' + pHResult);
pHByte = unBitShift(pHResult[0], pHResult[1]);
//console.log('pHByte: ' + pHByte);
tempResult[0] = res[2];
tempResult[1] = res[3];
//console.log('Temp Result: ' + tempResult);
tempByte = unBitShift(tempResult[0], tempResult[1]);
//console.log('tempByte: ' + tempByte);
pHValue = (PHM * pHByte) + PHB;
tempValue = (TEMPM * tempByte) + TEMPB;
});
};
|
pHTemp
|
transition_blocks.rs
|
use clap::ArgMatches;
use eth2_network_config::Eth2NetworkConfig;
use log::info;
use ssz::Encode;
use state_processing::{
per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot,
};
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock};
pub fn run_transition_blocks<T: EthSpec>(
testnet_dir: PathBuf,
matches: &ArgMatches,
) -> Result<(), String> {
let pre_state_path = matches
.value_of("pre-state")
.ok_or("No pre-state file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse pre-state path: {}", e))?;
let block_path = matches
.value_of("block")
.ok_or("No block file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse block path: {}", e))?;
let output_path = matches
.value_of("output")
.ok_or("No output file supplied")?
.parse::<PathBuf>()
.map_err(|e| format!("Failed to parse output path: {}", e))?;
info!("Using {} spec", T::spec_name());
info!("Pre-state path: {:?}", pre_state_path);
info!("Block path: {:?}", block_path);
let eth2_network_config = Eth2NetworkConfig::load(testnet_dir)?;
let spec = &eth2_network_config.chain_spec::<T>()?;
let pre_state: BeaconState<T> =
load_from_ssz_with(&pre_state_path, spec, BeaconState::from_ssz_bytes)?;
let block: SignedBeaconBlock<T> =
load_from_ssz_with(&block_path, spec, SignedBeaconBlock::from_ssz_bytes)?;
let post_state = do_transition(pre_state, block, spec)?;
let mut output_file =
File::create(output_path).map_err(|e| format!("Unable to create output file: {:?}", e))?;
output_file
.write_all(&post_state.as_ssz_bytes())
.map_err(|e| format!("Unable to write to output file: {:?}", e))?;
Ok(())
}
fn
|
<T: EthSpec>(
mut pre_state: BeaconState<T>,
block: SignedBeaconBlock<T>,
spec: &ChainSpec,
) -> Result<BeaconState<T>, String> {
pre_state
.build_all_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?;
// Transition the parent state to the block slot.
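// Each per_slot_processing call advances the state by exactly one slot, so after the
// loop below the state sits at the block's slot and is ready for block processing.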
for i in pre_state.slot().as_u64()..block.slot().as_u64() {
per_slot_processing(&mut pre_state, None, spec)
.map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?;
}
pre_state
.build_all_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?;
per_block_processing(
&mut pre_state,
&block,
None,
BlockSignatureStrategy::VerifyIndividual,
VerifyBlockRoot::True,
spec,
)
.map_err(|e| format!("State transition failed: {:?}", e))?;
Ok(pre_state)
}
pub fn load_from_ssz_with<T>(
path: &Path,
spec: &ChainSpec,
decoder: impl FnOnce(&[u8], &ChainSpec) -> Result<T, ssz::DecodeError>,
) -> Result<T, String> {
let mut file =
File::open(path).map_err(|e| format!("Unable to open file {:?}: {:?}", path, e))?;
let mut bytes = vec![];
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?;
decoder(&bytes, spec).map_err(|e| format!("Ssz decode failed: {:?}", e))
}
|
do_transition
|
node_cpu_util.pb.go
|
/*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: node_cpu_util.proto
package cisco_ios_xr_wdsysmon_fd_oper_system_monitoring_cpu_utilization
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type NodeCpuUtil_KEYS struct {
NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeCpuUtil_KEYS) Reset() { *m = NodeCpuUtil_KEYS{} }
func (m *NodeCpuUtil_KEYS) String() string { return proto.CompactTextString(m) }
func (*NodeCpuUtil_KEYS) ProtoMessage() {}
func (*NodeCpuUtil_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_b085845089ac9f1b, []int{0}
}
func (m *NodeCpuUtil_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeCpuUtil_KEYS.Unmarshal(m, b)
}
func (m *NodeCpuUtil_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeCpuUtil_KEYS.Marshal(b, m, deterministic)
}
func (m *NodeCpuUtil_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeCpuUtil_KEYS.Merge(m, src)
}
func (m *NodeCpuUtil_KEYS) XXX_Size() int {
return xxx_messageInfo_NodeCpuUtil_KEYS.Size(m)
}
func (m *NodeCpuUtil_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_NodeCpuUtil_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_NodeCpuUtil_KEYS proto.InternalMessageInfo
func (m *NodeCpuUtil_KEYS) GetNodeName() string {
if m != nil {
return m.NodeName
}
return ""
}
type ProcessCpuUtil struct {
ProcessName string `protobuf:"bytes,1,opt,name=process_name,json=processName,proto3" json:"process_name,omitempty"`
ProcessId uint32 `protobuf:"varint,2,opt,name=process_id,json=processId,proto3" json:"process_id,omitempty"`
ProcessCpuOneMinute uint32 `protobuf:"varint,3,opt,name=process_cpu_one_minute,json=processCpuOneMinute,proto3" json:"process_cpu_one_minute,omitempty"`
ProcessCpuFiveMinute uint32 `protobuf:"varint,4,opt,name=process_cpu_five_minute,json=processCpuFiveMinute,proto3" json:"process_cpu_five_minute,omitempty"`
ProcessCpuFifteenMinute uint32 `protobuf:"varint,5,opt,name=process_cpu_fifteen_minute,json=processCpuFifteenMinute,proto3" json:"process_cpu_fifteen_minute,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProcessCpuUtil) Reset() { *m = ProcessCpuUtil{} }
func (m *ProcessCpuUtil) String() string { return proto.CompactTextString(m) }
func (*ProcessCpuUtil) ProtoMessage() {}
func (*ProcessCpuUtil) Descriptor() ([]byte, []int) {
return fileDescriptor_b085845089ac9f1b, []int{1}
}
func (m *ProcessCpuUtil) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ProcessCpuUtil.Unmarshal(m, b)
}
func (m *ProcessCpuUtil) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ProcessCpuUtil.Marshal(b, m, deterministic)
}
func (m *ProcessCpuUtil) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProcessCpuUtil.Merge(m, src)
}
func (m *ProcessCpuUtil) XXX_Size() int {
return xxx_messageInfo_ProcessCpuUtil.Size(m)
}
func (m *ProcessCpuUtil) XXX_DiscardUnknown() {
xxx_messageInfo_ProcessCpuUtil.DiscardUnknown(m)
}
var xxx_messageInfo_ProcessCpuUtil proto.InternalMessageInfo
func (m *ProcessCpuUtil) GetProcessName() string {
if m != nil {
return m.ProcessName
}
return ""
}
func (m *ProcessCpuUtil) GetProcessId() uint32 {
if m != nil {
return m.ProcessId
}
return 0
}
func (m *ProcessCpuUtil) GetProcessCpuOneMinute() uint32 {
if m != nil {
return m.ProcessCpuOneMinute
}
return 0
}
func (m *ProcessCpuUtil) GetProcessCpuFiveMinute() uint32 {
if m != nil {
return m.ProcessCpuFiveMinute
}
return 0
}
func (m *ProcessCpuUtil) GetProcessCpuFifteenMinute() uint32 {
if m != nil {
return m.ProcessCpuFifteenMinute
}
return 0
}
type NodeCpuUtil struct {
TotalCpuOneMinute uint32 `protobuf:"varint,50,opt,name=total_cpu_one_minute,json=totalCpuOneMinute,proto3" json:"total_cpu_one_minute,omitempty"`
TotalCpuFiveMinute uint32 `protobuf:"varint,51,opt,name=total_cpu_five_minute,json=totalCpuFiveMinute,proto3" json:"total_cpu_five_minute,omitempty"`
TotalCpuFifteenMinute uint32 `protobuf:"varint,52,opt,name=total_cpu_fifteen_minute,json=totalCpuFifteenMinute,proto3" json:"total_cpu_fifteen_minute,omitempty"`
ProcessCpu []*ProcessCpuUtil `protobuf:"bytes,53,rep,name=process_cpu,json=processCpu,proto3" json:"process_cpu,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeCpuUtil) Reset() { *m = NodeCpuUtil{} }
func (m *NodeCpuUtil) String() string { return proto.CompactTextString(m) }
func (*NodeCpuUtil) ProtoMessage() {}
func (*NodeCpuUtil) Descriptor() ([]byte, []int) {
return fileDescriptor_b085845089ac9f1b, []int{2}
}
func (m *NodeCpuUtil) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeCpuUtil.Unmarshal(m, b)
}
func (m *NodeCpuUtil) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeCpuUtil.Marshal(b, m, deterministic)
}
func (m *NodeCpuUtil) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeCpuUtil.Merge(m, src)
}
func (m *NodeCpuUtil) XXX_Size() int {
return xxx_messageInfo_NodeCpuUtil.Size(m)
}
func (m *NodeCpuUtil) XXX_DiscardUnknown() {
xxx_messageInfo_NodeCpuUtil.DiscardUnknown(m)
}
var xxx_messageInfo_NodeCpuUtil proto.InternalMessageInfo
func (m *NodeCpuUtil) GetTotalCpuOneMinute() uint32 {
if m != nil {
return m.TotalCpuOneMinute
}
return 0
}
func (m *NodeCpuUtil) GetTotalCpuFiveMinute() uint32 {
if m != nil {
return m.TotalCpuFiveMinute
}
return 0
}
func (m *NodeCpuUtil) GetTotalCpuFifteenMinute() uint32 {
if m != nil
|
return 0
}
func (m *NodeCpuUtil) GetProcessCpu() []*ProcessCpuUtil {
if m != nil {
return m.ProcessCpu
}
return nil
}
func init() {
proto.RegisterType((*NodeCpuUtil_KEYS)(nil), "cisco_ios_xr_wdsysmon_fd_oper.system_monitoring.cpu_utilization.node_cpu_util_KEYS")
proto.RegisterType((*ProcessCpuUtil)(nil), "cisco_ios_xr_wdsysmon_fd_oper.system_monitoring.cpu_utilization.process_cpu_util")
proto.RegisterType((*NodeCpuUtil)(nil), "cisco_ios_xr_wdsysmon_fd_oper.system_monitoring.cpu_utilization.node_cpu_util")
}
func init() { proto.RegisterFile("node_cpu_util.proto", fileDescriptor_b085845089ac9f1b) }
var fileDescriptor_b085845089ac9f1b = []byte{
// 332 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0xd2, 0x41, 0x4b, 0x02, 0x41,
0x14, 0x07, 0x70, 0x56, 0x2b, 0xf2, 0x99, 0x50, 0xa3, 0xe6, 0x52, 0x04, 0xe6, 0xc9, 0xd3, 0x86,
0x9a, 0x74, 0xe8, 0xd0, 0x21, 0x0a, 0x22, 0x2a, 0xb2, 0x53, 0xa7, 0x61, 0xdb, 0x1d, 0x63, 0xc0,
0x99, 0xb7, 0xcc, 0xcc, 0x5a, 0xf6, 0x69, 0xfa, 0xa2, 0x41, 0x38, 0x39, 0xee, 0xac, 0xd7, 0xae,
0xff, 0xb7, 0xbf, 0xc7, 0x7f, 0x87, 0x07, 0x4d, 0x89, 0x29, 0xa3, 0x49, 0x96, 0xd3, 0xdc, 0xf0,
0x59, 0x94, 0x29, 0x34, 0x48, 0xae, 0x12, 0xae, 0x13, 0xa4, 0x1c, 0x35, 0xfd, 0x54, 0xf4, 0x23,
0xd5, 0x0b, 0x2d, 0x50, 0xd2, 0x69, 0x4a, 0x31, 0x63, 0x2a, 0xd2, 0x0b, 0x6d, 0x98, 0xa0, 0x02,
0x25, 0x37, 0xa8, 0xb8, 0x7c, 0x8f, 0x9c, 0xe7, 0x5f, 0xb1, 0xe1, 0x28, 0x7b, 0x03, 0x20, 0xa5,
0xbd, 0xf4, 0xfe, 0xe6, 0xf5, 0x85, 0x1c, 0x43, 0xcd, 0xa6, 0x32, 0x16, 0x2c, 0x0c, 0xba, 0x41,
0xbf, 0x36, 0xd9, 0x5d, 0x06, 0x8f, 0xb1, 0x60, 0xbd, 0x9f, 0x00, 0xf6, 0x33, 0x85, 0x09, 0xd3,
0x7a, 0xcd, 0xc8, 0x29, 0xec, 0xb9, 0xcc, 0x43, 0xf5, 0x55, 0xb6, 0x74, 0xe4, 0x04, 0xc0, 0x7d,
0xc2, 0xd3, 0xb0, 0xd2, 0x0d, 0xfa, 0x8d, 0x49, 0x6d, 0x95, 0xdc, 0xa5, 0x64, 0x04, 0x87, 0xfe,
0x56, 0x94, 0x8c, 0x0a, 0x2e, 0x73, 0xc3, 0xc2, 0xaa, 0xfd, 0xb4, 0xb9, 0x9a, 0x5e, 0x67, 0xf9,
0x93, 0x64, 0x0f, 0x76, 0x44, 0xc6, 0xd0, 0xf1, 0xd1, 0x94, 0xcf, 0xd7, 0x6a, 0xcb, 0xaa, 0x56,
0xa1, 0x6e, 0xf9, 0xdc, 0xb1, 0x4b, 0x38, 0x2a, 0xb3, 0xa9, 0x61, 0x4c, 0x3a, 0xb9, 0x6d, 0x65,
0xc7, 0x97, 0x76, 0xfe, 0x87, 0x7b, 0xdf, 0x15, 0x68, 0x94, 0xde, 0x8c, 0x9c, 0x41, 0xcb, 0xa0,
0x89, 0x67, 0x9b, 0xc5, 0x87, 0x76, 0xd1, 0x81, 0x9d, 0x95, 0x6a, 0x0f, 0xa0, 0x5d, 0x00, 0xbf,
0xf4, 0xc8, 0x0a, 0xe2, 0x84, 0x57, 0xf9, 0x02, 0x42, 0x9f, 0x94, 0x0a, 0x9f, 0x5b, 0xd5, 0x2e,
0x94, 0x57, 0x97, 0x28, 0xa8, 0x7b, 0xff, 0x1a, 0x8e, 0xbb, 0xd5, 0x7e, 0x7d, 0xf8, 0x1c, 0xfd,
0xf3, 0x70, 0xa2, 0xcd, 0x0b, 0x98, 0x40, 0xf1, 0x5e, 0x6f, 0x3b, 0xf6, 0x3a, 0x47, 0xbf, 0x01,
0x00, 0x00, 0xff, 0xff, 0x06, 0x74, 0x5d, 0x81, 0xb4, 0x02, 0x00, 0x00,
}
|
{
return m.TotalCpuFifteenMinute
}
|
BEATLES.py
|
#!/usr/bin/python
from __future__ import division
import sys
import math
import cmath
import numpy as np
from numpy import genfromtxt
import csv
from decimal import Decimal
import os
import random
# BEATLES: Bundle of Essential and Assistive Tools Library for Electronic Structure
# A tribute to the Beatles
#
# Updated May 30, 2020 by Hassan Harb
#
# / | \
# / | \
# /O O | O O\
# //|\ /|\ /|\ /|\\
# /=/ \=/ \= / \=/ \=\
# / == == == == == \
# / == == == == == \
# (The original Beatles)
# (ASCII retrieved from https://www.asciiart.eu/music/musicians/beatles )
#
#########################################################################
#
# NBasGrab: reads in the name of a .fchk file
# output: -Number of basis functions
# -Charge
# -Multiplicity
# -Number of Atoms
# -Cartesian Coordinates
# -Atomic Symbols
# -SCF Energy
# -Total Energy (needs to be added)
# Section 1: Reading from gaussian formatted checkpoint file
def NBasGrab(filename):
NBasis = 0
NElem = 0
SCFEnergy = 0.0
Charge = 0
Multiplicity = 0
NAtoms = 0
temp = 1
with open(filename, 'r') as origin:
for line in origin:
if "Number of basis functions" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NBasis = NBasis*10 + int(letter)
if "Charge " in line:
words = line.split()
for i in words:
for letter in i:
if(letter=="-"):
temp = -1
if(letter.isdigit()):
Charge = Charge*10 + int(letter)
Charge = Charge*temp
if "Multiplicity" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
Multiplicity = Multiplicity*10 + int(letter)
if "Number of atoms" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NAtoms = NAtoms*10 + int(letter)
if "SCF Energy" in line:
words = line.split()
# print "SCF Energy = ", words[3], " Hartree"
SCFEnergy = float(words[3])
# print "SCF Energy (float) = ", SCFEnergy
# if "Total Energy" in line:
# words = line.split()
# TotalEnergy = float(words[3])
# print "Total Energy = ", TotalEnergy, " Hartree"
NElem = NBasis*NBasis
# print "Number of Basis Functions (subroutine) = ", NBasis, "\n"
# print "Charge (subroutine) = ", Charge, "\n"
return NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy
# GeomGet: reads in the file name, number of atoms
# Output: -One dimensional vector (NAtoms * 3) that includes the cartesian coordinates of each atom
#
def GeomGet(filename,NAtoms):
p = 0
r = 0
n = 1
NElements = NAtoms * 3
RawCart = np.zeros(NElements)
if (NElements%5 == 0):
n = 0
RawCartLines = int(NElements/5) + n
# print "Raw Cart lines = ", RawCartLines
# print "Number of Atoms =", NAtoms
# print "Number of coordinates =", NElements
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "Current cartesian coordinates" in line:
i = i + 1
pointer = i
# print "Cartesian Coordinates starts at line :", pointer
endpointer = pointer + RawCartLines - 1
# print "Cartesian Coordinates ends at line :", endpointer
for m in range(0,endpointer - pointer +1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
RawCart[r] = nextline[p]
r = r + 1
p = 0
# print "Raw Cart (subroutine) = ", RawCart
RawCart = RawCart/1.88973
# print "Raw Cart (converted to Angstroms) = ", RawCart
return RawCart
# GetAtoms: Reads in file name, number of atoms
# output: -One dimensional vector (NAtoms) that contains the atomic numbers of the atoms
#
def GetAtoms(filename1,NAtoms):
p = 0
r = 0
n = 1
AtomicNum = np.zeros(NAtoms)
if (NAtoms%6 ==0):
n = 0
AtomLines = int(NAtoms/6) + n
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Atomic numbers" in line:
i = i + 1
pointer = i
endpointer = pointer + AtomLines -1
for m in range(0, endpointer - pointer + 1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AtomicNum[r] = nextline[p]
r = r + 1
p = 0
return AtomicNum
# MatGrab: Reads in filename, NBasis, user-defined switch
# Output: -Alpha MO Coefficients (Done)
# -Beta MO Coefficients (Done)
# -Alpha Density Matrix (Done)
# -Beta Density Matrix (Done)
# -Alpha MO Energies (Done)
# -Beta MO Energies (Done)
#
# Switch: 1 = Alpha MO Coefficients
# -1 = Beta MO Coefficients
# 2 = Alpha and Beta Density Matrices
# 3 = Alpha MO Energies
# -3 = Beta MO Energies
#
def MatGrab(filename,NBasis,switch):
if (switch == 1):
filename1 = filename
MOElements = NBasis * NBasis
MOlines = int(MOElements/5) + 1
if (NBasis%5 == 0):
MOlines = MOlines - 1
p = 0
r = 0
AOE = 0
MOrawa = np.zeros(NBasis*NBasis)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i
if "Alpha MO coefficients" in line:
i=i+1
AMO=i
# print "Alpha MO coefficients starts at line :", i
j=i+MOlines-1
# print "Alpha MO coefficients ends at line :", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
MOrawa[r] = nextline[p]
r = r+1
p = 0
# print "MO Raw = ", MOrawa
return MOrawa
if (switch == -1):
filename1 = filename
MOElements = NBasis * NBasis
MOlines = int(MOElements/5) + 1
if (NBasis%5 == 0):
MOlines = MOlines - 1
p = 0
r = 0
BOE = 0
BMO = 0
MOrawb = np.zeros(NBasis*NBasis)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Beta Orbital Energies" in line:
BOE = i
if "Beta MO coefficients" in line:
i=i+1
BMO=i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
MOrawb[r] = nextline[p]
r = r+1
p = 0
# print "MO Raw = ", MOrawb
return MOrawb
if (switch == 2):
filename1 = filename
PElements = int(NBasis*(NBasis+1)/2)
Plines = int(PElements/5) + 1
TotalPraw = np.zeros(PElements)
SpinPraw = np.zeros(PElements)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Total SCF Density" in line:
i=i+1
r = 0
p = 0
# print "Total SCF Density starts at line :", i
j=i+Plines-1
# print "Total SCF Density ends at line :", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(0,len(nextline)):
if (r != PElements):
TotalPraw[r] = nextline[p]
r = r+1
p = 0
# HH + : Bug ... :(
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Spin SCF Density" in line:
# print "Found Spin density!"
i=i+1
r = 0
p = 0
# print "Spin SCF Density starts at line: ", i
j=i+Plines-1
# print "Spin SCF Density ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
if (r != PElements):
SpinPraw[r] = nextline[p]
r = r+1
p = 0
# HH - : End of bug (hopefully!)
PalphaRaw = (np.add(TotalPraw,SpinPraw)) * 0.5
PbetaRaw = (np.subtract(TotalPraw,SpinPraw)) * 0.5
Palpha = symmetrize(PalphaRaw)
Pbeta = symmetrize(PbetaRaw)
return Palpha, Pbeta
if (switch == 3):
filename1 = filename
AlphaMO = np.zeros(NBasis)
AlphaMOlines = int(NBasis/5) + 1
if (NBasis % 5 == 0):
AlphaMOlines = AlphaMOlines - 1
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
i = i + 1
r = 0
p = 0
# print "Alpha MO Energies starts at line: ", i
j = i + AlphaMOlines - 1
# print "Alpha MO Energies ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AlphaMO[r] = nextline[p]
r = r + 1
p = 0
# print "Alpha MO energies = ", AlphaMO
return AlphaMO
if (switch == -3):
filename1 = filename
BetaMO = np.zeros(NBasis)
BetaMOlines = int(NBasis/5) + 1
if (NBasis % 5 == 0):
BetaMOlines = BetaMOlines - 1
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Beta Orbital Energies" in line:
i = i + 1
r = 0
p = 0
# print "Beta MO Energies starts at line: ", i
j = i + BetaMOlines - 1
# print "Beta MO Energies ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
BetaMO[r] = nextline[p]
r = r + 1
p = 0
# print "Beta MO energies = ", BetaMO
return BetaMO
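# Example usage (illustrative; "mol.fchk" is a hypothetical file name):
# MOrawA = MatGrab("mol.fchk", NBasis, 1)        # packed alpha MO coefficients
# Palpha, Pbeta = MatGrab("mol.fchk", NBasis, 2) # symmetrized alpha/beta density matrices
# EpsA = MatGrab("mol.fchk", NBasis, 3)          # alpha orbital energies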
# sci_notation: reads in a number
# output: returns the number formatted in the desired scientific notation. note that this function has a different output than the one found in nio.py
#
def sci_notation(n):
a = '%.8f' % n
return '%.8f' % Decimal(n.real)
# fchk_notation: reads in a number
# output: returns the number formatted in the desired notation for fchk files
#
def fchk_notation(n):
a = '%.8E' % n
return '%.8E' % Decimal(n.real)
# AtomicSymbol: Reads in atomic number of the element
# Output: -Atomic Symbol
#
def AtomicSymbol(AtomicNumber):
p = AtomicNumber - 1
PTlist = ['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No','Lr','Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg','Cn','Uut','Fl','Uup','Lv','Uus','Uuo']
# print "There are currently ", len(PTlist), " atoms defined"
return PTlist[p]
# Symmetrize: Reads in a packed lower-triangular column matrix and expands it into an NBasis x NBasis square matrix
# Output: -Matrix(NBasis,NBasis)
#
def symmetrize(a):
Nbas = int((np.sqrt(8*len(a)+1)-1)/2)
b = np.zeros((Nbas,Nbas))
n = 0
for i in range(0,Nbas):
for j in range(0,i+1):
b[i,j]=a[n]
b[j,i]=a[n]
n=n+1
return b
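# Example (illustrative): symmetrize([1, 2, 3]) gives Nbas = 2 and returns
# [[1, 2], [2, 3]], i.e. the packed lower triangle [a11, a21, a22] mirrored across the diagonal.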
# Column2Square: Reads in a packed column matrix, number of basis functions.
# Output: -Matrix(NBasis,NBasis)
def column2square(A,NBasis):
C = np.zeros((NBasis,NBasis))
t=0
for i in range(0,NBasis):
for j in range(0,NBasis):
C[j,i]=float(A[t])
t=t+1
return C
# GetOverlap: Reads in packed column matrix, number of basis functions.
# Output: -Overlap Matrix (NBasis,NBasis)
def GetOverlap(A,NBasis):
C = column2square(A,NBasis)
CInv = np.linalg.inv(C)
S = np.dot(np.transpose(CInv),CInv)
return S
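# Note (added for clarity): since the full MO coefficient matrix satisfies C^T S C = I,
# the AO overlap follows as S = (C^-1)^T C^-1; this assumes C is the complete,
# non-singular NBasis x NBasis coefficient matrix returned by MatGrab(filename, NBasis, 1).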
# PrintSI: Reads in filename, user-defined switch
# Output: -SCF Energy, Charge, Multiplicity, Geometry
#
# Switch: 1 = print to new file (filename1-SI.txt)
# -1 = print to screen
#
def PrintSI(filename1,switch):
NBasis, NElementsGrab, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename1)
AtomicNum = GetAtoms(filename1,NAtoms)
RawCart = GeomGet(filename1,NAtoms)
Cart = np.resize(RawCart,(NAtoms,3))
filename2 = os.path.splitext(filename1)[0] + "-SI.txt"
filename1 = os.path.splitext(filename1)[0]
if (switch == 1):
with open(filename2,'w') as f2:
f2.write("SI info for ")
f2.write(filename1)
f2.write("\n\n")
f2.write("SCF Energy = ")
f2.write(str(SCFEnergy))
f2.write(" Hartree")
f2.write("\n\n")
f2.write(str(Charge))
f2.write(" ")
f2.write(str(Multiplicity))
f2.write("\n")
for i in range(0,NAtoms):
h = i + 1
z = AtomicNum[i]
Atom = AtomicSymbol(int(z))
f2.write(Atom)
f2.write(" ")
for j in range(0,3):
if (Cart[i,j] >= 0):
f2.write(" ")
f2.write(str(sci_notation(Cart[i,j])))
f2.write(" ")
f2.write("\n")
f2.write(" ")
f2.write("\n\n")
return filename2
if (switch == -1):
print "SCF Energy = ", SCFEnergy, " Hartree\n"
print "Charge = ", Charge, "\n"
print "Multiplicity = ", Multiplicity, "\n"
print "Cartesian Geometry:\n"
for i in range(0,NAtoms):
h = i + 1
z = AtomicNum[i]
Atom = AtomicSymbol(int(z))
print Atom, sci_notation(Cart[i,0]), sci_notation(Cart[i,1]), sci_notation(Cart[i,2])
print "\n"
# CalcNO: Reads in filename, NBasis
# Output: Natural Orbitals eigenvalues and eigenvectors (both alpha and beta)
#
def CalcNO(filename,NBasis):
Palpha, Pbeta = MatGrab(filename,NBasis,2)
C = MatGrab(filename,NBasis,1)
S = GetOverlap(C,NBasis)
Svals, Svecs = np.linalg.eig(S)
Sval_half = (np.diag(Svals**(0.5)))
Shalf = np.dot(Svecs,np.dot(Sval_half,np.transpose(Svecs)))
NOvalsA, NOvecsA = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Palpha)))
NOvalsB, NOvecsB = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Pbeta)))
NOvalsA = NOvalsA.real
NOvalsB = NOvalsB.real
NOvecsA = NOvecsA.real
NOvecsB = NOvecsB.real
NOvecsA = np.dot(np.linalg.inv(Shalf),NOvecsA)
NOvecsB = np.dot(np.linalg.inv(Shalf),NOvecsB)
|
# NElec: Reads in filename
# Output: Total number of electrons, Alpha Electrons, Beta Electrons
#
def NElec(filename):
NElec = 0
NAlpha = 0
NBeta = 0
with open(filename, 'r') as origin:
for line in origin:
if "Number of electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NElec = NElec*10 + int(letter)
if "Number of alpha electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NAlpha = NAlpha*10 + int(letter)
if "Number of beta electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NBeta = NBeta*10 + int(letter)
return NElec, NAlpha, NBeta
# OrbTransform: Reads in Alpha Density Matrix, Beta Density Matrix, Overlap Matrix, n
# Output: New Density Matrices: P' = S**(n).P.S**(1-n) (matching the order applied in the code below)
#
def OrbTransform(Pa,Pb,S,n):
Svals, Svecs = np.linalg.eig(S)
Sval1 = np.diag(Svals**(n))
Sval2 = np.diag(Svals**(1-n))
Sdag1 = np.dot(Svecs,np.dot(Sval1,np.transpose(Svecs)))
Sdag2 = np.dot(Svecs,np.dot(Sval2,np.transpose(Svecs)))
PdagAlpha = np.dot(Sdag1,np.dot(Pa,Sdag2))
PdagBeta = np.dot(Sdag1,np.dot(Pb,Sdag2))
# print "OrbTransform Subroutine test:\n"
# print "PdagAlpha = ", PdagAlpha, "\n"
# print "PdagBeta = ", PdagBeta, "\n"
OvalsA, OvecsA = np.linalg.eig(PdagAlpha)
OvalsB, OvecsB = np.linalg.eig(PdagBeta)
# print "OVals A = ", OvalsA, "\n"
# print "OVecs A = ", OvecsA, "\n"
# print "OVals B = ", OvalsB, "\n"
# print "OVecs B = ", OvecsB, "\n"
return PdagAlpha, PdagBeta, OvecsA, OvecsB, OvalsA, OvalsB
# CartoZmat: Transforms Cartesian coordinates to z-matrix form
# Input: NAtoms, RawCart, AtomicNum
# Output: z-matrix printed on the screen
#
# Note that there are three other functions here, Dist, Angle, and Torsion.
# They are used to calculate the appropriate parameters for the z-matrix
# switch = 1 : print z-matrix to screen
# switch = -1 : print z-matrix to new textfile
def DistAB(e1,e2):
R = 0.0
for i in range(len(e1)):
R = R + (e1[i]-e2[i])**(2)
R = R**(0.5)
return R
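# Example (illustrative): DistAB([0, 0, 0], [3, 4, 0]) returns 5.0, the Euclidean distance
# in whatever units the input coordinates carry.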
def AngleABC(e1,e2,e3):
eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)
eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)
eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)
ebc_x = - (e3[0] - e2[0]) / DistAB(e2,e3)
ebc_y = - (e3[1] - e2[1]) / DistAB(e2,e3)
ebc_z = - (e3[2] - e2[2]) / DistAB(e2,e3)
eab = [eab_x, eab_y, eab_z]
ebc = [ebc_x, ebc_y, ebc_z]
cos_angle = np.dot(eab,ebc)
angle = np.arccos(cos_angle) / 3.1415926535 * 180
return eab, ebc, angle
def TorsionABCD(e1,e2,e3,e4):
eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)
eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)
eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)
ebc_x = (e3[0] - e2[0]) / DistAB(e2,e3)
ebc_y = (e3[1] - e2[1]) / DistAB(e2,e3)
ebc_z = (e3[2] - e2[2]) / DistAB(e2,e3)
ecd_x = (e4[0] - e3[0]) / DistAB(e3,e4)
ecd_y = (e4[1] - e3[1]) / DistAB(e3,e4)
ecd_z = (e4[2] - e3[2]) / DistAB(e3,e4)
eab = [eab_x, eab_y, eab_z]
ebc = [ebc_x, ebc_y, ebc_z]
ecd = [ecd_x, ecd_y, ecd_z]
n1 = np.cross(eab,ebc) / (np.linalg.norm(np.cross(eab,ebc)))
n2 = np.cross(ebc,ecd) / (np.linalg.norm(np.cross(ebc,ecd)))
u1 = n2
u3 = ebc/np.linalg.norm(ebc)
u2 = np.cross(u3,u1)
cos_angle = np.dot(n1,n2)
sin_angle = np.dot(n1,u2)
angle = -math.atan2(sin_angle,cos_angle) / 3.1415926535 * 180
return angle
def CartoZmat(RawCart,NAtoms,AtomicNum,filename2,switch):
if (switch == 1):
Cart = np.resize(RawCart,(NAtoms,3))
# print "Cartesian = ", Cart
# print "Atoms list = ", AtomicNum
for i in range(len(AtomicNum)):
Symbol = AtomicSymbol(int(AtomicNum[i]))
if (i > 2):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
D = TorsionABCD(e4,e1,e2,e3)
print Symbol, 1 , R , 2, A , 3, D
elif (i > 1):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
print Symbol, 1 , R , 2, A
elif (i > 0):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
print Symbol, 1, R
elif (i == 0):
print Symbol
elif (switch == -1):
Cart = np.resize(RawCart,(NAtoms,3))
#open new file
filename = os.path.splitext(filename2)[0] + "-zmat.txt"
with open(filename,'w') as f2:
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename2)
f2.write("Z-Matrix file for ")
f2.write(filename2)
f2.write("\n\n")
f2.write(str(Charge))
f2.write(" ")
f2.write(str(Multiplicity))
f2.write("\n")
for i in range(len(AtomicNum)):
Symbol = AtomicSymbol(int(AtomicNum[i]))
if (i > 2):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
D = TorsionABCD(e4,e1,e2,e3)
f2.write(Symbol)
f2.write(" 1 ")
f2.write(str(R))
f2.write(" 2 ")
f2.write( str(A))
f2.write(" 3 ")
f2.write(str(D))
f2.write("\n")
elif (i > 1):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
f2.write(str(Symbol))
f2.write(" 1 ")
f2.write (str(R))
f2.write(" 2 ")
f2.write(str(A))
f2.write("\n")
elif (i > 0):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
f2.write(Symbol)
f2.write(" 1 ")
f2.write(str(R))
f2.write("\n")
elif (i == 0):
f2.write(Symbol)
f2.write("\n")
# print "test test"
# Section 2: Reading from gaussian matrix files
# MatGrab2: Reads in matrices from gaussian matrix file
#
# Switch: 1 : Alpha Core Hamiltonian
# -1 : Beta Core Hamiltonian
# 2 : Alpha Fock Matrix
# -2 : Beta Fock Matrix
# 3 : Dipole matrix elements (x,y,z) [IN PROGRESS]
def MatGrab2(filename,NBasis,switch):
print "Reading from Matrix file\n"
if (switch == 1):
print "Reading Alpha Core Hamiltonian Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the core hamilonian\n"
CoreHRawa = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "CORE HAMILTONIAN ALPHA" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading Core Hamolitonian"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
CoreHRawa[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return CoreHRawa
if (switch == -1):
print "Reading Beta Core Hamiltonian Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the core hamilonian\n"
CoreHRawb = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "CORE HAMILTONIAN BETA" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading Core Hamolitonian"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
CoreHRawb[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return CoreHRawb
if (switch == 2):
print "Reading Alpha Fock Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the fock matrix\n"
FockRawA = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "ALPHA FOCK MATRIX" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading fock matrix"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
FockRawA[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return FockRawA
if (switch == -2):
print "Reading Beta Fock Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the fock matrix\n"
FockRawB = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "BETA FOCK MATRIX" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading fock matrix"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
FockRawB[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return FockRawB
if (switch == 3):
print "Reading Dipole integrals, matrix x\n"
NElements = int(NBasis*(NBasis +1)/2)
print "Looking for ", NElements, " elements of the Dipole integrals matrix x\n"
DipX_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 1" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done reading Dipole X matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipX_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
print "Dip X raw = ", DipX_Raw
print "Reading Dipole integrals, matrix y\n"
NElements = int(NBasis*(NBasis +1)/2)
print "Looking for ", NElements, " elements of the Dipole integrals matrix y\n"
DipY_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 2" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done reading Dipole Y matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipY_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
print "Dip Y raw = ", DipY_Raw
print "Looking for ", NElements, " elements of the Dipole integrals matrix z\n"
DipZ_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 3" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done reading Dipole Z matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipZ_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
print "Dip Z raw = ", DipZ_Raw
return symmetrizeMat(DipX_Raw), symmetrizeMat(DipY_Raw), symmetrizeMat(DipZ_Raw)
# SymmetrizeMat: Reads in packed matrix (recovered from Matrix file) and prints out NBasis x NBasis matrix
# Input: Packed lower triangular A
# Output: N x N Matrix
def symmetrizeMat(a):
NBasis = int((np.sqrt(8*len(a)+1)-1)/2)
NewMat = np.zeros((NBasis,NBasis))
NElements = len(a)
t = 0
l = 0
start = 0
loop = NBasis
nBlock = int(NBasis/5)
nRem = NBasis%5
# print "nBlock = ", nBlock
# print "nRem = ", nRem
i = start
j = start
if (nBlock == 0):
nBlock =1
while (l < nBlock):
# print "retrieving block ", l
for i in range (start,loop):
for j in range(start,start+5):
if (j<=i):
# print "i,j = ",i,j
NewMat[i,j] = a[t]
NewMat[j,i] = a[t]
# print "A[t]= ", a[t]
t = t + 1
start = start + 5
l = l + 1
# print "t = ", t
# print "values of i and j after nBlock loop is over: ", i, j
j = j + 1
start = j
# print "NBasis - nRem = ", NBasis -nRem
i = NBasis - nRem
while (i < NBasis):
j = start
while (j <= i):
# print "i,j = ",i,j
NewMat[i,j] = a[t]
NewMat[j,i] = a[t]
# print "A[t]= ", a[t]
t = t + 1
j = j + 1
i = i + 1
# print "final value of t = ", t
return NewMat
# ERIRead: reads in regular 2e integrals from formatted matrix file
# Note that to get these integrals, use SCF=Conventional and int=NoRaff (saves integrals to disk and prints out regular 2e integrals)
# Input: matrix filename
# Output: sparse one-dimensional array indexed by the compound (four) index, holding the corresponding integral values
#
# Two small functions are defined here: swap(a,b) and Fourindex(a,b,c,d)
def swap(a,b):
return b,a
def Fourindex(a,b,c,d):
a = int(a)
b = int(b)
c = int(c)
d = int(d)
if (a < b):
a, b = swap(a,b)
if (c < d):
c, d = swap(c,d)
e = int(a*(a+1)/2 + b)
f = int(c*(c+1)/2 + d)
if (e<f):
e,f = swap(e,f)
g = e*(e +1)/2 + f
return int(g)
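# Worked example (illustrative): Fourindex(2, 1, 2, 1) keeps a >= b and c >= d,
# giving e = 2*3/2 + 1 = 4 and f = 4, so g = 4*5/2 + 4 = 14; the integral (21|21)
# is therefore stored at compound index 14 of the sparse 1D array built in ERIRead.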
def ERIRead(filename,NBasis):
NElements = 0
p = 0
print "Reading ERIs from Gaussian Matrix File"
print "Subroutine can only read regular 2e integrals (NO RAFINETTI)"
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "Label REGULAR 2E INTEGRALS" in line:
print "Found 2e integrals!"
words = line.split()
print "Total number of elements = ", words[9]
NElements = int(words[9])
print "NElements = ", NElements
eri_raw = np.zeros((NElements,5))
while (p < NElements):
nextline = origin.next()
words = nextline.split()
eri_raw[p,0] = words[1]
eri_raw[p,1] = words[3]
eri_raw[p,2] = words[5]
eri_raw[p,3] = words[7]
eri_raw[p,4] = float(words[9].replace('D','E'))
# print "(",int(eri_raw[p,0]),int(eri_raw[p,1]),"|",int(eri_raw[p,2]),int(eri_raw[p,3]),") = ", eri_raw[p,4]
p = p + 1
# print "ERI RAW = ", eri_raw
NTotal = Fourindex(NBasis,NBasis,NBasis,NBasis) + 1
eri_array = np.zeros(NTotal)
eri_compact = np.zeros((NElements,2))
print "Total length of sparse 1D vector =", NTotal
print "Now forming compound indices"
for i in range(0,NElements):
eri_compact[i,0] = Fourindex(eri_raw[i,0], eri_raw[i,1], eri_raw[i,2], eri_raw[i,3])
eri_compact[i,1] = eri_raw[i,4]
eri_array[int(eri_compact[i,0])] = eri_compact[i,1]
# print "mu nu lambda sigma = ", int(eri_compact[i,0]), ", int = ", eri_compact[i,1], "One D array Value =", eri_array[eri_compact[i,0]]
return eri_array
# OVParse: breaks down the MO coefficient matrix (NBasis x NBasis) into an occupied (NBasis x NOcc) matrix and a virtual (NBasis x (NBasis-NOcc)) matrix
# Input: A: MO Coefficient (NBasis x NBasis)
# NBasis
# NOcc = number of electrons
#
# Output: A_Occ: rectangular NBasis x NOcc matrix: Columns of occupied MOs
# A_Virt: rectangular NBasis x (NBasis - NOcc) matrix: Columns of virtual MOs
## Note TO SELF: Needs to be tested more, was only tested on H2 and V jobs.
def OVParse(A,NBasis,NOcc):
A_Occ = np.zeros((NBasis,NOcc))
A_Virt = np.zeros((NBasis,NBasis-NOcc))
for i in range(0,NOcc):
A_Occ[:,i] = A[:,i]
for j in range(0,NBasis-NOcc):
A_Virt[:,j] = A[:,j+NOcc]
return A_Occ, A_Virt
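# Example (illustrative): with NAlpha occupied alpha orbitals,
# C_occ, C_virt = OVParse(C_alpha, NBasis, NAlpha)
# yields an NBasis x NAlpha occupied block and an NBasis x (NBasis - NAlpha) virtual block.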
# Biorthog: Computes the biorthogonal overlap between two sets of MO Coefficients via an SVD of the MO overlap matrix
# Input: A, B: MO Coefficients, can either be full or parsed (using OVParse subroutine)
# S: AO overlap matrix
#
# Output: the singular values d, the singular vectors u and v, and the MO overlap matrix D
# (the scalar overlap is det(u)*prod(d)*det(v))
#
# Option: switch: 1 : print all relevant matrices
# -1 : Don't print any matrices
#
def Biorthog(A,B,S,switch): # eqn numbers based on personal notes
D = np.dot(np.transpose(B),np.dot(S,A)) # eq. 1
u, d, v = np.linalg.svd(D,full_matrices=True) # eq. 2
DtD = np.dot(np.transpose(D),D)
l, V = np.linalg.eig(DtD)
U = np.dot(D,V)
if (switch==1):
print "D = ", D
print "DtD = ", DtD
print "lambdas = ", l
print "Eig Vecs of DtD = ", V
print "Determinants = ", np.linalg.det(u), np.linalg.det(v)
print "u = ", u
print "v = ", v
overlap = np.linalg.det(u)*np.prod(d)*np.linalg.det(v)
return d, u, v, D
# PickColumn: Subroutine that selects a specific column from a two dimensional matrix (NBasis,NBasis), outputs an array (NBasis,1)
# Input: A: Two dimensional matrix
# NBasis: Number of basis functions for A
# i: the position of the column to be selected
#
# Output: One dimensional array (NBasis,1) that is the i-th column of matrix A
#
def PickColumn(A,NBasis,i):
A_Column = np.zeros((NBasis,1))
for j in range(0,NBasis):
A_Column[j,0] = A[j,i]
return A_Column
# WriteMOs: Subroutine that replaces the MO coefficients and orbital energies in a fchk file
# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbital energies alpha, orbital energies beta, number of basis functions
#
# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies
#
def WriteMOs(filename1,filename3,V1,V2,e1,e2,NBasis):
MOlines = int(len(V1)/5) + 1
p = 0
r = 0
AOE = 0
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i
if "Alpha MO coefficients" in line:
i=i+1
AMO=i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
r = r+1
p = 0
if "Beta Orbital Energies" in line:
BOE = i
if "Beta MO coefficients" in line:
r = 0
i=i+1
BMO = i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
r = r+1
p = 0
pointer=0
counter=1
with open(filename1,'r') as origin:
data = origin.readlines()
if "Alpha Orbital Energies" in line:
AOE = i
BOE = AOE + int(NBasis/5) + 1
with open(filename3,'w') as f2:
print "Writing results to new output file: ", filename3, " ... "
while (pointer < AOE+1):
f2.write(data[pointer])
pointer = pointer+1
for j in range(0,NBasis):
f2.write(" ")
if (e1[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e1[j].real)))
if (counter%5 == 0):
f2.write("\n")
counter=0
counter=counter+1
counter =1
BOE = AOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BOE = BOE - 1
f2.write(data[BOE])
for j in range(0,NBasis):
f2.write(" ")
if (e2[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e2[j].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter+1
counter =1
AMO = BOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
AMO = AMO - 1
f2.write(data[AMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V1[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V1[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
BMO = AMO + (int(NBasis*NBasis/5))+2
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BMO = BMO - 1
f2.write(data[BMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V2[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V2[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
if (NBasis%5 != 0):
f2.write("\n")
pointer = BMO + (int(NBasis*NBasis/5))+2
while (pointer < len(data)):
f2.write(data[pointer])
pointer = pointer+1
print "Done."
# OVMerge: Does the opposite of OVParse, merges back the Occ and Virt components of the MO Coefficient matrix
# Input : A (Occ Matrix), B(Vir Matrix), Number of occupied orbitals, NBasis
#
# Output : V = Full MO Coefficient Matrix
#
# (this subroutine has the exact opposite functionality of OVParse)
#
def OVMerge(A,B,NOcc,NBasis):
V = np.zeros((NBasis,NBasis))
for i in range(0,NOcc):
V[:,i] = A[:,i]
for j in range(NOcc,NBasis):
V[:,j] = B[:,j-NOcc]
return V
# DistanceMatrix: Calculates distances between all atoms in a molecule
# Input : fchk file name
#
# Output : Returns Distance Matrix and Atomic Symbol array.
#
# Unfinished part: generate and return a distance matrix (NAtoms x NAtoms)
#
def DistanceMatrix(filename):
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)
Atomic_Numbers = GetAtoms(filename,NAtoms)
Atomic_Symbol = [""]*NAtoms
for i in range(0,NAtoms):
Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))
RawCart = GeomGet(filename,NAtoms)
Cart = np.resize(RawCart,(NAtoms,3))
Distance_Matrix = np.zeros((NAtoms,NAtoms))
for i in range(0,NAtoms):
for j in range(i+1,NAtoms):
e2 = [Cart[j,0],Cart[j,1],Cart[j,2]]
e1 = [Cart[i,0],Cart[i,1],Cart[i,2]]
Distance_Matrix[i,j] = np.around(DistAB(e1,e2),decimals=2)
Distance_Matrix[j,i] = np.around(DistAB(e1,e2),decimals=2)
return Distance_Matrix, Atomic_Symbol
# GetAtomicWeights: Grabs the "real atomic weights" from the fchk file
# Input: filename, Number of Atoms
#
# Output: One dimensional array, AtomicWeight, of dimensions NAtoms.
#
def GetAtomicWeights(filename1,NAtoms):
p = 0
r = 0
n = 1
AtomicWeight = np.zeros(NAtoms)
if (NAtoms%5 ==0):
n = 0
AtomLines = int(NAtoms/5) + n
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Real atomic weights" in line:
i = i + 1
pointer = i
endpointer = pointer + AtomLines -1
for m in range(0, endpointer - pointer + 1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AtomicWeight[r] = nextline[p]
r = r + 1
p = 0
AtomicWeight = np.around(AtomicWeight,decimals=3)
return AtomicWeight
# Work in progress: Basis set reader:
def ReadBasisSet(filename):
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)
print "Number of Basis functions =", NBasis
print "Number of atoms =", NAtoms
Atomic_Numbers = GetAtoms(filename,NAtoms)
print "Atomic Numbers =", Atomic_Numbers
Atomic_Symbol = [""]*NAtoms
for i in range(0,NAtoms):
Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))
print "Atomic Symbols =", Atomic_Symbol
# WriteMOs: Subroutine that replaces the MO coefficients and orbital energies in a fchk file
# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbital energies alpha, orbital energies beta, number of basis functions
#
# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies
#
def WriteMOsQChem(filename1,filename3,V1,V2,e1,e2,NBasis):
MOlines = int(len(V1)/5) + 1
p = 0
r = 0
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i+1
AOE_header = line
if "Alpha MO coefficients" in line:
AMO = i+1
AMO_header = line
if "Beta Orbital Energies" in line:
BOE = i+1
BOE_header = line
if "Beta MO coefficients" in line:
BMO = i+1
BMO_header = line
print "Alpha MO Coefficients at line", AMO
print "Beta MO Coefficients at line", BMO
print "Alpha Orbital Energies at line", AOE
print "Beta Orbital Energies at line", BOE
pointer=0
counter=1
Start_point = min(AMO,BMO,AOE,BOE)
print "Start point = ", Start_point
with open(filename1,'r') as origin:
data = origin.readlines()
with open(filename3,'w') as f2:
print "Writing results to new output file: ", filename3, " ... "
while (pointer < Start_point-1):
f2.write(data[pointer])
pointer = pointer+1
print "pointer at line = ", pointer
f2.write(AOE_header)
for j in range(0,NBasis):
f2.write(" ")
if (e1[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e1[j].real)))
if (counter%5 == 0):
f2.write("\n")
counter=0
counter=counter+1
counter =1
BOE = AOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BOE = BOE - 1
f2.write(BOE_header)
# f2.write("Beta Orbital Energies\n")
for j in range(0,NBasis):
f2.write(" ")
if (e2[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e2[j].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter+1
counter =1
AMO = BOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
AMO = AMO - 1
# f2.write("Alpha MO coefficients\n")
f2.write(AMO_header)
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V1[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V1[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
BMO = AMO + (int(NBasis*NBasis/5))+2
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BMO = BMO - 1
# f2.write("Beta MO Coefficients\n")
f2.write(BMO_header)
# f2.write(data[BMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V2[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V2[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
if (NBasis%5 != 0):
f2.write("\n")
pointer = BMO + (int(NBasis*NBasis/5))+2
# while (pointer < len(data)):
# f2.write(data[pointer])
# pointer = pointer+1
print "Done."
|
return NOvecsA, NOvecsB, NOvalsA, NOvalsB
|
_ticklen.py
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def
|
(
self, plotly_name="ticklen", parent_name="bar.marker.colorbar", **kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
__init__
|
testmodule.rs
|
use iredismodule::call_reply::ReplyType;
use iredismodule::io::{Digest, IO};
use iredismodule::key::KeyType;
use iredismodule::key::{ListPosition, ZsetRangeDirection};
use iredismodule::prelude::*;
use iredismodule::rtype::TypeMethod;
use iredismodule_macros::{rcmd, rtypedef};
use std::time::Duration;
/// Generate RString for String or str
#[macro_export]
macro_rules! rstr {
($value:expr) => {
RString::from_str($value)
};
}
macro_rules! check {
($cond:expr) => {
if $cond {
} else {
return Err(Error::new(format!("failed at line {}", line!())));
}
};
($cond:expr, $desc:expr) => {
if $cond {
} else {
return Err(Error::new(format!("{} at line {}", $desc, line!())));
}
};
}
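// Example (illustrative): check!(reply.get_integer().unwrap() == 123, "unexpected reply")
// returns early with Err("unexpected reply at line N") when the condition is false,
// so each test command reports its first failing assertion.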
#[rcmd("test.clear_keys")]
fn test_clear_keys(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let reply = ctx.call("keys", None, &["test:*"])?;
let result: RResult = reply.into();
ctx.notice(format!("{:?}", &result));
let mut keys: Vec<String> = vec![];
if let Value::Array(v) = result? {
v.iter().for_each(|elem| {
if let Value::BulkString(key) = elem {
keys.push(std::str::from_utf8(key).unwrap().to_string());
}
})
}
ctx.notice(format!("{:?}", &keys));
if keys.len() > 0 {
ctx.call("del".to_string(), None, &keys)?;
}
Ok("OK".into())
}
#[rcmd("test.key")]
fn test_key(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let key_string = ctx.open_write_key(&rstr!("test:key_string"));
key_string.string_set(&rstr!("abc"))?;
check!(key_string.get_type() == KeyType::String);
let key_list = ctx.open_write_key(&rstr!("test:key_list"));
key_list.list_push(ListPosition::Head, &rstr!("abc"))?;
key_list.list_push(ListPosition::Tail, &rstr!("def"))?;
key_list.list_push(ListPosition::Tail, &rstr!("ghi"))?;
check!(key_list.get_type() == KeyType::List);
let key_hash = ctx.open_write_key(&rstr!("test:key_hash"));
key_hash.hash_set(None, &rstr!("field1"), Some(&rstr!("value1")))?;
check!(key_hash.get_type() == KeyType::Hash);
ctx.call("SADD", None, &["test:key_set", "abc", "def", "ghi"])?;
let key_set = ctx.open_read_key(&rstr!("test:key_set"));
check!(key_set.get_type() == KeyType::Set);
let key_zset = ctx.open_write_key(&rstr!("test:key_zset"));
key_zset.zset_add(0.0, &rstr!("abc"), None)?;
key_zset.zset_add(2.0, &rstr!("ghi"), None)?;
key_zset.zset_add(3.0, &rstr!("def"), None)?;
key_zset.zset_incrby(&rstr!("abc"), 1.0, None)?;
check!(key_zset.zset_score(&rstr!("abc"))? == 1.0);
let range1 = key_zset.zset_score_range(ZsetRangeDirection::FristIn, 1.0, 3.0, true, false)?;
check!(range1[0].0.to_string() == "ghi" && range1[0].1 == 2.0);
check!(range1[1].0.to_string() == "def" && range1[1].1 == 3.0);
let range2 = key_zset.zset_lex_range(ZsetRangeDirection::LastIn, &rstr!("[a"), &rstr!("[z"))?;
check!(range2[0].0.to_string() == "def" && range2[0].1 == 3.0);
check!(range2[1].0.to_string() == "ghi" && range2[1].1 == 2.0);
key_zset.zset_rem(&rstr!("ghi"))?;
let length_zset = key_zset.value_length();
check!(length_zset == 2);
let key_nonexist = ctx.open_read_key(&rstr!("test:key_nonexist"));
check!(key_zset.get_type() == KeyType::ZSet);
check!(key_string.check_type(KeyType::String).is_ok());
check!(key_string.check_type(KeyType::Hash).is_err());
check!(key_nonexist.check_type(KeyType::Empty).is_ok());
check!(key_nonexist.check_type(KeyType::String).ok() == Some(false));
let value_string = key_string.string_get()?;
check!(value_string.to_str().unwrap() == "abc");
let value_list_head = key_list.list_pop(ListPosition::Head)?;
check!(value_list_head.to_str().unwrap() == "abc");
let value_hash = key_hash.hash_get(&rstr!("field1"))?;
check!(value_hash.unwrap().to_str().unwrap() == "value1");
let exist_hash = key_hash.hash_check(&rstr!("field1"))?;
check!(exist_hash == true);
check!(key_string.get_keyname().to_str().unwrap() == "test:key_string");
let key_expire = ctx.open_write_key(&rstr!("test:expire"));
key_expire.string_set(&rstr!("abc"))?;
key_expire.set_expire(Duration::from_secs(30))?;
let expire_ms = key_expire.get_expire().unwrap();
check!(expire_ms.as_secs() <= 30 && expire_ms.as_secs() > 0);
let key_delete = ctx.open_write_key(&rstr!("test:key_delete"));
key_delete.string_set(&rstr!("abc"))?;
key_delete.delete()?;
check!(key_delete.is_empty());
let key_unlink = ctx.open_write_key(&rstr!("test:key_unlink"));
key_unlink.string_set(&rstr!("abc"))?;
key_unlink.unlink()?;
Ok("OK".into())
}
#[rcmd("test.reply_integer")]
fn test_reply_integer(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
Ok(Value::Integer(123))
}
#[rcmd("test.reply_double")]
fn test_reply_float(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
Ok(Value::Double(1.23))
}
#[rcmd("test.reply_string")]
fn test_reply_string(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
Ok(Value::String("abc".into()))
}
#[rcmd("test.reply_bulk_string")]
fn test_reply_bulk_string(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
Ok(Value::BulkString(vec![1u8, 2u8, 3u8]))
}
#[rcmd("test.reply_array")]
fn test_reply_array(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let data: Vec<Value> = (0..10).map(|v| Value::Integer(v)).collect();
Ok(Value::Array(data))
}
#[rcmd("test.reply_null")]
fn test_reply_null(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
Ok(Value::Null)
}
#[rcmd("test.reply_error")]
fn test_reply_error(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
Err(Error::WrongArity)
}
#[rcmd("test.call_reply")]
fn test_call_reply(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let call_reply_string = ctx.call("test.reply_string", None, &[])?;
check!(call_reply_string.get_type() == ReplyType::String);
check!(call_reply_string.get_string().unwrap() == "abc".to_string());
let call_reply_integer = ctx.call("test.reply_integer", None, &[])?;
check!(call_reply_integer.get_type() == ReplyType::Integer);
check!(call_reply_integer.get_integer().unwrap() == 123);
let call_reply_double = ctx.call("test.reply_double", None, &[])?;
check!(call_reply_double.get_type() == ReplyType::String);
check!(call_reply_double.get_double().unwrap() == 1.23);
let call_reply_bulk_string = ctx.call("test.reply_bulk_string", None, &[])?;
check!(call_reply_bulk_string.get_type() == ReplyType::String);
check!(call_reply_bulk_string
.get_bulk_string()
.unwrap()
.iter()
.zip([1u8, 2u8, 3u8].iter())
.all(|(x, y)| x == y));
let call_reply_array = ctx.call("test.reply_array", None, &[])?;
check!(call_reply_array.get_length() == 10);
check!(
call_reply_array
.get_array_element(0)
.unwrap()
.get_integer()
.unwrap()
== 0
);
check!(
call_reply_array
.get_array_element(9)
.unwrap()
.get_integer()
.unwrap()
== 9
);
check!(call_reply_array.get_type() == ReplyType::Array);
let call_reply_null = ctx.call("test.reply_null", None, &[])?;
check!(call_reply_null.get_type() == ReplyType::Null);
let call_reply_error = ctx.call("test.reply_error", None, &[])?;
check!(call_reply_error.get_type() == ReplyType::Error);
Ok("OK".into())
}
#[rcmd("test.reply_value")]
fn test_reply_value(_ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let values: Vec<Value> = vec![
"abc".into(),
vec![1u8, 2u8, 3u8].into(),
123i64.into(),
1.23.into(),
Value::from(vec![
Value::from(1i64),
Value::from(2i64),
Value::from(3i64),
]),
().into(),
];
Ok(values.into())
}
#[rcmd("test.value")]
fn test_value(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let call_reply = ctx.call("test.reply_value", None, &[])?;
let value0: RResult = call_reply.get_array_element(0).unwrap().into();
if let Value::String(_) = value0.unwrap() {
} else {
check!(false)
};
let value1: RResult = call_reply.get_array_element(1).unwrap().into();
if let Value::BulkString(_) = value1.unwrap() {
} else {
check!(false)
};
let value2: RResult = call_reply.get_array_element(2).unwrap().into();
if let Value::Integer(_) = value2.unwrap() {
} else {
check!(false)
};
let value3: RResult = call_reply.get_array_element(3).unwrap().into();
if let Value::BulkString(_) = value3.unwrap() {
} else {
check!(false)
};
let value4: RResult = call_reply.get_array_element(4).unwrap().into();
if let Value::Array(_) = value4.unwrap() {
} else {
check!(false)
};
let value5: RResult = call_reply.get_array_element(5).unwrap().into();
if let Value::Null = value5.unwrap() {
} else {
check!(false)
};
Ok("OK".into())
}
#[derive(Debug, PartialEq)]
pub struct MyType {
pub v1: u64,
pub v2: i64,
pub v3: String,
pub v4: f64,
pub v5: f32,
}
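// Custom module data type: #[rtypedef] registers "mytype123" (Redis requires the
// type name to be exactly 9 characters) with encoding version 0; the TypeMethod
// impl below supplies the RDB load/save, AOF rewrite, digest, memory usage and
// free callbacks.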
#[rtypedef("mytype123", 0)]
impl TypeMethod for MyType {
fn rdb_load(io: &mut IO, encver: u32) -> Option<Box<Self>> {
println!("mytype123 load rdb");
if encver != 0 {
return None;
}
let v1 = io.load_unsigned();
let v2 = io.load_signed();
let v3 = io.load_string();
let v4 = io.load_double();
let v5 = io.load_float();
Some(Box::new(MyType { v1, v2, v3, v4, v5 }))
}
fn rdb_save(&self, io: &mut IO) {
println!("mytype123 save rdb");
io.save_unsigned(self.v1);
io.save_signed(self.v2);
io.save_string(self.v3.as_str());
io.save_double(self.v4);
io.save_float(self.v5);
}
fn free(_: Box<Self>) {
println!("mytype123 free")
}
fn mem_usage(&self) -> usize {
println!("mytype123 check mem usage");
std::mem::size_of::<Self>()
}
fn digest(&self, digest: &mut Digest) {
println!("mytype123 digest");
digest.add_integer(self.v1 as i64);
digest.add_integer(self.v2);
digest.add_string(self.v3.as_str());
digest.add_string(self.v4.to_string());
digest.add_string(self.v5.to_string());
digest.end_sequeue()
}
fn aof_rewrite<T: AsRef<str>>(&self, io: &mut IO, key: T) {
println!("mytype123 aof rewrite");
io.emit_aof(
"test.set_type".to_owned(),
&[
key.as_ref().to_string(),
self.v1.to_string(),
self.v2.to_string(),
self.v3.to_string(),
self.v4.to_string(),
self.v5.to_string(),
],
)
}
}
#[rcmd("test.set_type", "write deny-oom", 1, 1, 1)]
fn test_set_type(ctx: &mut Context, args: Vec<RStr>) -> RResult {
if args.len() != 7 {
return Err(Error::WrongArity);
}
let key = ctx.open_write_key(&args[1]);
key.check_module_type(&MYTYPE123)?;
let value = MyType {
v1: args[2].get_integer()? as u64,
v2: args[3].get_integer()?,
v3: args[4].to_string(),
v4: args[5].to_string().parse::<f64>()?,
v5: args[6].to_string().parse::<f32>()?,
};
key.set_value(&MYTYPE123, value)?;
Ok("OK".into())
}
#[rcmd("test.get_type", "readonly")]
fn test_get_type(ctx: &mut Context, args: Vec<RStr>) -> RResult {
let key = ctx.open_read_key(&args[1]);
check!(key.get_type() == KeyType::Module);
let exist = key.check_module_type(&MYTYPE123)?;
let value: &mut MyType = key.get_value(&MYTYPE123)?.unwrap();
check!(exist);
Ok(Value::Array(vec![
(value.v1 as i64).into(),
value.v2.into(),
value.v3.as_str().into(),
value.v4.into(),
(value.v5 as f64).into(),
]))
}
#[rcmd("test.type", "readonly")]
fn test_type(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
ctx.call(
"test.set_type",
None,
&["test:type", "123", "-321", "abc", "1.23", "3.21"],
)?;
let reply = ctx.call("test.get_type", None, &["test:type"])?;
check!(reply.get_array_element(0).unwrap().get_integer().unwrap() as u64 == 123);
check!(reply.get_array_element(1).unwrap().get_integer().unwrap() == -321);
check!(reply.get_array_element(2).unwrap().get_string().unwrap() == "abc".to_string());
check!(reply.get_array_element(3).unwrap().get_double().unwrap() == 1.23);
check!(reply.get_array_element(4).unwrap().get_double().unwrap() as f32 == 3.21f32);
Ok("OK".into())
}
#[rcmd("test.misc")]
fn test_misc(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
ctx.select_db(1)?;
let db = ctx.get_select_db();
check!(db == 1);
ctx.select_db(0)?;
let not_busy = iredismodule::is_module_busy("meaninglessmodule");
check!(!not_busy);
let busy = iredismodule::is_module_busy("testmodule");
check!(busy);
Ok("OK".into())
}
#[rcmd("test.example_simple")]
fn test_example_simple(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
ctx.call("hello.simple", None, &[])?;
Ok("OK".into())
}
#[rcmd("test.example_helloworld")]
fn test_example_helloworld(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let reply = ctx.call("hello.simple", None, &[])?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.push.native", None, &["test:helloword:key1", "1"])?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.push.call", None, &["test:helloword:key1", "2"])?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.push.call2", None, &["test:helloword:key1", "3"])?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.push.sum.len", None, &["test:helloword:key1"])?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call(
|
None,
&["test:helloword:key1", "test:helloword:key2", "2"],
)?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.rand.array", None, &["5"])?;
check!(reply.get_type() == ReplyType::Array);
let reply = ctx.call("hello.repl1", None, &[])?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.repl2", None, &["test:helloword:key2"])?;
check!(reply.get_type() == ReplyType::Integer);
ctx.call("set", None, &["test:helloworld:key3", "abc"])?;
let reply = ctx.call("hello.toggle.case", None, &["test:helloworld:key3"])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call(
"hello.more.expire",
None,
&["test:helloworld:key3", "10000"],
)?;
check!(reply.get_type() == ReplyType::String);
let value = [
"test:helloworld:key4",
"1",
"a",
"2",
"b",
"3",
"c",
"4",
"d",
];
ctx.call("zadd", None, &value)?;
let reply = ctx.call("hello.zsumrange", None, &["test:helloworld:key4", "1", "4"])?;
check!(reply.get_type() == ReplyType::Array);
let reply = ctx.call("hello.lexrange", None, &["test:helloworld:key4", "-", "[c"])?;
check!(reply.get_type() == ReplyType::Array);
ctx.call("hset", None, &["test:helloworld:key5", "field1", "abc"])?;
let reply = ctx.call(
"hello.hcopy",
None,
&["test:helloworld:key5", "field1", "field2"],
)?;
check!(reply.get_type() == ReplyType::Integer);
let reply = ctx.call("hello.leftpad", None, &["123", "8", "0"])?;
check!(reply.get_type() == ReplyType::String);
Ok("OK".into())
}
#[rcmd("test.example_hellotype")]
fn test_example_hellotype(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let reply = ctx.call("hellotype.insert", None, &["test:hellotype:key1", "1"])?;
check!(reply.get_type() == ReplyType::Integer);
ctx.call("hellotype.insert", None, &["test:hellotype:key1", "2"])?;
ctx.call("hellotype.insert", None, &["test:hellotype:key1", "3"])?;
ctx.call("hellotype.insert", None, &["test:hellotype:key1", "4"])?;
let reply = ctx.call("hellotype.range", None, &["test:hellotype:key1", "1", "2"])?;
check!(reply.get_type() == ReplyType::Array);
let reply = ctx.call("hellotype.len", None, &["test:hellotype:key1"])?;
check!(reply.get_type() == ReplyType::Integer);
ctx.call(
"hellotype.brange",
None,
&["test:hellotype:key1", "1", "2", "5"],
)?;
Ok("OK".into())
}
#[rcmd("test.example_hellotimer")]
fn test_example_hellotimer(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let reply = ctx.call("hellotimer.timer", None, &[])?;
check!(reply.get_type() == ReplyType::String);
Ok("OK".into())
}
#[rcmd("test.example_helloblock")]
fn test_example_helloblock(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
ctx.call("hello.block", None, &["1", "2"])?;
ctx.call("hello.block", None, &["2", "1"])?;
ctx.call("hello.keys", None, &["2", "1"])?;
Ok("OK".into())
}
#[rcmd("test.all")]
fn test_all(ctx: &mut Context, _args: Vec<RStr>) -> RResult {
let reply = ctx.call("test.clear_keys", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.key", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.call_reply", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.value", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.type", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.misc", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.example_simple", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.example_helloworld", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.example_hellotype", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.example_hellotimer", None, &[])?;
check!(reply.get_type() == ReplyType::String);
let reply = ctx.call("test.example_helloblock", None, &[])?;
check!(reply.get_type() == ReplyType::String);
Ok("OK".into())
}
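// define_module! ties everything together: module name and version, the custom
// data type, and the command registrars (the *_cmd items presumably generated by
// the #[rcmd] attributes above).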
define_module! {
name: "testmodule",
version: 1,
data_types: [
MYTYPE123,
],
init_funcs: [],
commands: [
test_clear_keys_cmd,
test_key_cmd,
test_reply_integer_cmd,
test_reply_float_cmd,
test_reply_string_cmd,
test_reply_bulk_string_cmd,
test_reply_array_cmd,
test_reply_null_cmd,
test_reply_error_cmd,
test_reply_value_cmd,
test_call_reply_cmd,
test_value_cmd,
test_set_type_cmd,
test_get_type_cmd,
test_type_cmd,
test_misc_cmd,
test_example_simple_cmd,
test_example_helloworld_cmd,
test_example_hellotype_cmd,
test_example_hellotimer_cmd,
test_example_helloblock_cmd,
test_all_cmd,
]
}
|
"hello.list.splice",
|
temperature_scale_converter.py
|
"""
Temperature Scale Converter
- Converts Celsius scale to Fahrenheit and vice versa
Author : [Niyoj Oli](https://github.com/niyoj)
Date : 24/09/21
"""
temp = input("Input the temperature you would like to convert (e.g., 45F, 102C): ")
degree = int(temp[:-1])
i_convention = temp[-1]
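# Conversion formulas: F = C * 9/5 + 32 and C = (F - 32) * 5/9, rounded to the nearest degree.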
if i_convention.upper() == "C":
result = int(round((9 * degree) / 5 + 32))
o_convention = "Fahrenheit"
elif i_convention.upper() == "F":
result = int(round((degree - 32) * 5 / 9))
o_convention = "Celsius"
else:
|
print("The temperature in", o_convention, "is", result, "degrees.")
|
print("Input proper convention.")
quit()
|
pokemon.js
|
import React from 'react';
import { Link } from 'gatsby';
const Pokemon = ({ pageContext: { pokemon } }) => {
return (
|
<ul>
{pokemon.abilities.map(ability => (
<li key={ability.name}>
<Link to={`./pokemon/${pokemon.name}/ability/${ability.name}`}>
{ability.name}
</Link>
</li>
))}
</ul>
<Link to="/">Back to all Pokémon</Link>
</div>
)
}
export default Pokemon;
|
<div style={{ width: 960, margin: "4rem auto" }}>
<h1>{pokemon.name}</h1>
<img src={pokemon.sprites.front_default} alt={pokemon.name} />
<h2>Abilities</h2>
|
list.rs
|
use std::io::Error;
use tracing::{debug, instrument};
use dataplane::api::{RequestMessage, ResponseMessage};
use fluvio_sc_schema::objects::{ListRequest, ListResponse};
use fluvio_auth::{AuthContext};
use crate::services::auth::AuthServiceContext;
#[instrument(skip(request, auth_ctx))]
pub async fn handle_list_request<AC: AuthContext>(
request: RequestMessage<ListRequest>,
auth_ctx: &AuthServiceContext<AC>,
) -> Result<ResponseMessage<ListResponse>, Error> {
debug!("handling list request");
let (header, req) = request.get_header_request();
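// Dispatch each object kind to its dedicated fetch handler; every arm yields a ListResponse.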
let response = match req {
ListRequest::Topic(filter) => {
super::topic::handle_fetch_topics_request(filter, auth_ctx).await?
}
ListRequest::Spu(filter) => super::spu::handle_fetch_spus_request(filter, auth_ctx).await?,
ListRequest::SpuGroup(filter) => {
super::spg::handle_fetch_spu_groups_request(filter, auth_ctx).await?
}
ListRequest::CustomSpu(filter) => {
super::spu::handle_fetch_custom_spu_request(filter, auth_ctx).await?
}
ListRequest::Partition(filter) =>
|
ListRequest::ManagedConnector(filter) => {
super::connector::handle_fetch_request(filter, auth_ctx).await?
}
ListRequest::SmartModule(filter) => {
super::smartmodule::handle_fetch_request(filter, auth_ctx).await?
}
ListRequest::Table(filter) => super::table::handle_fetch_request(filter, auth_ctx).await?,
};
Ok(ResponseMessage::from_header(&header, response))
}
|
{
super::partition::handle_fetch_request(filter, auth_ctx).await?
}
|
beam_jobs.py
|
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers responsible for managing Apache Beam jobs."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import beam_job_services
import feconf
from typing import Any, Dict # isort: skip
class BeamJobHandler(base.BaseHandler):
"""Handler for getting the definitions of Apache Beam jobs."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {}
HANDLER_ARGS_SCHEMAS: Dict[str, Any] = {
'GET': {}
}
@acl_decorators.can_run_any_job
def get(self) -> None:
sorted_beam_jobs = sorted(
beam_job_services.get_beam_jobs(),
key=lambda j: j.name)
self.render_json({'jobs': [j.to_dict() for j in sorted_beam_jobs]})
class BeamJobRunHandler(base.BaseHandler):
"""Handler for managing the execution of Apache Beam jobs."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {}
HANDLER_ARGS_SCHEMAS: Dict[str, Any] = {
'GET': {},
'PUT': {
'job_name': {
'schema': {
'type': 'unicode'
}
},
},
'DELETE': {
'job_id': {
'schema': {
'type': 'unicode',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': r'[A-Za-z0-9]{22}'
}]
}
}
},
}
@acl_decorators.can_run_any_job
def get(self) -> None:
sorted_beam_job_runs = sorted(
beam_job_services.get_beam_job_runs(),
key=lambda j: j.job_updated_on,
reverse=True)
self.render_json({'runs': [r.to_dict() for r in sorted_beam_job_runs]})
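# PUT starts a new run of the named Apache Beam job; DELETE cancels an existing run by id.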
@acl_decorators.can_run_any_job
def put(self) -> None:
job_name: str = (
self.normalized_payload.get('job_name')
if self.normalized_payload else '')
beam_job_run = beam_job_services.run_beam_job(job_name)
self.render_json(beam_job_run.to_dict())
@acl_decorators.can_run_any_job
def delete(self) -> None:
job_id = self.request.get('job_id')
beam_job_run = beam_job_services.cancel_beam_job(job_id)
self.render_json(beam_job_run.to_dict())
class BeamJobRunResultHandler(base.BaseHandler):
"""Handler for getting the result of Apache Beam jobs."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS: Dict[str, Any] = {}
HANDLER_ARGS_SCHEMAS: Dict[str, Any] = {
'GET': {
'job_id': {
'schema': {
'type': 'unicode',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': r'[A-Za-z0-9]{22}'
}]
}
}
}
}
@acl_decorators.can_run_any_job
def
|
(self) -> None:
job_id = self.request.get('job_id')
beam_job_run_result = beam_job_services.get_beam_job_run_result(job_id)
self.render_json(beam_job_run_result.to_dict())
|
get
|
callee.go
|
// Copyright 2019 MuGuangyi. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package chancall
import (
"fmt"
"time"
)
type callee struct {
meta *meta
callRequest chan *callRequest
functions map[string]*fcall
}
func (c *callee) Name() string {
return c.meta.name
}
func (c *callee) SetTimeout(name string, timeout float32) {
c.meta.setTimeout(name, timeout)
}
func (c *callee) handling() {
for {
err := c.process(<-c.callRequest)
if nil != err {
panic(fmt.Sprintf("Invoke error %s", err.Error()))
}
}
}
func (c *callee) process(request *callRequest) (err error) {
track(request, c.meta.timeout(request.method))
result := c.meta.call(request.method, request.args...)
return c.result(request, &callResponse{result: result})
}
func (c *callee) result(request *callRequest, response *callResponse) (err error) {
if nil == request.callResponse {
return
}
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
request.Lock()
{
if !request.done {
request.done = true
request.callResponse <- response
}
}
request.Unlock()
return
}
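// track guards a call with a timeout: a goroutine sleeps for `timeout` seconds
// and, if the call has not completed by then, marks it done and delivers a
// timeout error on the response channel.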
func track(request *callRequest, timeout float32)
|
{
go func() {
time.Sleep(time.Duration(timeout) * time.Second)
request.Lock()
{
if !request.done {
request.done = true
request.callResponse <- &callResponse{
result: nil,
err: fmt.Errorf("[%s] function call timeout!", request.method),
}
}
}
request.Unlock()
}()
}
|
|
convert.go
|
// Copyright 2019 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package org converts Emacs Org-Mode to HTML.
package org
import (
"bytes"
"github.com/strawberryssg/strawberry-v0/identity"
"github.com/strawberryssg/strawberry-v0/markup/converter"
"github.com/niklasfasching/go-org/org"
"github.com/spf13/afero"
)
// Provider is the package entry point.
var Provider converter.ProviderProvider = provide{}
type provide struct {
}
func (p provide) New(cfg converter.ProviderConfig) (converter.Provider, error) {
|
cfg: cfg,
}, nil
}), nil
}
type orgConverter struct {
ctx converter.DocumentContext
cfg converter.ProviderConfig
}
func (c *orgConverter) Convert(ctx converter.RenderContext) (converter.Result, error) {
logger := c.cfg.Logger
config := org.New()
config.Log = logger.Warn()
config.ReadFile = func(filename string) ([]byte, error) {
return afero.ReadFile(c.cfg.ContentFs, filename)
}
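// Render the Org source to HTML, delegating code-block highlighting to the
// configured highlighter and falling back to the raw source on error.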
writer := org.NewHTMLWriter()
writer.HighlightCodeBlock = func(source, lang string, inline bool) string {
highlightedSource, err := c.cfg.Highlight(source, lang, "")
if err != nil {
logger.Errorf("Could not highlight source as lang %s. Using raw source.", lang)
return source
}
return highlightedSource
}
html, err := config.Parse(bytes.NewReader(ctx.Src), c.ctx.DocumentName).Write(writer)
if err != nil {
logger.Errorf("Could not render org: %s. Using unrendered content.", err)
return converter.Bytes(ctx.Src), nil
}
return converter.Bytes([]byte(html)), nil
}
func (c *orgConverter) Supports(feature identity.Identity) bool {
return false
}
|
return converter.NewProvider("org", func(ctx converter.DocumentContext) (converter.Converter, error) {
return &orgConverter{
ctx: ctx,
|
index.js
|
import { combineReducers } from 'redux';
import { NavigationActions } from 'react-navigation';
import { AppNavigator } from '../navigators/AppNavigator';
// Start with two routes: The Main screen, with the Login screen on top.
const initialNavState = {
index: 1,
routes: [
{ key: 'InitA', routeName: 'Main' },
{ key: 'InitB', routeName: 'Login' },
],
};
const initialAuthState = { isLoggedIn: false };
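// The nav reducer delegates to AppNavigator's router: 'Login' pops back to the
// Main screen, 'Logout' navigates to the Login screen, and any other action is
// handled by react-navigation's default routing.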
function nav(state = initialNavState, action) {
switch (action.type) {
case 'Login':
return AppNavigator.router.getStateForAction(NavigationActions.back(), state);
case 'Logout':
return AppNavigator.router.getStateForAction(NavigationActions.navigate({ routeName: 'Login' }), state);
default:
return AppNavigator.router.getStateForAction(action, state);
}
}
function
|
(state = initialAuthState, action) {
switch (action.type) {
case 'Login':
return { ...state, isLoggedIn: true };
case 'Logout':
return { ...state, isLoggedIn: false };
default:
return state;
}
}
const AppReducer = combineReducers({
nav,
auth,
});
export default AppReducer;
|
auth
|
server.go
|
// Package server generate by warden_gen
package server
import (
"context"
"net"
pb "go-common/app/service/main/broadcast/api/grpc/v1"
"go-common/app/service/main/broadcast/service"
"go-common/library/conf/paladin"
"go-common/library/net/rpc/warden"
"google.golang.org/grpc"
// use gzip decoder
_ "google.golang.org/grpc/encoding/gzip"
)
// New creates a Zerg warden rpc server and returns it together with its listen port.
func New(svr *service.Service) (*warden.Server, string) {
var rc struct {
Server *warden.ServerConfig
}
if err := paladin.Get("grpc.toml").UnmarshalTOML(&rc); err != nil {
panic(err)
}
_, port, _ := net.SplitHostPort(rc.Server.Addr)
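// Allow large broadcast payloads by raising both the receive and send gRPC
// message size limits to 32 MiB.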
ws := warden.NewServer(rc.Server, grpc.MaxRecvMsgSize(32*1024*1024), grpc.MaxSendMsgSize(32*1024*1024))
pb.RegisterZergServer(ws.Server(), &server{svr})
return ws, port
}
type server struct {
srv *service.Service
}
var _ pb.ZergServer = &server{}
// Ping Service
func (s *server) Ping(ctx context.Context, req *pb.PingReq) (*pb.PingReply, error) {
return &pb.PingReply{}, nil
}
// Close Service
func (s *server) Close(ctx context.Context, req *pb.CloseReq) (*pb.CloseReply, error) {
return &pb.CloseReply{}, nil
}
// Connect connects a conn.
func (s *server) Connect(ctx context.Context, req *pb.ConnectReq) (*pb.ConnectReply, error) {
mid, key, room, platform, accepts, err := s.srv.Connect(ctx, req.Server, req.ServerKey, req.Cookie, req.Token)
if err != nil {
return &pb.ConnectReply{}, err
}
return &pb.ConnectReply{Mid: mid, Key: key, RoomID: room, Accepts: accepts, Platform: platform}, nil
}
// Disconnect disconnects a conn.
func (s *server) Disconnect(ctx context.Context, req *pb.DisconnectReq) (*pb.DisconnectReply, error) {
has, err := s.srv.Disconnect(ctx, req.Mid, req.Key, req.Server)
if err != nil {
return &pb.DisconnectReply{}, err
}
return &pb.DisconnectReply{Has: has}, nil
}
// Heartbeat heartbeats a conn.
func (s *server) Heartbeat(ctx context.Context, req *pb.HeartbeatReq) (*pb.HeartbeatReply, error) {
if err := s.srv.Heartbeat(ctx, req.Mid, req.Key, req.Server); err != nil {
return &pb.HeartbeatReply{}, err
}
return &pb.HeartbeatReply{}, nil
}
// RenewOnline renews the server's online counts.
func (s *server) RenewOnline(ctx context.Context, req *pb.OnlineReq) (*pb.OnlineReply, error) {
roomCount, err := s.srv.RenewOnline(ctx, req.Server, req.Sharding, req.RoomCount)
if err != nil {
return &pb.OnlineReply{}, err
}
return &pb.OnlineReply{RoomCount: roomCount}, nil
}
// Receive receives a message.
func (s *server) Receive(ctx context.Context, req *pb.ReceiveReq) (*pb.ReceiveReply, error) {
if err := s.srv.Receive(ctx, req.Mid, req.Proto); err != nil {
return &pb.ReceiveReply{}, err
}
return &pb.ReceiveReply{}, nil
}
// ServerList returns the server list.
|
func (s *server) ServerList(ctx context.Context, req *pb.ServerListReq) (*pb.ServerListReply, error) {
return s.srv.ServerList(ctx, req.Platform), nil
}
| |
sentry~checkout-213470ac.js
|
(window.webpackJsonpCheckout=window.webpackJsonpCheckout||[]).push([[14],{1217:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),function(e){e[e.None=0]="None",e[e.Error=1]="Error",e[e.Debug=2]="Debug",e[e.Verbose=3]="Verbose"}(t.LogLevel||(t.LogLevel={}))},1218:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),function(e){e.Fatal="fatal",e.Error="error",e.Warning="warning",e.Log="log",e.Info="info",e.Debug="debug",e.Critical="critical"}(t.Severity||(t.Severity={})),function(e){e.fromString=function(t){switch(t){case"debug":return e.Debug;case"info":return e.Info;case"warn":case"warning":return e.Warning;case"error":return e.Error;case"fatal":return e.Fatal;case"critical":return e.Critical;case"log":default:return e.Log}}}(t.Severity||(t.Severity={}))},1219:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),function(e){e.Unknown="unknown",e.Skipped="skipped",e.Success="success",e.RateLimit="rate_limit",e.Invalid="invalid",e.Failed="failed"}(t.Status||(t.Status={})),function(e){e.fromHttpCode=function(t){return t>=200&&t<300?e.Success:429===t?e.RateLimit:t>=400&&t<500?e.Invalid:t>=500?e.Failed:e.Unknown}}(t.Status||(t.Status={}))},1220:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(224);function i(e){for(var t=[],n=1;n<arguments.length;n++)t[n-1]=arguments[n];var i=o.getCurrentHub();if(i&&i[e])return i[e].apply(i,r.__spread(t));throw new Error("No hub defined or "+e+" was not found on the hub, please open a bug report.")}t.captureException=function(e){var t;try{throw new Error("Sentry syntheticException")}catch(e){t=e}return i("captureException",e,{originalException:e,syntheticException:t})},t.captureMessage=function(e,t){var n;try{throw new Error(e)}catch(e){n=e}return i("captureMessage",e,t,{originalException:e,syntheticException:n})},t.captureEvent=function(e){return i("captureEvent",e)},t.configureScope=function(e){i("configureScope",e)},t.addBreadcrumb=function(e){i("addBreadcrumb",e)},t.setContext=function(e,t){i("setContext",e,t)},t.setExtras=function(e){i("setExtras",e)},t.setTags=function(e){i("setTags",e)},t.setExtra=function(e,t){i("setExtra",e,t)},t.setTag=function(e,t){i("setTag",e,t)},t.setUser=function(e){i("setUser",e)},t.withScope=function(e){i("withScope",e)},t._callOnClient=function(e){for(var t=[],n=1;n<arguments.length;n++)t[n-1]=arguments[n];i.apply(void 0,r.__spread(["_invokeClient",e],t))}},1221:function(e,t,n){n(32),n(5),n(12),n(4),n(40),n(7),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3);t.forget=function(e){e.catch((function(e){console.error(e)}))},t.filterAsync=function(e,t,n){return r.__awaiter(this,void 0,void 0,(function(){var o;return r.__generator(this,(function(r){switch(r.label){case 0:return[4,Promise.all(e.map(t,n))];case 1:return o=r.sent(),[2,e.filter((function(e,t){return o[t]}))]}}))}))}},1222:function(e,t,n){n(2),Object.defineProperty(t,"__esModule",{value:!0}),t.setPrototypeOf=Object.setPrototypeOf||({__proto__:[]}instanceof Array?function(e,t){return e.__proto__=t,e}:function(e,t){for(var n in t)e.hasOwnProperty(n)||(e[n]=t[n]);return e})},1223:function(e,t,n){(function(e){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof 
e})(e)}n(8),n(9),n(11),n(118),n(5),n(25),n(12),n(201),n(14),n(20),n(30),n(203),n(84),n(17),n(4),n(10),n(18),n(7),n(38),n(6),n(206),Object.defineProperty(t,"__esModule",{value:!0});var o=n(300),i=n(590);function a(e){return function(e){return~-encodeURI(e).split(/%..|./).length}(JSON.stringify(e))}function s(t,n){return"domain"===n&&"object"===r(t)&&t._events?"[Domain]":"domainEmitter"===n?"[DomainEmitter]":void 0!==e&&t===e?"[Global]":"undefined"!=typeof window&&t===window?"[Window]":"undefined"!=typeof document&&t===document?"[Document]":"undefined"!=typeof Event&&t instanceof Event?Object.getPrototypeOf(t)?t.constructor.name:"Event":o.isSyntheticEvent(t)?"[SyntheticEvent]":Number.isNaN(t)?"[NaN]":void 0===t?"[undefined]":"function"==typeof t?"[Function: "+(t.name||"<unknown-function-name>")+"]":t}function c(e,t,n,r){if(void 0===n&&(n=1/0),void 0===r&&(r=new i.Memo),0===n)return function(e){var t=Object.prototype.toString.call(e);if("string"==typeof e)return e;if("[object Object]"===t)return"[Object]";if("[object Array]"===t)return"[Array]";var n=s(e);return o.isPrimitive(n)?n:t}(t);if(null!=t&&"function"==typeof t.toJSON)return t.toJSON();var a=s(t,e);if(o.isPrimitive(a))return a;var u=o.isError(t)?function(e){var t={message:e.message,name:e.name,stack:e.stack};for(var n in e)Object.prototype.hasOwnProperty.call(e,n)&&(t[n]=e[n]);return t}(t):t,l=Array.isArray(t)?[]:{};if(r.memoize(t))return"[Circular ~]";for(var p in u)Object.prototype.hasOwnProperty.call(u,p)&&(l[p]=c(p,u[p],n-1,r));return r.unmemoize(t),l}function u(e,t){try{return JSON.parse(JSON.stringify(e,(function(e,n){return c(e,n,t)})))}catch(e){return"**non-serializable**"}}t.fill=function(e,t,n){if(t in e){var r=e[t],o=n(r);if("function"==typeof o)try{o.prototype=o.prototype||{},Object.defineProperties(o,{__sentry__:{enumerable:!1,value:!0},__sentry_original__:{enumerable:!1,value:r},__sentry_wrapped__:{enumerable:!1,value:o}})}catch(e){}e[t]=o}},t.urlEncode=function(e){return Object.keys(e).map((function(t){return encodeURIComponent(t)+"="+encodeURIComponent(e[t])})).join("&")},t.normalizeToSize=function e(t,n,r){void 0===n&&(n=3),void 0===r&&(r=102400);var o=u(t,n);return a(o)>r?e(t,n-1,r):o},t.walk=c,t.normalize=u}).call(this,n(90))},1224:function(e,t,n){function r(e,t){for(var n=0,r=e.length-1;r>=0;r--){var o=e[r];"."===o?e.splice(r,1):".."===o?(e.splice(r,1),n++):n&&(e.splice(r,1),n--)}if(t)for(;n--;n)e.unshift("..");return e}n(16),n(32),n(25),n(26),n(34),n(10),n(38),Object.defineProperty(t,"__esModule",{value:!0});var o=/^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;function i(e){var t=o.exec(e);return t?t.slice(1):[]}function a(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];for(var n="",o=!1,i=e.length-1;i>=-1&&!o;i--){var a=i>=0?e[i]:"/";a&&(n=a+"/"+n,o="/"===a.charAt(0))}return(o?"/":"")+(n=r(n.split("/").filter((function(e){return!!e})),!o).join("/"))||"."}function s(e){for(var t=0;t<e.length&&""===e[t];t++);for(var n=e.length-1;n>=0&&""===e[n];n--);return t>n?[]:e.slice(t,n-t+1)}function c(e){var t=u(e),n="/"===e.substr(-1),o=r(e.split("/").filter((function(e){return!!e})),!t).join("/");return o||t||(o="."),o&&n&&(o+="/"),(t?"/":"")+o}function u(e){return"/"===e.charAt(0)}t.resolve=a,t.relative=function(e,t){e=a(e).substr(1),t=a(t).substr(1);for(var n=s(e.split("/")),r=s(t.split("/")),o=Math.min(n.length,r.length),i=o,c=0;c<o;c++)if(n[c]!==r[c]){i=c;break}var 
u=[];for(c=i;c<n.length;c++)u.push("..");return(u=u.concat(r.slice(i))).join("/")},t.normalizePath=c,t.isAbsolute=u,t.join=function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return c(e.join("/"))},t.dirname=function(e){var t=i(e),n=t[0],r=t[1];return n||r?(r&&(r=r.substr(0,r.length-1)),n+r):"."},t.basename=function(e,t){var n=i(e)[2];return t&&n.substr(-1*t.length)===t&&(n=n.substr(0,n.length-t.length)),n}},1225:function(e,t,n){n(19),n(5),n(34),n(4),n(40),n(7),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(588),o=function(){function e(e){this._limit=e,this._buffer=[]}return e.prototype.isReady=function(){return void 0===this._limit||this.length()<this._limit},e.prototype.add=function(e){var t=this;return this.isReady()?(-1===this._buffer.indexOf(e)&&this._buffer.push(e),e.then((function(){return t.remove(e)})).catch((function(){return t.remove(e).catch((function(){}))})),e):Promise.reject(new r.SentryError("Not adding Promise due to buffer limit reached."))},e.prototype.remove=function(e){return this._buffer.splice(this._buffer.indexOf(e),1)[0]},e.prototype.length=function(){return this._buffer.length},e.prototype.drain=function(e){var t=this;return new Promise((function(n){var r=setTimeout((function(){e&&e>0&&n(!1)}),e);Promise.all(t._buffer).then((function(){clearTimeout(r),n(!0)})).catch((function(){n(!0)}))}))},e}();t.PromiseBuffer=o},1226:function(e,t,n){n(134),n(25),n(26),n(135),Object.defineProperty(t,"__esModule",{value:!0});var r=n(300);function o(e,t){return void 0===t&&(t=0),"string"!=typeof e||0===t?e:e.length<=t?e:e.substr(0,t)+"..."}t.truncate=o,t.snipLine=function(e,t){var n=e,r=n.length;if(r<=150)return n;t>r&&(t=r);var o=Math.max(t-60,0);o<5&&(o=0);var i=Math.min(o+140,r);return i>r-5&&(i=r),i===r&&(o=Math.max(i-140,0)),n=n.slice(o,i),o>0&&(n="'{snip} "+n),i<r&&(n+=" {snip}"),n},t.safeJoin=function(e,t){if(!Array.isArray(e))return"";for(var n=[],r=0;r<e.length;r++){var o=e[r];try{n.push(String(o))}catch(e){n.push("[value cannot be serialized]")}}return n.join(t)},t.keysToEventMessage=function(e,t){if(void 0===t&&(t=40),!e.length)return"[object has no keys]";if(e[0].length>=t)return o(e[0],t);for(var n=e.length;n>0;n--){var r=e.slice(0,n).join(", ");if(!(r.length>t))return n===e.length?r:o(r,t)}return""},t.isMatchingPattern=function(e,t){return r.isRegExp(t)?t.test(e):"string"==typeof t&&e.includes(t)}},1227:function(e,t,n){n(19),n(14),n(4),n(40),n(18),Object.defineProperty(t,"__esModule",{value:!0});var r=n(589),o=n(400);function i(){if(!("fetch"in o.getGlobalObject()))return!1;try{return new Headers,new Request(""),new Response,!0}catch(e){return!1}}t.supportsErrorEvent=function(){try{return new ErrorEvent(""),!0}catch(e){return!1}},t.supportsDOMError=function(){try{return new DOMError(""),!0}catch(e){return!1}},t.supportsDOMException=function(){try{return new DOMException(""),!0}catch(e){return!1}},t.supportsFetch=i,t.supportsNativeFetch=function(){if(!i())return!1;var e=function(e){return-1!==e.toString().indexOf("native")},t=o.getGlobalObject(),n=null,a=t.document;if(a){var s=a.createElement("iframe");s.hidden=!0;try{a.head.appendChild(s),s.contentWindow&&s.contentWindow.fetch&&(n=e(s.contentWindow.fetch)),a.head.removeChild(s)}catch(e){r.logger.warn("Could not create sandbox iframe for pure fetch check, bailing to window.fetch: ",e)}}return null===n&&(n=e(t.fetch)),n},t.supportsReportingObserver=function(){return"ReportingObserver"in o.getGlobalObject()},t.supportsReferrerPolicy=function(){if(!i())return!1;try{return new 
Request("_",{referrerPolicy:"origin"}),!0}catch(e){return!1}},t.supportsHistory=function(){var e=o.getGlobalObject(),t=e.chrome,n=t&&t.app&&t.app.runtime,r="history"in e&&!!e.history.pushState&&!!e.history.replaceState;return!n&&r}},1228:function(e,t,n){n(16),n(21),n(14),n(4),n(18),n(22),Object.defineProperty(t,"__esModule",{value:!0});var r,o=n(300);!function(e){e.PENDING="PENDING",e.RESOLVED="RESOLVED",e.REJECTED="REJECTED"}(r||(r={}));var i=function(){function e(e){var t=this;this._state=r.PENDING,this._handlers=[],this._resolve=function(e){t._setResult(e,r.RESOLVED)},this._reject=function(e){t._setResult(e,r.REJECTED)},this._setResult=function(e,n){t._state===r.PENDING&&(o.isThenable(e)?e.then(t._resolve,t._reject):(t._value=e,t._state=n,t._executeHandlers()))},this._executeHandlers=function(){t._state!==r.PENDING&&(t._state===r.REJECTED?t._handlers.forEach((function(e){return e.onFail&&e.onFail(t._value)})):t._handlers.forEach((function(e){return e.onSuccess&&e.onSuccess(t._value)})),t._handlers=[])},this._attachHandler=function(e){t._handlers=t._handlers.concat(e),t._executeHandlers()};try{e(this._resolve,this._reject)}catch(e){this._reject(e)}}return e.prototype.then=function(t,n){var r=this;return new e((function(e,o){r._attachHandler({onFail:function(t){if(n)try{return void e(n(t))}catch(e){return void o(e)}else o(t)},onSuccess:function(n){if(t)try{return void e(t(n))}catch(e){return void o(e)}else e(n)}})}))},e.prototype.catch=function(e){return this.then((function(e){return e}),e)},e.prototype.toString=function(){return"[object SyncPromise]"},e.resolve=function(t){return new e((function(e){e(t)}))},e.reject=function(t){return new e((function(e,n){n(t)}))},e}();t.SyncPromise=i},1229:function(e,t,n){(function(e){n(14),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=n(587);t.API_VERSION=3;var a=function(){function e(e,n,r){void 0===n&&(n=new i.Scope),void 0===r&&(r=t.API_VERSION),this._version=r,this._stack=[],this._stack.push({client:e,scope:n})}return e.prototype._invokeClient=function(e){for(var t,n=[],o=1;o<arguments.length;o++)n[o-1]=arguments[o];var i=this.getStackTop();i&&i.client&&i.client[e]&&(t=i.client)[e].apply(t,r.__spread(n,[i.scope]))},e.prototype.isOlderThan=function(e){return this._version<e},e.prototype.bindClient=function(e){this.getStackTop().client=e},e.prototype.pushScope=function(){var e=this.getStack(),t=e.length>0?e[e.length-1].scope:void 0,n=i.Scope.clone(t);return this.getStack().push({client:this.getClient(),scope:n}),n},e.prototype.popScope=function(){return void 0!==this.getStack().pop()},e.prototype.withScope=function(e){var t=this.pushScope();try{e(t)}finally{this.popScope()}},e.prototype.getClient=function(){return this.getStackTop().client},e.prototype.getScope=function(){return this.getStackTop().scope},e.prototype.getStack=function(){return this._stack},e.prototype.getStackTop=function(){return this._stack[this._stack.length-1]},e.prototype.captureException=function(e,t){var n=this._lastEventId=o.uuid4(),i=t;if(!t){var a=void 0;try{throw new Error("Sentry syntheticException")}catch(e){a=e}i={originalException:e,syntheticException:a}}return this._invokeClient("captureException",e,r.__assign({},i,{event_id:n})),n},e.prototype.captureMessage=function(e,t,n){var i=this._lastEventId=o.uuid4(),a=n;if(!n){var s=void 0;try{throw new Error(e)}catch(e){s=e}a={originalException:e,syntheticException:s}}return this._invokeClient("captureMessage",e,t,r.__assign({},a,{event_id:i})),i},e.prototype.captureEvent=function(e,t){var 
n=this._lastEventId=o.uuid4();return this._invokeClient("captureEvent",e,r.__assign({},t,{event_id:n})),n},e.prototype.lastEventId=function(){return this._lastEventId},e.prototype.addBreadcrumb=function(e,t){var n=this.getStackTop();if(n.scope&&n.client){var i=n.client.getOptions&&n.client.getOptions()||{},a=i.beforeBreadcrumb,s=void 0===a?null:a,c=i.maxBreadcrumbs,u=void 0===c?30:c;if(!(u<=0)){var l=(new Date).getTime()/1e3,p=r.__assign({timestamp:l},e),f=s?o.consoleSandbox((function(){return s(p,t)})):p;null!==f&&n.scope.addBreadcrumb(f,Math.min(u,100))}}},e.prototype.setUser=function(e){var t=this.getStackTop();t.scope&&t.scope.setUser(e)},e.prototype.setTags=function(e){var t=this.getStackTop();t.scope&&t.scope.setTags(e)},e.prototype.setExtras=function(e){var t=this.getStackTop();t.scope&&t.scope.setExtras(e)},e.prototype.setTag=function(e,t){var n=this.getStackTop();n.scope&&n.scope.setTag(e,t)},e.prototype.setExtra=function(e,t){var n=this.getStackTop();n.scope&&n.scope.setExtra(e,t)},e.prototype.setContext=function(e,t){var n=this.getStackTop();n.scope&&n.scope.setContext(e,t)},e.prototype.configureScope=function(e){var t=this.getStackTop();t.scope&&t.client&&e(t.scope)},e.prototype.run=function(e){var t=c(this);try{e(this)}finally{c(t)}},e.prototype.getIntegration=function(e){var t=this.getClient();if(!t)return null;try{return t.getIntegration(e)}catch(t){return o.logger.warn("Cannot retrieve integration "+e.id+" from the current Hub"),null}},e.prototype.traceHeaders=function(){var e=this.getStackTop();if(e.scope&&e.client){var t=e.scope.getSpan();if(t)return{"sentry-trace":t.toTraceparent()}}return{}},e}();function s(){var e=o.getGlobalObject();return e.__SENTRY__=e.__SENTRY__||{hub:void 0},e}function c(e){var t=s(),n=l(t);return p(t,e),n}function u(e){return!!(e&&e.__SENTRY__&&e.__SENTRY__.hub)}function l(e){return e&&e.__SENTRY__&&e.__SENTRY__.hub?e.__SENTRY__.hub:(e.__SENTRY__=e.__SENTRY__||{},e.__SENTRY__.hub=new a,e.__SENTRY__.hub)}function p(e,t){return!!e&&(e.__SENTRY__=e.__SENTRY__||{},e.__SENTRY__.hub=t,!0)}t.Hub=a,t.getMainCarrier=s,t.makeMain=c,t.getCurrentHub=function(){var n=s();u(n)&&!l(n).isOlderThan(t.API_VERSION)||p(n,new a);try{var r=o.dynamicRequire(e,"domain").active;if(!r)return l(n);if(!u(r)||l(r).isOlderThan(t.API_VERSION)){var c=l(n).getStackTop();p(r,new a(c.client,i.Scope.clone(c.scope)))}return l(r)}catch(e){return l(n)}},t.getHubFromCarrier=l,t.setHubOnCarrier=p}).call(this,n(166)(e))},1230:function(e,t,n){n(25),n(14),n(20),n(4),n(18),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=n(401),i=function(){function e(e){this.dsn=e,this._dsnObject=new o.Dsn(e)}return e.prototype.getDsn=function(){return this._dsnObject},e.prototype.getStoreEndpoint=function(){return""+this._getBaseUrl()+this.getStoreEndpointPath()},e.prototype.getStoreEndpointWithUrlEncodedAuth=function(){var e={sentry_key:this._dsnObject.user,sentry_version:"7"};return this.getStoreEndpoint()+"?"+r.urlEncode(e)},e.prototype._getBaseUrl=function(){var e=this._dsnObject,t=e.protocol?e.protocol+":":"",n=e.port?":"+e.port:"";return t+"//"+e.host+n},e.prototype.getStoreEndpointPath=function(){var e=this._dsnObject;return(e.path?"/"+e.path:"")+"/api/"+e.projectId+"/store/"},e.prototype.getRequestHeaders=function(e,t){var n=this._dsnObject,r=["Sentry sentry_version=7"];return r.push("sentry_timestamp="+(new 
Date).getTime()),r.push("sentry_client="+e+"/"+t),r.push("sentry_key="+n.user),n.pass&&r.push("sentry_secret="+n.pass),{"Content-Type":"application/json","X-Sentry-Auth":r.join(", ")}},e.prototype.getReportDialogEndpoint=function(e){void 0===e&&(e={});var t=this._dsnObject,n=this._getBaseUrl()+(t.path?"/"+t.path:"")+"/api/embed/error-page/",r=[];for(var o in r.push("dsn="+t.toString()),e)if("user"===o){if(!e.user)continue;e.user.name&&r.push("name="+encodeURIComponent(e.user.name)),e.user.email&&r.push("email="+encodeURIComponent(e.user.email))}else r.push(encodeURIComponent(o)+"="+encodeURIComponent(e[o]));return r.length?n+"?"+r.join("&"):n},e}();t.API=i},1231:function(e,t,n){n(5),n(17),n(4),n(40),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=n(401),a=n(1232),s=function(){function e(e,t){this._integrations={},this._processing=!1,this._backend=new e(t),this._options=t,t.dsn&&(this._dsn=new i.Dsn(t.dsn)),this._isEnabled()&&(this._integrations=a.setupIntegrations(this._options))}return e.prototype.captureException=function(e,t,n){var r=this,i=t&&t.event_id;return this._processing=!0,this._getBackend().eventFromException(e,t).then((function(e){return r._processEvent(e,t,n)})).then((function(e){i=e&&e.event_id,r._processing=!1})).catch((function(e){o.logger.error(e),r._processing=!1})),i},e.prototype.captureMessage=function(e,t,n,r){var i=this,a=n&&n.event_id;return this._processing=!0,(o.isPrimitive(e)?this._getBackend().eventFromMessage(""+e,t,n):this._getBackend().eventFromException(e,n)).then((function(e){return i._processEvent(e,n,r)})).then((function(e){a=e&&e.event_id,i._processing=!1})).catch((function(e){o.logger.error(e),i._processing=!1})),a},e.prototype.captureEvent=function(e,t,n){var r=this,i=t&&t.event_id;return this._processing=!0,this._processEvent(e,t,n).then((function(e){i=e&&e.event_id,r._processing=!1})).catch((function(e){o.logger.error(e),r._processing=!1})),i},e.prototype.getDsn=function(){return this._dsn},e.prototype.getOptions=function(){return this._options},e.prototype.flush=function(e){var t=this;return this._isClientProcessing(e).then((function(n){return clearInterval(n.interval),t._getBackend().getTransport().close(e).then((function(e){return n.ready&&e}))}))},e.prototype.close=function(e){var t=this;return this.flush(e).then((function(e){return t.getOptions().enabled=!1,e}))},e.prototype.getIntegrations=function(){return this._integrations||{}},e.prototype.getIntegration=function(e){try{return this._integrations[e.id]||null}catch(t){return o.logger.warn("Cannot retrieve integration "+e.id+" from the current Client"),null}},e.prototype._isClientProcessing=function(e){var t=this;return new Promise((function(n){var r=0,o=0;clearInterval(o),o=setInterval((function(){t._processing?(r+=1,e&&r>=e&&n({interval:o,ready:!1})):n({interval:o,ready:!0})}),1)}))},e.prototype._getBackend=function(){return this._backend},e.prototype._isEnabled=function(){return!1!==this.getOptions().enabled&&void 0!==this._dsn},e.prototype._prepareEvent=function(e,t,n){var i=this.getOptions(),a=i.environment,s=i.release,c=i.dist,u=i.maxValueLength,l=void 0===u?250:u,p=r.__assign({},e);void 0===p.environment&&void 0!==a&&(p.environment=a),void 0===p.release&&void 0!==s&&(p.release=s),void 0===p.dist&&void 0!==c&&(p.dist=c),p.message&&(p.message=o.truncate(p.message,l));var f=p.exception&&p.exception.values&&p.exception.values[0];f&&f.value&&(f.value=o.truncate(f.value,l));var d=p.request;d&&d.url&&(d.url=o.truncate(d.url,l)),void 
0===p.event_id&&(p.event_id=o.uuid4()),this._addIntegrations(p.sdk);var h=o.SyncPromise.resolve(p);return t&&(h=t.applyToEvent(p,n)),h},e.prototype._addIntegrations=function(e){var t=Object.keys(this._integrations);e&&t.length>0&&(e.integrations=t)},e.prototype._processEvent=function(e,t,n){var r=this,i=this.getOptions(),a=i.beforeSend,s=i.sampleRate;return this._isEnabled()?"number"==typeof s&&Math.random()>s?o.SyncPromise.reject("This event has been sampled, will not send event."):new o.SyncPromise((function(i,s){r._prepareEvent(e,n,t).then((function(e){if(null!==e){var n=e;try{if(t&&t.data&&!0===t.data.__sentry__||!a)return r._getBackend().sendEvent(n),void i(n);var c=a(e,t);if(void 0===c)o.logger.error("`beforeSend` method has to return `null` or a valid event.");else if(o.isThenable(c))r._handleAsyncBeforeSend(c,i,s);else{if(null===(n=c))return o.logger.log("`beforeSend` returned `null`, will not send event."),void i(null);r._getBackend().sendEvent(n),i(n)}}catch(e){r.captureException(e,{data:{__sentry__:!0},originalException:e}),s("`beforeSend` throw an error, will not send event.")}}else s("An event processor returned null, will not send event.")}))})):o.SyncPromise.reject("SDK not enabled, will not send event.")},e.prototype._handleAsyncBeforeSend=function(e,t,n){var r=this;e.then((function(e){null!==e?(r._getBackend().sendEvent(e),t(e)):n("`beforeSend` returned `null`, will not send event.")})).catch((function(e){n("beforeSend rejected with "+e)}))},e}();t.BaseClient=s},1232:function(e,t,n){n(21),n(19),n(12),n(20),n(22),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(224),i=n(43);function a(e){var t=e.defaultIntegrations&&r.__spread(e.defaultIntegrations)||[],n=e.integrations,o=[];if(Array.isArray(n)){var i=n.map((function(e){return e.name})),a=[];t.forEach((function(e){-1===i.indexOf(e.name)&&-1===a.indexOf(e.name)&&(o.push(e),a.push(e.name))})),n.forEach((function(e){-1===a.indexOf(e.name)&&(o.push(e),a.push(e.name))}))}else{if("function"!=typeof n)return r.__spread(t);o=n(t),o=Array.isArray(o)?o:[o]}return o}function s(e){-1===t.installedIntegrations.indexOf(e.name)&&(e.setupOnce(o.addGlobalEventProcessor,o.getCurrentHub),t.installedIntegrations.push(e.name),i.logger.log("Integration installed: "+e.name))}t.installedIntegrations=[],t.getIntegrationsToSetup=a,t.setupIntegration=s,t.setupIntegrations=function(e){var t={};return a(e).forEach((function(e){t[e.name]=e,s(e)})),t}},1233:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=n(592),i=function(){function e(e){this._options=e,this._options.dsn||r.logger.warn("No DSN provided, backend will not do anything."),this._transport=this._setupTransport()}return e.prototype._setupTransport=function(){return new o.NoopTransport},e.prototype.eventFromException=function(e,t){throw new r.SentryError("Backend has to implement `eventFromException` method")},e.prototype.eventFromMessage=function(e,t,n){throw new r.SentryError("Backend has to implement `eventFromMessage` method")},e.prototype.sendEvent=function(e){this._transport.sendEvent(e).catch((function(e){r.logger.error("Error while sending event: "+e)}))},e.prototype.getTransport=function(){return this._transport},e}();t.BaseBackend=i},1234:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(224),o=n(43);t.initAndBind=function(e,t){!0===t.debug&&o.logger.enable(),r.getCurrentHub().bindClient(new e(t))}},1235:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var 
r=n(1236);t.FunctionToString=r.FunctionToString;var o=n(1237);t.InboundFilters=o.InboundFilters},1236:function(e,t,n){var r;n(14),n(20),n(4),n(18),Object.defineProperty(t,"__esModule",{value:!0});var o=function(){function e(){this.name=e.id}return e.prototype.setupOnce=function(){r=Function.prototype.toString,Function.prototype.toString=function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var n=this.__sentry__?this.__sentry_original__:this;return r.apply(n,e)}},e.id="FunctionToString",e}();t.FunctionToString=o},1237:function(e,t,n){n(5),n(131),n(20),n(4),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(224),i=n(43),a=[/^Script error\.?$/,/^Javascript error: Script error\.? on line 0$/],s=function(){function e(t){void 0===t&&(t={}),this._options=t,this.name=e.id}return e.prototype.setupOnce=function(){o.addGlobalEventProcessor((function(t){var n=o.getCurrentHub();if(!n)return t;var r=n.getIntegration(e);if(r){var i=n.getClient(),a=i?i.getOptions():{},s=r._mergeOptions(a);if(r._shouldDropEvent(t,s))return null}return t}))},e.prototype._shouldDropEvent=function(e,t){return this._isSentryError(e,t)?(i.logger.warn("Event dropped due to being internal Sentry Error.\nEvent: "+i.getEventDescription(e)),!0):this._isIgnoredError(e,t)?(i.logger.warn("Event dropped due to being matched by `ignoreErrors` option.\nEvent: "+i.getEventDescription(e)),!0):this._isBlacklistedUrl(e,t)?(i.logger.warn("Event dropped due to being matched by `blacklistUrls` option.\nEvent: "+i.getEventDescription(e)+".\nUrl: "+this._getEventFilterUrl(e)),!0):!this._isWhitelistedUrl(e,t)&&(i.logger.warn("Event dropped due to not being matched by `whitelistUrls` option.\nEvent: "+i.getEventDescription(e)+".\nUrl: "+this._getEventFilterUrl(e)),!0)},e.prototype._isSentryError=function(e,t){if(void 0===t&&(t={}),!t.ignoreInternal)return!1;try{return"SentryError"===e.exception.values[0].type}catch(e){return!1}},e.prototype._isIgnoredError=function(e,t){return void 0===t&&(t={}),!(!t.ignoreErrors||!t.ignoreErrors.length)&&this._getPossibleEventMessages(e).some((function(e){return t.ignoreErrors.some((function(t){return i.isMatchingPattern(e,t)}))}))},e.prototype._isBlacklistedUrl=function(e,t){if(void 0===t&&(t={}),!t.blacklistUrls||!t.blacklistUrls.length)return!1;var n=this._getEventFilterUrl(e);return!!n&&t.blacklistUrls.some((function(e){return i.isMatchingPattern(n,e)}))},e.prototype._isWhitelistedUrl=function(e,t){if(void 0===t&&(t={}),!t.whitelistUrls||!t.whitelistUrls.length)return!0;var n=this._getEventFilterUrl(e);return!n||t.whitelistUrls.some((function(e){return i.isMatchingPattern(n,e)}))},e.prototype._mergeOptions=function(e){return void 0===e&&(e={}),{blacklistUrls:r.__spread(this._options.blacklistUrls||[],e.blacklistUrls||[]),ignoreErrors:r.__spread(this._options.ignoreErrors||[],e.ignoreErrors||[],a),ignoreInternal:void 0===this._options.ignoreInternal||this._options.ignoreInternal,whitelistUrls:r.__spread(this._options.whitelistUrls||[],e.whitelistUrls||[])}},e.prototype._getPossibleEventMessages=function(e){if(e.message)return[e.message];if(e.exception)try{var t=e.exception.values[0],n=t.type,r=t.value;return[""+r,n+": "+r]}catch(t){return i.logger.error("Cannot extract message for event "+i.getEventDescription(e)),[]}return[]},e.prototype._getEventFilterUrl=function(e){try{if(e.stacktrace){var t=e.stacktrace.frames;return t[t.length-1].filename}if(e.exception){var n=e.exception.values[0].stacktrace.frames;return n[n.length-1].filename}return null}catch(t){return 
i.logger.error("Cannot extract url for event "+i.getEventDescription(e)),null}},e.id="InboundFilters",e}();t.InboundFilters=s},1238:function(e,t,n){n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(99),i=n(129),a=n(43),s=n(402),c=n(301),u=n(594),l=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return r.__extends(t,e),t.prototype._setupTransport=function(){if(!this._options.dsn)return e.prototype._setupTransport.call(this);var t=r.__assign({},this._options.transportOptions,{dsn:this._options.dsn});return this._options.transport?new this._options.transport(t):a.supportsFetch()?new u.FetchTransport(t):new u.XHRTransport(t)},t.prototype.eventFromException=function(e,t){var n,r=this;if(a.isErrorEvent(e)&&e.error)return e=e.error,n=s.eventFromStacktrace(c._computeStackTrace(e)),a.SyncPromise.resolve(this._buildEvent(n,t));if(a.isDOMError(e)||a.isDOMException(e)){var o=e,u=o.name||(a.isDOMError(o)?"DOMError":"DOMException"),l=o.message?u+": "+o.message:u;return this.eventFromMessage(l,i.Severity.Error,t).then((function(e){return a.addExceptionTypeValue(e,l),a.SyncPromise.resolve(r._buildEvent(e,t))}))}if(a.isError(e))return n=s.eventFromStacktrace(c._computeStackTrace(e)),a.SyncPromise.resolve(this._buildEvent(n,t));if(a.isPlainObject(e)&&t&&t.syntheticException){var p=e;return n=s.eventFromPlainObject(p,t.syntheticException),a.addExceptionTypeValue(n,"Custom Object",void 0,{handled:!0,synthetic:!0,type:"generic"}),n.level=i.Severity.Error,a.SyncPromise.resolve(this._buildEvent(n,t))}var f=e;return this.eventFromMessage(f,void 0,t).then((function(e){return a.addExceptionTypeValue(e,""+f,void 0,{handled:!0,synthetic:!0,type:"generic"}),e.level=i.Severity.Error,a.SyncPromise.resolve(r._buildEvent(e,t))}))},t.prototype._buildEvent=function(e,t){return r.__assign({},e,{event_id:t&&t.event_id})},t.prototype.eventFromMessage=function(e,t,n){void 0===t&&(t=i.Severity.Info);var r={event_id:n&&n.event_id,level:t,message:e};if(this._options.attachStacktrace&&n&&n.syntheticException){var o=c._computeStackTrace(n.syntheticException),u=s.prepareFramesForEvent(o.stack);r.stacktrace={frames:u}}return a.SyncPromise.resolve(r)},t}(o.BaseBackend);t.BrowserBackend=l},1239:function(e,t,n){n(4),n(40),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(129),i=n(43),a=n(403),s=i.getGlobalObject(),c=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return r.__extends(t,e),t.prototype.sendEvent=function(e){var t={body:JSON.stringify(e),method:"POST",referrerPolicy:i.supportsReferrerPolicy()?"origin":""};return this._buffer.add(s.fetch(this.url,t).then((function(e){return{status:o.Status.fromHttpCode(e.status)}})))},t}(a.BaseTransport);t.FetchTransport=c},1240:function(e,t,n){n(4),n(40),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(129),i=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return r.__extends(t,e),t.prototype.sendEvent=function(e){var t=this;return this._buffer.add(new Promise((function(n,r){var i=new XMLHttpRequest;i.onreadystatechange=function(){4===i.readyState&&(200===i.status&&n({status:o.Status.fromHttpCode(i.status)}),r(i))},i.open("POST",t.url),i.send(JSON.stringify(e))})))},t}(n(403).BaseTransport);t.XHRTransport=i},1241:function(e,t,n){n(4),n(40),Object.defineProperty(t,"__esModule",{value:!0});var r=n(99),o=n(43),i=n(593),a=n(302),s=n(596);t.defaultIntegrations=[new r.Integrations.InboundFilters,new r.Integrations.FunctionToString,new s.TryCatch,new s.Breadcrumbs,new 
s.GlobalHandlers,new s.LinkedErrors,new s.UserAgent],t.init=function(e){if(void 0===e&&(e={}),void 0===e.defaultIntegrations&&(e.defaultIntegrations=t.defaultIntegrations),void 0===e.release){var n=o.getGlobalObject();n.SENTRY_RELEASE&&n.SENTRY_RELEASE.id&&(e.release=n.SENTRY_RELEASE.id)}r.initAndBind(i.BrowserClient,e)},t.showReportDialog=function(e){void 0===e&&(e={}),e.eventId||(e.eventId=r.getCurrentHub().lastEventId());var t=r.getCurrentHub().getClient();t&&t.showReportDialog(e)},t.lastEventId=function(){return r.getCurrentHub().lastEventId()},t.forceLoad=function(){},t.onLoad=function(e){e()},t.flush=function(e){var t=r.getCurrentHub().getClient();return t?t.flush(e):Promise.reject(!1)},t.close=function(e){var t=r.getCurrentHub().getClient();return t?t.close(e):Promise.reject(!1)},t.wrap=function(e){return a.wrap(e)()}},1242:function(e,t,n){n(5),n(76),n(20),n(17),n(4),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(99),i=n(129),a=n(43),s=n(302),c=n(402),u=n(301),l=function(){function e(t){this.name=e.id,this._options=r.__assign({onerror:!0,onunhandledrejection:!0},t)}return e.prototype.setupOnce=function(){Error.stackTraceLimit=50,u._subscribe((function(t,n,r){var i=r&&!0===r.__sentry_own_request__;if(!s.shouldIgnoreOnError()&&!i){var a=o.getCurrentHub().getIntegration(e);a&&o.getCurrentHub().captureEvent(a._eventFromGlobalHandler(t,r),{data:{stack:t},originalException:r})}})),this._options.onerror&&(a.logger.log("Global Handler attached: onerror"),u._installGlobalHandler()),this._options.onunhandledrejection&&(a.logger.log("Global Handler attached: onunhandledrejection"),u._installGlobalUnhandledRejectionHandler())},e.prototype._eventFromGlobalHandler=function(e,t){if(!a.isString(e.message)&&"onunhandledrejection"!==e.mechanism){var n=e.message;e.message=n.error&&a.isString(n.error.message)?n.error.message:"No error message"}if("onunhandledrejection"===e.mechanism&&(e.incomplete||"failed"===e.mode))return this._eventFromIncompleteRejection(e,t);var r=c.eventFromStacktrace(e),i={mode:e.mode};e.message&&(i.message=e.message),e.name&&(i.name=e.name);var s=o.getCurrentHub().getClient(),u=s&&s.getOptions().maxValueLength||250,l=e.original?a.truncate(JSON.stringify(a.normalize(e.original)),u):"",p="onunhandledrejection"===e.mechanism?"UnhandledRejection":"Error";return a.addExceptionTypeValue(r,l,p,{data:i,handled:!1,type:e.mechanism}),r},e.prototype._eventFromIncompleteRejection=function(e,t){var n={level:i.Severity.Error};return a.isPrimitive(t)?n.exception={values:[{type:"UnhandledRejection",value:"Non-Error promise rejection captured with value: "+t}]}:(n.exception={values:[{type:"UnhandledRejection",value:"Non-Error promise rejection captured with keys: "+a.keysToEventMessage(Object.keys(t).sort())}]},n.extra={__serialized__:a.normalizeToSize(t)}),n.exception.values&&n.exception.values[0]&&(n.exception.values[0].mechanism={data:r.__assign({mode:e.mode},e.incomplete&&{incomplete:e.incomplete},e.message&&{message:e.message},e.name&&{name:e.name}),handled:!1,type:e.mechanism}),n},e.id="GlobalHandlers",e}();t.GlobalHandlers=l},1243:function(e,t,n){n(118),n(21),n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=n(302),i=function(){function e(){this._ignoreOnError=0,this.name=e.id}return e.prototype._wrapTimeFunction=function(e){return function(){for(var t=[],n=0;n<arguments.length;n++)t[n]=arguments[n];var r=t[0];return 
t[0]=o.wrap(r,{mechanism:{data:{function:a(e)},handled:!0,type:"instrument"}}),e.apply(this,t)}},e.prototype._wrapRAF=function(e){return function(t){return e(o.wrap(t,{mechanism:{data:{function:"requestAnimationFrame",handler:a(e)},handled:!0,type:"instrument"}}))}},e.prototype._wrapEventTarget=function(e){var t=r.getGlobalObject(),n=t[e]&&t[e].prototype;n&&n.hasOwnProperty&&n.hasOwnProperty("addEventListener")&&(r.fill(n,"addEventListener",(function(t){return function(n,r,i){try{"function"==typeof r.handleEvent&&(r.handleEvent=o.wrap(r.handleEvent.bind(r),{mechanism:{data:{function:"handleEvent",handler:a(r),target:e},handled:!0,type:"instrument"}}))}catch(e){}return t.call(this,n,o.wrap(r,{mechanism:{data:{function:"addEventListener",handler:a(r),target:e},handled:!0,type:"instrument"}}),i)}})),r.fill(n,"removeEventListener",(function(e){return function(t,n,r){var o=n;try{o=o&&(o.__sentry_wrapped__||o)}catch(e){}return e.call(this,t,o,r)}})))},e.prototype.setupOnce=function(){this._ignoreOnError=this._ignoreOnError;var e=r.getGlobalObject();r.fill(e,"setTimeout",this._wrapTimeFunction.bind(this)),r.fill(e,"setInterval",this._wrapTimeFunction.bind(this)),r.fill(e,"requestAnimationFrame",this._wrapRAF.bind(this)),["EventTarget","Window","Node","ApplicationCache","AudioTrackList","ChannelMergerNode","CryptoOperation","EventSource","FileReader","HTMLUnknownElement","IDBDatabase","IDBRequest","IDBTransaction","KeyOperation","MediaController","MessagePort","ModalWindow","Notification","SVGElementInstance","Screen","TextTrack","TextTrackCue","TextTrackList","WebSocket","WebSocketWorker","Worker","XMLHttpRequest","XMLHttpRequestEventTarget","XMLHttpRequestUpload"].forEach(this._wrapEventTarget.bind(this))},e.id="TryCatch",e}();function a(e){try{return e&&e.name||"<anonymous>"}catch(e){return"<anonymous>"}}t.TryCatch=i},1244:function(e,t,n){n(118),n(21),n(134),n(26),n(20),n(135),Object.defineProperty(t,"__esModule",{value:!0});var r,o=n(3),i=n(99),a=n(129),s=n(43),c=n(302),u=s.getGlobalObject(),l=function(){function e(t){this.name=e.id,this._options=o.__assign({console:!0,dom:!0,fetch:!0,history:!0,sentry:!0,xhr:!0},t)}return e.prototype._instrumentConsole=function(){"console"in u&&["debug","info","warn","error","log","assert"].forEach((function(t){t in u.console&&s.fill(u.console,t,(function(n){return function(){for(var r=[],o=0;o<arguments.length;o++)r[o]=arguments[o];var i={category:"console",data:{extra:{arguments:s.normalize(r,3)},logger:"console"},level:a.Severity.fromString(t),message:s.safeJoin(r," ")};"assert"===t&&!1===r[0]&&(i.message="Assertion failed: "+(s.safeJoin(r.slice(1)," ")||"console.assert"),i.data.extra.arguments=s.normalize(r.slice(1),3)),e.addBreadcrumb(i,{input:r,level:t}),n&&Function.prototype.apply.call(n,u.console,r)}}))}))},e.prototype._instrumentDOM=function(){"document"in u&&(u.document.addEventListener("click",c.breadcrumbEventHandler("click"),!1),u.document.addEventListener("keypress",c.keypressEventHandler(),!1),["EventTarget","Node"].forEach((function(e){var t=u[e]&&u[e].prototype;t&&t.hasOwnProperty&&t.hasOwnProperty("addEventListener")&&(s.fill(t,"addEventListener",(function(e){return function(t,n,r){return n&&n.handleEvent?("click"===t&&s.fill(n,"handleEvent",(function(e){return function(t){return c.breadcrumbEventHandler("click")(t),e.call(this,t)}})),"keypress"===t&&s.fill(n,"handleEvent",(function(e){return function(t){return 
c.keypressEventHandler()(t),e.call(this,t)}}))):("click"===t&&c.breadcrumbEventHandler("click",!0)(this),"keypress"===t&&c.keypressEventHandler()(this)),e.call(this,t,n,r)}})),s.fill(t,"removeEventListener",(function(e){return function(t,n,r){var o=n;try{o=o&&(o.__sentry_wrapped__||o)}catch(e){}return e.call(this,t,o,r)}})))})))},e.prototype._instrumentFetch=function(){s.supportsNativeFetch()&&s.fill(u,"fetch",(function(t){return function(){for(var n=[],r=0;r<arguments.length;r++)n[r]=arguments[r];var o,c=n[0],l="GET";"string"==typeof c?o=c:"Request"in u&&c instanceof Request?(o=c.url,c.method&&(l=c.method)):o=String(c),n[1]&&n[1].method&&(l=n[1].method);var f=i.getCurrentHub().getClient(),d=f&&f.getDsn();if(d){var h=new i.API(d).getStoreEndpoint();if(h&&o.includes(h))return"POST"===l&&n[1]&&n[1].body&&p(n[1].body),t.apply(u,n)}var _={method:s.isString(l)?l.toUpperCase():l,url:o};return t.apply(u,n).then((function(t){return _.status_code=t.status,e.addBreadcrumb({category:"fetch",data:_,type:"http"},{input:n,response:t}),t})).catch((function(t){throw e.addBreadcrumb({category:"fetch",data:_,level:a.Severity.Error,type:"http"},{error:t,input:n}),t}))}}))},e.prototype._instrumentHistory=function(){var t=this;if(s.supportsHistory()){var n=function(t,n){var o=s.parseUrl(u.location.href),i=s.parseUrl(n),a=s.parseUrl(t);a.path||(a=o),r=n,o.protocol===i.protocol&&o.host===i.host&&(n=i.relative),o.protocol===a.protocol&&o.host===a.host&&(t=a.relative),e.addBreadcrumb({category:"navigation",data:{from:t,to:n}})},o=u.onpopstate;u.onpopstate=function(){for(var e=[],i=0;i<arguments.length;i++)e[i]=arguments[i];var a=u.location.href;if(n(r,a),o)return o.apply(t,e)},s.fill(u.history,"pushState",i),s.fill(u.history,"replaceState",i)}function i(e){return function(){for(var t=[],o=0;o<arguments.length;o++)t[o]=arguments[o];var i=t.length>2?t[2]:void 0;return i&&n(r,String(i)),e.apply(this,t)}}},e.prototype._instrumentXHR=function(){if("XMLHttpRequest"in u){var t=XMLHttpRequest.prototype;s.fill(t,"open",(function(e){return function(){for(var t=[],n=0;n<arguments.length;n++)t[n]=arguments[n];var r=t[1];this.__sentry_xhr__={method:s.isString(t[0])?t[0].toUpperCase():t[0],url:t[1]};var o=i.getCurrentHub().getClient(),a=o&&o.getDsn();if(a){var c=new i.API(a).getStoreEndpoint();s.isString(r)&&c&&r.includes(c)&&(this.__sentry_own_request__=!0)}return e.apply(this,t)}})),s.fill(t,"send",(function(t){return function(){for(var r=[],o=0;o<arguments.length;o++)r[o]=arguments[o];var i=this;function a(){if(4===i.readyState){if(i.__sentry_own_request__)return;try{i.__sentry_xhr__&&(i.__sentry_xhr__.status_code=i.status)}catch(e){}e.addBreadcrumb({category:"xhr",data:i.__sentry_xhr__,type:"http"},{xhr:i})}}return i.__sentry_own_request__&&p(r[0]),["onload","onerror","onprogress"].forEach((function(e){n(e,i)})),"onreadystatechange"in i&&"function"==typeof i.onreadystatechange?s.fill(i,"onreadystatechange",(function(e){return c.wrap(e,{mechanism:{data:{function:"onreadystatechange",handler:e&&e.name||"<anonymous>"},handled:!0,type:"instrument"}},a)})):i.onreadystatechange=a,t.apply(this,r)}}))}function n(e,t){e in t&&"function"==typeof t[e]&&s.fill(t,e,(function(t){return 
c.wrap(t,{mechanism:{data:{function:e,handler:t&&t.name||"<anonymous>"},handled:!0,type:"instrument"}})}))}},e.addBreadcrumb=function(t,n){i.getCurrentHub().getIntegration(e)&&i.getCurrentHub().addBreadcrumb(t,n)},e.prototype.setupOnce=function(){this._options.console&&this._instrumentConsole(),this._options.dom&&this._instrumentDOM(),this._options.xhr&&this._instrumentXHR(),this._options.fetch&&this._instrumentFetch(),this._options.history&&this._instrumentHistory()},e.id="Breadcrumbs",e}();function p(e){try{var t=JSON.parse(e);l.addBreadcrumb({category:"sentry",event_id:t.event_id,level:t.level||a.Severity.fromString("error"),message:s.getEventDescription(t)},{event:t})}catch(e){s.logger.error("Error while adding sentry type breadcrumb")}}t.Breadcrumbs=l},1245:function(e,t,n){n(5),n(20),n(4),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(99),i=n(402),a=n(301),s="cause",c=5,u=function(){function e(t){void 0===t&&(t={}),this.name=e.id,this._key=t.key||s,this._limit=t.limit||c}return e.prototype.setupOnce=function(){o.addGlobalEventProcessor((function(t,n){var r=o.getCurrentHub().getIntegration(e);return r?r._handler(t,n):t}))},e.prototype._handler=function(e,t){if(!(e.exception&&e.exception.values&&t&&t.originalException instanceof Error))return e;var n=this._walkErrorTree(t.originalException,this._key);return e.exception.values=r.__spread(n,e.exception.values),e},e.prototype._walkErrorTree=function(e,t,n){if(void 0===n&&(n=[]),!(e[t]instanceof Error)||n.length+1>=this._limit)return n;var o=a._computeStackTrace(e[t]),s=i.exceptionFromStacktrace(o);return this._walkErrorTree(e[t],t,r.__spread([s],n))},e.id="LinkedErrors",e}();t.LinkedErrors=u},1246:function(e,t,n){n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(99),i=n(43).getGlobalObject(),a=function(){function e(){this.name=e.id}return e.prototype.setupOnce=function(){o.addGlobalEventProcessor((function(t){if(o.getCurrentHub().getIntegration(e)){if(!i.navigator||!i.location)return t;var n=t.request||{};return n.url=n.url||i.location.href,n.headers=n.headers||{},n.headers["User-Agent"]=i.navigator.userAgent,r.__assign({},t,{request:n})}return t}))},e.id="UserAgent",e}();t.UserAgent=a},1247:function(e,t,n){n(5),n(20),n(4),n(10),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=/^\[((?:[$a-zA-Z0-9]+:)?(?:[$a-zA-Z0-9]+))\] (.*?)\n?(\S+)$/,a=function(){function e(t){void 0===t&&(t={}),this.name=e.id,this._angular=t.angular||o.getGlobalObject().angular}return e.prototype.setupOnce=function(t,n){var r=this;this._angular?(this._getCurrentHub=n,this._angular.module(e.moduleName,[]).config(["$provide",function(e){e.decorator("$exceptionHandler",["$delegate",r._$exceptionHandlerDecorator.bind(r)])}])):o.logger.error("AngularIntegration is missing an Angular instance")},e.prototype._$exceptionHandlerDecorator=function(t){var n=this;return function(o,a){var s=n._getCurrentHub&&n._getCurrentHub();s&&s.getIntegration(e)&&s.withScope((function(e){a&&e.setExtra("cause",a),e.addEventProcessor((function(e){var t=e.exception&&e.exception.values&&e.exception.values[0];if(t){var n=i.exec(t.value||"");n&&(t.type=n[1],t.value=n[2],e.message=t.type+": "+t.value,e.extra=r.__assign({},e.extra,{angularDocs:n[3].substr(0,250)}))}return e})),s.captureException(o)})),t(o,a)}},e.id="AngularJS",e.moduleName="ngSentry",e}();t.Angular=a},1248:function(e,t,n){n(118),n(21),n(26),n(20),n(22),Object.defineProperty(t,"__esModule",{value:!0});var r=n(129),o=n(43),i=o.getGlobalObject(),a=function(){function 
e(t){void 0===t&&(t={}),this.name=e.id,this._levels=["log","info","warn","error","debug","assert"],t.levels&&(this._levels=t.levels)}return e.prototype.setupOnce=function(t,n){"console"in i&&this._levels.forEach((function(t){t in i.console&&o.fill(i.console,t,(function(a){return function(){for(var s=[],c=0;c<arguments.length;c++)s[c]=arguments[c];var u=n();u.getIntegration(e)&&u.withScope((function(e){e.setLevel(r.Severity.fromString(t)),e.setExtra("arguments",o.normalize(s,3)),e.addEventProcessor((function(e){return e.logger="console",e}));var n=o.safeJoin(s," ");"assert"===t?!1===s[0]&&(n="Assertion failed: "+(o.safeJoin(s.slice(1)," ")||"console.assert"),e.setExtra("arguments",o.normalize(s.slice(1),3)),u.captureMessage(n)):u.captureMessage(n)})),a&&Function.prototype.apply.call(a,i.console,s)}}))}))},e.id="CaptureConsole",e}();t.CaptureConsole=a},1249:function(e,t,n){n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=function(){function e(t){this.name=e.id,this._options=r.__assign({debugger:!1,stringify:!1},t)}return e.prototype.setupOnce=function(t,n){t((function(t,r){var i=n().getIntegration(e);return i&&(i._options.debugger,o.consoleSandbox((function(){i._options.stringify?(console.log(JSON.stringify(t,null,2)),r&&console.log(JSON.stringify(r,null,2))):(console.log(t),r&&console.log(r))}))),t}))},e.id="Debug",e}();t.Debug=i},1250:function(e,t,n){n(5),n(25),n(20),n(4),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=function(){function e(){this.name=e.id}return e.prototype.setupOnce=function(t,n){t((function(t){var r=n().getIntegration(e);if(r){try{if(r._shouldDropEvent(t,r._previousEvent))return null}catch(e){return r._previousEvent=t}return r._previousEvent=t}return t}))},e.prototype._shouldDropEvent=function(e,t){return!!t&&(!!this._isSameMessageEvent(e,t)||!!this._isSameExceptionEvent(e,t))},e.prototype._isSameMessageEvent=function(e,t){var n=e.message,r=t.message;return!(!n&&!r)&&(!(n&&!r||!n&&r)&&(n===r&&(!!this._isSameFingerprint(e,t)&&!!this._isSameStacktrace(e,t))))},e.prototype._getFramesFromEvent=function(e){var t=e.exception;if(t)try{return t.values[0].stacktrace.frames}catch(e){return}else if(e.stacktrace)return e.stacktrace.frames},e.prototype._isSameStacktrace=function(e,t){var n=this._getFramesFromEvent(e),r=this._getFramesFromEvent(t);if(!n&&!r)return!0;if(n&&!r||!n&&r)return!1;if(n=n,(r=r).length!==n.length)return!1;for(var o=0;o<r.length;o++){var i=r[o],a=n[o];if(i.filename!==a.filename||i.lineno!==a.lineno||i.colno!==a.colno||i.function!==a.function)return!1}return!0},e.prototype._getExceptionFromEvent=function(e){return e.exception&&e.exception.values&&e.exception.values[0]},e.prototype._isSameExceptionEvent=function(e,t){var n=this._getExceptionFromEvent(t),r=this._getExceptionFromEvent(e);return!(!n||!r)&&(n.type===r.type&&n.value===r.value&&(!!this._isSameFingerprint(e,t)&&!!this._isSameStacktrace(e,t)))},e.prototype._isSameFingerprint=function(e,t){var n=e.fingerprint,r=t.fingerprint;if(!n&&!r)return!0;if(n&&!r||!n&&r)return!1;n=n,r=r;try{return!(n.join("")!==r.join(""))}catch(e){return!1}},e.id="Dedupe",e}();t.Dedupe=r},1251:function(e,t,n){n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=function(){function e(t){void 0===t&&(t={}),this.name=e.id,this._Ember=t.Ember||r.getGlobalObject().Ember}return e.prototype.setupOnce=function(t,n){var o=this;if(this._Ember){var 
i=this._Ember.onerror;this._Ember.onerror=function(t){if(n().getIntegration(e)&&n().captureException(t,{originalException:t}),"function"==typeof i)i.call(o._Ember,t);else if(o._Ember.testing)throw t},this._Ember.RSVP.on("error",(function(t){n().getIntegration(e)&&n().withScope((function(e){t instanceof Error?(e.setExtra("context","Unhandled Promise error detected"),n().captureException(t,{originalException:t})):(e.setExtra("reason",t),n().captureMessage("Unhandled Promise error detected"))}))}))}else r.logger.error("EmberIntegration is missing an Ember instance")},e.id="Ember",e}();t.Ember=o},1252:function(e,t,n){n(32),n(19),n(14),n(20),n(142),n(4),n(18),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=function(){function e(t){void 0===t&&(t={depth:3}),this._options=t,this.name=e.id}return e.prototype.setupOnce=function(t,n){t((function(t,r){var o=n().getIntegration(e);return o?o.enhanceEventWithErrorData(t,r):t}))},e.prototype.enhanceEventWithErrorData=function(e,t){if(!t||!t.originalException||!o.isError(t.originalException))return e;var n=this._extractErrorData(t.originalException);if(n){var i=r.__assign({},e.extra),a=o.normalize(n,this._options.depth);return o.isPlainObject(a)&&(i=r.__assign({},e.extra,a)),r.__assign({},e,{extra:i})}return e},e.prototype._extractErrorData=function(e){var t,n,i,a=null;try{var s=["name","message","stack","line","column","fileName","lineNumber","columnNumber"],c=e.name||e.constructor.name,u=Object.getOwnPropertyNames(e).filter((function(e){return-1===s.indexOf(e)}));if(u.length){var l={};try{for(var p=r.__values(u),f=p.next();!f.done;f=p.next()){var d=f.value,h=e[d];o.isError(h)&&(h=h.toString()),l[d]=h}}catch(e){t={error:e}}finally{try{f&&!f.done&&(n=p.return)&&n.call(p)}finally{if(t)throw t.error}}(i={})[c]=l,a=i}}catch(e){o.logger.error("Unable to extract extra data from the Error object:",e)}return a},e.id="ExtraErrorData",e}();t.ExtraErrorData=i},1253:function(e,t,n){n(25),n(20),n(58),Object.defineProperty(t,"__esModule",{value:!0});var r,o=n(3),i=n(43);!function(e){e.Crash="crash",e.Deprecation="deprecation",e.Intervention="intervention"}(r||(r={}));var a=function(){function e(t){void 0===t&&(t={types:[r.Crash,r.Deprecation,r.Intervention]}),this._options=t,this.name=e.id}return e.prototype.setupOnce=function(e,t){i.supportsReportingObserver()&&(this._getCurrentHub=t,new(i.getGlobalObject().ReportingObserver)(this.handler.bind(this),{buffered:!0,types:this._options.types}).observe())},e.prototype.handler=function(t){var n,i,a=this._getCurrentHub&&this._getCurrentHub();if(a&&a.getIntegration(e)){var s=function(e){a.withScope((function(t){t.setExtra("url",e.url);var n="ReportingObserver ["+e.type+"]",o="No details available";if(e.body){var i,s={};for(var c in e.body)s[c]=e.body[c];if(t.setExtra("body",s),e.type===r.Crash)o=[(i=e.body).crashId||"",i.reason||""].join(" ").trim()||o;else o=(i=e.body).message||o}a.captureMessage(n+": "+o)}))};try{for(var c=o.__values(t),u=c.next();!u.done;u=c.next()){s(u.value)}}catch(e){n={error:e}}finally{try{u&&!u.done&&(i=c.return)&&i.call(c)}finally{if(n)throw n.error}}}},e.id="ReportingObserver",e}();t.ReportingObserver=a},1254:function(e,t,n){n(5),n(20),n(4),n(344),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=function(){function e(t){var n=this;void 0===t&&(t={}),this.name=e.id,this._iteratee=function(e){if(e.filename&&e.filename.startsWith("/")){var t=n._root?r.relative(n._root,e.filename):r.basename(e.filename);e.filename="app:///"+t}return 
e},t.root&&(this._root=t.root),t.iteratee&&(this._iteratee=t.iteratee)}return e.prototype.setupOnce=function(t,n){t((function(t){var r=n().getIntegration(e);return r?r.process(t):t}))},e.prototype.process=function(e){var t=this._getFramesFromEvent(e);if(t)for(var n in t)t[n]=this._iteratee(t[n]);return e},e.prototype._getFramesFromEvent=function(e){var t=e.exception;if(t)try{return t.values[0].stacktrace.frames}catch(e){return}else if(e.stacktrace)return e.stacktrace.frames},e.id="RewriteFrames",e}();t.RewriteFrames=o},1255:function(e,t,n){n(14),n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=function(){function e(){this.name=e.id,this._startTime=Date.now()}return e.prototype.setupOnce=function(t,n){t((function(t){var r=n().getIntegration(e);return r?r.process(t):t}))},e.prototype.process=function(e){var t,n=Date.now();return r.__assign({},e,{extra:r.__assign({},e.extra,(t={},t["session:start"]=this._startTime,t["session:duration"]=n-this._startTime,t["session:end"]=n,t))})},e.id="SessionTiming",e}();t.SessionTiming=o},1256:function(e,t,n){n(118),n(21),n(131),n(20),n(473),n(17),n(22),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=function(){function e(t){if(void 0===t&&(t={}),this._options=t,this.name=e.id,!Array.isArray(t.tracingOrigins)||0===t.tracingOrigins.length){var n=["localhost",/^\//];o.logger.warn("Sentry: You need to define `tracingOrigins` in the options. Set an array of urls or patterns to trace."),o.logger.warn("Sentry: We added a reasonable default for you: "+n),t.tracingOrigins=n}}return e.prototype.setupOnce=function(t,n){!1!==this._options.traceXHR&&this._traceXHR(n),!1!==this._options.traceFetch&&this._traceFetch(n),!1!==this._options.autoStartOnDomReady&&(o.getGlobalObject().addEventListener("DOMContentLoaded",(function(){e.startTrace(n(),o.getGlobalObject().location.href)})),o.getGlobalObject().document.onreadystatechange=function(){"complete"===document.readyState&&e.startTrace(n(),o.getGlobalObject().location.href)})},e.startTrace=function(e,t){e.configureScope((function(e){e.startSpan(),e.setTransaction(t)}))},e.prototype._traceXHR=function(t){if("XMLHttpRequest"in o.getGlobalObject()){var n=XMLHttpRequest.prototype;o.fill(n,"open",(function(n){return function(){for(var r=[],o=0;o<arguments.length;o++)r[o]=arguments[o];var i=t().getIntegration(e);return i&&(i._xhrUrl=r[1]),n.apply(this,r)}})),o.fill(n,"send",(function(n){return function(){for(var r=this,i=[],a=0;a<arguments.length;a++)i[a]=arguments[a];var s=t().getIntegration(e);if(s&&s._xhrUrl&&s._options.tracingOrigins){var c=s._xhrUrl,u=t().traceHeaders(),l=s._options.tracingOrigins.some((function(e){return o.isMatchingPattern(c,e)}));l&&this.setRequestHeader&&Object.keys(u).forEach((function(e){r.setRequestHeader(e,u[e])}))}return n.apply(this,i)}}))}},e.prototype._traceFetch=function(t){o.supportsNativeFetch()&&o.fill(o.getGlobalObject(),"fetch",(function(n){return function(){for(var i=[],a=0;a<arguments.length;a++)i[a]=arguments[a];var s=t().getIntegration(e);if(s&&s._options.tracingOrigins){var c=i[0],u=i[1]=i[1]||{},l=!1;s._options.tracingOrigins.forEach((function(e){l||(l=o.isMatchingPattern(c,e))})),l&&(u.headers?Array.isArray(u.headers)?u.headers=r.__spread(u.headers,Object.entries(t().traceHeaders())):u.headers=r.__assign({},u.headers,t().traceHeaders()):u.headers=t().traceHeaders())}return n.apply(o.getGlobalObject(),i)}}))},e.id="Tracing",e}();t.Tracing=i},1257:function(e,t,n){n(5),n(20),n(4),n(6),Object.defineProperty(t,"__esModule",{value:!0});var 
r=function(){function e(){this.name=e.id}return e.prototype.setupOnce=function(t,n){t((function(t){var r=n().getIntegration(e);return r?r.process(t):t}))},e.prototype.process=function(e){for(var t=this._getFramesFromEvent(e),n=t.length-1;n>=0;n--){var r=t[n];if(!0===r.in_app){e.transaction=this._getTransaction(r);break}}return e},e.prototype._getFramesFromEvent=function(e){var t=e.exception&&e.exception.values&&e.exception.values[0];return t&&t.stacktrace&&t.stacktrace.frames||[]},e.prototype._getTransaction=function(e){return e.module||e.function?(e.module||"?")+"/"+(e.function||"?"):"<unknown>"},e.id="Transaction",e}();t.Transaction=r},1258:function(e,t,n){(function(e){n(14),n(20),n(4),n(18),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=function(){function t(e){void 0===e&&(e={}),this.name=t.id,this._attachProps=!0,this._logErrors=!1,this._Vue=e.Vue||r.getGlobalObject().Vue,void 0!==e.logErrors&&(this._logErrors=e.logErrors),!1===e.attachProps&&(this._attachProps=!1)}return t.prototype._formatComponentName=function(e){if(e.$root===e)return"root instance";var t=e._isVue?e.$options.name||e.$options._componentTag:e.name;return(t?"component <"+t+">":"anonymous component")+(e._isVue&&e.$options.__file?" at "+e.$options.__file:"")},t.prototype.setupOnce=function(n,o){var i=this;if(this._Vue&&this._Vue.config){var a=this._Vue.config.errorHandler;this._Vue.config.errorHandler=function(n,s,c){var u={};r.isPlainObject(s)&&(u.componentName=i._formatComponentName(s),i._attachProps&&(u.propsData=s.$options.propsData)),void 0!==c&&(u.lifecycleHook=c),o().getIntegration(t)&&setTimeout((function(){o().withScope((function(e){e.setContext("vue",u),o().captureException(n)}))})),"function"==typeof a&&a.call(i._Vue,n,s,c),i._logErrors&&(e&&e.env,console.error(n))}}else r.logger.error("VueIntegration is missing a Vue instance")},t.id="Vue",t}();t.Vue=o}).call(this,n(399))},129:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(1217);t.LogLevel=r.LogLevel;var o=n(1218);t.Severity=o.Severity;var i=n(1219);t.Status=i.Status},130:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(129);t.Severity=o.Severity,t.Status=o.Status;var i=n(99);t.addGlobalEventProcessor=i.addGlobalEventProcessor,t.addBreadcrumb=i.addBreadcrumb,t.captureException=i.captureException,t.captureEvent=i.captureEvent,t.captureMessage=i.captureMessage,t.configureScope=i.configureScope,t.getHubFromCarrier=i.getHubFromCarrier,t.getCurrentHub=i.getCurrentHub,t.Hub=i.Hub,t.Scope=i.Scope,t.setContext=i.setContext,t.setExtra=i.setExtra,t.setExtras=i.setExtras,t.setTag=i.setTag,t.setTags=i.setTags,t.setUser=i.setUser,t.Span=i.Span,t.withScope=i.withScope;var a=n(593);t.BrowserClient=a.BrowserClient;var s=n(1241);t.defaultIntegrations=s.defaultIntegrations,t.forceLoad=s.forceLoad,t.init=s.init,t.lastEventId=s.lastEventId,t.onLoad=s.onLoad,t.showReportDialog=s.showReportDialog,t.flush=s.flush,t.close=s.close,t.wrap=s.wrap;var c=n(595);t.SDK_NAME=c.SDK_NAME,t.SDK_VERSION=c.SDK_VERSION;var u=n(99),l=n(43),p=n(596),f=n(594);t.Transports=f;var d={},h=l.getGlobalObject();h.Sentry&&h.Sentry.Integrations&&(d=h.Sentry.Integrations);var _=r.__assign({},d,u.Integrations,p);t.Integrations=_},224:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(587);t.addGlobalEventProcessor=r.addGlobalEventProcessor,t.Scope=r.Scope;var 
o=n(1229);t.getCurrentHub=o.getCurrentHub,t.getHubFromCarrier=o.getHubFromCarrier,t.getMainCarrier=o.getMainCarrier,t.Hub=o.Hub,t.makeMain=o.makeMain,t.setHubOnCarrier=o.setHubOnCarrier;var i=n(591);t.Span=i.Span,t.TRACEPARENT_REGEXP=i.TRACEPARENT_REGEXP},300:function(e,t,n){function r(e){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e})(e)}function o(e){return"[object Object]"===Object.prototype.toString.call(e)}n(8),n(9),n(11),n(5),n(14),n(4),n(18),n(7),n(6),Object.defineProperty(t,"__esModule",{value:!0}),t.isError=function(e){switch(Object.prototype.toString.call(e)){case"[object Error]":case"[object Exception]":case"[object DOMException]":return!0;default:return e instanceof Error}},t.isErrorEvent=function(e){return"[object ErrorEvent]"===Object.prototype.toString.call(e)},t.isDOMError=function(e){return"[object DOMError]"===Object.prototype.toString.call(e)},t.isDOMException=function(e){return"[object DOMException]"===Object.prototype.toString.call(e)},t.isString=function(e){return"[object String]"===Object.prototype.toString.call(e)},t.isPrimitive=function(e){return null===e||"object"!==r(e)&&"function"!=typeof e},t.isPlainObject=o,t.isRegExp=function(e){return"[object RegExp]"===Object.prototype.toString.call(e)},t.isThenable=function(e){return Boolean(e&&e.then&&"function"==typeof e.then)},t.isSyntheticEvent=function(e){return o(e)&&"nativeEvent"in e&&"preventDefault"in e&&"stopPropagation"in e}},301:function(e,t,n){n(8),n(9),n(19),n(26),n(34),n(14),n(20),n(30),n(203),n(4),n(10),n(18),n(68),n(23),n(38),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=o.getGlobalObject(),a={_report:!1,_collectWindowErrors:!1,_computeStackTrace:!1,_linesOfContext:!1},s="?",c=/^(?:[Uu]ncaught (?:exception: )?)?(?:((?:Eval|Internal|Range|Reference|Syntax|Type|URI|)Error): )?(.*)$/;function u(e,t){return Object.prototype.hasOwnProperty.call(e,t)}function l(){return"undefined"==typeof document||null==document.location?"":document.location.href}a._report=function(){var e,t,n=[],p=null,f=null;function d(e,t,r){var o=null;if(!t||a._collectWindowErrors){for(var i in n)if(u(n,i))try{n[i](e,t,r)}catch(e){o=e}if(o)throw o}}function h(t,n,i,u,p){var h=null;if(p=o.isErrorEvent(p)?p.error:p,t=o.isErrorEvent(t)?t.message:t,f)a._computeStackTrace._augmentStackTraceWithInitialElement(f,n,i,t),v();else if(p&&o.isError(p))(h=a._computeStackTrace(p)).mechanism="onerror",d(h,!0,p);else{var _,g={url:n,line:i,column:u},y=t;if("[object String]"==={}.toString.call(t)){var m=t.match(c);m&&(_=m[1],y=m[2])}g.func=s,g.context=null,d(h={name:_,message:y,mode:"onerror",mechanism:"onerror",stack:[r.__assign({},g,{url:g.url||l()})]},!0,null)}return!!e&&e.apply(this,arguments)}function _(e){var t=e;try{t=e&&"reason"in e?e.reason:e}catch(e){}var n=a._computeStackTrace(t);n.mechanism="onunhandledrejection",d(n,!0,t)}function v(){var e=f,t=p;f=null,p=null,d(e,!1,t)}function g(e){if(f){if(p===e)return;v()}var t=a._computeStackTrace(e);throw f=t,p=e,setTimeout((function(){p===e&&v()}),t.incomplete?2e3:0),e}return g._subscribe=function(e){n.push(e)},g._installGlobalHandler=function(){!0!==t&&(e=i.onerror,i.onerror=h,t=!0)},g._installGlobalUnhandledRejectionHandler=function(){i.onunhandledrejection=_},g}(),a._computeStackTrace=function(){function e(e){if(!e||!e.stack)return null;for(var t,n,r,o=/^\s*at (?:(.*?) 
?\()?((?:file|https?|blob|chrome-extension|native|eval|webpack|<anonymous>|[-a-z]+:|\/).*?)(?::(\d+))?(?::(\d+))?\)?\s*$/i,i=/^\s*(.*?)(?:\((.*?)\))?(?:^|@)?((?:file|https?|blob|chrome|webpack|resource|moz-extension).*?:\/.*?|\[native code\]|[^@]*(?:bundle|\d+\.js))(?::(\d+))?(?::(\d+))?\s*$/i,a=/^\s*at (?:((?:\[object object\])?.+) )?\(?((?:file|ms-appx|https?|webpack|blob):.*?):(\d+)(?::(\d+))?\)?\s*$/i,c=/(\S+) line (\d+)(?: > eval line \d+)* > eval/i,u=/\((\S*)(?::(\d+))(?::(\d+))\)/,l=e.stack.split("\n"),p=[],f=/^(.*) is undefined$/.exec(e.message),d=0,h=l.length;d<h;++d){if(n=o.exec(l[d])){var _=n[2]&&0===n[2].indexOf("native");n[2]&&0===n[2].indexOf("eval")&&(t=u.exec(n[2]))&&(n[2]=t[1],n[3]=t[2],n[4]=t[3]),r={url:n[2],func:n[1]||s,args:_?[n[2]]:[],line:n[3]?+n[3]:null,column:n[4]?+n[4]:null}}else if(n=a.exec(l[d]))r={url:n[2],func:n[1]||s,args:[],line:+n[3],column:n[4]?+n[4]:null};else{if(!(n=i.exec(l[d])))continue;n[3]&&n[3].indexOf(" > eval")>-1&&(t=c.exec(n[3]))?(n[1]=n[1]||"eval",n[3]=t[1],n[4]=t[2],n[5]=""):0!==d||n[5]||void 0===e.columnNumber||(p[0].column=e.columnNumber+1),r={url:n[3],func:n[1]||s,args:n[2]?n[2].split(","):[],line:n[4]?+n[4]:null,column:n[5]?+n[5]:null}}!r.func&&r.line&&(r.func=s),r.context=null,p.push(r)}return p.length?(p[0]&&p[0].line&&!p[0].column&&f&&(p[0].column=null),{mode:"stack",name:e.name,message:e.message,stack:p}):null}function t(e,t,n,r){var o={url:t,line:n};if(o.url&&o.line){if(e.incomplete=!1,o.func||(o.func=s),o.context||(o.context=null),/ '([^']+)' /.exec(r)&&(o.column=null),e.stack.length>0&&e.stack[0].url===o.url){if(e.stack[0].line===o.line)return!1;if(!e.stack[0].line&&e.stack[0].func===o.func)return e.stack[0].line=o.line,e.stack[0].context=o.context,!1}return e.stack.unshift(o),e.partial=!0,!0}return e.incomplete=!0,!1}function
|
(e,r){for(var o,i,c=/function\s+([_$a-zA-Z\xA0-\uFFFF][_$a-zA-Z0-9\xA0-\uFFFF]*)?\s*\(/i,u=[],l={},p=!1,f=n.caller;f&&!p;f=f.caller)if(f!==h&&f!==a._report){if(i={url:null,func:s,args:[],line:null,column:null},f.name?i.func=f.name:(o=c.exec(f.toString()))&&(i.func=o[1]),void 0===i.func)try{i.func=o.input.substring(0,o.input.indexOf("{"))}catch(e){}l[""+f]?p=!0:l[""+f]=!0,u.push(i)}r&&u.splice(0,r);var d={mode:"callers",name:e.name,message:e.message,stack:u};return t(d,e.sourceURL||e.fileName,e.line||e.lineNumber,e.message||e.description),d}function o(t,r){var o=null,a=t&&t.framesToPop;r=null==r?0:+r;try{if(o=function(e){var t=e.stacktrace;if(t){for(var n,r=/ line (\d+).*script (?:in )?(\S+)(?:: in function (\S+))?$/i,o=/ line (\d+), column (\d+)\s*(?:in (?:<anonymous function: ([^>]+)>|([^\)]+))\((.*)\))? in (.*):\s*$/i,i=t.split("\n"),a=[],c=0;c<i.length;c+=2){var u=null;(n=r.exec(i[c]))?u={url:n[2],line:+n[1],column:null,func:n[3],args:[]}:(n=o.exec(i[c]))&&(u={url:n[6],line:+n[1],column:+n[2],func:n[3]||n[4],args:n[5]?n[5].split(","):[]}),u&&(!u.func&&u.line&&(u.func=s),u.line&&(u.context=null),u.context||(u.context=[i[c+1]]),a.push(u))}return a.length?{mode:"stacktrace",name:e.name,message:e.message,stack:a}:null}}(t))return c(o,a)}catch(e){}try{if(o=e(t))return c(o,a)}catch(e){}try{if(o=function(e){var t=e.message.split("\n");if(t.length<4)return null;var n,r=/^\s*Line (\d+) of linked script ((?:file|https?|blob)\S+)(?:: in function (\S+))?\s*$/i,o=/^\s*Line (\d+) of inline#(\d+) script in ((?:file|https?|blob)\S+)(?:: in function (\S+))?\s*$/i,a=/^\s*Line (\d+) of function script\s*$/i,c=[],p=i&&i.document&&i.document.getElementsByTagName("script"),f=[];for(var d in p)u(p,d)&&!p[d].src&&f.push(p[d]);for(var h=2;h<t.length;h+=2){var _=null;if(n=r.exec(t[h]))_={url:n[2],func:n[3],args:[],line:+n[1],column:null};else if(n=o.exec(t[h]))_={url:n[3],func:n[4],args:[],line:+n[1],column:null};else if(n=a.exec(t[h])){_={url:l().replace(/#.*$/,""),func:"",args:[],line:n[1],column:null}}_&&(_.func||(_.func=s),_.context=[t[h+1]],c.push(_))}return c.length?{mode:"multiline",name:e.name,message:t[0],stack:c}:null}(t))return c(o,a)}catch(e){}try{if(o=n(t,r+1))return c(o,a)}catch(e){}return{original:t,name:t&&t.name,message:t&&t.message,mode:"failed"}}function c(e,t){if(Number.isNaN(t))return e;try{return r.__assign({},e,{stack:e.stack.slice(t)})}catch(t){return e}}return o._augmentStackTraceWithInitialElement=t,o._computeStackTraceFromStackProp=e,o}(),a._collectWindowErrors=!0,a._linesOfContext=11;var p=a._report._subscribe;t._subscribe=p;var f=a._report._installGlobalHandler;t._installGlobalHandler=f;var d=a._report._installGlobalUnhandledRejectionHandler;t._installGlobalUnhandledRejectionHandler=d;var h=a._computeStackTrace;t._computeStackTrace=h},302:function(e,t,n){n(25),n(12),n(26),n(20),n(91),n(10),n(38),Object.defineProperty(t,"__esModule",{value:!0});var r,o,i=n(3),a=n(99),s=n(43),c=1e3,u=0;function l(){u+=1,setTimeout((function(){u-=1}))}t.shouldIgnoreOnError=function(){return u>0},t.ignoreNextOnError=l,t.wrap=function e(t,n,r){if(void 0===n&&(n={}),"function"!=typeof t)return t;try{if(t.__sentry__)return t;if(t.__sentry_wrapped__)return t.__sentry_wrapped__}catch(e){return t}var o=function(){r&&"function"==typeof r&&r.apply(this,arguments);var o=Array.prototype.slice.call(arguments);try{var c=o.map((function(t){return e(t,n)}));return t.handleEvent?t.handleEvent.apply(this,c):t.apply(this,c)}catch(e){throw l(),a.withScope((function(t){t.addEventProcessor((function(e){var 
t=i.__assign({},e);return n.mechanism&&s.addExceptionTypeValue(t,void 0,void 0,n.mechanism),t.extra=i.__assign({},t.extra,{arguments:s.normalize(o,3)}),t})),a.captureException(e)})),e}};try{for(var c in t)Object.prototype.hasOwnProperty.call(t,c)&&(o[c]=t[c])}catch(e){}t.prototype=t.prototype||{},o.prototype=t.prototype,Object.defineProperty(t,"__sentry_wrapped__",{enumerable:!1,value:o}),Object.defineProperties(o,{__sentry__:{enumerable:!1,value:!0},__sentry_original__:{enumerable:!1,value:t}});try{Object.getOwnPropertyDescriptor(o,"name").configurable&&Object.defineProperty(o,"name",{get:function(){return t.name}})}catch(e){}return o};var p=0;function f(e,t){return void 0===t&&(t=!1),function(n){if(r=void 0,n&&o!==n){o=n;var i=function(){var t;try{t=n.target?d(n.target):d(n)}catch(e){t="<unknown>"}0!==t.length&&a.getCurrentHub().addBreadcrumb({category:"ui."+e,message:t},{event:n,name:e})};p&&clearTimeout(p),t?p=setTimeout(i):i()}}}function d(e){for(var t,n=e,r=[],o=0,i=0,a=" > ".length;n&&o++<5&&!("html"===(t=h(n))||o>1&&i+r.length*a+t.length>=80);)r.push(t),i+=t.length,n=n.parentNode;return r.reverse().join(" > ")}function h(e){var t,n,r,o,i,a=[];if(!e||!e.tagName)return"";if(a.push(e.tagName.toLowerCase()),e.id&&a.push("#"+e.id),(t=e.className)&&s.isString(t))for(n=t.split(/\s+/),i=0;i<n.length;i++)a.push("."+n[i]);var c=["type","name","title","alt"];for(i=0;i<c.length;i++)r=c[i],(o=e.getAttribute(r))&&a.push("["+r+'="'+o+'"]');return a.join("")}t.breadcrumbEventHandler=f,t.keypressEventHandler=function(){return function(e){var t;try{t=e.target}catch(e){return}var n=t&&t.tagName;n&&("INPUT"===n||"TEXTAREA"===n||t.isContentEditable)&&(r||f("input")(e),clearTimeout(r),r=setTimeout((function(){r=void 0}),c))}}},400:function(e,t,n){(function(e,r){function o(){return"[object process]"===Object.prototype.toString.call(void 0!==e?e:0)}n(21),n(5),n(159),n(14),n(17),n(4),n(10),n(18),n(68),n(23),n(494),n(253),n(254),n(255),n(256),n(257),n(258),n(259),n(260),n(261),n(262),n(263),n(264),n(265),n(266),n(267),n(268),n(269),n(270),n(271),n(272),n(273),n(274),n(275),n(22),n(6),Object.defineProperty(t,"__esModule",{value:!0}),t.dynamicRequire=function(e,t){return e.require(t)},t.isNodeEnv=o;var i={};function a(){return o()?r:"undefined"!=typeof window?window:"undefined"!=typeof self?self:i}t.getGlobalObject=a,t.uuid4=function(){var e=a(),t=e.crypto||e.msCrypto;if(void 0!==t&&t.getRandomValues){var n=new Uint16Array(8);t.getRandomValues(n),n[3]=4095&n[3]|16384,n[4]=16383&n[4]|32768;var r=function(e){for(var t=e.toString(16);t.length<4;)t="0"+t;return t};return r(n[0])+r(n[1])+r(n[2])+r(n[3])+r(n[4])+r(n[5])+r(n[6])+r(n[7])}return"xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx".replace(/[xy]/g,(function(e){var t=16*Math.random()|0;return("x"===e?t:3&t|8).toString(16)}))},t.parseUrl=function(e){if(!e)return{};var t=e.match(/^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?$/);if(!t)return{};var n=t[6]||"",r=t[8]||"";return{host:t[4],path:t[5],protocol:t[2],relative:t[5]+n+r}},t.getEventDescription=function(e){if(e.message)return e.message;if(e.exception&&e.exception.values&&e.exception.values[0]){var t=e.exception.values[0];return t.type&&t.value?t.type+": "+t.value:t.type||t.value||e.event_id||"<unknown>"}return e.event_id||"<unknown>"},t.consoleSandbox=function(e){var t=a();if(!("console"in t))return e();var n=t.console,r={};["debug","info","warn","error","log","assert"].forEach((function(e){e in t.console&&n[e].__sentry__&&(r[e]=n[e].__sentry_wrapped__,n[e]=n[e].__sentry_original__)}));var o=e();return 
Object.keys(r).forEach((function(e){n[e]=r[e]})),o},t.addExceptionTypeValue=function(e,t,n,r){void 0===r&&(r={handled:!0,type:"generic"}),e.exception=e.exception||{},e.exception.values=e.exception.values||[],e.exception.values[0]=e.exception.values[0]||{},e.exception.values[0].value=e.exception.values[0].value||t||"",e.exception.values[0].type=e.exception.values[0].type||n||"Error",e.exception.values[0].mechanism=e.exception.values[0].mechanism||r}}).call(this,n(399),n(90))},401:function(e,t,n){n(21),n(25),n(26),n(14),n(30),n(203),n(75),n(4),n(89),n(10),n(18),n(38),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=/^(?:(\w+):)\/\/(?:(\w+)(?::(\w+))?@)([\w\.-]+)(?::(\d+))?\/(.+)/,a=function(){function e(e){"string"==typeof e?this._fromString(e):this._fromComponents(e),this._validate()}return e.prototype.toString=function(e){void 0===e&&(e=!1);var t=this,n=t.host,r=t.path,o=t.pass,i=t.port,a=t.projectId;return t.protocol+"://"+t.user+(e&&o?":"+o:"")+"@"+n+(i?":"+i:"")+"/"+(r?r+"/":r)+a},e.prototype._fromString=function(e){var t=i.exec(e);if(!t)throw new o.SentryError("Invalid Dsn");var n=r.__read(t.slice(1),6),a=n[0],s=n[1],c=n[2],u=void 0===c?"":c,l=n[3],p=n[4],f=void 0===p?"":p,d="",h=n[5],_=h.split("/");_.length>1&&(d=_.slice(0,-1).join("/"),h=_.pop()),Object.assign(this,{host:l,pass:u,path:d,projectId:h,port:f,protocol:a,user:s})},e.prototype._fromComponents=function(e){this.protocol=e.protocol,this.user=e.user,this.pass=e.pass||"",this.host=e.host,this.port=e.port||"",this.path=e.path||"",this.projectId=e.projectId},e.prototype._validate=function(){var e=this;if(["protocol","user","host","projectId"].forEach((function(t){if(!e[t])throw new o.SentryError("Invalid Dsn")})),"http"!==this.protocol&&"https"!==this.protocol)throw new o.SentryError("Invalid Dsn");if(this.port&&Number.isNaN(parseInt(this.port,10)))throw new o.SentryError("Invalid Dsn")},e}();t.Dsn=a},402:function(e,t,n){n(134),n(12),n(26),n(76),n(20),n(17),n(135),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43),o=n(301),i=50;function a(e){var t=s(e.stack),n={type:e.name,value:e.message};return t&&t.length&&(n.stacktrace={frames:t}),void 0===n.type&&""===n.value&&(n.value="Unrecoverable error caught"),n}function s(e){if(!e||!e.length)return[];var t=e,n=t[0].func||"",r=t[t.length-1].func||"";return(n.includes("captureMessage")||n.includes("captureException"))&&(t=t.slice(1)),r.includes("sentryWrapped")&&(t=t.slice(0,-1)),t.map((function(e){return{colno:e.column,filename:e.url||t[0].url,function:e.func||"?",in_app:!0,lineno:e.line}})).slice(0,i).reverse()}t.exceptionFromStacktrace=a,t.eventFromPlainObject=function(e,t){var n=Object.keys(e).sort(),i={extra:{__serialized__:r.normalizeToSize(e)},message:"Non-Error exception captured with keys: "+r.keysToEventMessage(n)};if(t){var a=s(o._computeStackTrace(t).stack);i.stacktrace={frames:a}}return i},t.eventFromStacktrace=function(e){return{exception:{values:[a(e)]}}},t.prepareFramesForEvent=s},403:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(99),o=n(43),i=function(){function e(e){this.options=e,this._buffer=new o.PromiseBuffer(30),this.url=new r.API(this.options.dsn).getStoreEndpointWithUrlEncodedAuth()}return e.prototype.sendEvent=function(e){throw new o.SentryError("Transport Class has to implement `sendEvent` method")},e.prototype.close=function(e){return this._buffer.drain(e)},e}();t.BaseTransport=i},43:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var 
r=n(3);r.__exportStar(n(1221),t),r.__exportStar(n(588),t),r.__exportStar(n(300),t),r.__exportStar(n(589),t),r.__exportStar(n(590),t),r.__exportStar(n(400),t),r.__exportStar(n(1223),t),r.__exportStar(n(1224),t),r.__exportStar(n(1225),t),r.__exportStar(n(1226),t),r.__exportStar(n(1227),t),r.__exportStar(n(1228),t)},587:function(e,t,n){n(16),n(21),n(26),n(14),n(75),n(17),n(22),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(43),i=n(591),a=function(){function e(){this._notifyingListeners=!1,this._scopeListeners=[],this._eventProcessors=[],this._breadcrumbs=[],this._user={},this._tags={},this._extra={},this._context={}}return e.prototype.addScopeListener=function(e){this._scopeListeners.push(e)},e.prototype.addEventProcessor=function(e){return this._eventProcessors.push(e),this},e.prototype._notifyScopeListeners=function(){var e=this;this._notifyingListeners||(this._notifyingListeners=!0,setTimeout((function(){e._scopeListeners.forEach((function(t){t(e)})),e._notifyingListeners=!1})))},e.prototype._notifyEventProcessors=function(e,t,n,i){var a=this;return void 0===i&&(i=0),new o.SyncPromise((function(s,c){var u=e[i];if(null===t||"function"!=typeof u)s(t);else{var l=u(r.__assign({},t),n);o.isThenable(l)?l.then((function(t){return a._notifyEventProcessors(e,t,n,i+1).then(s)})).catch(c):a._notifyEventProcessors(e,l,n,i+1).then(s).catch(c)}}))},e.prototype.setUser=function(e){return this._user=o.normalize(e),this._notifyScopeListeners(),this},e.prototype.setTags=function(e){return this._tags=r.__assign({},this._tags,o.normalize(e)),this._notifyScopeListeners(),this},e.prototype.setTag=function(e,t){var n;return this._tags=r.__assign({},this._tags,((n={})[e]=o.normalize(t),n)),this._notifyScopeListeners(),this},e.prototype.setExtras=function(e){return this._extra=r.__assign({},this._extra,o.normalize(e)),this._notifyScopeListeners(),this},e.prototype.setExtra=function(e,t){var n;return this._extra=r.__assign({},this._extra,((n={})[e]=o.normalize(t),n)),this._notifyScopeListeners(),this},e.prototype.setFingerprint=function(e){return this._fingerprint=o.normalize(e),this._notifyScopeListeners(),this},e.prototype.setLevel=function(e){return this._level=o.normalize(e),this._notifyScopeListeners(),this},e.prototype.setTransaction=function(e){return this._transaction=e,this._notifyScopeListeners(),this},e.prototype.setContext=function(e,t){return this._context[e]=t?o.normalize(t):void 0,this._notifyScopeListeners(),this},e.prototype.setSpan=function(e){return this._span=e,this._notifyScopeListeners(),this},e.prototype.startSpan=function(e){var t=new i.Span;return t.setParent(e),this.setSpan(t),t},e.prototype.getSpan=function(){return this._span},e.clone=function(t){var n=new e;return Object.assign(n,t,{_scopeListeners:[]}),t&&(n._breadcrumbs=r.__spread(t._breadcrumbs),n._tags=r.__assign({},t._tags),n._extra=r.__assign({},t._extra),n._context=r.__assign({},t._context),n._user=t._user,n._level=t._level,n._span=t._span,n._transaction=t._transaction,n._fingerprint=t._fingerprint,n._eventProcessors=r.__spread(t._eventProcessors)),n},e.prototype.clear=function(){return this._breadcrumbs=[],this._tags={},this._extra={},this._user={},this._context={},this._level=void 0,this._transaction=void 0,this._fingerprint=void 0,this._span=void 0,this._notifyScopeListeners(),this},e.prototype.addBreadcrumb=function(e,t){var n=(new Date).getTime()/1e3,i=r.__assign({timestamp:n},e);return this._breadcrumbs=void 
0!==t&&t>=0?r.__spread(this._breadcrumbs,[o.normalize(i)]).slice(-t):r.__spread(this._breadcrumbs,[o.normalize(i)]),this._notifyScopeListeners(),this},e.prototype.clearBreadcrumbs=function(){return this._breadcrumbs=[],this._notifyScopeListeners(),this},e.prototype._applyFingerprint=function(e){e.fingerprint=e.fingerprint?Array.isArray(e.fingerprint)?e.fingerprint:[e.fingerprint]:[],this._fingerprint&&(e.fingerprint=e.fingerprint.concat(this._fingerprint)),e.fingerprint&&!e.fingerprint.length&&delete e.fingerprint},e.prototype.applyToEvent=function(e,t){return this._extra&&Object.keys(this._extra).length&&(e.extra=r.__assign({},this._extra,e.extra)),this._tags&&Object.keys(this._tags).length&&(e.tags=r.__assign({},this._tags,e.tags)),this._user&&Object.keys(this._user).length&&(e.user=r.__assign({},this._user,e.user)),this._context&&Object.keys(this._context).length&&(e.contexts=r.__assign({},this._context,e.contexts)),this._level&&(e.level=this._level),this._transaction&&(e.transaction=this._transaction),this._span&&(e.contexts=e.contexts||{},e.contexts.trace=this._span),this._applyFingerprint(e),e.breadcrumbs=r.__spread(e.breadcrumbs||[],this._breadcrumbs),e.breadcrumbs=e.breadcrumbs.length>0?e.breadcrumbs:void 0,this._notifyEventProcessors(r.__spread(s(),this._eventProcessors),e,t)},e}();function s(){var e=o.getGlobalObject();return e.__SENTRY__=e.__SENTRY__||{},e.__SENTRY__.globalEventProcessors=e.__SENTRY__.globalEventProcessors||[],e.__SENTRY__.globalEventProcessors}t.Scope=a,t.addGlobalEventProcessor=function(e){s().push(e)}},588:function(e,t,n){n(20),Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(1222),i=function(e){function t(t){var n=this.constructor,r=e.call(this,t)||this;return r.message=t,r.name=n.prototype.constructor.name,o.setPrototypeOf(r,n.prototype),r}return r.__extends(t,e),t}(Error);t.SentryError=i},589:function(e,t,n){n(25),Object.defineProperty(t,"__esModule",{value:!0});var r=n(400),o=r.getGlobalObject(),i="Sentry Logger ",a=function(){function e(){this._enabled=!1}return e.prototype.disable=function(){this._enabled=!1},e.prototype.enable=function(){this._enabled=!0},e.prototype.log=function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];this._enabled&&r.consoleSandbox((function(){o.console.log(i+"[Log]: "+e.join(" "))}))},e.prototype.warn=function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];this._enabled&&r.consoleSandbox((function(){o.console.warn(i+"[Warn]: "+e.join(" "))}))},e.prototype.error=function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];this._enabled&&r.consoleSandbox((function(){o.console.error(i+"[Error]: "+e.join(" "))}))},e}();o.__SENTRY__=o.__SENTRY__||{};var s=o.__SENTRY__.logger||(o.__SENTRY__.logger=new a);t.logger=s},590:function(e,t,n){n(5),n(34),n(4),n(7),n(348),n(6),Object.defineProperty(t,"__esModule",{value:!0});var r=function(){function e(){this._hasWeakSet="function"==typeof WeakSet,this._inner=this._hasWeakSet?new WeakSet:[]}return e.prototype.memoize=function(e){if(this._hasWeakSet)return!!this._inner.has(e)||(this._inner.add(e),!1);for(var t=0;t<this._inner.length;t++){if(this._inner[t]===e)return!0}return this._inner.push(e),!1},e.prototype.unmemoize=function(e){if(this._hasWeakSet)this._inner.delete(e);else for(var t=0;t<this._inner.length;t++)if(this._inner[t]===e){this._inner.splice(t,1);break}},e}();t.Memo=r},591:function(e,t,n){n(201),n(10),n(68),n(206),Object.defineProperty(t,"__esModule",{value:!0});var r=n(43);t.TRACEPARENT_REGEXP=/^[ 
\t]*([0-9a-f]{32})?-?([0-9a-f]{16})?-?([01])?[ \t]*$/;var o=function(){function e(e,t,n,o){void 0===e&&(e=r.uuid4()),void 0===t&&(t=r.uuid4().substring(16)),this._traceId=e,this._spanId=t,this._sampled=n,this._parent=o}return e.prototype.setParent=function(e){return this._parent=e,this},e.prototype.setSampled=function(e){return this._sampled=e,this},e.fromTraceparent=function(n){var r=n.match(t.TRACEPARENT_REGEXP);if(r){var o=void 0;"1"===r[3]?o=!0:"0"===r[3]&&(o=!1);var i=new e(r[1],r[2],o);return new e(r[1],void 0,o,i)}},e.prototype.toTraceparent=function(){var e="";return!0===this._sampled?e="-1":!1===this._sampled&&(e="-0"),this._traceId+"-"+this._spanId+e},e.prototype.toJSON=function(){return{parent:this._parent&&this._parent.toJSON()||void 0,sampled:this._sampled,span_id:this._spanId,trace_id:this._traceId}},e}();t.Span=o},592:function(e,t,n){n(4),n(40),Object.defineProperty(t,"__esModule",{value:!0});var r=n(129),o=function(){function e(){}return e.prototype.sendEvent=function(e){return Promise.resolve({reason:"NoopTransport: Event has been skipped because no Dsn is configured.",status:r.Status.Skipped})},e.prototype.close=function(e){return Promise.resolve(!0)},e}();t.NoopTransport=o},593:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(3),o=n(99),i=n(43),a=n(1238),s=n(595),c=function(e){function t(t){return void 0===t&&(t={}),e.call(this,a.BrowserBackend,t)||this}return r.__extends(t,e),t.prototype._prepareEvent=function(t,n,o){return t.platform=t.platform||"javascript",t.sdk=r.__assign({},t.sdk,{name:s.SDK_NAME,packages:r.__spread(t.sdk&&t.sdk.packages||[],[{name:"npm:@sentry/browser",version:s.SDK_VERSION}]),version:s.SDK_VERSION}),e.prototype._prepareEvent.call(this,t,n,o)},t.prototype.showReportDialog=function(e){void 0===e&&(e={});var t=i.getGlobalObject().document;if(t)if(this._isEnabled()){var n=e.dsn||this.getDsn();if(e.eventId)if(n){var r=t.createElement("script");r.async=!0,r.src=new o.API(n).getReportDialogEndpoint(e),e.onLoad&&(r.onload=e.onLoad),(t.head||t.body).appendChild(r)}else i.logger.error("Missing `Dsn` option in showReportDialog call");else i.logger.error("Missing `eventId` option in showReportDialog call")}else i.logger.error("Trying to call showReportDialog with Sentry Client is disabled")},t}(o.BaseClient);t.BrowserClient=c},594:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(403);t.BaseTransport=r.BaseTransport;var o=n(1239);t.FetchTransport=o.FetchTransport;var i=n(1240);t.XHRTransport=i.XHRTransport},595:function(e,t){Object.defineProperty(t,"__esModule",{value:!0}),t.SDK_NAME="sentry.javascript.browser",t.SDK_VERSION="5.6.3"},596:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(1242);t.GlobalHandlers=r.GlobalHandlers;var o=n(1243);t.TryCatch=o.TryCatch;var i=n(1244);t.Breadcrumbs=i.Breadcrumbs;var a=n(1245);t.LinkedErrors=a.LinkedErrors;var s=n(1246);t.UserAgent=s.UserAgent},649:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var r=n(1247);t.Angular=r.Angular;var o=n(1248);t.CaptureConsole=o.CaptureConsole;var i=n(1249);t.Debug=i.Debug;var a=n(1250);t.Dedupe=a.Dedupe;var s=n(1251);t.Ember=s.Ember;var c=n(1252);t.ExtraErrorData=c.ExtraErrorData;var u=n(1253);t.ReportingObserver=u.ReportingObserver;var l=n(1254);t.RewriteFrames=l.RewriteFrames;var p=n(1255);t.SessionTiming=p.SessionTiming;var f=n(1256);t.Tracing=f.Tracing;var d=n(1257);t.Transaction=d.Transaction;var h=n(1258);t.Vue=h.Vue},99:function(e,t,n){Object.defineProperty(t,"__esModule",{value:!0});var 
r=n(1220);t.addBreadcrumb=r.addBreadcrumb,t.captureException=r.captureException,t.captureEvent=r.captureEvent,t.captureMessage=r.captureMessage,t.configureScope=r.configureScope,t.setContext=r.setContext,t.setExtra=r.setExtra,t.setExtras=r.setExtras,t.setTag=r.setTag,t.setTags=r.setTags,t.setUser=r.setUser,t.withScope=r.withScope;var o=n(224);t.addGlobalEventProcessor=o.addGlobalEventProcessor,t.getCurrentHub=o.getCurrentHub,t.getHubFromCarrier=o.getHubFromCarrier,t.Hub=o.Hub,t.Scope=o.Scope,t.Span=o.Span;var i=n(1230);t.API=i.API;var a=n(1231);t.BaseClient=a.BaseClient;var s=n(1233);t.BaseBackend=s.BaseBackend;var c=n(401);t.Dsn=c.Dsn;var u=n(1234);t.initAndBind=u.initAndBind;var l=n(592);t.NoopTransport=l.NoopTransport;var p=n(1235);t.Integrations=p}}]);
//# sourceMappingURL=sentry~checkout-213470ac.js.map
|
n
|
utils.py
|
import base64
import json
import os
import os.path
import shlex
import string
from datetime import datetime
from distutils.version import StrictVersion
from .. import errors
from .. import tls
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
from urllib.parse import splitnport, urlparse
def create_ipam_pool(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_pool has been removed. Please use a '
'docker.types.IPAMPool object instead.'
)
def create_ipam_config(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_config has been removed. Please use a '
'docker.types.IPAMConfig object instead.'
)
def decode_json_header(header):
data = base64.b64decode(header)
data = data.decode('utf-8')
return json.loads(data)
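# Illustrative usage (not part of the original module; values chosen for the example):
#   >>> decode_json_header(base64.b64encode(b'{"foo": "bar"}'))
#   {'foo': 'bar'}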
def compare_version(v1, v2):
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1
def version_lt(v1, v2):
return compare_version(v1, v2) > 0
def version_gte(v1, v2):
return not version_lt(v1, v2)
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], str):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding
if result['HostPort'] is None:
result['HostPort'] = ''
else:
result['HostPort'] = str(result['HostPort'])
return result
def convert_port_bindings(port_bindings):
result = {}
for k, v in iter(port_bindings.items()):
key = str(k)
if '/' not in key:
key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
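# Illustrative usage (example values only): each container port maps to a list of
# {'HostIp': ..., 'HostPort': ...} dicts, and keys without a protocol default to /tcp.
#   >>> convert_port_bindings({'8080/tcp': ('127.0.0.1', 80), 2222: None})
#   {'8080/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '80'}],
#    '2222/tcp': [{'HostIp': '', 'HostPort': ''}]}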
def convert_volume_binds(binds):
if isinstance(binds, list):
return binds
    result = []
for k, v in binds.items():
if isinstance(k, bytes):
k = k.decode('utf-8')
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
'Binding cannot contain both "ro" and "mode": {}'
.format(repr(v))
)
bind = v['bind']
if isinstance(bind, bytes):
bind = bind.decode('utf-8')
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
mode = v['mode']
else:
mode = 'rw'
result.append(
str('{0}:{1}:{2}').format(k, bind, mode)
)
else:
if isinstance(v, bytes):
v = v.decode('utf-8')
result.append(
str('{0}:{1}:rw').format(k, v)
)
return result
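# Illustrative usage (example values only): a dict of binds becomes the
# "host:container:mode" strings the Docker API expects.
#   >>> convert_volume_binds({'/home/user/data': {'bind': '/mnt/data', 'mode': 'ro'}})
#   ['/home/user/data:/mnt/data:ro']
#   >>> convert_volume_binds({'/tmp': '/tmp'})
#   ['/tmp:/tmp:rw']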
def convert_tmpfs_mounts(tmpfs):
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
'Expected tmpfs value to be either a list or a dict, found: {}'
.format(type(tmpfs).__name__)
)
result = {}
for mount in tmpfs:
if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
"Expected item in tmpfs list to be a string, found: {}"
.format(type(mount).__name__)
)
result[name] = options
return result
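# Illustrative usage (example values only): list entries of the form
# "path[:options]" are converted to a {path: options} dict.
#   >>> convert_tmpfs_mounts(['/run:size=64m', '/tmp'])
#   {'/run': 'size=64m', '/tmp': ''}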
def convert_service_networks(networks):
if not networks:
return networks
if not isinstance(networks, list):
raise TypeError('networks parameter must be a list.')
result = []
for n in networks:
if isinstance(n, str):
n = {'Target': n}
result.append(n)
return result
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
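# Illustrative usage (example values only): the tag or digest is split off,
# while a registry port (a colon before a slash) is left untouched.
#   >>> parse_repository_tag('ubuntu:18.04')
#   ('ubuntu', '18.04')
#   >>> parse_repository_tag('localhost:5000/ubuntu')
#   ('localhost:5000/ubuntu', None)
#   >>> parse_repository_tag('ubuntu@sha256:abc123')
#   ('ubuntu', 'sha256:abc123')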
def parse_host(addr, is_win32=False, tls=False):
path = ''
port = None
host = None
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
parsed_url = urlparse(addr)
proto = parsed_url.scheme
if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
# https://bugs.python.org/issue754016
parsed_url = urlparse('//' + addr, 'tcp')
proto = 'tcp'
if proto == 'fd':
raise errors.DockerException('fd protocol is not implemented')
# These protos are valid aliases for our library but not for the
# official spec
if proto == 'http' or proto == 'https':
tls = proto == 'https'
proto = 'tcp'
elif proto == 'http+unix':
proto = 'unix'
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
"Invalid bind address protocol: {}".format(addr)
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
)
if any([
parsed_url.params, parsed_url.query, parsed_url.fragment,
parsed_url.password
]):
raise errors.DockerException(
'Invalid bind address format: {}'.format(addr)
)
if parsed_url.path and proto == 'ssh':
raise errors.DockerException(
'Invalid bind address format: no path allowed for this protocol:'
' {}'.format(addr)
)
else:
path = parsed_url.path
if proto == 'unix' and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = '/'.join((parsed_url.hostname, path))
if proto in ('tcp', 'ssh'):
# parsed_url.hostname strips brackets from IPv6 addresses,
# which can be problematic hence our use of splitnport() instead.
host, port = splitnport(parsed_url.netloc)
if port is None or port < 0:
if proto != 'ssh':
raise errors.DockerException(
'Invalid bind address format: port is required:'
' {}'.format(addr)
)
port = 22
if not host:
host = DEFAULT_HTTP_HOST
# Rewrite schemes to fit library internals (requests adapters)
if proto == 'tcp':
proto = 'http{}'.format('s' if tls else '')
elif proto == 'unix':
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
return "{}://{}".format(proto, path).rstrip('/')
return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')
def parse_devices(devices):
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, str):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
)
device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = 'rwm'
device_list.append({
'PathOnHost': path_on_host,
'PathInContainer': path_in_container,
'CgroupPermissions': permissions
})
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment:
environment = os.environ
host = environment.get('DOCKER_HOST')
# empty string for cert path is the same as unset.
cert_path = environment.get('DOCKER_CERT_PATH') or None
# empty string for tls verify counts as "false".
# Any other value counts as "true"; unset counts as "false".
tls_verify = environment.get('DOCKER_TLS_VERIFY')
if tls_verify == '':
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = host
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
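# A hedged usage sketch (added for illustration, not part of the original module): it shows how
# the DOCKER_* environment variables handled above map onto client kwargs. The host and cert
# path values are hypothetical.
def _example_kwargs_from_env():
    env = {
        'DOCKER_HOST': 'tcp://192.168.99.100:2376',
        'DOCKER_TLS_VERIFY': '1',
        'DOCKER_CERT_PATH': '/home/user/.docker/machine',
    }
    params = kwargs_from_env(environment=env)
    # params['base_url'] -> 'tcp://192.168.99.100:2376'
    # params['tls'] -> tls.TLSConfig built from cert.pem/key.pem/ca.pem under DOCKER_CERT_PATH
    return params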
def convert_filters(filters):
result = {}
for k, v in iter(filters.items()):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
str(item) if not isinstance(item, str) else item
for item in v
]
return json.dumps(result)
def datetime_to_timestamp(dt):
"""Convert a UTC datetime to a Unix timestamp"""
delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s):
if isinstance(s, (int, float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = float(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
def normalize_links(links):
if isinstance(links, dict):
links = iter(links.items())
return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, 'r') as f:
for line in f:
if line[0] == '#':
continue
line = line.strip()
if not line:
continue
parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment
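# A hedged usage sketch (illustration only): demonstrates the "key=value" line format described
# in the docstring above. The file contents and variable names are hypothetical.
def _example_parse_env_file():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.env', delete=False) as f:
        f.write('# comment lines are skipped\n')
        f.write('DB_HOST=localhost\n')
        f.write('DB_PORT=5432\n')
        path = f.name
    return parse_env_file(path)  # -> {'DB_HOST': 'localhost', 'DB_PORT': '5432'}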
def split_command(command):
return shlex.split(command)
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, bytes):
value = value.decode('utf-8')
return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in iter(environment.items())]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
'{} {}'.format(v, k) for k, v in sorted(iter(extra_hosts.items()))
]
return [
'{}:{}'.format(k, v) for k, v in sorted(iter(extra_hosts.items()))
]
def create_host_config(self, *args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_host_config has been removed. Please use a '
'docker.types.HostConfig object instead.'
)
|
result = []
|
delete_post.rs
|
extern crate diesel_demo;
extern crate diesel;
use self::diesel::prelude::*;
use self::diesel_demo::*;
use std::env::args;
fn main()
|
{
use diesel_demo::schema::posts::dsl::*;
let target = args().nth(1).expect("Need a title");
let pattern = format!("%{}%", target);
let connection = establish_connection();
let num_deleted = diesel::delete(posts.filter(title.like(pattern)))
.execute(&connection)
.expect("Error deleting");
println!("Deleted {} posts", num_deleted);
}
|
|
constants.rs
|
///////////////////////////////////////////////////////////////////////////////
//
// Copyright 2018-2021 Robonomics Network <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
//! A set of constant values used in substrate runtime.
/// Money matters.
pub mod currency {
#[cfg(feature = "std")]
use hex_literal::hex;
#[cfg(feature = "std")]
use node_primitives::AccountId;
use node_primitives::Balance;
pub const COASE: Balance = 1_000;
pub const GLUSHKOV: Balance = 1_000 * COASE;
pub const XRT: Balance = 1_000 * GLUSHKOV;
pub const fn deposit(items: u32, bytes: u32) -> Balance
|
#[cfg(feature = "std")]
lazy_static::lazy_static! {
pub static ref STAKE_HOLDERS: Vec<(AccountId, Balance)> = sp_std::vec![
(AccountId::from(hex!["5c63763273b539fa6ed09b6b9844553922f7c5eb30195062b139b057ac861568"]), 1000 * XRT),
(AccountId::from(hex!["caafae0aaa6333fcf4dc193146945fe8e4da74aa6c16d481eef0ca35b8279d73"]), 5000 * XRT),
(AccountId::from(hex!["9c322cfa42b80ffb1fa0a096ffbbe08ff44423ea7e6626183ba14bfb20c98c53"]), 5305599999),
(AccountId::from(hex!["1a84dfd9e4e30b0d48c4110bf7c509d5f27a68d4fade696dff3274e0afa09062"]), 1 * XRT),
(AccountId::from(hex!["8e5cda83432e069937b7e032ed8f88280a020aba933ee928eb936ab265f4c364"]), 10_000 * XRT),
];
}
}
/// Time.
pub mod time {
use node_primitives::{BlockNumber, Moment};
pub const MILLISECS_PER_BLOCK: Moment = 12000;
pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000;
pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
}
// CRITICAL NOTE: The system module maintains two constants: a _maximum_ block weight and a
// _ratio_ of it yielding the portion which is accessible to normal transactions (reserving the rest
// for operational ones). `TARGET_BLOCK_FULLNESS` is entirely independent and the system module is
// not aware of it, nor should it care about it. This constant simply denotes the ratio of the
// _maximum_ block weight at which we tweak the fees. It does NOT care about the type of the dispatch.
//
// For the system to be configured in a sane way, `TARGET_BLOCK_FULLNESS` should always be less than
// the ratio that `system` module uses to find normal transaction quota.
/// Fee-related.
pub mod fee {
pub use sp_runtime::Perbill;
/// The block saturation level. Fees will be updated based on this value.
pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25);
}
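// Illustrative sketch only (not part of this runtime): a toy multiplier update showing the role
// `TARGET_BLOCK_FULLNESS` plays. The actual adjustment is performed by the transaction-payment
// configuration elsewhere in the runtime; the step factors below are hypothetical.
//
//     fn toy_fee_multiplier_step(current: f64, block_fullness: f64) -> f64 {
//         let target = 0.25; // TARGET_BLOCK_FULLNESS = 25%
//         if block_fullness > target {
//             current * 1.001 // fuller-than-target blocks nudge fees up
//         } else {
//             current * 0.999 // emptier blocks nudge fees down
//         }
//     }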
|
{
items as Balance * 150 * GLUSHKOV / 100 + (bytes as Balance) * 60 * GLUSHKOV
}
|
model.py
|
"""
Vanilla DenseNet implementation
Paper: https://arxiv.org/abs/1608.06993
Implementation taken from: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py
"""
import re
from collections import OrderedDict
from functools import partial
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch import Tensor
class _DenseLayer(nn.Module):
def __init__(
self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False
) -> None:
super().__init__()
self.norm1: nn.BatchNorm2d
self.add_module("norm1", nn.BatchNorm2d(num_input_features))
self.relu1: nn.ReLU
self.add_module("relu1", nn.ReLU(inplace=True))
self.conv1: nn.Conv2d
self.add_module(
"conv1", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)
)
self.norm2: nn.BatchNorm2d
self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate))
self.relu2: nn.ReLU
self.add_module("relu2", nn.ReLU(inplace=True))
self.conv2: nn.Conv2d
self.add_module(
"conv2", nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)
)
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
def bn_function(self, inputs: List[Tensor]) -> Tensor:
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, input: List[Tensor]) -> bool:
for tensor in input:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
def closure(*inputs):
return self.bn_function(inputs)
return cp.checkpoint(closure, *input)
@torch.jit._overload_method # noqa: F811
def forward(self, input: List[Tensor]) -> Tensor: # noqa: F811
pass
@torch.jit._overload_method # noqa: F811
def forward(self, input: Tensor) -> Tensor: # noqa: F811
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, input: Tensor) -> Tensor: # noqa: F811
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers: int,
num_input_features: int,
bn_size: int,
growth_rate: int,
drop_rate: float,
memory_efficient: bool = False,
) -> None:
super().__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module("denselayer%d" % (i + 1), layer)
def forward(self, init_features: Tensor) -> Tensor:
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features: int, num_output_features: int) -> None:
|
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
"""
def __init__(
self,
growth_rate: int = 32,
block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
num_init_features: int = 64,
bn_size: int = 4,
drop_rate: float = 0,
num_classes: int = 1000,
memory_efficient: bool = False,
) -> None:
super().__init__()
# First convolution
self.features = nn.Sequential(
OrderedDict(
[
("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
("norm0", nn.BatchNorm2d(num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.features.add_module("denseblock%d" % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module("transition%d" % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor) -> Tensor:
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
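# A hedged usage sketch (illustration only): constructing the model with the arguments documented
# in the class docstring. The defaults above (growth_rate=32, block_config=(6, 12, 24, 16),
# num_init_features=64) correspond to DenseNet-121; the input tensor shape is a hypothetical example.
def _example_densenet_forward() -> Tensor:
    model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, num_classes=1000)
    x = torch.randn(1, 3, 224, 224)  # one RGB image, 224x224
    return model(x)  # logits of shape (1, 1000)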
|
super().__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_features))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module("conv", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
|
circuitinfo.rs
|
use dialoguer::{theme::ColorfulTheme, MultiSelect};
use humansize::{file_size_opts, FileSize};
use log::{info, warn};
use structopt::StructOpt;
use bellperson::util_cs::bench_cs::BenchCS;
use bellperson::Circuit;
use filecoin_proofs::constants::*;
use filecoin_proofs::parameters::{
public_params, window_post_public_params, winning_post_public_params,
};
use filecoin_proofs::types::*;
use filecoin_proofs::with_shape;
use filecoin_proofs::PoStType;
use paired::bls12_381::Bls12;
use storage_proofs::compound_proof::CompoundProof;
use storage_proofs::porep::stacked::{StackedCompound, StackedDrg};
use storage_proofs::post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound};
struct CircuitInfo {
constraints: usize,
inputs: usize,
}
fn circuit_info<C: Circuit<Bls12>>(circuit: C) -> CircuitInfo {
let mut cs_blank = BenchCS::new();
circuit
.synthesize(&mut cs_blank)
.expect("failed to synthesize");
CircuitInfo {
constraints: cs_blank.num_constraints(),
inputs: cs_blank.num_inputs(),
}
}
fn get_porep_info<Tree: 'static + MerkleTreeTrait>(porep_config: PoRepConfig) -> CircuitInfo {
info!("PoRep info");
let public_params = public_params(
PaddedBytesAmount::from(porep_config),
usize::from(PoRepProofPartitions::from(porep_config)),
porep_config.porep_id,
)
.expect("failed to get public params from config");
let circuit = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<
StackedDrg<Tree, DefaultPieceHasher>,
_,
>>::blank_circuit(&public_params);
circuit_info(circuit)
}
fn get_winning_post_info<Tree: 'static + MerkleTreeTrait>(post_config: &PoStConfig) -> CircuitInfo {
info!("Winning PoSt info");
let post_public_params = winning_post_public_params::<Tree>(post_config)
.expect("failed to get public params from config");
let circuit: FallbackPoStCircuit<Tree> = <FallbackPoStCompound<Tree> as CompoundProof<
FallbackPoSt<Tree>,
FallbackPoStCircuit<Tree>,
>>::blank_circuit(&post_public_params);
circuit_info(circuit)
}
fn get_window_post_info<Tree: 'static + MerkleTreeTrait>(post_config: &PoStConfig) -> CircuitInfo {
info!("Window PoSt info");
let post_public_params = window_post_public_params::<Tree>(post_config)
.expect("failed to get public params from config");
let circuit: FallbackPoStCircuit<Tree> = <FallbackPoStCompound<Tree> as CompoundProof<
FallbackPoSt<Tree>,
FallbackPoStCircuit<Tree>,
>>::blank_circuit(&post_public_params);
circuit_info(circuit)
}
#[derive(Debug, StructOpt)]
#[structopt(name = "paramcache")]
struct Opt {
#[structopt(long)]
winning: bool,
#[structopt(long)]
window: bool,
#[structopt(long)]
porep: bool,
#[structopt(short = "z", long, use_delimiter = true)]
constraints_for_sector_sizes: Vec<u64>,
}
fn winning_post_info(sector_size: u64) -> CircuitInfo {
with_shape!(
sector_size,
get_winning_post_info,
&PoStConfig {
sector_size: SectorSize(sector_size),
challenge_count: WINNING_POST_CHALLENGE_COUNT,
sector_count: WINNING_POST_SECTOR_COUNT,
typ: PoStType::Winning,
priority: true,
}
)
}
fn window_post_info(sector_size: u64) -> CircuitInfo {
with_shape!(
sector_size,
|
get_window_post_info,
&PoStConfig {
sector_size: SectorSize(sector_size),
challenge_count: WINDOW_POST_CHALLENGE_COUNT,
sector_count: *WINDOW_POST_SECTOR_COUNT
.read()
.expect("WINDOW_POST_SECTOR_COUNT poisoned")
.get(&sector_size)
.expect("unknown sector size"),
typ: PoStType::Window,
priority: true,
}
)
}
fn porep_info(sector_size: u64) -> (CircuitInfo, usize) {
let partitions = PoRepProofPartitions(
*POREP_PARTITIONS
.read()
.expect("POREP_PARTITIONS poisoned")
.get(&sector_size)
.expect("unknown sector size"),
);
let info = with_shape!(
sector_size,
get_porep_info,
PoRepConfig {
sector_size: SectorSize(sector_size),
partitions,
porep_id: [0; 32],
}
);
(info, partitions.into())
}
// Run this from the command-line to get info about circuits.
pub fn main() {
// The logger is used so that every message from this tool also ends up in the logs.
// The information is additionally printed to stdout, so that users who haven't set the
// `RUST_LOG` environment variable still see warnings/progress.
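// Hedged usage sketch (illustration only): the flags mirror the `Opt` struct above; the binary
// name and the sector sizes are hypothetical examples.
//
//     circuitinfo --winning --window --porep -z 2048,34359738368
//
// When no `-z` sizes are given, the interactive size menu below is shown instead.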
fil_logger::init();
let opts = Opt::from_args();
// Display interactive menu if no sizes are given
let sizes: Vec<u64> = if opts.constraints_for_sector_sizes.is_empty() {
let sector_sizes = PUBLISHED_SECTOR_SIZES
.iter()
.map(|sector_size| {
// Right aligning the numbers makes them easier to read
format!(
"{: >7}",
sector_size
.file_size(file_size_opts::BINARY)
.expect("failed to format sector size"),
)
})
.collect::<Vec<_>>();
let selected_sector_sizes = MultiSelect::with_theme(&ColorfulTheme::default())
.with_prompt("Select the sizes for which constraints should be counted [use space key to select]")
.items(&sector_sizes[..])
.interact()
.expect("interaction failed");
// Extract the selected sizes
PUBLISHED_SECTOR_SIZES
.iter()
.enumerate()
.filter_map(|(index, size)| {
if selected_sector_sizes.contains(&index) {
Some(*size)
} else {
None
}
})
.collect()
} else {
opts.constraints_for_sector_sizes
.into_iter()
.filter(|size| {
if PUBLISHED_SECTOR_SIZES.contains(size) {
return true;
}
warn!("ignoring invalid sector size: {}", size);
println!("ignoring invalid sector size: {}", size);
false
})
.collect()
};
if sizes.is_empty() {
info!("No valid sector sizes given. Abort.");
println!("No valid sector sizes given. Abort.");
}
let count_winning = opts.winning;
let count_window = opts.window;
let count_porep = opts.porep;
for sector_size in sizes {
let human_size = sector_size
.file_size(file_size_opts::BINARY)
.expect("failed to format sector size");
println!("Getting circuit info for sector size: {}", human_size);
if count_winning {
let info = winning_post_info(sector_size);
println!(
"{} Winning PoSt constraints: {}, public inputs: {}, partitions: 1",
human_size, info.constraints, info.inputs
);
}
if count_window {
let info = window_post_info(sector_size);
println!(
"{} Window PoSt constraints (per partition): {}, public inputs (per partition): {}, partitions: <depends on input size>",
human_size, info.constraints, info.inputs
);
}
if count_porep {
let (info, partitions) = porep_info(sector_size);
println!(
"{} PoRep constraints: {}, public inputs: {}, partitions: {}",
human_size, info.constraints, info.inputs, partitions
);
}
}
}
| |
public_import_response.py
|
# coding: utf-8
"""
CRM Imports
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.imports.configuration import Configuration
class PublicImportResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'state': 'str',
'import_request_json': 'object',
'created_at': 'datetime',
'metadata': 'PublicImportMetadata',
'import_name': 'str',
'updated_at': 'datetime',
'opt_out_import': 'bool',
'id': 'str'
}
attribute_map = {
'state': 'state',
'import_request_json': 'importRequestJson',
'created_at': 'createdAt',
'metadata': 'metadata',
'import_name': 'importName',
'updated_at': 'updatedAt',
'opt_out_import': 'optOutImport',
'id': 'id'
}
def __init__(self, state=None, import_request_json=None, created_at=None, metadata=None, import_name=None, updated_at=None, opt_out_import=None, id=None, local_vars_configuration=None): # noqa: E501
|
@property
def state(self):
"""Gets the state of this PublicImportResponse. # noqa: E501
The status of the import. # noqa: E501
:return: The state of this PublicImportResponse. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this PublicImportResponse.
The status of the import. # noqa: E501
:param state: The state of this PublicImportResponse. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and state is None: # noqa: E501
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
allowed_values = ["STARTED", "PROCESSING", "DONE", "FAILED", "CANCELED", "DEFERRED"] # noqa: E501
if self.local_vars_configuration.client_side_validation and state not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}" # noqa: E501
.format(state, allowed_values)
)
self._state = state
@property
def import_request_json(self):
"""Gets the import_request_json of this PublicImportResponse. # noqa: E501
:return: The import_request_json of this PublicImportResponse. # noqa: E501
:rtype: object
"""
return self._import_request_json
@import_request_json.setter
def import_request_json(self, import_request_json):
"""Sets the import_request_json of this PublicImportResponse.
:param import_request_json: The import_request_json of this PublicImportResponse. # noqa: E501
:type: object
"""
self._import_request_json = import_request_json
@property
def created_at(self):
"""Gets the created_at of this PublicImportResponse. # noqa: E501
:return: The created_at of this PublicImportResponse. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this PublicImportResponse.
:param created_at: The created_at of this PublicImportResponse. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created_at is None: # noqa: E501
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def metadata(self):
"""Gets the metadata of this PublicImportResponse. # noqa: E501
:return: The metadata of this PublicImportResponse. # noqa: E501
:rtype: PublicImportMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this PublicImportResponse.
:param metadata: The metadata of this PublicImportResponse. # noqa: E501
:type: PublicImportMetadata
"""
if self.local_vars_configuration.client_side_validation and metadata is None: # noqa: E501
raise ValueError("Invalid value for `metadata`, must not be `None`") # noqa: E501
self._metadata = metadata
@property
def import_name(self):
"""Gets the import_name of this PublicImportResponse. # noqa: E501
:return: The import_name of this PublicImportResponse. # noqa: E501
:rtype: str
"""
return self._import_name
@import_name.setter
def import_name(self, import_name):
"""Sets the import_name of this PublicImportResponse.
:param import_name: The import_name of this PublicImportResponse. # noqa: E501
:type: str
"""
self._import_name = import_name
@property
def updated_at(self):
"""Gets the updated_at of this PublicImportResponse. # noqa: E501
:return: The updated_at of this PublicImportResponse. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this PublicImportResponse.
:param updated_at: The updated_at of this PublicImportResponse. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
@property
def opt_out_import(self):
"""Gets the opt_out_import of this PublicImportResponse. # noqa: E501
Whether or not the import is a list of people disqualified from receiving emails. # noqa: E501
:return: The opt_out_import of this PublicImportResponse. # noqa: E501
:rtype: bool
"""
return self._opt_out_import
@opt_out_import.setter
def opt_out_import(self, opt_out_import):
"""Sets the opt_out_import of this PublicImportResponse.
Whether or not the import is a list of people disqualified from receiving emails. # noqa: E501
:param opt_out_import: The opt_out_import of this PublicImportResponse. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and opt_out_import is None: # noqa: E501
raise ValueError("Invalid value for `opt_out_import`, must not be `None`") # noqa: E501
self._opt_out_import = opt_out_import
@property
def id(self):
"""Gets the id of this PublicImportResponse. # noqa: E501
:return: The id of this PublicImportResponse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PublicImportResponse.
:param id: The id of this PublicImportResponse. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PublicImportResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PublicImportResponse):
return True
return self.to_dict() != other.to_dict()
|
"""PublicImportResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._state = None
self._import_request_json = None
self._created_at = None
self._metadata = None
self._import_name = None
self._updated_at = None
self._opt_out_import = None
self._id = None
self.discriminator = None
self.state = state
if import_request_json is not None:
self.import_request_json = import_request_json
self.created_at = created_at
self.metadata = metadata
if import_name is not None:
self.import_name = import_name
self.updated_at = updated_at
self.opt_out_import = opt_out_import
self.id = id
|
upload.ts
|
import { Network } from '.';
import { UploadInvalidMnemonicError } from './errors';
import { Crypto, EncryptFileFunction, UploadFileFunction } from './types';
export async function
|
(
network: Network,
crypto: Crypto,
bucketId: string,
mnemonic: string,
fileSize: number,
encryptFile: EncryptFileFunction,
uploadFile: UploadFileFunction
): Promise<string> {
const mnemonicIsValid = crypto.validateMnemonic(mnemonic);
if (!mnemonicIsValid) {
throw new UploadInvalidMnemonicError();
}
const index = crypto.randomBytes(crypto.algorithm.ivSize);
const iv = index.slice(0, 16);
const key = await crypto.generateFileKey(mnemonic, bucketId, index);
const { uploads } = await network.startUpload(bucketId, {
uploads: [{
index: 0,
size: fileSize
}]
});
const [{ url, uuid }] = uploads;
await encryptFile(crypto.algorithm.type, key, iv);
const hash = await uploadFile(url);
const finishUploadPayload = {
index: index.toString('hex'),
shards: [{ hash, uuid }]
};
const finishUploadResponse = await network.finishUpload(bucketId, finishUploadPayload);
return finishUploadResponse.id;
}
|
uploadFile
|
embedding.py
|
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas
import matplotlib.cm as cm
import umap
import tqdm
import scanpy as sc
import matplotlib.gridspec as gridspec
import networkx as nx
import numpy
import operator
import random
import pickle
import collections
import sys
import os
class GeneEmbedding(object):
def __init__(self, embedding_file, context):
self.vector = []
self.context = context
self.embedding_file = embedding_file
self.embeddings = self.read_embedding(self.embedding_file)
self.vector = []
self.genes = []
for gene in tqdm.tqdm(self.context.expressed_genes):
if gene in self.embeddings:
self.vector.append(self.embeddings[gene])
self.genes.append(gene)
def read_embedding(self, filename):
embedding = dict()
lines = open(filename,"r").read().splitlines()[1:]
for line in lines:
vector = line.split()
gene = vector.pop(0)
embedding[gene] = [float(x) for x in vector]
return embedding
def compute_similarities(self, gene, subset=None):
print("hit")
if gene not in self.embeddings:
return None
embedding = self.embeddings[gene]
distances = dict()
if subset:
targets = set(list(self.embeddings.keys())).intersection(set(subset))
else:
targets = list(self.embeddings.keys())
for target in targets:
if target not in self.embeddings:
continue
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(embedding).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
genes = [x[0] for x in sorted_distances]
distance = [x[1] for x in sorted_distances]
df = pandas.DataFrame.from_dict({"Gene":genes, "Similarity":distance})
return df
def cluster(self, n=12):
kmeans = KMeans(n_clusters=n)
kmeans.fit(self.vector)
clusters = kmeans.labels_
clusters = zip(self.context.expressed_genes, clusters)
_clusters = []
for gene, cluster in clusters:
_clusters.append("G"+str(cluster))
return _clusters
def clusters(self, clusters):
average_vector = dict()
gene_to_cluster = collections.defaultdict(list)
matrix = collections.defaultdict(list)
total_average_vector = []
for gene, cluster in zip(self.context.expressed_genes, clusters):
if gene in self.embeddings:
matrix[cluster].append(self.embeddings[gene])
gene_to_cluster[cluster].append(gene)
total_average_vector.append(self.embeddings[gene])
self.total_average_vector = list(numpy.average(total_average_vector, axis=0))
for cluster, vectors in matrix.items():
xvec = list(numpy.average(vectors, axis=0))
average_vector[cluster] = numpy.subtract(xvec,self.total_average_vector)
return average_vector, gene_to_cluster
def generate_vector(self, genes):
vector = []
for gene, vec in zip(self.genes, self.vector):
if gene in genes:
vector.append(vec)
return list(numpy.median(vector, axis=0))
def cluster_definitions(self, clusters):
average_vector, gene_to_cluster = self.clusters(clusters)
similarities = collections.defaultdict(dict)
for cluster, vector in average_vector.items():
distances = dict()
for target in gene_to_cluster[cluster]:
v = self.embeddings[target]
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(v).reshape(1, -1))[0])
distances[target] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
similarities[cluster] = [x[0] for x in sorted_distances if x[0]]
return similarities
def cluster_definitions_as_df(self, similarities, top_n=20):
clusters = []
symbols = []
for key, genes in similarities.items():
clusters.append(key)
symbols.append(", ".join(genes[:top_n]))
df = pandas.DataFrame.from_dict({"Cluster Name":clusters, "Top Genes":symbols})
return df
def plot(self, clusters, png=None, method="TSNE", labels=[], pcs=None, remove=[]):
plt.figure(figsize = (8, 8))
ax = plt.subplot(1,1,1)
pcs = self.plot_reduction(clusters, ax, labels=labels, method=method, pcs=pcs, remove=remove)
if png:
plt.savefig(png)
plt.close()
else:
plt.show()
return pcs
def plot_reduction(self, clusters, ax, method="TSNE", labels=[], pcs=None, remove=[]):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.vector)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.vector)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
if len(remove) != 0:
_pcsx = []
_pcsy = []
_clusters = []
for x, y, c in zip(pcs[0],pcs[1],clusters):
if c not in remove:
_pcsx.append(x)
_pcsy.append(y)
_clusters.append(c)
pcs = []
pcs.append(_pcsx)
pcs.append(_pcsy)
clusters = _clusters
data = {"x":pcs[0],"y":pcs[1], "Cluster":clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y",hue="Cluster", ax=ax)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax.set_xticks([])
ax.set_yticks([])
if len(labels):
for x, y, gene in zip(pcs[0], pcs[1], self.context.expressed_genes):
if gene in labels:
ax.text(x+.02, y, str(gene), fontsize=8)
return pcs
def subtract_vector(self, vector):
for gene, vec in self.embeddings.items():
vec = numpy.subtract(vec, vector)
self.embeddings[gene] = vec
@staticmethod
def relabel_cluster(similarities, clusters, old_label, new_label):
genes = similarities[old_label]
del similarities[old_label]
similarities[new_label] = genes
_clusters = []
for cluster in clusters:
if cluster == old_label:
_clusters.append(new_label)
else:
_clusters.append(cluster)
return similarities, _clusters
def plot_similarity_matrix(self, markers, marker_labels=None, png=None):
cmap = matplotlib.cm.tab20
if marker_labels:
marker_colors = {}
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
for key, value in marker_labels.items():
marker_colors[key] = cmap(ctypes.index(value))
colors = pandas.DataFrame(markers)[0].map(marker_colors)
similarity_matrix = []
print("Running")
markers = set(list(self.embeddings.keys())).intersection(set(markers))
markers = list(markers)
for marker in markers:
print(marker)
row = []
res = self.compute_similarities(marker, subset=markers)
resdict = dict(zip(res["Gene"],res["Similarity"]))
for gene in markers:
row.append(resdict[gene])
similarity_matrix.append(row)
plt.figure(figsize = (12, 10))
matrix = numpy.array(similarity_matrix)
df = pandas.DataFrame(matrix,index=markers,columns=markers)
sns.clustermap(df,figsize=(12,8), dendrogram_ratio=0.1)
plt.tight_layout()
if png:
plt.savefig("marker_similarity.png")
else:
plt.show()
def plot_similarity_network(self, markers, marker_labels=None, png=None):
cmap = matplotlib.cm.tab20
G = nx.petersen_graph()
node_color = []
node_order = []
node_size = []
edge_order = []
edge_color = []
edge_labels = dict()
for marker in markers:
node_order.append(marker)
if marker_labels:
ctypes = []
for value in marker_labels.values():
ctypes.append(value)
ctypes = list(set(ctypes))
node_color.append(ctypes.index(marker_labels[marker]))
node_size.append(400)
G.add_node(marker)
for marker in markers:
res = self.compute_similarities(marker)
resdict = dict(zip(res["Gene"],res["Similarity"]))
i = 0
for gene, similarity in resdict.items():
if i > 9: break
if gene != marker:
if gene not in G.nodes():
node_size.append(0)
G.add_node(gene)
node_order.append(gene)
node_color.append(len(set(marker_labels.values())))
print(marker, gene)
G.add_edge(marker, gene, weight=similarity)
edge_color.append(similarity)
edge_order.append((marker,gene))
edge_labels[(marker,gene)] = str(round(similarity,2))
i += 1
# print(node_color)
# c = max(nx.connected_components(G), key=len)
# G = G.subgraph(c).copy()
for i in range(10):
G.remove_node(i)
print(G.nodes())
print(G.edges())
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(1,1,1)
#pos = nx.nx_agraph.graphviz_layout(G, prog="neato",args="-Goverlap=scale")
pos = nx.nx_agraph.graphviz_layout(G, prog="neato",args="-Goverlap=scale -Elen=5 -Eweight=0.2")
#pos = nx.spring_layout(G)
nx.draw(G,pos,ax=ax, cmap=cmap,nodelist=node_order, node_size=node_size,edgelist=edge_order, node_color=node_color, edge_color=edge_color, edge_vmin=0, edge_vmax=1.0, edge_cmap=plt.cm.Greys, with_labels=True, width=1,font_size=7)
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels, font_size=6)
plt.axis('off')
plt.tight_layout()
if png:
|
else:
plt.show()
class CellEmbedding(object):
def __init__(self, context, embed):
cell_to_gene = list(context.cell_to_gene.items())
self.context = context
self.embed = embed
self.expression = context.expression
self.data = collections.defaultdict(list)
self.weights = collections.defaultdict(list)
for cell, genes in tqdm.tqdm(cell_to_gene):
if len(genes) < 2: continue
if cell in self.expression:
cell_weights = self.expression[cell]
for gene in set(genes).intersection(set(embed.embeddings.keys())):
if gene in cell_weights:
weight = self.expression[cell][gene]
if weight > 0:
self.data[cell].append(embed.embeddings[gene])
self.weights[cell].append(weight)
self.matrix = []
dataset_vector = []
for cell, vectors in self.data.items():
weights = self.weights[cell]
xvec = list(numpy.average(vectors, axis=0, weights=weights))
self.matrix.append(xvec)
dataset_vector += vectors
self.dataset_vector = numpy.average(dataset_vector, axis=0)
_matrix = []
for vec in self.matrix:
_matrix.append(numpy.subtract(vec, self.dataset_vector))
self.matrix = _matrix
def batch_correct(self, column=None, clusters=None):
if not column or not clusters:
raise ValueError("Must supply batch column and clusters!")
column_labels = dict(zip(self.context.cells,self.context.metadata[column]))
labels = []
for key in self.data.keys():
labels.append(column_labels[key])
local_correction = collections.defaultdict(lambda : collections.defaultdict(list))
correction_vectors = collections.defaultdict(dict)
for cluster, batch, vec in zip(clusters, labels, self.matrix):
local_correction[cluster][batch].append(vec)
for cluster, batches in local_correction.items():
cluster_vec = []
batch_keys = list(batches.keys())
base_batch = batch_keys.pop(0)
max_distance = 1.0
cluster_vec = numpy.average(batches[base_batch], axis=0)
for batch in batch_keys:
bvec = list(numpy.average(batches[batch], axis=0))
distance = float(cosine_similarity(numpy.array(bvec).reshape(1, -1),numpy.array(cluster_vec).reshape(1, -1))[0])
if max_distance > distance:
max_distance = distance
offset = numpy.subtract(cluster_vec,bvec)
bvec = numpy.add(bvec,offset)
distance = float(cosine_similarity(numpy.array(bvec).reshape(1, -1),numpy.array(cluster_vec).reshape(1, -1))[0])
correction_vectors[cluster][batch] = offset
self.matrix = []
self.sample_vector = collections.defaultdict(list)
i = 0
self.cell_order = []
for cell, vectors in self.data.items():
cluster = clusters[i]
xvec = list(numpy.average(vectors, axis=0))
batch = column_labels[cell]
if cluster in correction_vectors and batch in correction_vectors[cluster]:
offset = correction_vectors[cluster][batch]
xvec = numpy.add(xvec,offset)
self.matrix.append(xvec)
self.cell_order.append(cell)
i += 1
def cluster(self, k=12):
kmeans = KMeans(n_clusters=k)
kmeans.fit(self.matrix)
clusters = kmeans.labels_
_clusters = []
for cluster in clusters:
_clusters.append("C"+str(cluster))
self.clusters = _clusters
return _clusters
def subtract_vector(self, vector):
corrected_matrix = []
for cell_vector in self.matrix:
corrected_matrix.append(numpy.subtract(cell_vector, vector))
self.matrix = corrected_matrix
def compute_gene_similarities(self):
gene_similarities = dict()
vectors = collections.defaultdict(list)
for vec, label in zip(self.matrix, self.clusters):
vectors[label].append(vec)
for label, vecs in vectors.items():
distances = dict()
cell_vector = list(numpy.mean(vecs, axis=0))
for gene, vector in self.embed.embeddings.items():
distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
distances[gene] = distance
sorted_distances = list(reversed(sorted(distances.items(), key=operator.itemgetter(1))))
gene_similarities[label] = [x[0] for x in sorted_distances]
print(label, sorted_distances[:10])
return gene_similarities
def group_cell_vectors(self, barcode_to_label):
label_vector = dict()
labels = []
for cell, vectors in self.data.items():
vector = list(numpy.median(vectors, axis=0))
labels.append(barcode_to_label[cell])
label_vector[barcode_to_label[cell]] = vector
for cell, vectors in self.data.items():
_vectors = []
for vector in vectors:
_vectors.append(numpy.subtract(vector, label_vector[barcode_to_label[cell]]))
vectors = _vectors
vector = list(numpy.median(vectors, axis=0))
label_vector[barcode_to_label[cell]] = vector
return label_vector, labels
def compute_cell_similarities(self, barcode_to_label):
vectors = dict()
cell_similarities = dict()
vectors, labels = self.group_cell_vectors(barcode_to_label)
for label, vector in vectors.items():
distances = dict()
for label2, vector2 in vectors.items():
xdist = []
distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(vector2).reshape(1, -1))[0])
xdist.append(distance)
distances[label2] = distance
cell_similarities[label] = distances
return cell_similarities
def plot_reduction(self, ax, pcs=None, method="TSNE", clusters=None, labels=None):
if type(pcs) != numpy.ndarray:
if method == "TSNE":
print("Running t-SNE")
pca = TSNE(n_components=2, n_jobs=-1, metric="cosine")
pcs = pca.fit_transform(self.matrix)
pcs = numpy.transpose(pcs)
print("Finished.")
else:
print("Running UMAP")
trans = umap.UMAP(random_state=42,metric='cosine').fit(self.matrix)
x = trans.embedding_[:, 0]
y = trans.embedding_[:, 1]
pcs = [x,y]
print("Finished.")
data = {"x":pcs[0],"y":pcs[1],"Cluster": clusters}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Cluster', ax=ax,linewidth=0.1,s=13,alpha=1.0)
return pcs
def plot(self, png=None, pcs=None, method="TSNE", column=None):
if column:
column_labels = dict(zip(self.context.cells,self.context.metadata[column]))
labels = []
for key in self.data.keys():
labels.append(column_labels[key])
else:
labels = self.clusters
plt.figure(figsize = (8, 8))
ax1 = plt.subplot(1,1,1)
pcs = self.plot_reduction(ax1, pcs=pcs, clusters=labels, method=method)
plt.xlabel("{}-1".format(method))
plt.ylabel("{}-2".format(method))
ax1.set_xticks([])
ax1.set_yticks([])
if png:
plt.savefig(png)
plt.close()
else:
plt.show()
return pcs
def plot_distance(self, vector, pcs=None):
plt.figure(figsize = (8,8))
ax = plt.subplot(1,1, 1)
if type(pcs) != numpy.ndarray:
pca = TSNE(n_components=2)
pcs = pca.fit_transform(self.matrix)
pcs = numpy.transpose(pcs)
distances = []
dataset_distance = float(cosine_similarity(numpy.array(vector).reshape(1, -1),numpy.array(self.dataset_vector).reshape(1, -1))[0])
for cell_vector in self.matrix:
distance = float(cosine_similarity(numpy.array(cell_vector).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
distances.append(distance-dataset_distance)
data = {"x":pcs[0],"y":pcs[1],"Distance": distances}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Distance', ax=ax,linewidth=0.00,s=7,alpha=0.7)
return pcs
def plot_gene_tsne(self, title, ax, genes, pcs=None):
expression = [0 for _ in range(len(list(self.data.keys())))]
for gene in genes:
for i, cell in enumerate(self.data.keys()):
if gene in self.expression[cell]:
expression[i] += self.expression[cell][gene]
if type(pcs) != numpy.ndarray:
pca = TSNE(n_components=2)
pcs = pca.fit_transform(self.matrix)
pcs = numpy.transpose(pcs)
data = {"x":pcs[0],"y":pcs[1],"Gene Expression": expression}
df = pandas.DataFrame.from_dict(data)
sns.scatterplot(data=df,x="x", y="y", hue='Gene Expression', ax=ax,linewidth=0.00,s=7,alpha=0.7)
ax.set_title(title,fontsize=16)
return pcs
def plot_gene_expression(self, genes, pcs=None, png=None):
plt.figure(figsize = (8,8))
ax = plt.subplot(1,1, 1)
pcs = self.plot_gene_tsne(",".join(genes[:10]), ax, genes, pcs=pcs)
ax.set_xticks([])
ax.set_yticks([])
if not png:
plt.show()
else:
plt.savefig(png)
plt.close()
return pcs
def plot_similarity_matrix(self, vectors, column, png=None):
    plt.figure(figsize = (12, 10))
    barcode_to_label = dict(zip(self.context.metadata.index, self.context.metadata[column]))
    ctypes, _ = self.group_cell_vectors(barcode_to_label)
    matrix = []
    clusters = list(vectors.keys())
    celltypes = list(ctypes.keys())
    # Rows are gene-set clusters, columns are cell-type average vectors.
    for cluster, genes in vectors.items():
        vector = self.embed.generate_vector(genes)
        row = []
        for cell in ctypes.keys():
            distance = float(cosine_similarity(numpy.array(ctypes[cell]).reshape(1, -1),numpy.array(vector).reshape(1, -1))[0])
            row.append(distance)
        matrix.append(row)
    matrix = numpy.array(matrix)
    df = pandas.DataFrame(matrix,index=clusters,columns=celltypes)
    sns.clustermap(df,figsize=(17,8))
    plt.tight_layout()
    if png:
        plt.savefig(png)
    else:
        plt.show()
|
plt.savefig(png)
|
index.test.tsx
|
import React from "react";
import { render } from "@testing-library/react";
import { BrowserRouter as Router } from "react-router-dom";
import ReefFooter from ".";
test("renders as expected", () => {
const { container } = render(
<Router>
<ReefFooter />
</Router>
|
);
expect(container).toMatchSnapshot();
});
|
|
app.e2e-spec.ts
|
import { AppPage } from './app.po';
import { browser, logging } from 'protractor';
describe('workspace-project App', () => {
let page: AppPage;
beforeEach(() => {
page = new AppPage();
});
|
expect(page.getTitleText()).toEqual('Welcome to calender!');
});
afterEach(async () => {
// Assert that there are no errors emitted from the browser
const logs = await browser.manage().logs().get(logging.Type.BROWSER);
expect(logs).not.toContain(jasmine.objectContaining({
level: logging.Level.SEVERE,
} as logging.Entry));
});
});
|
it('should display welcome message', () => {
page.navigateTo();
|
pow_blockchain.rs
|
// Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use super::block_builders::chain_block;
use monero::{
blockdata::Block as MoneroBlock,
consensus::deserialize,
cryptonote::hash::{Hash as MoneroHash, Hashable as MoneroHashable},
};
use tari_core::{
blocks::Block,
chain_storage::{BlockchainBackend, BlockchainDatabase},
consensus::{ConsensusConstants, ConsensusManager},
proof_of_work::{
lwma_diff::LinearWeightedMovingAverage,
monero_rx::{append_merge_mining_tag, tree_hash, MoneroData},
Difficulty,
DifficultyAdjustment,
PowAlgorithm,
},
test_helpers::blockchain::TempDatabase,
};
pub fn create_test_pow_blockchain<T: BlockchainBackend>(
db: &BlockchainDatabase<T>,
mut pow_algos: Vec<PowAlgorithm>,
consensus_manager: &ConsensusManager,
)
{
// Remove the first as it will be replaced by the genesis block
pow_algos.remove(0);
let block0 = db.fetch_block(0).unwrap().block().clone();
append_to_pow_blockchain(db, block0, pow_algos, consensus_manager);
}
pub fn append_to_pow_blockchain<T: BlockchainBackend>(
db: &BlockchainDatabase<T>,
chain_tip: Block,
pow_algos: Vec<PowAlgorithm>,
consensus_manager: &ConsensusManager,
)
{
let mut prev_block = chain_tip;
for pow_algo in pow_algos {
let new_block = chain_block(&prev_block, Vec::new(), consensus_manager);
let mut new_block = db.prepare_block_merkle_roots(new_block).unwrap();
new_block.header.timestamp = prev_block.header.timestamp.increase(120);
new_block.header.pow.pow_algo = pow_algo;
if new_block.header.pow.pow_algo == PowAlgorithm::Monero {
let blocktemplate_blob = "0c0c8cd6a0fa057fe21d764e7abf004e975396a2160773b93712bf6118c3b4959ddd8ee0f76aad0000000002e1ea2701ffa5ea2701d5a299e2abb002028eb3066ced1b2cc82ea046f3716a48e9ae37144057d5fb48a97f941225a1957b2b0106225b7ec0a6544d8da39abe68d8bd82619b4a7c5bdae89c3783b256a8fa47820208f63aa86d2e857f070000".to_string();
let seed_hash = "9f02e032f9b15d2aded991e0f68cc3c3427270b568b782e55fbd269ead0bad97".to_string();
let bytes = hex::decode(blocktemplate_blob.clone()).unwrap();
let mut block = deserialize::<MoneroBlock>(&bytes[..]).unwrap();
let hash = MoneroHash::from_slice(new_block.header.merged_mining_hash().as_ref());
append_merge_mining_tag(&mut block, hash).unwrap();
let count = 1 + (block.tx_hashes.len() as u16);
let mut hashes = Vec::with_capacity(count as usize);
let mut proof = Vec::with_capacity(count as usize);
hashes.push(block.miner_tx.hash());
proof.push(block.miner_tx.hash());
|
for item in block.clone().tx_hashes {
hashes.push(item);
proof.push(item);
}
let root = tree_hash(hashes.clone().as_ref()).unwrap();
let monero_data = MoneroData {
header: block.header,
key: seed_hash.clone(),
count,
transaction_root: root.to_fixed_bytes(),
transaction_hashes: hashes.into_iter().map(|h| h.to_fixed_bytes()).collect(),
coinbase_tx: block.miner_tx,
};
let serialized = bincode::serialize(&monero_data).unwrap();
new_block.header.pow.pow_data = serialized.clone();
}
db.add_block(new_block.clone().into()).unwrap();
prev_block = new_block;
}
}
// Calculates the accumulated difficulty for the selected blocks in the blockchain db.
pub fn calculate_accumulated_difficulty(
db: &BlockchainDatabase<TempDatabase>,
pow_algo: PowAlgorithm,
heights: Vec<u64>,
consensus_constants: &ConsensusConstants,
) -> Difficulty
{
let mut lwma = LinearWeightedMovingAverage::new(
consensus_constants.get_difficulty_block_window() as usize,
consensus_constants.get_diff_target_block_interval(pow_algo),
consensus_constants.get_difficulty_max_block_interval(pow_algo),
);
for height in heights {
let (header, accum) = db.fetch_header_and_accumulated_data(height).unwrap();
lwma.add(header.timestamp, accum.target_difficulty).unwrap();
}
lwma.get_difficulty().unwrap()
}
| |
graph.py
|
##
## Weirdo Tree Graph that powers jobChomper
## --
##
## Assertions:
## * DAG is made up of named edges
## * Each edge is a triple (A, B, NEEDSPREVIOUSTOPASS)
## A, B are the named nodes
## B will execute after A has evaluated
## NEEDSPREVIOUSTOPASS is True or False; if it is True then A _must_ evaluate as True for B to run
## * There's a special node called STARTNODE from where execution starts
## * Comment lines in graph file start with #
## * Elements in graph lines separated by ',' - for example:
## A, B, True
##
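## Hedged example (added for illustration): a graph file in the comma-separated triple format
## described above, using hypothetical node names. processData runs only if fetchData passes;
## cleanupOnFail is scheduled when processData fails (see buildRunDict below).
##
##   STARTNODE, fetchData, True
##   fetchData, processData, True
##   processData, cleanupOnFail, False
##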
import jobChomper.node
import logging
STARTNODENAME = "STARTNODE"
RUNONLYONPASS = "onlyOnPass"
RUNONFAIL = "onFail"
def findCycle(graph):
todo = set(graph.keys())
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in graph[top]:
if node in stack:
return stack[stack.index(node):]
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return None
class Graph(object):
""" Graph Object """
def __init__(self):
self.init = True
self.edges = set()
self.runDict = {}
self.nodeSet = set()
def buildRunDict(self):
self.runDict = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
self.nodeSet.add(nodeA)
self.nodeSet.add(nodeB)
priorSuccess = edge[2]
if nodeA not in self.runDict.keys():
self.runDict[nodeA] = {}
self.runDict[nodeA][RUNONLYONPASS]=[]
self.runDict[nodeA][RUNONFAIL]=[]
if priorSuccess == True:
self.runDict[nodeA][RUNONLYONPASS].append(nodeB)
else:
self.runDict[nodeA][RUNONFAIL].append(nodeB)
for node in self.nodeSet:
if node not in self.runDict.keys():
self.runDict[node]={}
self.runDict[node][RUNONLYONPASS]=[]
self.runDict[node][RUNONFAIL]=[]
def findCycles(self):
connectivity = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA not in connectivity.keys():
connectivity[nodeA] = []
connectivity[nodeA].append(nodeB)
return findCycle(connectivity)
def checkEdgeNodesValid(self):
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA == STARTNODENAME:
continue
if not jobChomper.node.nodeExists(nodeA):
raise ValueError("[Graph] no such node as: " + nodeA)
if not jobChomper.node.nodeExists(nodeB):
raise ValueError("[Graph] no such node as: " + nodeB)
def loadGraphFromFile(self, filename):
foundStart = False
with open(filename) as graphBody:
data = graphBody.read()
for line in data.split('\n'):
line = line.strip()
# Empty line
if line == '':
continue
# Comment line
if line[0] == '#':
continue
spl = line.split(',')
# Not a triple
if len(spl) != 3:
logging.error("Problem parsing: " + filename + " file has invalid triple: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " file has invalid triple: " + line)
nodeA = spl[0].strip()
nodeB = spl[1].strip()
prevEval = False
if spl[2].lower().strip() == 'true':
prevEval = True
if nodeA == STARTNODENAME:
if foundStart == True:
|
else:
foundStart = True
triple = (nodeA, nodeB, prevEval)
self.edges.add(triple)
if foundStart == False:
logging.error("Problem parsing: " + filename + " cound not find " + STARTNODENAME)
raise ValueError("[Graph] Problem parsing: " + filename + " cound not find " + STARTNODENAME)
self.buildRunDict()
cycles = self.findCycles()
if cycles != None:
logging.error("Problem parsing: " + filename + " cycle detected:" + str(cycles))
raise ValueError("[Graph] Problem parsing: " + filename + " cycle detected:" + str(cycles))
self.checkEdgeNodesValid()
|
logging.error("Problem parsing: " + filename + " start node defined again: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " start node defined again: " + line)
|
selectors.js
|
/**
* The global state selectors
*/
import { createSelector } from 'reselect';
const selectGlobal = (state) => state.get('global');
const selectRoute = (state) => state.get('route');
const makeSelectLocation = () => createSelector(
selectRoute,
(routeState) => routeState.get('location').toJS()
);
const makeSelectProjects = () => createSelector(
selectGlobal,
(state) => state.get('projects')
);
const makeSelectOrder = () => createSelector(
selectGlobal,
(state) => state.get('order').toJS()
);
const makeSelectHeader = () => createSelector(
selectGlobal,
(state) => state.get('header')
);
const makeSelectSearch = () => createSelector(
selectGlobal,
(state) => state.get('search').toJS()
);
const makeSelectSelected = () => createSelector(
selectGlobal,
(state) => state.get('selected')
);
const makeSelectTarget = () => createSelector(
selectGlobal,
(state) => state.get('target').toJS()
);
const makeSelectNews = () => createSelector(
selectGlobal,
(state) => state.get('news')
);
const makeSelectReady = () => createSelector(
selectGlobal,
(state) => state.get('ready')
);
const makeSelectError = () => createSelector(
selectGlobal,
(state) => state.get('error')
);
const makeSelectAbout = () => createSelector(
selectGlobal,
(state) => state.get('about')
);
|
selectGlobal,
(state) => state.get('feature')
);
const makeSelectLang = () => createSelector(
selectGlobal,
(state) => state.get('language')
);
const makeSelectHover = () => createSelector(
selectGlobal,
(state) => state.get('hover')
);
export {
selectGlobal,
makeSelectLocation,
makeSelectProjects,
makeSelectNews,
makeSelectAbout,
makeSelectOrder,
makeSelectSelected,
makeSelectSearch,
makeSelectHeader,
makeSelectFeature,
makeSelectError,
makeSelectReady,
makeSelectLang,
makeSelectTarget,
makeSelectHover
};
|
const makeSelectFeature = () => createSelector(
|
image_turk.py
|
from web import app
from gevent.pywsgi import WSGIServer
|
if __name__ == '__main__':
http_server = WSGIServer(('', 5000), app)
http_server.start()
http_server.serve_forever()
|