file_name stringlengths 3–137 | prefix stringlengths 0–918k | suffix stringlengths 0–962k | middle stringlengths 0–812k |
---|---|---|---|
I_love_Easin.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 28 12:20:40 2021
| """
in1 = input()
in1 = int(in1)
in2 = input().split()
list1 = []
for elem in range(len(in2)):
    list1.append(int(in2[elem]))
#print(list1)
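# A score counts as "amazing" when it strictly beats every earlier score (a new max)
# or is strictly worse than every earlier score (a new min).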
amazing = 0
min1 = list1[0]
max1 = list1[0]
for val in range(len(list1)-1):
    if list1[val + 1] > max1:
        amazing += 1
        max1 = list1[val + 1]
    elif list1[val + 1] < min1:
        amazing += 1
        min1 = list1[val + 1]
print(amazing) | @author: Easin
|
error.rs | //! Things that can go wrong with sequences.
use thiserror::Error;
/// Invalid states a sequence can be in.
#[derive(Debug, Error)]
pub enum SequenceError {
/// The sequence has a looping section without a `wait`
/// or `wait_for_interval` command.
///
/// This is invalid because if this sequence were to run,
/// the looping section would lock up the audio thread | } | /// by processing forever.
#[error("The looping section of a sequence must have a wait-related command")]
InfiniteLoop, |
movie.js | const { genreSchema } = require("./genre");
const Joi = require("@hapi/joi");
const mongoose = require("mongoose");
const movieSchema = new mongoose.Schema({
title: {
type: String,
required: true,
trim: true,
minlength: 5,
maxlength: 255,
},
genre: { type: genreSchema, required: true },
numberInStock: { type: Number, min: 0, max: 255, required: true },
dailyRentalRate: { type: Number, min: 0, max: 255, required: true },
});
const Movie = mongoose.model("Movie", movieSchema);
function | (movie) {
const schema = Joi.object({
title: Joi.string().min(5).max(50).required(),
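// Note: Joi.objectId() is not built into Joi; this assumes the joi-objectid
// extension has been registered during application startup.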
genreId: Joi.objectId().required(),
numberInStock: Joi.number().min(0).required(),
dailyRentalRate: Joi.number().min(0).required(),
});
return schema.validate(movie);
}
exports.Movie = Movie;
exports.validate = validateMovie;
| validateMovie |
utils.ts | /**
* Swap two elements of an array in place.
*/
export function swap(list: any[], a: number, b: number) {
const temp = list[a]
list[a] = list[b]
list[b] = temp
return list
}
/**
* Convert a Buffer or a DataView to an array.
*
* @param obj a Buffer or a DataView instance
*/
export function toArray(obj: Buffer|DataView) {
if (obj instanceof DataView) {
// Honor the view's byteOffset/byteLength instead of converting the entire underlying buffer.
return Array.from(new Uint8Array(obj.buffer, obj.byteOffset, obj.byteLength)) | } | }
return Array.from(obj) |
http_api.go | package web
import (
"context"
"fmt"
"github.com/bwmarrin/discordgo"
"github.com/gin-gonic/gin"
"github.com/leighmacdonald/gbans/internal/action"
"github.com/leighmacdonald/gbans/internal/config"
"github.com/leighmacdonald/gbans/internal/discord"
"github.com/leighmacdonald/gbans/internal/external"
"github.com/leighmacdonald/gbans/internal/model"
"github.com/leighmacdonald/gbans/internal/steam"
"github.com/leighmacdonald/gbans/internal/store"
"github.com/leighmacdonald/gbans/internal/web/ws"
"github.com/leighmacdonald/gbans/pkg/ip2location"
"github.com/leighmacdonald/golib"
"github.com/leighmacdonald/steamid/v2/steamid"
"github.com/leighmacdonald/steamweb"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"net"
"net/http"
"strconv"
"strings"
"time"
)
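// APIResponse is the JSON envelope returned by every handler below: a success
// flag plus an optional message and payload.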
type APIResponse struct {
Status bool `json:"status"`
Message string `json:"message"`
Data interface{} `json:"data"`
}
func responseErr(c *gin.Context, status int, data interface{}) {
c.JSON(status, APIResponse{
Status: false,
Data: data,
})
}
func responseOK(c *gin.Context, status int, data interface{}) {
c.JSON(status, APIResponse{
Status: true,
Data: data,
})
}
type demoPostRequest struct {
ServerName string `form:"server_name"`
}
func (w *web) onPostDemo(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
var r demoPostRequest
if errR := c.Bind(&r); errR != nil {
responseErr(c, http.StatusBadRequest, nil)
return
}
f, hdr, err := c.Request.FormFile("file")
if err != nil {
responseErr(c, http.StatusBadRequest, nil)
return
}
var server model.Server
if errS := db.GetServerByName(c, r.ServerName, &server); errS != nil {
responseErr(c, http.StatusNotFound, nil)
return
}
d, errRead := io.ReadAll(f)
if errRead != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
demo, errDF := model.NewDemoFile(server.ServerID, hdr.Filename, d)
if errDF != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
if errSave := db.SaveDemo(c, &demo); errSave != nil {
log.Errorf("Failed to save demo to store: %v", errSave)
responseErr(c, http.StatusInternalServerError, nil)
return
}
responseOK(c, http.StatusCreated, demo)
}
}
func (w *web) onPostPingMod(bot discord.ChatBot) gin.HandlerFunc {
type pingReq struct {
ServerName string `json:"server_name"`
Name string `json:"name"`
SteamID steamid.SID64 `json:"steam_id"`
Reason string `json:"reason"`
Client int `json:"client"`
}
return func(c *gin.Context) {
var req pingReq
if err := c.BindJSON(&req); err != nil {
responseErr(c, http.StatusBadRequest, nil)
return
}
var pi model.PlayerInfo
err := w.executor.Find(req.SteamID.String(), "", &pi)
if err != nil {
log.Error("Failed to find player on /mod call")
}
//name := req.SteamID.String()
//if pi.InGame {
// name = fmt.Sprintf("%s (%s)", name, pi.Player.Name)
//}
var roleStrings []string
for _, i := range config.Discord.ModRoleIDs {
roleStrings = append(roleStrings, fmt.Sprintf("<@&%s>", i))
}
e := discord.RespOk(nil, "New User Report")
e.Description = fmt.Sprintf("%s | %s", req.Reason, strings.Join(roleStrings, " "))
if pi.Player.Name != "" {
e.Fields = append(e.Fields, &discordgo.MessageEmbedField{
Name: "Reporter",
Value: pi.Player.Name,
Inline: true,
})
}
if req.SteamID.String() != "" {
e.Fields = append(e.Fields, &discordgo.MessageEmbedField{
Name: "ReporterSID",
Value: req.SteamID.String(),
Inline: true,
})
}
if req.ServerName != "" {
e.Fields = append(e.Fields, &discordgo.MessageEmbedField{
Name: "Server",
Value: req.ServerName,
Inline: true,
})
}
for _, chanId := range config.Discord.ModChannels {
if errSend := bot.SendEmbed(chanId, e); errSend != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
}
responseOK(c, http.StatusOK, gin.H{
"client": req.Client,
"message": "Moderators have been notified",
})
}
}
type apiBanRequest struct {
SteamID steamid.SID64 `json:"steam_id"`
Duration string `json:"duration"`
BanType model.BanType `json:"ban_type"`
Reason model.Reason `json:"reason"`
ReasonText string `json:"reason_text"`
Network string `json:"network"`
}
func (w *web) onAPIPostBanCreate() gin.HandlerFunc {
return func(c *gin.Context) {
var r apiBanRequest
if err := c.BindJSON(&r); err != nil {
responseErr(c, http.StatusBadRequest, "Failed to perform ban")
return
}
// duration, err := config.ParseDuration(r.Duration)
// if err != nil {
// responseErr(c, http.StatusNotAcceptable, `Invalid duration. Examples: "300m", "1.5h" or "2h45m".
//Valid time units are "s", "ws", "h".`)
// return
// }
var e error
if r.Network != "" {
_, _, e = net.ParseCIDR(r.Network)
if e != nil {
responseErr(c, http.StatusBadRequest, "Invalid network cidr definition")
return
}
}
if !r.SteamID.Valid() {
responseErr(c, http.StatusBadRequest, "Invalid steamid")
return
}
if r.Network != "" {
var b model.BanNet
if bErr := w.executor.BanNetwork(action.NewBanNet(model.Web, r.SteamID.String(),
currentPerson(c).SteamID.String(), r.ReasonText, r.Duration, r.Network), &b); bErr != nil {
if errors.Is(bErr, store.ErrDuplicate) {
responseErr(c, http.StatusConflict, "Duplicate ban")
return
}
responseErr(c, http.StatusBadRequest, "Failed to perform ban")
return
}
responseOK(c, http.StatusCreated, b)
} else {
var b model.Ban
if bErr := w.executor.Ban(action.NewBan(model.Web, r.SteamID.String(), currentPerson(c).SteamID.String(),
r.ReasonText, r.Duration), &b); bErr != nil {
if errors.Is(bErr, store.ErrDuplicate) {
responseErr(c, http.StatusConflict, "Duplicate ban")
return
}
responseErr(c, http.StatusBadRequest, "Failed to perform ban")
return
}
responseOK(c, http.StatusCreated, b)
}
}
}
func (w *web) onSAPIPostServerAuth(db store.Store) gin.HandlerFunc {
type authReq struct {
ServerName string `json:"server_name"`
Key string `json:"key"`
}
type authResp struct {
Status bool `json:"status"`
Token string `json:"token"`
}
return func(c *gin.Context) {
var req authReq
if err := c.BindJSON(&req); err != nil {
log.Errorf("Failed to decode auth request: %v", err)
responseErr(c, http.StatusInternalServerError, nil)
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
var srv model.Server
err := db.GetServerByName(ctx, req.ServerName, &srv)
if err != nil {
responseErr(c, http.StatusNotFound, nil)
return
}
if srv.Password != req.Key {
responseErr(c, http.StatusForbidden, nil)
log.Warnf("Invalid server key used: %s", req.ServerName)
return
}
srv.Token = golib.RandomString(40)
srv.TokenCreatedOn = config.Now()
if err2 := db.SaveServer(ctx, &srv); err2 != nil {
log.Errorf("Failed to updated server token: %v", err2)
responseErr(c, http.StatusInternalServerError, nil)
return
}
responseOK(c, http.StatusOK, authResp{
Status: true,
Token: srv.Token,
})
}
}
func (w *web) onPostServerCheck(db store.Store) gin.HandlerFunc {
type checkRequest struct {
ClientID int `json:"client_id"`
SteamID string `json:"steam_id"`
IP net.IP `json:"ip"`
}
type checkResponse struct {
ClientID int `json:"client_id"`
SteamID string `json:"steam_id"`
BanType model.BanType `json:"ban_type"`
Msg string `json:"msg"`
}
return func(c *gin.Context) {
var req checkRequest
if err := c.BindJSON(&req); err != nil {
responseErr(c, http.StatusInternalServerError, checkResponse{
BanType: model.Unknown,
Msg: "Error determining state",
})
return
}
resp := checkResponse{
ClientID: req.ClientID,
SteamID: req.SteamID,
BanType: model.Unknown,
Msg: "",
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
defer cancel()
// Check IP first
banNet, err := db.GetBanNet(ctx, req.IP)
if err != nil |
if len(banNet) > 0 {
resp.BanType = model.Banned
resp.Msg = fmt.Sprintf("Network banned (C: %d)", len(banNet))
responseOK(c, http.StatusOK, resp)
log.WithFields(log.Fields{"type": "cidr", "reason": banNet[0].Reason}).Infof("Player dropped")
return
}
// Check SteamID
steamID, errResolve := steamid.ResolveSID64(context.Background(), req.SteamID)
if errResolve != nil || !steamID.Valid() {
resp.Msg = "Invalid steam id"
responseErr(c, http.StatusBadRequest, resp)
return
}
var asnRecord ip2location.ASNRecord
errASN := db.GetASNRecordByIP(ctx, req.IP, &asnRecord)
if errASN == nil {
var asnBan model.BanASN
if errASNBan := db.GetBanASN(ctx, int64(asnRecord.ASNum), &asnBan); errASNBan != nil {
if !errors.Is(errASNBan, store.ErrNoResult) {
log.Errorf("Failed to fetch asn ban: %v", errASNBan)
}
} else {
resp.BanType = model.Banned
resp.Msg = asnBan.Reason
responseOK(c, http.StatusOK, resp)
log.WithFields(log.Fields{"type": "asn", "reason": asnBan.Reason}).Infof("Player dropped")
return
}
}
ban := model.NewBannedPerson()
if errB := db.GetBanBySteamID(ctx, steamID, false, &ban); errB != nil {
if errors.Is(errB, store.ErrNoResult) {
resp.BanType = model.OK
responseOK(c, http.StatusOK, resp)
return
}
resp.Msg = "Error determining state"
responseErr(c, http.StatusInternalServerError, resp)
return
}
resp.BanType = ban.Ban.BanType
resp.Msg = ban.Ban.ReasonText
responseOK(c, http.StatusOK, resp)
}
}
//
//func onAPIPostAppeal() gin.HandlerFunc {
// type req struct {
// Email string `json:"email"`
// AppealText string `json:"appeal_text"`
// }
// return func(c *gin.Context) {
// var app req
// if err := c.BindJSON(&app); err != nil {
// log.Errorf("Received malformed appeal apiBanRequest: %v", err)
// responseErr(c, http.StatusBadRequest, nil)
// return
// }
// responseOK(c, http.StatusOK, gin.H{})
// }
//}
//
//func onAPIPostReport() gin.HandlerFunc {
// return func(c *gin.Context) {
// responseErr(c, http.StatusInternalServerError, gin.H{})
// }
//}
func (w *web) onAPIGetServers(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
servers, err := db.GetServers(ctx, true)
if err != nil {
log.Errorf("Failed to fetch servers: %s", err)
responseErr(c, http.StatusInternalServerError, nil)
return
}
responseOK(c, http.StatusOK, servers)
}
}
func (w *web) queryFilterFromContext(c *gin.Context) (*store.QueryFilter, error) {
var qf store.QueryFilter
if err := c.BindUri(&qf); err != nil {
return nil, err
}
return &qf, nil
}
func (w *web) onAPIGetPlayers(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
qf, err := w.queryFilterFromContext(c)
if err != nil {
responseErr(c, http.StatusBadRequest, nil)
return
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
people, err2 := db.GetPeople(ctx, qf)
if err2 != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
responseOK(c, http.StatusOK, people)
}
}
func (w *web) onAPICurrentProfile() gin.HandlerFunc {
type resp struct {
Player *model.Person `json:"player"`
Friends []steamweb.PlayerSummary `json:"friends"`
}
return func(c *gin.Context) {
p := currentPerson(c)
if !p.SteamID.Valid() {
responseErr(c, http.StatusForbidden, nil)
return
}
friendIDs, err := steam.FetchFriends(p.SteamID)
if err != nil {
responseErr(c, http.StatusServiceUnavailable, "Could not fetch friends")
return
}
friends, err := steam.FetchSummaries(friendIDs)
if err != nil {
responseErr(c, http.StatusServiceUnavailable, "Could not fetch summaries")
return
}
var response resp
response.Player = &p
response.Friends = friends
responseOK(c, http.StatusOK, response)
}
}
func (w *web) onAPIProfile(db store.Store) gin.HandlerFunc {
type req struct {
Query string `form:"query"`
}
type resp struct {
Player *model.Person `json:"player"`
Friends []steamweb.PlayerSummary `json:"friends"`
}
return func(c *gin.Context) {
var r req
if err := c.Bind(&r); err != nil {
responseErr(c, http.StatusBadRequest, nil)
return
}
cx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
sid, err := steamid.StringToSID64(r.Query)
if err != nil {
sid, err = steamid.ResolveSID64(cx, r.Query)
if err != nil {
responseErr(c, http.StatusNotFound, nil)
return
}
}
person := model.NewPerson(sid)
if err2 := db.GetOrCreatePersonBySteamID(cx, sid, &person); err2 != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
sum, err3 := steamweb.PlayerSummaries(steamid.Collection{sid})
if err3 != nil || len(sum) != 1 {
log.Errorf("Failed to get player summary: %v", err3)
responseErr(c, http.StatusInternalServerError, "Could not fetch summary")
return
}
person.PlayerSummary = &sum[0]
friendIDs, err4 := steam.FetchFriends(person.SteamID)
if err4 != nil {
responseErr(c, http.StatusServiceUnavailable, "Could not fetch friends")
return
}
friends, err5 := steam.FetchSummaries(friendIDs)
if err5 != nil {
responseErr(c, http.StatusServiceUnavailable, "Could not fetch summaries")
return
}
var response resp
response.Player = &person
response.Friends = friends
responseOK(c, http.StatusOK, response)
}
}
func (w *web) onAPIGetFilteredWords(db store.Store) gin.HandlerFunc {
type resp struct {
Count int `json:"count"`
Words []string `json:"words"`
}
return func(c *gin.Context) {
cx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
words, err := db.GetFilters(cx)
if err != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
// Avoid shadowing the method receiver `w`.
var patterns []string
for _, f := range words {
patterns = append(patterns, f.Pattern.String())
}
responseOK(c, http.StatusOK, resp{Count: len(words), Words: patterns})
}
}
func (w *web) onAPIGetCompHist(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
sidStr := c.DefaultQuery("sid", "")
if sidStr == "" {
responseErr(c, http.StatusBadRequest, "missing sid")
return
}
sid, err := steamid.StringToSID64(sidStr)
if err != nil || !sid.Valid() {
responseErr(c, http.StatusBadRequest, "invalid sid")
return
}
cx, cancel := context.WithTimeout(c, time.Second*10)
defer cancel()
var hist external.CompHist
if err := external.FetchCompHist(cx, sid, &hist); err != nil {
responseErr(c, http.StatusInternalServerError, "query failed")
return
}
responseOK(c, http.StatusOK, hist)
}
}
func (w *web) onAPIGetStats(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
cx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
var stats model.Stats
if err := db.GetStats(cx, &stats); err != nil {
responseErr(c, http.StatusInternalServerError, nil)
return
}
stats.ServersAlive = 1
responseOK(c, http.StatusOK, stats)
}
}
func loadBanMeta(_ *model.BannedPerson) {
}
func (w *web) onAPIGetBanByID(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
banIDStr := c.Param("ban_id")
if banIDStr == "" {
responseErr(c, http.StatusBadRequest, nil)
return
}
sid, err := strconv.ParseUint(banIDStr, 10, 64)
if err != nil {
responseErr(c, http.StatusBadRequest, nil)
return
}
cx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
ban := model.NewBannedPerson()
if errB := db.GetBanByBanID(cx, sid, false, &ban); errB != nil {
responseErr(c, http.StatusNotFound, nil)
log.Errorf("Failed to fetch bans: %v", errB)
return
}
loadBanMeta(&ban)
responseOK(c, http.StatusOK, ban)
}
}
func (w *web) onAPIGetBans(db store.Store) gin.HandlerFunc {
return func(c *gin.Context) {
o := store.NewQueryFilter("")
cx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
bans, err := db.GetBans(cx, o)
if err != nil {
responseErr(c, http.StatusInternalServerError, nil)
log.Errorf("Failed to fetch bans")
return
}
responseOK(c, http.StatusOK, bans)
}
}
func (w *web) onAPIPostServer() gin.HandlerFunc {
return func(c *gin.Context) {
responseOK(c, http.StatusOK, gin.H{})
}
}
func (w *web) onSup(p ws.Payload) error {
return nil
}
| {
responseErr(c, http.StatusInternalServerError, checkResponse{
BanType: model.Unknown,
Msg: "Error determining state",
})
log.Errorf("Could not get ban net results: %v", err)
return
} |
index.tsx | import { useContext, useState, FormEvent } from "react";
import { VscGithubInverted, VscSignOut } from "react-icons/vsc";
import { AuthContext } from "../../contexts/auth";
import { api } from "../../services/api";
import styles from "./style.module.scss";
export function SendMessageForm() { | const { user, signOut } = useContext(AuthContext);
const [message, setMessage] = useState("");
const [confirmation, setConfirmation] = useState(false);
async function handleSendMessage(event: FormEvent) {
event.preventDefault();
if (!message.trim()) {
return;
}
await api.post("messages", { message });
setConfirmation(true);
// Hide the confirmation toast after 1.5s; setTimeout (not setInterval) so it fires once.
setTimeout(() => {
setConfirmation(false);
}, 1500);
setMessage("");
}
return (
<div className={styles.sendMessageFormWrapper}>
{confirmation ? <span className={styles.toast}>Mensagem enviada com sucesso</span> : ""}
<span className={styles.seal} />
<button className={styles.signOutButton} onClick={signOut}>
<VscSignOut size="32" />
</button>
<header className={styles.userInformation}>
<div className={styles.userImage}>
<img src={user?.avatar_url} alt={user?.name} />
</div>
<strong className={styles.userName}>{user?.name}</strong>
<span className={styles.userGithub}>
<VscGithubInverted size="16" />
{user?.login}
</span>
</header>
<form onSubmit={handleSendMessage} className={styles.sendMessageForm}>
<label htmlFor="message">Mensagem</label>
<textarea
name="message"
id="message"
placeholder="Qual sua expectativa para o evento"
onChange={(event) => setMessage(event.target.value)}
value={message}
/>
<button type="submit">Enviar mensagem</button>
</form>
</div>
);
} | |
caster_test.go | package ntrip_test
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/go-gnss/ntrip"
"github.com/go-gnss/ntrip/internal/mock"
"github.com/sirupsen/logrus"
)
// TODO: Test failure cases with httptest.Server
// Test running Caster with a mock service using httptest.Server, which is close to actually calling
// caster.ListenAndServe(); write data with a v2 server and read it back with v2 and v1 clients
func TestCasterServerClient(t *testing.T) |
func testV1Client(t *testing.T, host, path string, serverWriter io.Writer) {
req, err := ntrip.NewClientV1(host, path, mock.Username, mock.Password)
if err != nil {
t.Fatalf("v1 client - error connecting to caster: %s", err)
}
defer req.Close()
testString := "some test data"
_, err = serverWriter.Write([]byte(testString))
if err != nil {
t.Fatalf("server - error during write for v1: %s", err)
}
responseHeaders := "ICY 200 OK\r\n"
buf := make([]byte, len(responseHeaders))
br, err := req.Read(buf)
if err != nil {
t.Fatalf("v1 client - error during read headers: %s", err)
}
if string(buf[:br]) != responseHeaders {
t.Fatalf("v1 client - expected response headers %q, received %q", responseHeaders, string(buf[:br]))
}
buf = make([]byte, len(testString))
br, err = req.Read(buf)
if err != nil {
t.Fatalf("v1 client - error during read: %s", err)
}
if string(buf[:br]) != testString {
t.Fatalf("v1 client - expected response body %q, received %q", testString, string(buf[:br]))
}
}
func testV2Client(t *testing.T, url string, serverWriter io.Writer) {
req, _ := ntrip.NewClientRequest(url)
req.SetBasicAuth(mock.Username, mock.Password)
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("client - error connecting to caster: %s", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("v2 client - expected response code %d, received %d", http.StatusOK, resp.StatusCode)
}
testString := "some test data"
_, err = serverWriter.Write([]byte(testString))
if err != nil {
t.Fatalf("server - error during write: %s", err)
}
buf := make([]byte, len(testString))
_, err = resp.Body.Read(buf)
if err != nil {
t.Fatalf("v2 client - error during read: %s", err)
}
if string(buf) != testString {
t.Fatalf("v2 client - expected response body %q, received %q", testString, string(buf))
}
resp.Body.Close()
}
| {
caster := ntrip.NewCaster("N/A", mock.NewMockSourceService(), logrus.StandardLogger())
ts := httptest.NewServer(caster.Handler)
defer ts.Close()
r, w := io.Pipe()
// Server
{
sreq, _ := ntrip.NewServerRequest(ts.URL+mock.MountPath, r)
sreq.SetBasicAuth(mock.Username, mock.Password)
sresp, err := http.DefaultClient.Do(sreq)
if err != nil {
t.Fatalf("server - error connecting to caster: %s", err)
}
defer sreq.Body.Close()
if sresp.StatusCode != http.StatusOK {
t.Fatalf("server - expected response code %d, received %d", http.StatusOK, sresp.StatusCode)
}
}
testV2Client(t, ts.URL+mock.MountPath, w)
// POST request's context may not get closed in the server before the next Write occurs,
// resulting in the mock writing to the first connected client's Body
// Nothing like a 10ms timeout to fix a bit of non-deterministic behaviour
// TODO: Could fix this by rewriting the mock service, or using the inmemory SourceService
time.Sleep(10 * time.Millisecond)
testV1Client(t, ts.URL[7:], mock.MountPath, w)
} |
ConfusionMatrixUnitOfWork.py | import numpy as np
from matrices.ConfusionMatrix import ConfusionMatrix
class ConfusionMatrixUnitOfWork:
    def go(self):
| cm = ConfusionMatrix(4)
        cm.loadRow([70, 10, 15, 5])
        cm.loadRow([8, 67, 20, 5])
        cm.loadRow([0, 11, 88, 1])
        cm.loadRow([4, 10, 14, 72])
        cm.printStatsOf(0)
        cm.printStatsOf(1)
        cm.printStatsOf(2)
        cm.printStatsOf(3)
        print(cm.totalSensitivity())
        print(cm.totalSpecificity()) |
|
welcome-banner.component.spec.ts | /*
* Copyright (c) 2014-2021 Bjoern Kimminich.
* SPDX-License-Identifier: MIT
*/
import { TranslateModule } from '@ngx-translate/core'
import { HttpClientTestingModule } from '@angular/common/http/testing'
import { CookieService } from 'ngx-cookie-service'
import { ComponentFixture, TestBed, waitForAsync } from '@angular/core/testing'
import { WelcomeBannerComponent } from './welcome-banner.component'
import { MatDialogRef } from '@angular/material/dialog'
import { MatIconModule } from '@angular/material/icon'
import { MatTooltipModule } from '@angular/material/tooltip'
describe('WelcomeBannerComponent', () => {
let component: WelcomeBannerComponent
let fixture: ComponentFixture<WelcomeBannerComponent>
let cookieService: any
let matDialogRef: MatDialogRef<WelcomeBannerComponent>
beforeEach(waitForAsync(() => {
matDialogRef = jasmine.createSpyObj('MatDialogRef', ['close'])
TestBed.configureTestingModule({
imports: [
TranslateModule.forRoot(),
HttpClientTestingModule,
MatIconModule,
MatTooltipModule
],
declarations: [WelcomeBannerComponent],
providers: [
{ provide: MatDialogRef, useValue: matDialogRef },
CookieService
]
})
.compileComponents()
cookieService = TestBed.inject(CookieService)
}))
beforeEach(() => {
fixture = TestBed.createComponent(WelcomeBannerComponent)
component = fixture.componentInstance
fixture.detectChanges()
})
it('should create', () => {
expect(component).toBeTruthy()
})
it('should not dismiss if cookie not set', () => {
component.ngOnInit() | expect(matDialogRef.close).toHaveBeenCalledTimes(0)
})
it('should dismiss and add cookie when closed', () => {
component.closeWelcome()
expect(cookieService.get('welcomebanner_status')).toBe('dismiss')
expect(matDialogRef.close).toHaveBeenCalled()
})
}) | |
produce_response_test.go | package sarama
import "testing"
var (
produceResponseNoBlocks = []byte{
0x00, 0x00, 0x00, 0x00} // 0 topics
produceResponseManyBlocks = []byte{
0x00, 0x00, 0x00, 0x02, // 2 topics
0x00, 0x03, 'f', 'o', 'o', // topic "foo" (length 3)
0x00, 0x00, 0x00, 0x00, // 0 partitions
0x00, 0x03, 'b', 'a', 'r', // topic "bar" (length 3)
0x00, 0x00, 0x00, 0x02, // 2 partitions
0x00, 0x00, 0x00, 0x01, // partition 1
0x00, 0x00, // no error
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // offset 255
0x00, 0x00, 0x00, 0x02, // partition 2
0x00, 0x02, // error: invalid message
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} // offset 0
)
func | (t *testing.T) {
response := ProduceResponse{}
testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
}
testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0)
if len(response.Blocks) != 2 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
}
if len(response.Blocks["foo"]) != 0 {
t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
}
if len(response.Blocks["bar"]) != 2 {
t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
}
block := response.GetBlock("bar", 1)
if block == nil {
t.Error("Decoding did not produce a block for bar/1")
} else {
if block.Err != ErrNoError {
t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
}
if block.Offset != 0xFF {
t.Error("Decoding failed for bar/1/Offset, got:", block.Offset)
}
}
block = response.GetBlock("bar", 2)
if block == nil {
t.Error("Decoding did not produce a block for bar/2")
} else {
if block.Err != ErrInvalidMessage {
t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
}
if block.Offset != 0 {
t.Error("Decoding failed for bar/2/Offset, got:", block.Offset)
}
}
}
| TestProduceResponse |
decoders.rs | //! Audio decoders.
mod adpcm;
#[cfg(any(feature = "minimp3", feature = "symphonia"))]
mod mp3;
mod nellymoser;
mod pcm;
pub use adpcm::AdpcmDecoder;
#[cfg(feature = "minimp3")]
pub use mp3::minimp3::Mp3Decoder;
#[cfg(all(feature = "symphonia", not(feature = "minimp3")))]
pub use mp3::symphonia::Mp3Decoder;
pub use nellymoser::NellymoserDecoder;
pub use pcm::PcmDecoder;
use crate::tag_utils::SwfSlice;
use std::io::{Cursor, Read};
use swf::{AudioCompression, SoundFormat, TagCode};
type Error = Box<dyn std::error::Error>;
/// An audio decoder. Can be used as an `Iterator` to return stereo sample frames.
/// If the sound is mono, the sample is duplicated across both channels.
pub trait Decoder: Iterator<Item = [i16; 2]> {
/// The number of channels of this audio decoder. Always 1 or 2.
fn num_channels(&self) -> u8;
/// The sample rate of this audio decoder.
fn sample_rate(&self) -> u16;
}
/// Instantiate a decoder for the compression that the sound data uses.
pub fn make_decoder<R: 'static + Send + Read>(
format: &SoundFormat,
data: R,
) -> Result<Box<dyn Send + Decoder>, Error> {
let decoder: Box<dyn Send + Decoder> = match format.compression {
AudioCompression::UncompressedUnknownEndian => {
// Cross fingers that it's little endian.
log::warn!("make_decoder: PCM sound is unknown endian; assuming little endian");
Box::new(PcmDecoder::new(
data,
format.is_stereo,
format.sample_rate,
format.is_16_bit,
))
}
AudioCompression::Uncompressed => Box::new(PcmDecoder::new(
data,
format.is_stereo,
format.sample_rate,
format.is_16_bit,
)),
AudioCompression::Adpcm => Box::new(AdpcmDecoder::new(
data,
format.is_stereo,
format.sample_rate,
)?),
#[cfg(any(feature = "minimp3", feature = "symphonia"))]
AudioCompression::Mp3 => Box::new(Mp3Decoder::new(data)?),
AudioCompression::Nellymoser => {
Box::new(NellymoserDecoder::new(data, format.sample_rate.into()))
}
_ => {
let msg = format!(
"make_decoder: Unhandled audio compression {:?}",
format.compression
);
log::error!("{}", msg);
return Err(msg.into());
}
};
Ok(decoder)
}
impl Decoder for Box<dyn Decoder + Send> {
#[inline]
fn num_channels(&self) -> u8 {
self.as_ref().num_channels()
}
/// The sample rate of this audio decoder.
fn sample_rate(&self) -> u16 {
self.as_ref().sample_rate()
}
}
/// A "stream" sound is a sound that has its data distributed across `SoundStreamBlock` tags,
/// one per each frame of a MovieClip. The sound is synced to the MovieClip's timeline, and will
/// stop/seek as the MovieClip stops/seeks.
///
/// In the Flash IDE, this is created by changing the "Sync" setting of the sound
/// to "Stream."
///
/// TODO: Add `current_frame`.
pub trait StreamDecoder: Decoder {}
/// The `StandardStreamDecoder` takes care of reading the audio data from `SoundStreamBlock` tags
/// and feeds it to the decoder.
struct StandardStreamDecoder {
/// The underlying decoder. The decoder will get its data from a `StreamTagReader`.
decoder: Box<dyn Decoder + Send>,
}
impl StandardStreamDecoder {
/// Constructs a new `StandardStreamDecoder`.
/// `swf_data` should be the tag data of the MovieClip that contains the stream.
fn new(stream_info: &swf::SoundStreamHead, swf_data: SwfSlice) -> Result<Self, Error> |
}
impl Decoder for StandardStreamDecoder {
fn num_channels(&self) -> u8 {
self.decoder.num_channels()
}
fn sample_rate(&self) -> u16 {
self.decoder.sample_rate()
}
}
impl Iterator for StandardStreamDecoder {
type Item = [i16; 2];
#[inline]
fn next(&mut self) -> Option<Self::Item> {
self.decoder.next()
}
}
/// Stream sounds encoded with ADPCM have an ADPCM header in each `SoundStreamBlock` tag, unlike
/// other compression formats that remain the same as if they were a single sound clip.
/// Therefore, we must recreate the decoder with each `SoundStreamBlock` to parse the additional
/// headers.
pub struct AdpcmStreamDecoder {
format: SoundFormat,
tag_reader: StreamTagReader,
decoder: AdpcmDecoder<Cursor<SwfSlice>>,
}
impl AdpcmStreamDecoder {
fn new(stream_info: &swf::SoundStreamHead, swf_data: SwfSlice) -> Result<Self, Error> {
let movie = swf_data.movie.clone();
let mut tag_reader = StreamTagReader::new(stream_info, swf_data);
let audio_data = tag_reader.next().unwrap_or_else(|| SwfSlice::empty(movie));
let decoder = AdpcmDecoder::new(
Cursor::new(audio_data),
stream_info.stream_format.is_stereo,
stream_info.stream_format.sample_rate,
)?;
Ok(Self {
format: stream_info.stream_format.clone(),
tag_reader,
decoder,
})
}
}
impl Decoder for AdpcmStreamDecoder {
fn num_channels(&self) -> u8 {
self.decoder.num_channels()
}
fn sample_rate(&self) -> u16 {
self.decoder.sample_rate()
}
}
impl Iterator for AdpcmStreamDecoder {
type Item = [i16; 2];
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if let Some(sample_frame) = self.decoder.next() {
// Return sample frames until the decoder has exhausted
// the SoundStreamBlock tag.
Some(sample_frame)
} else if let Some(audio_data) = self.tag_reader.next() {
// We've reached the end of the sound stream block tag, so
// read the next one and recreate the decoder.
// `AdpcmDecoder` reads the ADPCM header when it is created.
self.decoder = AdpcmDecoder::new(
Cursor::new(audio_data),
self.format.is_stereo,
self.format.sample_rate,
)
.ok()?;
self.decoder.next()
} else {
// No more SoundStreamBlock tags.
None
}
}
}
/// Makes a `StreamDecoder` for the given stream. `swf_data` should be the MovieClip's tag data.
/// Generally this will return a `StandardStreamDecoder`, except for ADPCM streams.
pub fn make_stream_decoder(
stream_info: &swf::SoundStreamHead,
swf_data: SwfSlice,
) -> Result<Box<dyn Decoder + Send>, Error> {
let decoder: Box<dyn Decoder + Send> =
if stream_info.stream_format.compression == AudioCompression::Adpcm {
Box::new(AdpcmStreamDecoder::new(stream_info, swf_data)?)
} else {
Box::new(StandardStreamDecoder::new(stream_info, swf_data)?)
};
Ok(decoder)
}
/// Adds seeking ability to decoders where the underlying stream is `std::io::Seek`.
pub trait SeekableDecoder: Decoder {
/// Resets the decoder to the beginning of the stream.
fn reset(&mut self);
/// Seeks to a specific sample frame.
fn seek_to_sample_frame(&mut self, frame: u32) {
// The default implementation simply resets the stream and steps through
// until the desired position.
// This will be slow for long sounds on heavy decoders.
self.reset();
for _ in 0..frame {
self.next();
}
}
}
/// `StreamTagReader` reads through the SWF tag data of a `MovieClip`, extracting
/// audio data from the `SoundStreamBlock` tags. It can be used as an `Iterator` that
/// will return consecutive slices of the underlying audio data.
struct StreamTagReader {
/// The tag data of the `MovieClip` that contains the streaming audio track.
swf_data: SwfSlice,
/// The audio playback position inside `swf_data`.
pos: usize,
/// The compressed audio data in the most recent `SoundStreamBlock` we've seen, returned by `Iterator::next`.
current_audio_data: SwfSlice,
/// The compression used by the audio data.
compression: AudioCompression,
/// The number of audio samples for use in future animation frames.
///
/// Only used in MP3 encoding to properly handle gaps in the audio track.
mp3_samples_buffered: i32,
/// The ideal number of audio samples in each animation frame, i.e. the sample rate divided by frame rate.
///
/// Only used in MP3 encoding to properly handle gaps in the audio track.
mp3_samples_per_block: u16,
}
impl StreamTagReader {
/// Builds a new `StreamTagReader` from the given SWF data.
/// `swf_data` should be the tag data of a MovieClip.
fn new(stream_info: &swf::SoundStreamHead, swf_data: SwfSlice) -> Self {
let current_audio_data = SwfSlice::empty(swf_data.movie.clone());
Self {
swf_data,
pos: 0,
compression: stream_info.stream_format.compression,
current_audio_data,
mp3_samples_buffered: 0,
mp3_samples_per_block: stream_info.num_samples_per_block,
}
}
}
impl Iterator for StreamTagReader {
type Item = SwfSlice;
fn next(&mut self) -> Option<Self::Item> {
let audio_data = &mut self.current_audio_data;
let compression = self.compression;
let mut found = false;
let swf_data = &self.swf_data;
loop {
let tag_callback =
|reader: &mut swf::read::Reader<'_>, tag_code, tag_len| match tag_code {
TagCode::SoundStreamBlock if !found => {
found = true;
let mut audio_block = &reader.get_ref()[..tag_len];
// MP3 audio blocks start with a header indicating sample count + seek offset (SWF19 p.184).
if compression == AudioCompression::Mp3 && audio_block.len() >= 4 {
// MP3s deliver audio in frames of 576 samples, which means we may have SoundStreamBlocks with
// lots of extra samples, followed by a block with 0 samples. Worse, there may be frames without
// blocks at all despite SWF19 saying this shouldn't happen. This may or may not indicate a gap
// in the audio depending on the number of empty frames.
// Keep a tally of the # of samples we've seen compared to the number of samples that will be
// played in each timeline frame. Only stop an MP3 sound if we've exhausted all of the samples.
// RESEARCHME: How does Flash Player actually determine when there is an audio gap or not?
// If an MP3 audio track has gaps, Flash Player will often play it out of sync (too early).
// Seems closely related to `stream_info.num_samples_per_block`.
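// Per SWF19, the 4-byte header is a little-endian u16 sample count followed by an
// i16 seek-samples value; it is stripped below so only raw MP3 frames reach the decoder.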
let num_samples =
u16::from_le_bytes(audio_block[..2].try_into().unwrap());
self.mp3_samples_buffered += i32::from(num_samples);
audio_block = &audio_block[4..];
}
*audio_data = swf_data.to_subslice(audio_block).unwrap();
Ok(())
}
TagCode::ShowFrame if compression == AudioCompression::Mp3 => {
self.mp3_samples_buffered -= i32::from(self.mp3_samples_per_block);
Ok(())
}
_ => Ok(()),
};
let mut reader = self.swf_data.read_from(self.pos as u64);
let _ = crate::tag_utils::decode_tags(&mut reader, tag_callback, TagCode::ShowFrame);
self.pos = reader.get_ref().as_ptr() as usize - swf_data.as_ref().as_ptr() as usize;
// If we hit a SoundStreamBlock within this frame, return it. Otherwise, the stream should end.
// The exception is MP3 streaming sounds, which will continue to play even when a few frames
// are missing SoundStreamBlock tags (see above).
if found {
break Some(self.current_audio_data.clone());
} else if compression != AudioCompression::Mp3
|| self.mp3_samples_buffered <= 0
|| reader.get_ref().is_empty()
{
break None;
}
}
}
}
/// A `Read` implementation that reads through SWF tags and returns slices of any
/// audio stream data from `SoundStreamBlock` tags.
impl Read for StreamTagReader {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
while self.current_audio_data.as_ref().is_empty() {
self.current_audio_data = if let Some(audio_data) = self.next() {
audio_data
} else {
return Ok(0);
}
}
let len = std::cmp::min(buf.len(), self.current_audio_data.as_ref().len());
buf[..len].copy_from_slice(&self.current_audio_data.as_ref()[..len]);
self.current_audio_data.start += len;
Ok(len)
}
}
| {
// Create a tag reader to get the audio data from SoundStreamBlock tags.
let tag_reader = StreamTagReader::new(stream_info, swf_data);
// Wrap the tag reader in the decoder.
let decoder = make_decoder(&stream_info.stream_format, tag_reader)?;
Ok(Self { decoder })
} |
bond.rs | use cosmwasm_std::{
attr, to_binary, Api, CanonicalAddr, CosmosMsg, Decimal, Deps, DepsMut, Env, MessageInfo,
Order, QueryRequest, Response, StdError, StdResult, Uint128, WasmMsg, WasmQuery,
};
use crate::state::{
pool_info_read, pool_info_store, read_config, read_state, rewards_read, rewards_store,
state_store, Config, PoolInfo, RewardInfo, State,
};
use cw20::Cw20ExecuteMsg;
use crate::querier::query_pylon_pool_balance;
use pylon_token::staking::{Cw20HookMsg as PylonCw20HookMsg, ExecuteMsg as PylonStakingExecuteMsg};
use spectrum_protocol::farm_helper::compute_deposit_time;
use spectrum_protocol::gov::{
BalanceResponse as SpecBalanceResponse, ExecuteMsg as SpecExecuteMsg, QueryMsg as SpecQueryMsg,
};
use spectrum_protocol::math::UDec128;
use spectrum_protocol::pylon_farm::{RewardInfoResponse, RewardInfoResponseItem};
#[allow(clippy::too_many_arguments)]
fn bond_internal(
deps: DepsMut,
env: Env,
sender_addr_raw: CanonicalAddr,
asset_token_raw: CanonicalAddr,
amount_to_auto: Uint128,
amount_to_stake: Uint128,
lp_balance: Uint128,
config: &Config,
reallocate: bool,
) -> StdResult<PoolInfo> {
let mut pool_info = pool_info_read(deps.storage).load(asset_token_raw.as_slice())?;
let mut state = read_state(deps.storage)?;
// update reward index; before changing share
if !pool_info.total_auto_bond_share.is_zero() || !pool_info.total_stake_bond_share.is_zero() {
deposit_spec_reward(deps.as_ref(), &env, &mut state, config, false)?;
spec_reward_to_pool(&state, &mut pool_info, lp_balance)?;
}
// withdraw reward to pending reward; before changing share
let mut reward_info = rewards_read(deps.storage, &sender_addr_raw)
.may_load(asset_token_raw.as_slice())?
.unwrap_or_else(|| RewardInfo {
farm_share_index: pool_info.farm_share_index,
auto_spec_share_index: pool_info.auto_spec_share_index,
stake_spec_share_index: pool_info.stake_spec_share_index,
auto_bond_share: Uint128::zero(),
stake_bond_share: Uint128::zero(),
spec_share: Uint128::zero(),
farm_share: Uint128::zero(),
deposit_amount: Uint128::zero(),
deposit_time: 0u64,
});
before_share_change(&pool_info, &mut reward_info);
if !reallocate &&
reward_info.deposit_amount.is_zero() &&
(!reward_info.auto_bond_share.is_zero() || !reward_info.stake_bond_share.is_zero()) {
let auto_bond_amount = pool_info.calc_user_auto_balance(lp_balance, reward_info.auto_bond_share);
let stake_bond_amount = pool_info.calc_user_stake_balance(reward_info.stake_bond_share);
reward_info.deposit_amount = auto_bond_amount + stake_bond_amount;
reward_info.deposit_time = env.block.time.seconds();
}
// increase bond_amount
let new_deposit_amount = increase_bond_amount(
&mut pool_info,
&mut reward_info,
if reallocate { Decimal::zero() } else { config.deposit_fee },
amount_to_auto,
amount_to_stake,
lp_balance,
)?;
if !reallocate {
let last_deposit_amount = reward_info.deposit_amount;
reward_info.deposit_amount = last_deposit_amount + new_deposit_amount;
reward_info.deposit_time = compute_deposit_time(last_deposit_amount, new_deposit_amount, reward_info.deposit_time, env.block.time.seconds())?;
}
rewards_store(deps.storage, &sender_addr_raw)
.save(asset_token_raw.as_slice(), &reward_info)?;
pool_info_store(deps.storage).save(asset_token_raw.as_slice(), &pool_info)?;
state_store(deps.storage).save(&state)?;
Ok(pool_info)
}
pub fn bond(
mut deps: DepsMut,
env: Env,
info: MessageInfo,
sender_addr: String,
asset_token: String,
amount: Uint128,
compound_rate: Option<Decimal>,
) -> StdResult<Response> {
let staker_addr_raw = deps.api.addr_canonicalize(&sender_addr)?;
let asset_token_raw = deps.api.addr_canonicalize(&asset_token)?;
let pool_info = pool_info_read(deps.storage).load(asset_token_raw.as_slice())?;
// only staking token contract can execute this message
if pool_info.staking_token != deps.api.addr_canonicalize(info.sender.as_str())? {
return Err(StdError::generic_err("unauthorized"));
}
let config = read_config(deps.storage)?;
let compound_rate = compound_rate.unwrap_or_else(Decimal::zero);
let amount_to_auto = amount * compound_rate;
let amount_to_stake = amount.checked_sub(amount_to_auto)?;
let lp_balance = query_pylon_pool_balance(
deps.as_ref(),
&config.pylon_staking,
&env.contract.address,
)?;
bond_internal(
deps.branch(),
env,
staker_addr_raw,
asset_token_raw.clone(),
amount_to_auto,
amount_to_stake,
lp_balance,
&config,
false,
)?;
stake_token(
deps.api,
config.pylon_staking,
pool_info.staking_token,
asset_token_raw,
amount,
)
}
pub fn deposit_farm_share(
state: &mut State,
pool_info: &mut PoolInfo,
amount: Uint128,
) -> StdResult<()> {
let mut new_total_share = Uint128::zero();
if !pool_info.total_stake_bond_share.is_zero() {
let new_share = state.calc_farm_share(amount, state.total_farm_amount);
let share_per_bond = Decimal::from_ratio(new_share, pool_info.total_stake_bond_share);
pool_info.farm_share_index = pool_info.farm_share_index + share_per_bond;
pool_info.farm_share += new_share;
new_total_share += new_share;
}
state.total_farm_share += new_total_share;
state.total_farm_amount += amount;
Ok(())
}
pub fn deposit_spec_reward(
deps: Deps,
env: &Env,
state: &mut State,
config: &Config,
query: bool,
) -> StdResult<SpecBalanceResponse> {
if state.total_weight == 0 {
return Ok(SpecBalanceResponse {
share: Uint128::zero(),
balance: Uint128::zero(),
locked_balance: vec![],
pools: vec![],
});
}
let staked: SpecBalanceResponse =
deps.querier.query(&QueryRequest::Wasm(WasmQuery::Smart {
contract_addr: deps.api.addr_humanize(&config.spectrum_gov)?.to_string(),
msg: to_binary(&SpecQueryMsg::balance {
address: env.contract.address.to_string(),
})?,
}))?;
let diff = staked.share.checked_sub(state.previous_spec_share);
let deposit_share = if query {
diff.unwrap_or_else(|_| Uint128::zero())
} else {
diff?
};
let share_per_weight = Decimal::from_ratio(deposit_share, state.total_weight);
state.spec_share_index = state.spec_share_index + share_per_weight;
state.previous_spec_share = staked.share;
Ok(staked)
}
fn spec_reward_to_pool(
state: &State,
pool_info: &mut PoolInfo,
lp_balance: Uint128,
) -> StdResult<()> {
if lp_balance.is_zero() {
return Ok(());
}
let share = (UDec128::from(state.spec_share_index) - pool_info.state_spec_share_index.into())
* Uint128::from(pool_info.weight as u128);
// pool_info.total_stake_bond_amount / lp_balance = ratio for auto-stake
// now stake_share is additional SPEC rewards for auto-stake
let stake_share = share.multiply_ratio(pool_info.total_stake_bond_amount, lp_balance);
// spec reward to staker is per stake bond share & auto bond share
if !stake_share.is_zero() {
let stake_share_per_bond = stake_share / pool_info.total_stake_bond_share;
pool_info.stake_spec_share_index =
pool_info.stake_spec_share_index + stake_share_per_bond.into();
}
// auto_share is additional SPEC rewards for auto-compound
let auto_share = share - stake_share;
if !auto_share.is_zero() {
let auto_share_per_bond = auto_share / pool_info.total_auto_bond_share;
pool_info.auto_spec_share_index =
pool_info.auto_spec_share_index + auto_share_per_bond.into();
}
pool_info.state_spec_share_index = state.spec_share_index;
Ok(())
}
// withdraw reward to pending reward
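// Accumulator pattern: newly accrued rewards equal the user's share count times the
// growth of the global per-share index since the user's last checkpoint, after which
// the checkpoint is advanced to the current index.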
fn before_share_change(pool_info: &PoolInfo, reward_info: &mut RewardInfo) {
let farm_share =
(pool_info.farm_share_index - reward_info.farm_share_index) * reward_info.stake_bond_share;
reward_info.farm_share += farm_share;
reward_info.farm_share_index = pool_info.farm_share_index;
let stake_spec_share = reward_info.stake_bond_share
* (pool_info.stake_spec_share_index - reward_info.stake_spec_share_index);
let auto_spec_share = reward_info.auto_bond_share
* (pool_info.auto_spec_share_index - reward_info.auto_spec_share_index);
let spec_share = stake_spec_share + auto_spec_share;
reward_info.spec_share += spec_share;
reward_info.stake_spec_share_index = pool_info.stake_spec_share_index;
reward_info.auto_spec_share_index = pool_info.auto_spec_share_index;
}
// increase share amount in pool and reward info
fn increase_bond_amount(
pool_info: &mut PoolInfo,
reward_info: &mut RewardInfo,
deposit_fee: Decimal,
amount_to_auto: Uint128,
amount_to_stake: Uint128,
lp_balance: Uint128,
) -> StdResult<Uint128> {
let (auto_bond_amount, stake_bond_amount, stake_bond_fee) = if deposit_fee.is_zero() {
(amount_to_auto, amount_to_stake, Uint128::zero())
} else {
// calculate target state
let amount = amount_to_auto + amount_to_stake;
let new_balance = lp_balance + amount;
let new_auto_bond_amount =
new_balance.checked_sub(pool_info.total_stake_bond_amount + amount_to_stake)?;
// calculate deposit fee; split based on auto balance & stake balance
let deposit_fee = amount * deposit_fee;
let auto_bond_fee = deposit_fee.multiply_ratio(new_auto_bond_amount, new_balance);
let stake_bond_fee = deposit_fee.checked_sub(auto_bond_fee)?;
// calculate amount after fee
let remaining_amount = amount.checked_sub(deposit_fee)?;
let auto_bond_amount = remaining_amount.multiply_ratio(amount_to_auto, amount);
let stake_bond_amount = remaining_amount.checked_sub(auto_bond_amount)?;
(auto_bond_amount, stake_bond_amount, stake_bond_fee)
};
// convert amount to share & update
let auto_bond_share = pool_info.calc_auto_bond_share(auto_bond_amount, lp_balance);
let stake_bond_share = pool_info.calc_stake_bond_share(stake_bond_amount);
pool_info.total_auto_bond_share += auto_bond_share;
pool_info.total_stake_bond_amount += stake_bond_amount + stake_bond_fee;
pool_info.total_stake_bond_share += stake_bond_share;
reward_info.auto_bond_share += auto_bond_share;
reward_info.stake_bond_share += stake_bond_share;
let new_auto_bond_amount = pool_info.calc_user_auto_balance(lp_balance + amount_to_auto + amount_to_stake, auto_bond_share);
let new_stake_bond_amount = pool_info.calc_user_stake_balance(stake_bond_share);
Ok(new_auto_bond_amount + new_stake_bond_amount)
}
// stake LP token to Pylon Staking
fn stake_token(
api: &dyn Api,
pylon_staking: CanonicalAddr,
staking_token: CanonicalAddr,
asset_token: CanonicalAddr,
amount: Uint128,
) -> StdResult<Response> {
let asset_token = api.addr_humanize(&asset_token)?;
let pylon_staking = api.addr_humanize(&pylon_staking)?;
let staking_token = api.addr_humanize(&staking_token)?;
Ok(Response::new()
.add_messages(vec![CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: staking_token.to_string(),
funds: vec![],
msg: to_binary(&Cw20ExecuteMsg::Send {
contract: pylon_staking.to_string(),
amount,
msg: to_binary(&PylonCw20HookMsg::Bond {})?,
})?,
})])
.add_attributes(vec![
attr("action", "bond"),
attr("staking_token", staking_token),
attr("asset_token", asset_token),
attr("amount", amount),
]))
}
#[allow(clippy::too_many_arguments)]
fn unbond_internal(
deps: DepsMut,
env: Env,
staker_addr_raw: CanonicalAddr,
asset_token_raw: CanonicalAddr,
amount: Uint128,
lp_balance: Uint128,
config: &Config,
reallocate: bool,
) -> StdResult<PoolInfo> {
let mut state = read_state(deps.storage)?;
let mut pool_info = pool_info_read(deps.storage).load(asset_token_raw.as_slice())?;
let mut reward_info =
rewards_read(deps.storage, &staker_addr_raw).load(asset_token_raw.as_slice())?;
let user_auto_balance =
pool_info.calc_user_auto_balance(lp_balance, reward_info.auto_bond_share);
let user_stake_balance = pool_info.calc_user_stake_balance(reward_info.stake_bond_share);
let user_balance = user_auto_balance + user_stake_balance;
if user_balance < amount {
return Err(StdError::generic_err("Cannot unbond more than bond amount"));
}
// distribute reward to pending reward; before changing share
deposit_spec_reward(deps.as_ref(), &env, &mut state, config, false)?;
spec_reward_to_pool(&state, &mut pool_info, lp_balance)?;
before_share_change(&pool_info, &mut reward_info);
// decrease bond amount
let auto_bond_amount = if reward_info.stake_bond_share.is_zero() {
amount
} else {
amount.multiply_ratio(user_auto_balance, user_balance)
};
let stake_bond_amount = amount.checked_sub(auto_bond_amount)?;
// add 1 to share, otherwise there will always be a fraction
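// Worked example (assuming the calc_* helpers floor): with lp_balance = 10 and
// total_auto_bond_share = 3, unbonding 5 LP gives share = 5 * 3 / 10 = 1, which
// redeems to only 1 * 10 / 3 = 3 LP, so the share count is bumped to 2.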
let mut auto_bond_share = pool_info.calc_auto_bond_share(auto_bond_amount, lp_balance);
if pool_info.calc_user_auto_balance(lp_balance, auto_bond_share) < auto_bond_amount {
auto_bond_share += Uint128::new(1u128);
}
let mut stake_bond_share = pool_info.calc_stake_bond_share(stake_bond_amount);
if pool_info.calc_user_stake_balance(stake_bond_share) < stake_bond_amount {
stake_bond_share += Uint128::new(1u128);
}
pool_info.total_auto_bond_share = pool_info
.total_auto_bond_share
.checked_sub(auto_bond_share)?;
pool_info.total_stake_bond_amount = pool_info
.total_stake_bond_amount
.checked_sub(stake_bond_amount)?;
pool_info.total_stake_bond_share = pool_info
.total_stake_bond_share
.checked_sub(stake_bond_share)?;
reward_info.auto_bond_share = reward_info.auto_bond_share.checked_sub(auto_bond_share)?;
reward_info.stake_bond_share = reward_info.stake_bond_share.checked_sub(stake_bond_share)?;
if !reallocate {
reward_info.deposit_amount = reward_info.deposit_amount.multiply_ratio(user_balance.checked_sub(amount)?, user_balance);
}
// update rewards info
if reward_info.spec_share.is_zero()
&& reward_info.farm_share.is_zero()
&& reward_info.auto_bond_share.is_zero()
&& reward_info.stake_bond_share.is_zero()
&& !reallocate
{
rewards_store(deps.storage, &staker_addr_raw).remove(asset_token_raw.as_slice());
} else {
rewards_store(deps.storage, &staker_addr_raw)
.save(asset_token_raw.as_slice(), &reward_info)?;
}
// update pool info
pool_info_store(deps.storage).save(asset_token_raw.as_slice(), &pool_info)?;
state_store(deps.storage).save(&state)?;
Ok(pool_info)
}
pub fn unbond(
mut deps: DepsMut,
env: Env,
info: MessageInfo,
asset_token: String,
amount: Uint128,
) -> StdResult<Response> {
let staker_addr_raw = deps.api.addr_canonicalize(info.sender.as_str())?;
let asset_token_raw = deps.api.addr_canonicalize(&asset_token)?;
let config = read_config(deps.storage)?;
let lp_balance = query_pylon_pool_balance(
deps.as_ref(),
&config.pylon_staking,
&env.contract.address,
)?;
let pool_info = unbond_internal(
deps.branch(),
env,
staker_addr_raw,
asset_token_raw,
amount,
lp_balance,
&config,
false,
)?;
Ok(Response::new()
.add_messages(vec![
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps.api.addr_humanize(&config.pylon_staking)?.to_string(),
funds: vec![],
msg: to_binary(&PylonStakingExecuteMsg::Unbond { amount })?,
}),
CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps
.api
.addr_humanize(&pool_info.staking_token)?
.to_string(),
msg: to_binary(&Cw20ExecuteMsg::Transfer {
recipient: info.sender.to_string(),
amount,
})?,
funds: vec![],
}),
])
.add_attributes(vec![
attr("action", "unbond"),
attr("staker_addr", info.sender),
attr("asset_token", asset_token),
attr("amount", amount),
]))
}
pub fn | (
mut deps: DepsMut,
env: Env,
info: MessageInfo,
asset_token: String,
amount_to_auto: Uint128,
amount_to_stake: Uint128,
) -> StdResult<Response> {
let config = read_config(deps.storage)?;
let staker_addr_raw = deps.api.addr_canonicalize(info.sender.as_str())?;
let asset_token_raw = deps.api.addr_canonicalize(&asset_token)?;
let amount = amount_to_auto + amount_to_stake;
let lp_balance = query_pylon_pool_balance(
deps.as_ref(),
&config.pylon_staking,
&env.contract.address,
)?;
unbond_internal(
deps.branch(),
env.clone(),
staker_addr_raw.clone(),
asset_token_raw.clone(),
amount,
lp_balance,
&config,
true,
)?;
bond_internal(
deps,
env,
staker_addr_raw,
asset_token_raw,
amount_to_auto,
amount_to_stake,
lp_balance.checked_sub(amount)?,
&config,
true,
)?;
Ok(Response::new().add_attributes(vec![
attr("action", "update_bond"),
attr("asset_token", asset_token),
attr("amount_to_auto", amount_to_auto),
attr("amount_to_stake", amount_to_stake),
]))
}
pub fn withdraw(
mut deps: DepsMut,
env: Env,
info: MessageInfo,
asset_token: Option<String>,
spec_amount: Option<Uint128>,
farm_amount: Option<Uint128>,
) -> StdResult<Response> {
let staker_addr = deps.api.addr_canonicalize(info.sender.as_str())?;
let asset_token = asset_token.map(|a| deps.api.addr_canonicalize(&a).unwrap());
let mut state = read_state(deps.storage)?;
// update pending reward; before withdraw
let config = read_config(deps.storage)?;
let spec_staked =
deposit_spec_reward(deps.as_ref(), &env, &mut state, &config, false)?;
let (spec_amount, spec_share, farm_amount, farm_share) = withdraw_reward(
deps.branch(),
env,
&config,
&state,
&staker_addr,
&asset_token,
&spec_staked,
spec_amount,
farm_amount,
)?;
state.previous_spec_share = state.previous_spec_share.checked_sub(spec_share)?;
state.total_farm_share = state.total_farm_share.checked_sub(farm_share)?;
state.total_farm_amount = state.total_farm_amount.checked_sub(farm_amount)?;
state_store(deps.storage).save(&state)?;
let mut messages: Vec<CosmosMsg> = vec![];
if !spec_amount.is_zero() {
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps.api.addr_humanize(&config.spectrum_gov)?.to_string(),
msg: to_binary(&SpecExecuteMsg::withdraw {
amount: Some(spec_amount),
days: None,
})?,
funds: vec![],
}));
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps.api.addr_humanize(&config.spectrum_token)?.to_string(),
msg: to_binary(&Cw20ExecuteMsg::Transfer {
recipient: info.sender.to_string(),
amount: spec_amount,
})?,
funds: vec![],
}));
}
if !farm_amount.is_zero() {
messages.push(CosmosMsg::Wasm(WasmMsg::Execute {
contract_addr: deps.api.addr_humanize(&config.pylon_token)?.to_string(),
msg: to_binary(&Cw20ExecuteMsg::Transfer {
recipient: info.sender.to_string(),
amount: farm_amount,
})?,
funds: vec![],
}));
}
Ok(Response::new().add_messages(messages).add_attributes(vec![
attr("action", "withdraw"),
attr("farm_amount", farm_amount),
attr("spec_amount", spec_amount),
]))
}
#[allow(clippy::too_many_arguments)]
fn withdraw_reward(
deps: DepsMut,
env: Env,
config: &Config,
state: &State,
staker_addr: &CanonicalAddr,
asset_token: &Option<CanonicalAddr>,
spec_staked: &SpecBalanceResponse,
mut request_spec_amount: Option<Uint128>,
mut request_farm_amount: Option<Uint128>,
) -> StdResult<(Uint128, Uint128, Uint128, Uint128)> {
let rewards_bucket = rewards_read(deps.storage, staker_addr);
// single reward withdraw; or all rewards
let reward_pairs: Vec<(CanonicalAddr, RewardInfo)>;
if let Some(asset_token) = asset_token {
let key = asset_token.as_slice();
let reward_info = rewards_bucket.may_load(key)?;
reward_pairs = if let Some(reward_info) = reward_info {
vec![(asset_token.clone(), reward_info)]
} else {
vec![]
};
} else {
reward_pairs = rewards_bucket
.range(None, None, Order::Ascending)
.map(|item| {
let (k, v) = item?;
Ok((CanonicalAddr::from(k), v))
})
.collect::<StdResult<Vec<(CanonicalAddr, RewardInfo)>>>()?;
}
let lp_balance = query_pylon_pool_balance(
deps.as_ref(),
&config.pylon_staking,
&env.contract.address,
)?;
let mut spec_amount = Uint128::zero();
let mut spec_share = Uint128::zero();
let mut farm_amount = Uint128::zero();
let mut farm_share = Uint128::zero();
for reward_pair in reward_pairs {
let (asset_token_raw, mut reward_info) = reward_pair;
// withdraw reward to pending reward
let key = asset_token_raw.as_slice();
let mut pool_info = pool_info_read(deps.storage).load(key)?;
spec_reward_to_pool(state, &mut pool_info, lp_balance)?;
before_share_change(&pool_info, &mut reward_info);
// update withdraw
let (asset_farm_share, asset_farm_amount) = if let Some(request_amount) = request_farm_amount {
let avail_amount = calc_farm_balance(reward_info.farm_share, state.total_farm_amount, state.total_farm_share);
let asset_farm_amount = if request_amount > avail_amount { avail_amount } else { request_amount };
let mut asset_farm_share = calc_farm_share(asset_farm_amount, state.total_farm_amount, state.total_farm_share);
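            // Integer division can round the share down; bump it by 1 below so
            // the burned share fully covers the requested withdrawal amount.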
if calc_farm_balance(asset_farm_share, state.total_farm_amount, state.total_farm_share) < asset_farm_amount {
asset_farm_share += Uint128::new(1u128);
}
request_farm_amount = Some(request_amount.checked_sub(asset_farm_amount)?);
(asset_farm_share, asset_farm_amount)
} else {
(reward_info.farm_share, calc_farm_balance(
reward_info.farm_share,
state.total_farm_amount,
state.total_farm_share,
))
};
farm_share += asset_farm_share;
farm_amount += asset_farm_amount;
let (asset_spec_share, asset_spec_amount) = if let Some(request_amount) = request_spec_amount {
let avail_amount = calc_spec_balance(reward_info.spec_share, spec_staked);
let asset_spec_amount = if request_amount > avail_amount { avail_amount } else { request_amount };
let mut asset_spec_share = calc_spec_share(asset_spec_amount, spec_staked);
if calc_spec_balance(asset_spec_share, spec_staked) < asset_spec_amount {
asset_spec_share += Uint128::new(1u128);
}
request_spec_amount = Some(request_amount.checked_sub(asset_spec_amount)?);
(asset_spec_share, asset_spec_amount)
} else {
(reward_info.spec_share, calc_spec_balance(reward_info.spec_share, spec_staked))
};
spec_share += asset_spec_share;
spec_amount += asset_spec_amount;
pool_info.farm_share = pool_info.farm_share.checked_sub(asset_farm_share)?;
reward_info.farm_share = reward_info.farm_share.checked_sub(asset_farm_share)?;
reward_info.spec_share = reward_info.spec_share.checked_sub(asset_spec_share)?;
// update rewards info
pool_info_store(deps.storage).save(key, &pool_info)?;
if reward_info.spec_share.is_zero()
&& reward_info.farm_share.is_zero()
&& reward_info.auto_bond_share.is_zero()
&& reward_info.stake_bond_share.is_zero()
{
rewards_store(deps.storage, staker_addr).remove(key);
} else {
rewards_store(deps.storage, staker_addr).save(key, &reward_info)?;
}
}
if let Some(request_amount) = request_farm_amount {
if !request_amount.is_zero() {
return Err(StdError::generic_err("Cannot withdraw more than remaining amount"));
}
}
if let Some(request_amount) = request_spec_amount {
if !request_amount.is_zero() {
return Err(StdError::generic_err("Cannot withdraw more than remaining amount"));
}
}
Ok((spec_amount, spec_share, farm_amount, farm_share))
}
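// Share <-> amount conversions: shares are a pro-rata claim on the pool, so
// amount = total_balance * share / total_share, and the inverse mints shares.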
fn calc_farm_balance(share: Uint128, total_balance: Uint128, total_farm_share: Uint128) -> Uint128 {
if total_farm_share.is_zero() {
Uint128::zero()
} else {
total_balance.multiply_ratio(share, total_farm_share)
}
}
fn calc_farm_share(amount: Uint128, total_balance: Uint128, total_farm_share: Uint128) -> Uint128 {
if total_balance.is_zero() {
amount
} else {
amount.multiply_ratio(total_farm_share, total_balance)
}
}
fn calc_spec_balance(share: Uint128, staked: &SpecBalanceResponse) -> Uint128 {
if staked.share.is_zero() {
Uint128::zero()
} else {
share.multiply_ratio(staked.balance, staked.share)
}
}
fn calc_spec_share(amount: Uint128, staked: &SpecBalanceResponse) -> Uint128 {
    if staked.balance.is_zero() {
        amount
    } else {
        amount.multiply_ratio(staked.share, staked.balance)
    }
}
}
pub fn query_reward_info(
deps: Deps,
env: Env,
staker_addr: String,
) -> StdResult<RewardInfoResponse> {
let staker_addr_raw = deps.api.addr_canonicalize(&staker_addr)?;
let mut state = read_state(deps.storage)?;
let config = read_config(deps.storage)?;
let spec_staked = deposit_spec_reward(deps, &env, &mut state, &config, true)?;
let reward_infos = read_reward_infos(
deps,
env,
&config,
&state,
&staker_addr_raw,
&spec_staked,
)?;
Ok(RewardInfoResponse {
staker_addr,
reward_infos,
})
}
fn read_reward_infos(
deps: Deps,
env: Env,
config: &Config,
state: &State,
staker_addr: &CanonicalAddr,
spec_staked: &SpecBalanceResponse,
) -> StdResult<Vec<RewardInfoResponseItem>> {
let rewards_bucket = rewards_read(deps.storage, staker_addr);
let reward_pair = rewards_bucket
.range(None, None, Order::Ascending)
.map(|item| {
let (k, v) = item?;
Ok((CanonicalAddr::from(k), v))
})
.collect::<StdResult<Vec<(CanonicalAddr, RewardInfo)>>>()?;
let lp_balance =
query_pylon_pool_balance(deps, &config.pylon_staking, &env.contract.address)?;
let bucket = pool_info_read(deps.storage);
let reward_infos: Vec<RewardInfoResponseItem> = reward_pair
.into_iter()
.map(|(asset_token_raw, reward_info)| {
let mut pool_info = bucket.load(asset_token_raw.as_slice())?;
// update pending rewards
let mut reward_info = reward_info;
let farm_share_index = reward_info.farm_share_index;
let auto_spec_index = reward_info.auto_spec_share_index;
let stake_spec_index = reward_info.stake_spec_share_index;
let has_deposit_amount = !reward_info.deposit_amount.is_zero();
spec_reward_to_pool(state, &mut pool_info, lp_balance)?;
before_share_change(&pool_info, &mut reward_info);
let auto_bond_amount =
pool_info.calc_user_auto_balance(lp_balance, reward_info.auto_bond_share);
let stake_bond_amount = pool_info.calc_user_stake_balance(reward_info.stake_bond_share);
Ok(RewardInfoResponseItem {
asset_token: deps.api.addr_humanize(&asset_token_raw)?.to_string(),
farm_share_index,
auto_spec_share_index: auto_spec_index,
stake_spec_share_index: stake_spec_index,
bond_amount: auto_bond_amount + stake_bond_amount,
auto_bond_amount,
stake_bond_amount,
farm_share: reward_info.farm_share,
auto_bond_share: reward_info.auto_bond_share,
stake_bond_share: reward_info.stake_bond_share,
spec_share: reward_info.spec_share,
pending_spec_reward: calc_spec_balance(reward_info.spec_share, spec_staked),
pending_farm_reward: calc_farm_balance(
reward_info.farm_share,
state.total_farm_amount,
state.total_farm_share,
),
deposit_amount: if has_deposit_amount {
Some(reward_info.deposit_amount)
} else {
None
},
deposit_time: if has_deposit_amount {
Some(reward_info.deposit_time)
} else {
None
},
})
})
.collect::<StdResult<Vec<RewardInfoResponseItem>>>()?;
Ok(reward_infos)
}
| update_bond |
SortKeyPage.js |
import React, { Component } from 'react';
import {
StyleSheet,
Text,
View,
Image,
TouchableHighlight,
TouchableOpacity,
Alert,
DeviceEventEmitter
} from 'react-native';
import LanguageDao ,{FLAG_LANGUAGE}from '../../dao/LanguageDao'
import ArrayUtils from '../../util/ArrayUtils'
import SortableListView from 'react-native-sortable-listview'
import NavigationBar from '../../common/NavigationBar'
import ViewUtils from '../../util/ViewUtils'
import {ACTION_HOME,FLAG_TAB} from '../Entry/HomePage'
export default class SortKeyPage extends Component {
constructor(props){
super(props);
this.dataArray = [];
this.sortResultArray=[];
this.originalCheckedArray =[];
this.state={
checkedArray:[],
hasSorted:false
}
}
componentDidMount() {
this.languageDao = new LanguageDao(this.props.flag);
this.loadData();
}
loadData(){
this.languageDao.fetch()
.then(result=>{
this.getCheckedItems(result);
})
.catch(error=>{
console.log(error);
})
}
getCheckedItems(result){
this.dataArray = result;
let checkedArray = [];
for(let i = 0,len=result.length;i<len;i++){
let data = result[i];
if(data.checked)checkedArray.push(data);
}
this.setState({
checkedArray:checkedArray,
})
this.originalCheckedArray = ArrayUtils.clone(checkedArray);
}
goBack(){
        // check whether the two arrays contain the same elements (even after reordering)
if(ArrayUtils.isEqual(this.originalCheckedArray,this.state.checkedArray)){
this.props.navigator.pop();
return;
}else {
            Alert.alert(
                'Notice',
                'The order has changed. Save the changes?',
                [
                    {text: "Don't save", onPress: () => {
                        this.props.navigator.pop();
                    }, style: 'cancel'},
                    {text: 'Save', onPress: () => {
                        this.onSave(true);
                    }}
                ]
            )
}
}
onSave(isChecked){
if(!isChecked && ArrayUtils.isEqual(this.originalCheckedArray,this.state.checkedArray)) {
this.props.navigator.pop();
return;
}
this.getSortResult();
this.languageDao.save(this.sortResultArray);
        // why isn't this needed here?
this.props.navigator.pop();
        const jumpToTab = this.props.flag === FLAG_LANGUAGE.flag_key ? FLAG_TAB.flag_popularTab : FLAG_TAB.flag_trendingTab;
DeviceEventEmitter.emit('ACTION_HOME',ACTION_HOME.A_RESTART,jumpToTab)
}
getSortResult(){
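        // Rebuild the full data array: each originally-checked item is replaced,
        // at its original position, by the item holding the same rank in the
        // re-ordered checkedArray; unchecked items stay where they were.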
this.sortResultArray = ArrayUtils.clone(this.dataArray);
for(let i =0,l=this.originalCheckedArray.length;i<l;i++){
let item = this.originalCheckedArray[i];
let index = this.dataArray.indexOf(item);
this.sortResultArray.splice(index,1,this.state.checkedArray[i]);
}
}
render(){
        let title = this.props.flag === FLAG_LANGUAGE.flag_language ? 'Sort Languages' : 'Sort Tags';
let rightButton = {
            title: 'Save',
handler:()=>this.onSave(),
tintColor:'white',
};
return <View style={styles.container}>
<NavigationBar
title={title}
style={this.props.theme.styles.navBar}
leftButton={ViewUtils.getLeftButton(()=>this.goBack())}
rightButton={rightButton}
/>
<SortableListView
data={this.state.checkedArray}
order={Object.keys(this.state.checkedArray)}
onRowMoved={(e) => {
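                    // move the dragged row from index e.from to index e.to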
this.state.checkedArray.splice(e.to, 0, this.state.checkedArray.splice(e.from, 1)[0]);
this.forceUpdate();
this.setState({
hasSorted:true,
})
}}
renderRow={row => <SortCell data={row} {...this.props}/>}
/>
</View>
}
}
class SortCell extends Component{
render(){
return <TouchableHighlight
underlayColor={'#eee'}
dela | ss={500}
style={styles.item}
{...this.props.sortHandlers}
>
<View style={styles.row}>
<Image style={[styles.imageStyle,this.props.theme.styles.tabBarSelectedIcon]} source={require('../../../res/images/ic_sort.png')}></Image>
<Text>{this.props.data.name}</Text>
</View>
</TouchableHighlight>
}
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: 'white',
},
item:{
padding:15,
backgroundColor:'#F8F8F8',
borderBottomWidth:1,
borderColor:'#eee'
},
row:{
flexDirection:'row',
alignItems:'center'
},
imageStyle:{
width:18,
height:18,
marginRight:10
},
title:{
fontSize:20,
color:'white'
},
}); | yLongPre |
requirements.py | """Validate requirements."""
from __future__ import annotations
from collections import deque
import json
import operator
import os
import re
import subprocess
import sys
from awesomeversion import AwesomeVersion, AwesomeVersionStrategy
from stdlib_list import stdlib_list
from tqdm import tqdm
from homeassistant.const import REQUIRED_PYTHON_VER
import homeassistant.util.package as pkg_util
from script.gen_requirements_all import COMMENT_REQUIREMENTS
from .model import Config, Integration
IGNORE_PACKAGES = {
commented.lower().replace("_", "-") for commented in COMMENT_REQUIREMENTS
}
PACKAGE_REGEX = re.compile(r"^(?:--.+\s)?([-_\.\w\d]+).*==.+$")
PIP_REGEX = re.compile(r"^(--.+\s)?([-_\.\w\d]+.*(?:==|>=|<=|~=|!=|<|>|===)?.*$)")
SUPPORTED_PYTHON_TUPLES = [
REQUIRED_PYTHON_VER[:2],
tuple(map(operator.add, REQUIRED_PYTHON_VER, (0, 1, 0)))[:2],
]
SUPPORTED_PYTHON_VERSIONS = [
".".join(map(str, version_tuple)) for version_tuple in SUPPORTED_PYTHON_TUPLES
]
STD_LIBS = {version: set(stdlib_list(version)) for version in SUPPORTED_PYTHON_VERSIONS}
PIPDEPTREE_CACHE = None
IGNORE_VIOLATIONS = {
# Still has standard library requirements.
"acmeda",
"blink",
"ezviz",
"hdmi_cec",
"juicenet",
"lupusec",
"rainbird",
"slide",
"suez_water",
}
def normalize_package_name(requirement: str) -> str:
"""Return a normalized package name from a requirement string."""
match = PACKAGE_REGEX.search(requirement)
if not match:
return ""
# pipdeptree needs lowercase and dash instead of underscore as separator
package = match.group(1).lower().replace("_", "-")
return package
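# e.g. normalize_package_name("Typing_Extensions==3.7.4") returns "typing-extensions"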
def | (integrations: dict[str, Integration], config: Config):
"""Handle requirements for integrations."""
# Check if we are doing format-only validation.
if not config.requirements:
for integration in integrations.values():
validate_requirements_format(integration)
return
ensure_cache()
# check for incompatible requirements
disable_tqdm = config.specific_integrations or os.environ.get("CI", False)
for integration in tqdm(integrations.values(), disable=disable_tqdm):
if not integration.manifest:
continue
validate_requirements(integration)
def validate_requirements_format(integration: Integration) -> bool:
"""Validate requirements format.
Returns if valid.
"""
start_errors = len(integration.errors)
for req in integration.requirements:
if " " in req:
integration.add_error(
"requirements",
f'Requirement "{req}" contains a space',
)
continue
pkg, sep, version = req.partition("==")
if not sep and integration.core:
integration.add_error(
"requirements",
                f'Requirement {req} needs to be pinned "<pkg name>==<version>".',
)
continue
if AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN:
integration.add_error(
"requirements",
f"Unable to parse package version ({version}) for {pkg}.",
)
continue
return len(integration.errors) == start_errors
def validate_requirements(integration: Integration):
"""Validate requirements."""
if not validate_requirements_format(integration):
return
# Some integrations have not been fixed yet so are allowed to have violations.
if integration.domain in IGNORE_VIOLATIONS:
return
integration_requirements = set()
integration_packages = set()
for req in integration.requirements:
package = normalize_package_name(req)
if not package:
integration.add_error(
"requirements",
f"Failed to normalize package name from requirement {req}",
)
return
if package in IGNORE_PACKAGES:
continue
integration_requirements.add(req)
integration_packages.add(package)
if integration.disabled:
return
install_ok = install_requirements(integration, integration_requirements)
if not install_ok:
return
all_integration_requirements = get_requirements(integration, integration_packages)
if integration_requirements and not all_integration_requirements:
integration.add_error(
"requirements",
f"Failed to resolve requirements {integration_requirements}",
)
return
# Check for requirements incompatible with standard library.
for version, std_libs in STD_LIBS.items():
for req in all_integration_requirements:
if req in std_libs:
integration.add_error(
"requirements",
f"Package {req} is not compatible with Python {version} standard library",
)
def ensure_cache():
"""Ensure we have a cache of pipdeptree.
    {
        "flake8-docstrings": {
            "key": "flake8-docstrings",
            "package_name": "flake8-docstrings",
            "installed_version": "1.5.0",
            "dependencies": {"flake8"}
        }
    }
"""
global PIPDEPTREE_CACHE
if PIPDEPTREE_CACHE is not None:
return
cache = {}
for item in json.loads(
subprocess.run(
["pipdeptree", "-w", "silence", "--json"],
check=True,
capture_output=True,
text=True,
).stdout
):
cache[item["package"]["key"]] = {
**item["package"],
"dependencies": {dep["key"] for dep in item["dependencies"]},
}
PIPDEPTREE_CACHE = cache
def get_requirements(integration: Integration, packages: set[str]) -> set[str]:
"""Return all (recursively) requirements for an integration."""
ensure_cache()
all_requirements = set()
to_check = deque(packages)
while to_check:
package = to_check.popleft()
if package in all_requirements:
continue
all_requirements.add(package)
item = PIPDEPTREE_CACHE.get(package)
if item is None:
# Only warn if direct dependencies could not be resolved
if package in packages:
integration.add_error(
"requirements", f"Failed to resolve requirements for {package}"
)
continue
to_check.extend(item["dependencies"])
return all_requirements
def install_requirements(integration: Integration, requirements: set[str]) -> bool:
"""Install integration requirements.
Return True if successful.
"""
global PIPDEPTREE_CACHE
ensure_cache()
for req in requirements:
match = PIP_REGEX.search(req)
if not match:
integration.add_error(
"requirements",
f"Failed to parse requirement {req} before installation",
)
continue
install_args = match.group(1)
requirement_arg = match.group(2)
is_installed = False
normalized = normalize_package_name(requirement_arg)
if normalized and "==" in requirement_arg:
ver = requirement_arg.split("==")[-1]
item = PIPDEPTREE_CACHE.get(normalized)
is_installed = item and item["installed_version"] == ver
if not is_installed:
try:
is_installed = pkg_util.is_installed(req)
except ValueError:
is_installed = False
if is_installed:
continue
args = [sys.executable, "-m", "pip", "install", "--quiet"]
if install_args:
args.append(install_args)
args.append(requirement_arg)
try:
result = subprocess.run(args, check=True, capture_output=True, text=True)
except subprocess.SubprocessError:
integration.add_error(
"requirements",
f"Requirement {req} failed to install",
)
else:
# Clear the pipdeptree cache if something got installed
if "Successfully installed" in result.stdout:
PIPDEPTREE_CACHE = None
if integration.errors:
return False
return True
| validate |
datastoreio.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A connector for reading from and writing to Google Cloud Datastore.
Please use this module for Datastore I/O since
``apache_beam.io.gcp.datastore.v1.datastoreio`` will be deprecated in the
next Beam major release.
This module uses the newer google-cloud-datastore package. Its API was different
enough to require extensive changes to this and associated modules.
This module is experimental; no backwards-compatibility guarantees are made.
"""
from __future__ import absolute_import
from __future__ import division
import logging
import time
from builtins import round
from apache_beam import typehints
from apache_beam.io.gcp.datastore.v1 import util
from apache_beam.io.gcp.datastore.v1.adaptive_throttler import AdaptiveThrottler
from apache_beam.io.gcp.datastore.v1new import helper
from apache_beam.io.gcp.datastore.v1new import query_splitter
from apache_beam.io.gcp.datastore.v1new import types
from apache_beam.metrics.metric import Metrics
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import Reshuffle
from apache_beam.utils import retry
__all__ = ['ReadFromDatastore', 'WriteToDatastore', 'DeleteFromDatastore']
@typehints.with_output_types(types.Entity)
class ReadFromDatastore(PTransform):
"""A ``PTransform`` for querying Google Cloud Datastore.
To read a ``PCollection[Entity]`` from a Cloud Datastore ``Query``, use
the ``ReadFromDatastore`` transform by providing a `query` to
read from. The project and optional namespace are set in the query.
The query will be split into multiple queries to allow for parallelism. The
degree of parallelism is automatically determined, but can be overridden by
setting `num_splits` to a value of 1 or greater.
Note: Normally, a runner will read from Cloud Datastore in parallel across
many workers. However, when the `query` is configured with a `limit` or if the
query contains inequality filters like `GREATER_THAN, LESS_THAN` etc., then
all the returned results will be read by a single worker in order to ensure
correct data. Since data is read from a single worker, this could have
significant impact on the performance of the job. Using a
:class:`~apache_beam.transforms.util.Reshuffle` transform after the read in
this case might be beneficial for parallelizing work across workers.
The semantics for query splitting is defined below:
1. If `num_splits` is equal to 0, then the number of splits will be chosen
dynamically at runtime based on the query data size.
2. Any value of `num_splits` greater than
`ReadFromDatastore._NUM_QUERY_SPLITS_MAX` will be capped at that value.
3. If the `query` has a user limit set, or contains inequality filters, then
`num_splits` will be ignored and no split will be performed.
4. Under certain cases Cloud Datastore is unable to split query to the
requested number of splits. In such cases we just use whatever Cloud
Datastore returns.
See https://developers.google.com/datastore/ for more details on Google Cloud
Datastore.
"""
# An upper bound on the number of splits for a query.
_NUM_QUERY_SPLITS_MAX = 50000
# A lower bound on the number of splits for a query. This is to ensure that
# we parallelize the query even when Datastore statistics are not available.
_NUM_QUERY_SPLITS_MIN = 12
# Default bundle size of 64MB.
_DEFAULT_BUNDLE_SIZE_BYTES = 64 * 1024 * 1024
def __init__(self, query, num_splits=0):
"""Initialize the `ReadFromDatastore` transform.
This transform outputs elements of type
:class:`~apache_beam.io.gcp.datastore.v1new.types.Entity`.
Args:
query: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Query`) query
used to fetch entities.
num_splits: (:class:`int`) (optional) Number of splits for the query.
"""
super(ReadFromDatastore, self).__init__()
    if not query:
      raise ValueError("query cannot be empty")
    if not query.project:
      raise ValueError("query.project cannot be empty")
    if num_splits < 0:
      raise ValueError("num_splits must be greater than or equal to 0")
self._project = query.project
# using _namespace conflicts with DisplayData._namespace
self._datastore_namespace = query.namespace
self._query = query
self._num_splits = num_splits
def expand(self, pcoll):
    # This is a composite transform that involves the following:
# 1. Create a singleton of the user provided `query` and apply a ``ParDo``
# that splits the query into `num_splits` queries if possible.
#
# If the value of `num_splits` is 0, the number of splits will be
# computed dynamically based on the size of the data for the `query`.
#
# 2. The resulting ``PCollection`` is sharded across workers using a
# ``Reshuffle`` operation.
#
# 3. In the third step, a ``ParDo`` reads entities for each query and
# outputs a ``PCollection[Entity]``.
return (pcoll.pipeline
| 'UserQuery' >> Create([self._query])
| 'SplitQuery' >> ParDo(ReadFromDatastore._SplitQueryFn(
self._num_splits))
| Reshuffle()
| 'Read' >> ParDo(ReadFromDatastore._QueryFn()))
def display_data(self):
disp_data = {'project': self._query.project,
'query': str(self._query),
'num_splits': self._num_splits}
if self._datastore_namespace is not None:
disp_data['namespace'] = self._datastore_namespace
return disp_data
@typehints.with_input_types(types.Query)
@typehints.with_output_types(types.Query)
class _SplitQueryFn(DoFn):
"""A `DoFn` that splits a given query into multiple sub-queries."""
def __init__(self, num_splits):
super(ReadFromDatastore._SplitQueryFn, self).__init__()
self._num_splits = num_splits
def process(self, query, *args, **kwargs):
client = helper.get_client(query.project, query.namespace)
try:
# Short circuit estimating num_splits if split is not possible.
query_splitter.validate_split(query)
if self._num_splits == 0:
estimated_num_splits = self.get_estimated_num_splits(client, query)
else:
estimated_num_splits = self._num_splits
logging.info("Splitting the query into %d splits", estimated_num_splits)
query_splits = query_splitter.get_splits(
client, query, estimated_num_splits)
except query_splitter.QuerySplitterError:
logging.info("Unable to parallelize the given query: %s", query,
exc_info=True)
query_splits = [query]
return query_splits
def display_data(self):
disp_data = {'num_splits': self._num_splits}
return disp_data
@staticmethod
def query_latest_statistics_timestamp(client):
"""Fetches the latest timestamp of statistics from Cloud Datastore.
Cloud Datastore system tables with statistics are periodically updated.
This method fetches the latest timestamp (in microseconds) of statistics
update using the `__Stat_Total__` table.
"""
if client.namespace is None:
kind = '__Stat_Total__'
else:
kind = '__Stat_Ns_Total__'
query = client.query(kind=kind, order=["-timestamp", ])
entities = list(query.fetch(limit=1))
if not entities:
raise RuntimeError("Datastore total statistics unavailable.")
return entities[0]['timestamp']
@staticmethod
def get_estimated_size_bytes(client, query):
"""Get the estimated size of the data returned by this instance's query.
Cloud Datastore provides no way to get a good estimate of how large the
result of a query is going to be. Hence we use the __Stat_Kind__ system
table to get size of the entire kind as an approximate estimate, assuming
exactly 1 kind is specified in the query.
See https://cloud.google.com/datastore/docs/concepts/stats.
"""
kind_name = query.kind
latest_timestamp = (
ReadFromDatastore._SplitQueryFn
.query_latest_statistics_timestamp(client))
logging.info('Latest stats timestamp for kind %s is %s',
kind_name, latest_timestamp)
if client.namespace is None:
kind = '__Stat_Kind__'
else:
kind = '__Stat_Ns_Kind__'
query = client.query(kind=kind)
query.add_filter('kind_name', '=', kind_name)
query.add_filter('timestamp', '=', latest_timestamp)
entities = list(query.fetch(limit=1))
if not entities:
raise RuntimeError(
'Datastore statistics for kind %s unavailable' % kind_name)
return entities[0]['entity_bytes']
@staticmethod
def get_estimated_num_splits(client, query):
"""Computes the number of splits to be performed on the query."""
try:
estimated_size_bytes = (
ReadFromDatastore._SplitQueryFn
.get_estimated_size_bytes(client, query))
logging.info('Estimated size bytes for query: %s', estimated_size_bytes)
num_splits = int(min(ReadFromDatastore._NUM_QUERY_SPLITS_MAX, round(
(float(estimated_size_bytes) /
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES))))
except Exception as e:
logging.warning('Failed to fetch estimated size bytes: %s', e)
# Fallback in case estimated size is unavailable.
num_splits = ReadFromDatastore._NUM_QUERY_SPLITS_MIN
return max(num_splits, ReadFromDatastore._NUM_QUERY_SPLITS_MIN)
@typehints.with_input_types(types.Query)
@typehints.with_output_types(types.Entity)
class _QueryFn(DoFn):
"""A DoFn that fetches entities from Cloud Datastore, for a given query."""
def process(self, query, *unused_args, **unused_kwargs):
_client = helper.get_client(query.project, query.namespace)
client_query = query._to_client_query(_client)
for client_entity in client_query.fetch(query.limit):
yield types.Entity.from_client_entity(client_entity)
class _Mutate(PTransform):
"""A ``PTransform`` that writes mutations to Cloud Datastore.
Only idempotent Datastore mutation operations (upsert and delete) are
supported, as the commits are retried when failures occur.
"""
def __init__(self, mutate_fn):
"""Initializes a Mutate transform.
Args:
mutate_fn: Instance of `DatastoreMutateFn` to use.
"""
self._mutate_fn = mutate_fn
def expand(self, pcoll):
return pcoll | 'Write Batch to Datastore' >> ParDo(self._mutate_fn)
class DatastoreMutateFn(DoFn):
"""A ``DoFn`` that write mutations to Datastore.
Mutations are written in batches, where the maximum batch size is
`util.WRITE_BATCH_SIZE`.
Commits are non-transactional. If a commit fails because of a conflict over
an entity group, the commit will be retried. This means that the mutation
should be idempotent (`upsert` and `delete` mutations) to prevent duplicate
data or errors.
"""
def __init__(self, project):
"""
Args:
project: (str) cloud project id
"""
self._project = project
self._client = None
self._rpc_successes = Metrics.counter(
_Mutate.DatastoreMutateFn, "datastoreRpcSuccesses")
self._rpc_errors = Metrics.counter(
_Mutate.DatastoreMutateFn, "datastoreRpcErrors")
self._throttled_secs = Metrics.counter(
_Mutate.DatastoreMutateFn, "cumulativeThrottlingSeconds")
self._throttler = AdaptiveThrottler(window_ms=120000, bucket_ms=1000,
overload_ratio=1.25)
def _update_rpc_stats(self, successes=0, errors=0, throttled_secs=0):
|
def start_bundle(self):
self._client = helper.get_client(self._project, namespace=None)
self._init_batch()
self._batch_sizer = util.DynamicBatchSizer()
self._target_batch_size = self._batch_sizer.get_batch_size(
time.time() * 1000)
def element_to_client_batch_item(self, element):
raise NotImplementedError
def add_to_batch(self, client_batch_item):
raise NotImplementedError
@retry.with_exponential_backoff(num_retries=5,
retry_filter=helper.retry_on_rpc_error)
def write_mutations(self, throttler, rpc_stats_callback, throttle_delay=1):
"""Writes a batch of mutations to Cloud Datastore.
If a commit fails, it will be retried up to 5 times. All mutations in the
batch will be committed again, even if the commit was partially
successful. If the retry limit is exceeded, the last exception from
Cloud Datastore will be raised.
Assumes that the Datastore client library does not perform any retries on
commits. It has not been determined how such retries would interact with
the retries and throttler used here.
See ``google.cloud.datastore_v1.gapic.datastore_client_config`` for
retry config.
Args:
rpc_stats_callback: a function to call with arguments `successes` and
`failures` and `throttled_secs`; this is called to record successful
and failed RPCs to Datastore and time spent waiting for throttling.
throttler: (``apache_beam.io.gcp.datastore.v1.adaptive_throttler.
AdaptiveThrottler``)
Throttler instance used to select requests to be throttled.
throttle_delay: (:class:`float`) time in seconds to sleep when
throttled.
Returns:
(int) The latency of the successful RPC in milliseconds.
"""
# Client-side throttling.
while throttler.throttle_request(time.time() * 1000):
logging.info("Delaying request for %ds due to previous failures",
throttle_delay)
time.sleep(throttle_delay)
rpc_stats_callback(throttled_secs=throttle_delay)
if self._batch is None:
      # this will only happen when we retry a previously failed batch
self._batch = self._client.batch()
self._batch.begin()
for element in self._batch_elements:
self.add_to_batch(element)
try:
start_time = time.time()
self._batch.commit()
end_time = time.time()
rpc_stats_callback(successes=1)
throttler.successful_request(start_time * 1000)
commit_time_ms = int((end_time-start_time) * 1000)
return commit_time_ms
except Exception:
self._batch = None
rpc_stats_callback(errors=1)
raise
def process(self, element):
client_element = self.element_to_client_batch_item(element)
self._batch_elements.append(client_element)
self.add_to_batch(client_element)
self._batch_bytes_size += self._batch.mutations[-1].ByteSize()
if (len(self._batch.mutations) >= self._target_batch_size or
self._batch_bytes_size > util.WRITE_BATCH_MAX_BYTES_SIZE):
self._flush_batch()
def finish_bundle(self):
if self._batch_elements:
self._flush_batch()
def _init_batch(self):
self._batch_bytes_size = 0
self._batch = self._client.batch()
self._batch.begin()
self._batch_elements = []
def _flush_batch(self):
# Flush the current batch of mutations to Cloud Datastore.
latency_ms = self.write_mutations(
self._throttler,
rpc_stats_callback=self._update_rpc_stats,
throttle_delay=util.WRITE_BATCH_TARGET_LATENCY_MS // 1000)
logging.debug("Successfully wrote %d mutations in %dms.",
len(self._batch.mutations), latency_ms)
now = time.time() * 1000
self._batch_sizer.report_latency(
now, latency_ms, len(self._batch.mutations))
self._target_batch_size = self._batch_sizer.get_batch_size(now)
self._init_batch()
@typehints.with_input_types(types.Entity)
class WriteToDatastore(_Mutate):
"""
Writes elements of type
:class:`~apache_beam.io.gcp.datastore.v1new.types.Entity` to Cloud Datastore.
Entity keys must be complete. The ``project`` field in each key must match the
project ID passed to this transform. If ``project`` field in entity or
property key is empty then it is filled with the project ID passed to this
transform.
"""
def __init__(self, project):
"""Initialize the `WriteToDatastore` transform.
Args:
project: (:class:`str`) The ID of the project to write entities to.
"""
mutate_fn = WriteToDatastore._DatastoreWriteFn(project)
super(WriteToDatastore, self).__init__(mutate_fn)
class _DatastoreWriteFn(_Mutate.DatastoreMutateFn):
def element_to_client_batch_item(self, element):
if not isinstance(element, types.Entity):
raise ValueError('apache_beam.io.gcp.datastore.v1new.datastoreio.Entity'
' expected, got: %s' % type(element))
if not element.key.project:
element.key.project = self._project
client_entity = element.to_client_entity()
if client_entity.key.is_partial:
raise ValueError('Entities to be written to Cloud Datastore must '
'have complete keys:\n%s' % client_entity)
return client_entity
def add_to_batch(self, client_entity):
self._batch.put(client_entity)
def display_data(self):
return {
'mutation': 'Write (upsert)',
'project': self._project,
}
@typehints.with_input_types(types.Key)
class DeleteFromDatastore(_Mutate):
"""
Deletes elements matching input
:class:`~apache_beam.io.gcp.datastore.v1new.types.Key` elements from Cloud
Datastore.
Keys must be complete. The ``project`` field in each key must match the
project ID passed to this transform. If ``project`` field in key is empty then
it is filled with the project ID passed to this transform.
"""
def __init__(self, project):
"""Initialize the `DeleteFromDatastore` transform.
Args:
project: (:class:`str`) The ID of the project from which the entities will
be deleted.
"""
mutate_fn = DeleteFromDatastore._DatastoreDeleteFn(project)
super(DeleteFromDatastore, self).__init__(mutate_fn)
class _DatastoreDeleteFn(_Mutate.DatastoreMutateFn):
def element_to_client_batch_item(self, element):
if not isinstance(element, types.Key):
raise ValueError('apache_beam.io.gcp.datastore.v1new.datastoreio.Key'
' expected, got: %s' % type(element))
if not element.project:
element.project = self._project
client_key = element.to_client_key()
if client_key.is_partial:
raise ValueError('Keys to be deleted from Cloud Datastore must be '
'complete:\n%s' % client_key)
return client_key
def add_to_batch(self, client_key):
self._batch.delete(client_key)
def display_data(self):
return {
'mutation': 'Delete',
'project': self._project,
}
| self._rpc_successes.inc(successes)
self._rpc_errors.inc(errors)
self._throttled_secs.inc(throttled_secs) |
checkout-confirm.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { CheckoutConfirmComponent } from '@app-buyer/checkout/components/checkout-confirm/checkout-confirm.component';
import { NO_ERRORS_SCHEMA } from '@angular/core';
import { BehaviorSubject, of } from 'rxjs';
import { AppStateService, AppLineItemService } from '@app-buyer/shared';
import { AppPaymentService } from '@app-buyer/shared/services/app-payment-service/app-payment.service';
import { FormBuilder } from '@angular/forms';
import { OcOrderService } from '@ordercloud/angular-sdk';
import { applicationConfiguration } from '@app-buyer/config/app.config';
describe('CheckoutConfirmComponent', () => {
let component: CheckoutConfirmComponent;
let fixture: ComponentFixture<CheckoutConfirmComponent>;
const mockConfig = { anonymousShoppingEnabled: false };
const mockOrder = { ID: '1' };
const appStateService = { orderSubject: new BehaviorSubject(mockOrder) };
const appPaymentService = {
getPayments: jasmine.createSpy('getPayments').and.returnValue(of(null)),
};
const ocLineItemService = {
listAll: jasmine.createSpy('listAll').and.returnValue(of(null)),
};
const orderService = {
Patch: jasmine
.createSpy('Patch')
.and.returnValue(of({ ...mockOrder, Comments: 'comment' })),
};
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [CheckoutConfirmComponent],
providers: [
FormBuilder,
{ provide: OcOrderService, useValue: orderService },
{ provide: AppStateService, useValue: appStateService },
{ provide: AppPaymentService, useValue: appPaymentService },
{ provide: AppLineItemService, useValue: ocLineItemService },
{ provide: applicationConfiguration, useValue: mockConfig },
],
schemas: [NO_ERRORS_SCHEMA], // Ignore template errors: remove if tests are added to test template
}).compileComponents();
}));
| });
it('should create', () => {
expect(component).toBeTruthy();
});
describe('ngOnInit', () => {
beforeEach(() => {
component.ngOnInit();
});
it('should call the right services', () => {
expect(component.form).toBeTruthy();
expect(appPaymentService.getPayments).toHaveBeenCalledWith(
'outgoing',
mockOrder.ID
);
expect(ocLineItemService.listAll).toHaveBeenCalledWith(mockOrder.ID);
});
});
  describe('saveCommentsAndSubmitOrder', () => {
it('should call order.Patch', () => {
spyOn(appStateService.orderSubject, 'next');
spyOn(component.continue, 'emit');
component.form.setValue({ comments: 'comment' });
component.saveCommentsAndSubmitOrder();
expect(orderService.Patch).toHaveBeenCalledWith(
'outgoing',
mockOrder.ID,
{ Comments: 'comment' }
);
expect(appStateService.orderSubject.next).toHaveBeenCalledWith({
...mockOrder,
Comments: 'comment',
});
expect(component.continue.emit).toHaveBeenCalled();
});
});
}); | beforeEach(() => {
fixture = TestBed.createComponent(CheckoutConfirmComponent);
component = fixture.componentInstance;
fixture.detectChanges(); |
errors.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"strings"
"opendev.org/airship/airshipctl/pkg/remote/redfish"
redfishdell "opendev.org/airship/airshipctl/pkg/remote/redfish/vendors/dell"
)
// ErrIncompatibleAuthOptions is returned when incompatible
// AuthTypes are provided
type ErrIncompatibleAuthOptions struct {
ForbiddenOptions []string
AuthType string
}
// NewErrIncompatibleAuthOptions returns Error of type
// ErrIncompatibleAuthOptions
func NewErrIncompatibleAuthOptions(fo []string, ao string) error |
// Error of type ErrIncompatibleAuthOptions is returned when
// ssh-pass type is selected and http-pass, ssh-key or key-pass
// options are defined
func (e ErrIncompatibleAuthOptions) Error() string {
return fmt.Sprintf("Cannot use %s options with an auth type %s", e.ForbiddenOptions, e.AuthType)
}
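// e.g. NewErrIncompatibleAuthOptions([]string{"ssh-key"}, "ssh-pass").Error()
// yields: "Cannot use [ssh-key] options with an auth type ssh-pass"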
// ErrAuthTypeNotSupported is returned when wrong AuthType is provided
type ErrAuthTypeNotSupported struct {
}
func (e ErrAuthTypeNotSupported) Error() string {
return "Invalid Auth type. Allowed types: " + strings.Join(AllowedAuthTypes, ",")
}
// ErrRepoSpecRequiresURL is returned when repository URL is not specified
type ErrRepoSpecRequiresURL struct {
}
func (e ErrRepoSpecRequiresURL) Error() string {
return "Repository spec requires url."
}
// ErrMutuallyExclusiveCheckout is returned if
// mutually exclusive options are given as checkout options
type ErrMutuallyExclusiveCheckout struct {
}
func (e ErrMutuallyExclusiveCheckout) Error() string {
return "Checkout mutually exclusive, use either: commit-hash, branch, tag, or ref."
}
// ErrRepositoryNotFound is returned if repository is empty
// when using in set-manifest
type ErrRepositoryNotFound struct {
Name string
}
func (e ErrRepositoryNotFound) Error() string {
return fmt.Sprintf("Repository %q not found.", e.Name)
}
// ErrMissingRepositoryName is returned if repository name is empty
// when using in set-manifest
type ErrMissingRepositoryName struct {
RepoType string
}
func (e ErrMissingRepositoryName) Error() string {
return fmt.Sprintf("Missing '%s' repository name.", e.RepoType)
}
// ErrMissingRepoURL is returned if repository is empty
// when using --phase in set-manifest
type ErrMissingRepoURL struct {
}
func (e ErrMissingRepoURL) Error() string {
return "A valid URL should be specified."
}
// ErrMissingRepoCheckoutOptions is returned if repository checkout
// options is empty in set-manifest
type ErrMissingRepoCheckoutOptions struct {
}
func (e ErrMissingRepoCheckoutOptions) Error() string {
return "Missing repository checkout options."
}
// ErrInvalidConfig returned in case of incorrect configuration
type ErrInvalidConfig struct {
What string
}
func (e ErrInvalidConfig) Error() string {
return fmt.Sprintf("Invalid configuration: %s", e.What)
}
// ErrMissingConfig returned in case of missing configuration
type ErrMissingConfig struct {
What string
}
func (e ErrMissingConfig) Error() string {
return "missing configuration: " + e.What
}
// ErrConfigFailed returned in case of failure during configuration
type ErrConfigFailed struct {
}
func (e ErrConfigFailed) Error() string {
return "Configuration failed to complete."
}
// ErrManagementConfigurationNotFound describes a situation in which a user has attempted to reference a management
// configuration that cannot be referenced.
type ErrManagementConfigurationNotFound struct {
Name string
}
func (e ErrManagementConfigurationNotFound) Error() string {
return fmt.Sprintf("Unknown management configuration '%s'.", e.Name)
}
// ErrEmptyManagementConfigurationName returned when attempted to create/modify management config with empty name
type ErrEmptyManagementConfigurationName struct {
}
func (e ErrEmptyManagementConfigurationName) Error() string {
	return "management config name must not be empty"
}
// ErrMissingCurrentContext returned in case --current used without setting current-context
type ErrMissingCurrentContext struct {
}
func (e ErrMissingCurrentContext) Error() string {
return "Current context must be set before using --current flag."
}
// ErrMissingManagementConfiguration means the management configuration was not defined for the active cluster.
type ErrMissingManagementConfiguration struct {
contextName string
}
func (e ErrMissingManagementConfiguration) Error() string {
return fmt.Sprintf("Management configuration for context '%s' undefined.", e.contextName)
}
// ErrMissingPhaseRepo returned when Phase Repository is not set in context manifest
type ErrMissingPhaseRepo struct {
}
func (e ErrMissingPhaseRepo) Error() string {
return "Current context manifest must have a phase repository set."
}
// ErrMissingPhaseDocument returned when appropriate Phase document was not found in the filesystem
type ErrMissingPhaseDocument struct {
PhaseName string
}
func (e ErrMissingPhaseDocument) Error() string {
return fmt.Sprintf("Phase document '%s' was not found. "+
"You can initialize it using 'airshipctl document init %s' command.", e.PhaseName, e.PhaseName)
}
// ErrConflictingAuthOptions returned in case both token and username/password is set at same time
type ErrConflictingAuthOptions struct {
}
func (e ErrConflictingAuthOptions) Error() string {
return "specifying token and username/password is not allowed at the same time."
}
// ErrConflictingClusterOptions returned when both certificate-authority and
// insecure-skip-tls-verify is set at same time
type ErrConflictingClusterOptions struct {
}
func (e ErrConflictingClusterOptions) Error() string {
return "specifying certificate-authority and insecure-skip-tls-verify mode is not allowed at the same time."
}
// ErrConflictingContextOptions returned when both context and --current is set at same time
type ErrConflictingContextOptions struct {
}
func (e ErrConflictingContextOptions) Error() string {
return "specifying context and --current Flag is not allowed at the same time."
}
// ErrEmptyContextName returned when empty context name is set
type ErrEmptyContextName struct {
}
func (e ErrEmptyContextName) Error() string {
return "context name must not be empty."
}
// ErrDecodingCredentials returned when the given string cannot be decoded
type ErrDecodingCredentials struct {
Given string
}
func (e ErrDecodingCredentials) Error() string {
return fmt.Sprintf("Error decoding credentials. String '%s' cannot not be decoded", e.Given)
}
// ErrUnknownManagementType describes a situation in which an unknown management type is listed in the airshipctl
// config.
type ErrUnknownManagementType struct {
Type string
}
func (e ErrUnknownManagementType) Error() string {
return fmt.Sprintf("Unknown management type '%s'. Known types include '%s' and '%s'.", e.Type,
redfish.ClientType, redfishdell.ClientType)
}
// ErrMissingManifestName is returned when manifest name is empty
type ErrMissingManifestName struct {
}
func (e ErrMissingManifestName) Error() string {
return "missing manifest name"
}
// ErrMissingFlag is returned when flag is not provided
type ErrMissingFlag struct {
FlagName string
}
func (e ErrMissingFlag) Error() string {
return fmt.Sprintf("missing flag, specify a --%s to embed", e.FlagName)
}
// ErrCheckFile is returned if there is error when checking file on FS
type ErrCheckFile struct {
FlagName string
Path string
InternalErr error
}
func (e ErrCheckFile) Error() string {
return fmt.Sprintf("could not read %s data from '%s': %v", e.FlagName, e.Path, e.InternalErr)
}
// ErrConfigFileExists is returned when there is an existing file at specified location
type ErrConfigFileExists struct {
Path string
}
func (e ErrConfigFileExists) Error() string {
return fmt.Sprintf("could not create default config at %s, file already exists", e.Path)
}
// ErrWrongOutputFormat is returned when unknown output format is defined for printing config
type ErrWrongOutputFormat struct {
Wrong string
Possible []string
}
func (e ErrWrongOutputFormat) Error() string {
return fmt.Sprintf("wrong output format %s, must be one of %s", e.Wrong, strings.Join(e.Possible, " "))
}
| {
return ErrIncompatibleAuthOptions{
ForbiddenOptions: fo,
AuthType: ao,
}
} |
simple.rs | //! Other simple table/ column migrations
#![allow(unused_imports)]
use crate::backend::{MySql, SqlGenerator};
#[test]
fn create_table() {
let sql = MySql::create_table("table_to_create", None);
assert_eq!(String::from("CREATE TABLE table_to_create"), sql);
}
#[test]
fn create_table_with_schema() {
let sql = MySql::create_table("table_to_create", Some("my_schema"));
assert_eq!(String::from("CREATE TABLE my_schema.table_to_create"), sql);
}
#[test]
fn create_table_if_not_exists() |
#[test]
fn drop_table() {
let sql = MySql::drop_table("table_to_drop", None);
assert_eq!(String::from("DROP TABLE table_to_drop"), sql);
}
#[test]
fn drop_table_if_exists() {
let sql = MySql::drop_table_if_exists("table_to_drop", None);
assert_eq!(String::from("DROP TABLE table_to_drop IF EXISTS"), sql);
}
#[test]
fn rename_table() {
let sql = MySql::rename_table("old_table", "new_table", None);
assert_eq!(String::from("RENAME TABLE `old_table` TO `new_table`"), sql);
}
#[test]
fn alter_table() {
let sql = MySql::alter_table("table_to_alter", None);
assert_eq!(String::from("ALTER TABLE `table_to_alter`"), sql);
}
| {
let sql = MySql::create_table_if_not_exists("table_to_create", None);
assert_eq!(
String::from("CREATE TABLE table_to_create IF NOT EXISTS"),
sql
);
} |
caesar_test.go | package caesar_test
import (
"TheAlgorithms/Go/ciphers/caesar"
"fmt"
"testing"
)
var c *caesar.Caesar = caesar.NewCaesar()
func TestEncrypt(t *testing.T) {
var caesarTestData = []struct {
description string
input string
key int
expected string
}{
{
"Basic caesar encryption with letter 'a'",
"a",
3,
"d",
},
{
"Basic caesar encryption wrap around alphabet on letter 'z'",
"z",
3,
"c",
},
{
"Encrypt a simple string with caesar encryiption",
"hello",
3,
"khoor",
},
{
"Encrypt a simple string with key 13",
"hello",
13,
"uryyb",
},
{
"Encrypt a simple string with key -13",
"hello",
-13,
"uryyb",
},
{
"With key of 26 output should be the same as the input",
"no change",
26,
"no change",
},
{
"Encrypt sentence with key 10",
"the quick brown fox jumps over the lazy dog.",
10,
"dro aesmu lbygx pyh tewzc yfob dro vkji nyq.",
},
{
"Encrypt sentence with key 10",
"The Quick Brown Fox Jumps over the Lazy Dog.",
10,
"Dro Aesmu Lbygx Pyh Tewzc yfob dro Vkji Nyq.",
},
}
for _, test := range caesarTestData {
t.Run(test.description, func(t *testing.T) {
actual := c.Encrypt(test.input, test.key)
if actual != test.expected {
t.Logf("FAIL: %s", test.description)
t.Fatalf("With input string '%s' and key '%d' was expecting '%s' but actual was '%s'",
test.input, test.key, test.expected, actual)
}
})
}
}
func TestDecrypt(t *testing.T) |
func ExampleNewCaesar() {
const (
key = 10
input = "The Quick Brown Fox Jumps over the Lazy Dog."
)
c := caesar.NewCaesar()
encryptedText := c.Encrypt(input, key)
fmt.Printf("Encrypt=> key: %d, input: %s, encryptedText: %s\n", key, input, encryptedText)
decryptedText := c.Decrypt(encryptedText, key)
fmt.Printf("Decrypt=> key: %d, input: %s, decryptedText: %s\n", key, encryptedText, decryptedText)
// Output:
// Encrypt=> key: 10, input: The Quick Brown Fox Jumps over the Lazy Dog., encryptedText: Dro Aesmu Lbygx Pyh Tewzc yfob dro Vkji Nyq.
// Decrypt=> key: 10, input: Dro Aesmu Lbygx Pyh Tewzc yfob dro Vkji Nyq., decryptedText: The Quick Brown Fox Jumps over the Lazy Dog.
}
| {
var caesarTestData = []struct {
description string
input string
key int
expected string
}{
{
"Basic caesar decryption with letter 'a'",
"a",
3,
"x",
},
{
"Basic caesar decryption wrap around alphabet on letter 'z'",
"z",
3,
"w",
},
{
"Decrypt a simple string with caesar encryiption",
"hello",
3,
"ebiil",
},
{
"Decrypt a simple string with key 13",
"hello",
13,
"uryyb",
},
{
"Decrypt a simple string with key -13",
"hello",
-13,
"uryyb",
},
{
"With key of 26 output should be the same as the input",
"no change",
26,
"no change",
},
{
"Decrypt sentence with key 10",
"Dro Aesmu Lbygx Pyh Tewzc yfob dro Vkji Nyq.",
10,
"The Quick Brown Fox Jumps over the Lazy Dog.",
},
}
for _, test := range caesarTestData {
t.Run(test.description, func(t *testing.T) {
actual := c.Decrypt(test.input, test.key)
if actual != test.expected {
t.Logf("FAIL: %s", test.description)
t.Fatalf("With input string '%s' and key '%d' was expecting '%s' but actual was '%s'",
test.input, test.key, test.expected, actual)
}
})
}
} |
test_detect_score.py | import unittest
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src/predict"))
import src.predict.detect_score as detect_score
class TestDetectScore(unittest.TestCase):
    def setUp(self):  # set up the test fixture
self.ds=detect_score.DetectScore()
def test_fix_text(self):
#bugfix
text="3 6 10 6 3 4 15"#10を1 0 に分解したい
text=self.ds.fix_text(text)
self.assertEqual("3 6 1 0 6 3 4 15",text)
def test_fix_in_ad(self):
print("text_fix_in_ad")
text_array=['3','6','Ad']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '40','6','Ad'],text_array)
text_array=['3','Ad','6']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3','Ad','6', '40'],text_array)
text_array=['3', '6', '1', '6', '3', '4', 'Ad']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '6', '1', '40','6', '3', '4', 'Ad'],text_array)
text_array=['3', '6', '1', 'Ad','6', '3', '4']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '6', '1', 'Ad','6', '3', '4', '40'],text_array)
def test_text2score(self):
text="A 40"
set_num = self.ds.get_set_text_num(text)
self.assertEqual(2,set_num)
text="4 1 15\n6 1 15"
set_num = self.ds.get_set_text_num(text)
self.assertEqual(6,set_num)
text="1 15 \n0\n0"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("0",game_b)
self.assertEqual("15",score_a)
self.assertEqual("0",score_b)
text="1 A \n5\n40"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a) | self.assertEqual("A",score_a)
self.assertEqual("40",score_b)
text="30 15"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("30",score_a)
self.assertEqual("15",score_b)
text="A 40"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("A",score_a)
self.assertEqual("40",score_b)
text="15 "
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("15",score_a)
self.assertEqual("",score_b)
text=""
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("",game_a)
self.assertEqual("",game_b)
self.assertEqual("",score_a)
self.assertEqual("",score_b)
text="4 1 15\n6 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="4 6 4 15\n6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="6 4 6 4 15\n4 6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="5 6 4 6 4 15\n7 4 6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
# if __name__ == "__main__":
# unittest.main() | self.assertEqual("5",game_b) |
logic.rs | use crate::common::data::Data;
use crate::core::extract::binop; |
// TODO: implement equality rather than just deriving PartialEq on Data.
// Rust hit it right on the nose with the difference between equality and partial equality
// TODO: equality vs partial equality in passerine?
/// Returns `true` if the `Data` are equal, false otherwise.
pub fn equal(data: Data) -> Result<Data, String> {
let (left, right) = binop(data);
return Ok(Data::Boolean(left == right));
} | |
make_all_parametric_reactors.py | """
This python script demonstrates the creation of all parametric reactors available | import paramak
def main():
all_reactors = []
my_reactor = paramak.BallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=100,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=200,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=50,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=50,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
)
my_reactor.name = "BallReactor"
all_reactors.append(my_reactor)
my_reactor = paramak.BallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=100,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=200,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=50,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=50,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
pf_coil_radial_thicknesses=[50, 50, 50, 50],
pf_coil_vertical_thicknesses=[50, 50, 50, 50],
pf_coil_to_rear_blanket_radial_gap=50,
pf_coil_to_tf_coil_radial_gap=50,
outboard_tf_coil_radial_thickness=100,
outboard_tf_coil_poloidal_thickness=50,
)
my_reactor.name = "BallReactor_with_pf_tf_coils"
all_reactors.append(my_reactor)
my_reactor = paramak.SingleNullBallReactor(
inner_bore_radial_thickness=50,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
divertor_radial_thickness=100,
inner_plasma_gap_radial_thickness=50,
plasma_radial_thickness=200,
outer_plasma_gap_radial_thickness=50,
firstwall_radial_thickness=50,
blanket_radial_thickness=100,
blanket_rear_wall_radial_thickness=50,
elongation=2,
triangularity=0.55,
number_of_tf_coils=16,
rotation_angle=180,
pf_coil_radial_thicknesses=[50, 50, 50, 50],
pf_coil_vertical_thicknesses=[50, 50, 50, 50],
pf_coil_to_rear_blanket_radial_gap=50,
pf_coil_to_tf_coil_radial_gap=50,
outboard_tf_coil_radial_thickness=100,
outboard_tf_coil_poloidal_thickness=50,
divertor_position="lower"
)
my_reactor.name = "SingleNullBallReactor_with_pf_tf_coils"
all_reactors.append(my_reactor)
my_reactor = paramak.SubmersionTokamak(
inner_bore_radial_thickness=25,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
inboard_blanket_radial_thickness=50,
firstwall_radial_thickness=50,
inner_plasma_gap_radial_thickness=70,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=70,
outboard_blanket_radial_thickness=200,
blanket_rear_wall_radial_thickness=50,
divertor_radial_thickness=50,
plasma_high_point=(50 + 50 + 50 + 100 + 100, 350),
rotation_angle=180,
support_radial_thickness=150,
outboard_tf_coil_radial_thickness=50,
)
my_reactor.name = "SubmersionTokamak"
all_reactors.append(my_reactor)
my_reactor = paramak.SubmersionTokamak(
inner_bore_radial_thickness=25,
inboard_tf_leg_radial_thickness=50,
center_column_shield_radial_thickness=50,
inboard_blanket_radial_thickness=50,
firstwall_radial_thickness=50,
inner_plasma_gap_radial_thickness=70,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=70,
outboard_blanket_radial_thickness=200,
blanket_rear_wall_radial_thickness=50,
divertor_radial_thickness=50,
plasma_high_point=(50 + 50 + 50 + 100 + 100, 350),
rotation_angle=180,
support_radial_thickness=150,
outboard_tf_coil_radial_thickness=50,
tf_coil_to_rear_blanket_radial_gap=50,
outboard_tf_coil_poloidal_thickness=70,
pf_coil_vertical_thicknesses=[50, 50, 50, 50, 50],
pf_coil_radial_thicknesses=[40, 40, 40, 40, 40],
pf_coil_to_tf_coil_radial_gap=50,
number_of_tf_coils=16,
)
my_reactor.name = "SubmersionTokamak_with_pf_tf_coils"
all_reactors.append(my_reactor)
my_reactor = paramak.SingleNullSubmersionTokamak(
inner_bore_radial_thickness=10,
inboard_tf_leg_radial_thickness=30,
center_column_shield_radial_thickness=60,
divertor_radial_thickness=50,
inner_plasma_gap_radial_thickness=30,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=30,
firstwall_radial_thickness=30,
blanket_rear_wall_radial_thickness=30,
number_of_tf_coils=16,
rotation_angle=180,
support_radial_thickness=20,
inboard_blanket_radial_thickness=20,
outboard_blanket_radial_thickness=20,
plasma_high_point=(200, 200),
divertor_position="upper",
support_position="upper"
)
my_reactor.name = "SingleNullSubmersionTokamak"
all_reactors.append(my_reactor)
my_reactor = paramak.SingleNullSubmersionTokamak(
inner_bore_radial_thickness=10,
inboard_tf_leg_radial_thickness=30,
center_column_shield_radial_thickness=60,
divertor_radial_thickness=50,
inner_plasma_gap_radial_thickness=30,
plasma_radial_thickness=300,
outer_plasma_gap_radial_thickness=30,
firstwall_radial_thickness=30,
blanket_rear_wall_radial_thickness=30,
number_of_tf_coils=16,
rotation_angle=180,
support_radial_thickness=20,
inboard_blanket_radial_thickness=20,
outboard_blanket_radial_thickness=20,
plasma_high_point=(200, 200),
pf_coil_radial_thicknesses=[50, 50, 50, 50],
pf_coil_vertical_thicknesses=[50, 50, 50, 50],
pf_coil_to_tf_coil_radial_gap=50,
outboard_tf_coil_radial_thickness=100,
outboard_tf_coil_poloidal_thickness=50,
tf_coil_to_rear_blanket_radial_gap=20,
divertor_position="upper",
support_position="upper"
)
my_reactor.name = "SingleNullSubmersionTokamak_with_pf_tf_coils"
all_reactors.append(my_reactor)
return all_reactors
if __name__ == "__main__":
    all_reactors = main()
    for reactor in all_reactors:
        reactor.export_stp(reactor.name)
        reactor.export_stl(reactor.name)
        reactor.export_neutronics_description() | in the paramak tool
"""
|
plot.py | import matplotlib.pyplot as plt
## class for plot function
class PlotFunction():
"""Make Data visualization Easier !"""
def __init__(self, y_data, x_label, y_label, x_ticklabels=[], x_ticks=[], title=''):
self.y_data=y_data
self.x_label=x_label
self.y_label=y_label
self.x_ticklabels=x_ticklabels
self.x_ticks=x_ticks
if title == '':
self.title=self.y_label+" vs. "+self.x_label
else:
self.title=self.y_label+" vs. "+self.x_label+title
def | (self):
plt.clf()
legend_list = []
line_type=['-x', '-*', '-^', '-o', '-s', '-<', '-v', '-D']
plt_ptrs = []
i = 0
default_xticks_len = 0
for key, value in self.y_data.items():
legend_list.append(key)
assert(i < len(line_type)) # aviod over the range of line_type
plt_ptr, = plt.plot([int(x) for x in value], line_type[i])
plt_ptrs.append(plt_ptr)
i += 1
if default_xticks_len == 0:
default_xticks_len = len(value)
plt.title(self.title)
plt.xlabel(self.x_label)
plt.ylabel(self.y_label)
plt.legend(plt_ptrs, legend_list)
ax = plt.gca()
# set tick positions before tick labels so matplotlib keeps them aligned
if self.x_ticks != []:
    ax.set_xticks(self.x_ticks)
else:
    ax.set_xticks([x for x in range(default_xticks_len)])
if self.x_ticklabels != []:
    ax.set_xticklabels(self.x_ticklabels)
else:
    ax.set_xticklabels([str(x) for x in range(default_xticks_len)])
plt.tight_layout()
def save_figs(self, dir, filename=''):
self.plot_figs()
name=''
if filename != '':
name = filename + '.jpg'
else:
name = self.title + '.jpg'
plt.savefig(dir + name)
def plot_pie(self, dir='./', title='', legend_list=[], if_save=True):
plt.clf()
if legend_list == []:
legend_list = ['HQM', 'Core-0', 'PE-1 PE-2', 'DDR-0', 'PE-3 PE-4 PE-5', 'PE-13', 'Core-1', \
'Core-2', 'PE-6 PE-7 PE-8', 'DDR-1', 'PE-9 PE-10 PE-11 PE-12', 'Core-3']
explode = [0.01] * len(self.y_data)
plt.pie(self.y_data, explode=explode, labels=legend_list)
plt.title(title)
plt.tight_layout()
if if_save:
plt.savefig(dir+title+'.jpg')
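## Hedged usage sketch (toy data, not from the original script): y_data maps
## a legend label to a list of numeric strings, mirroring how ete_delay is
## built below.
# demo = PlotFunction({'series-a': ['1', '4', '9']}, 'Step', 'Value')
# demo.save_figs('./')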
## System Path
dir_path = "/home/wj/test/Gem5_task_graph/my_STATS/10_03/different_memory_access/"
result_path = dir_path+"results.txt"
fig_path = dir_path+"FIGS/"
link_result_path = dir_path+"LINK_RESULT/"
log_path = dir_path + "log"
## Parameter Setting
apps=[1, 2, 3, 4, 5]
iters=[100]
mem_access=['10', '20', '30', '40', '50']
mem_type = ['DDR3']
## Read File -> result.txt
with open(result_path) as input_file:
input_data = input_file.readlines()
# use dict to store info {name: [...]}
input_data_dict = {}
for i in range(1, len(input_data)):
input_data_dict[input_data[i].split()[0]] = input_data[i].strip().split()[1:]
ete_delay = {}
flits = {}
hops = {}
latency = {}
network_latency = {}
queueing_latency = {}
for app_num in range(1,6):
app_name = "App_0" + str(app_num)
ete_delay[app_name] = []
flits[app_name] = []
hops[app_name] = []
latency[app_name] = []
network_latency[app_name] = []
queueing_latency[app_name] = []
for iter in iters:
for mc in mem_access:
for mt in mem_type:
filename = "Application_0" + str(app_num) + '_Iters_' + str(iter) + '_Memory_Access_' +\
str(mc) + '_Memory_Type_' + str(mt)
ete_delay[app_name].append(input_data_dict[filename][5])
flits[app_name].append(input_data_dict[filename][0])
hops[app_name].append(input_data_dict[filename][1])
latency[app_name].append(input_data_dict[filename][2])
network_latency[app_name].append(input_data_dict[filename][3])
queueing_latency[app_name].append(input_data_dict[filename][4])
p = PlotFunction(ete_delay, 'Memory Access', 'Average ETE Delay', mem_access)
p.save_figs(fig_path)
## Read File -> log
if 0:
with open(log_path) as log_file:
log_data = log_file.readlines()
for app in apps:
ete_delay = {}
average_ete_delay = {}
for iter in iters:
for mc in mem_access:
for mt in mem_type:
key_word = 'Application_0' + str(app) + '_Iters_' + str(iter) + '_Memory_Access_' +\
str(mc) + '_Memory_Type_' + str(mt)
start_idx = -1
for i in range(len(log_data)):
if key_word in log_data[i]:
start_idx = i + 3
break
assert(start_idx != -1)
each_iter_data = []
aver_data = []
total_delay = 0
assert(log_data[start_idx+iter-1].strip().split()[1] == str(iter-1))
for i in range(start_idx, start_idx+iter):
delay = log_data[i].strip().split()[-1]
total_delay += int(delay)
each_iter_data.append(delay)
aver_data.append(total_delay/(i-start_idx+1))
x_index = 'Memory_Access_' + mc # for legend
ete_delay[x_index] = each_iter_data
average_ete_delay[x_index] = aver_data
x_ticklabels=['10', '20', '30', '40', '50', '60', '70', '80', '90', '100']
x_ticks=[i*10 for i in range(1,11)]
p = PlotFunction(ete_delay, "Execution Iterations", "ETE Delay", x_ticklabels, x_ticks, ' for_App_0'+str(app))
p.save_figs(fig_path)
p1 = PlotFunction(average_ete_delay, "Execution Iterations", "Average ETE Delay", x_ticklabels, x_ticks, ' for_App_0'+str(app))
p1.save_figs(fig_path)
| plot_figs |
utils.go | package new_storage
import (
"fmt"
"github.com/apex/log"
"sort"
"github.com/mholt/archiver/v3"
)
func GetBackupsToDelete(backups []Backup, keep int) []Backup {
if len(backups) > keep {
sort.SliceStable(backups, func(i, j int) bool {
return backups[i].UploadDate.After(backups[j].UploadDate)
})
// KeepRemoteBackups should respect incremental backups and don't delete required backups
// fix https://github.com/AlexAkulov/clickhouse-backup/issues/111
// fix https://github.com/AlexAkulov/clickhouse-backup/issues/385
deletedBackups := make([]Backup, len(backups)-keep)
copied := copy(deletedBackups, backups[keep:])
if copied != len(backups)-keep {
log.Warnf("copied wrong items from backup list expected=%d, actual=%d", len(backups)-keep, copied)
}
for _, b := range backups {
if b.RequiredBackup != "" {
for i, deletedBackup := range deletedBackups {
if b.RequiredBackup == deletedBackup.BackupName {
deletedBackups = append(deletedBackups[:i], deletedBackups[i+1:]...)
break
}
}
}
}
return deletedBackups
}
return []Backup{}
}
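// Hedged usage sketch (not in the original file): with keep=2 the two newest
// backups survive, and an older full backup still required by a kept
// incremental backup is spared as well.
//
//	now := time.Now()
//	backups := []Backup{
//		{BackupName: "full1", UploadDate: now.Add(-3 * time.Hour)},
//		{BackupName: "inc1", RequiredBackup: "full1", UploadDate: now.Add(-2 * time.Hour)},
//		{BackupName: "full2", UploadDate: now.Add(-1 * time.Hour)},
//	}
//	toDelete := GetBackupsToDelete(backups, 2) // empty: full1 is still required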
func getArchiveWriter(format string, level int) (archiver.Writer, error) {
switch format {
case "tar":
return &archiver.Tar{}, nil
case "lz4":
return &archiver.TarLz4{CompressionLevel: level, Tar: archiver.NewTar()}, nil
case "bzip2", "bz2":
return &archiver.TarBz2{CompressionLevel: level, Tar: archiver.NewTar()}, nil
case "gzip", "gz":
return &archiver.TarGz{CompressionLevel: level, Tar: archiver.NewTar()}, nil
case "sz":
return &archiver.TarSz{Tar: archiver.NewTar()}, nil
case "xz":
return &archiver.TarXz{Tar: archiver.NewTar()}, nil
case "br", "brotli":
return &archiver.TarBrotli{Quality: level, Tar: archiver.NewTar()}, nil
case "zstd":
return &archiver.TarZstd{Tar: archiver.NewTar()}, nil
}
return nil, fmt.Errorf("wrong compression_format: %s, supported: 'tar', 'lz4', 'bzip2', 'bz2', 'gzip', 'gz', 'sz', 'xz', 'br', 'brotli', 'zstd'", format)
}
func getArchiveReader(format string) (archiver.Reader, error) | {
switch format {
case "tar":
return archiver.NewTar(), nil
case "lz4":
return archiver.NewTarLz4(), nil
case "bzip2", "bz2":
return archiver.NewTarBz2(), nil
case "gzip", "gz":
return archiver.NewTarGz(), nil
case "sz":
return archiver.NewTarSz(), nil
case "xz":
return archiver.NewTarXz(), nil
case "br", "brotli":
return archiver.NewTarBrotli(), nil
case "zstd":
return archiver.NewTarZstd(), nil
}
return nil, fmt.Errorf("wrong compression_format: %s, supported: 'tar', 'lz4', 'bzip2', 'bz2', 'gzip', 'gz', 'sz', 'xz', 'br', 'brotli', 'zstd'", format)
} |
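// Hedged pairing sketch (not in the original file): both helpers accept the
// same format strings, so a writer and reader for one format line up.
//
//	w, _ := getArchiveWriter("gzip", 5)
//	r, _ := getArchiveReader("gzip")
//	_, _ = w, r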
|
is-undefined.js | export default function isUndefined(input) {
| return input === void 0;
} | |
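// Hedged usage note (not part of the original module): `void 0` always
// evaluates to the real `undefined`, even where a local binding shadows the
// global name, so:
//   isUndefined(void 0); // true
//   isUndefined(null);   // false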
assorted_demo.rs | #[allow(unused_imports)] use builtin::*;
#[allow(unused_imports)] use builtin_macros::*;
mod pervasive; #[allow(unused_imports)] use pervasive::*;
fn main() {
let x = 3;
let y = 4;
assert(x != y);
}
#[derive(Eq, PartialEq, Structural)]
struct Train {
cars: u64,
}
fn main2() {
let t = Train { cars: 10 };
let q = Train { cars: 10 };
assert(t == q);
}
| }
#[spec]
fn divides(v: u64, d: u64) -> bool {
exists(|k: u64| mul(d, k) == v)
}
#[verifier(external)]
fn gcd_external(a: u64, b: u64) -> u64 {
let mut i = a;
while i >= 1 {
if a % i == 0 && b % i == 0 {
break;
}
i -= 1;
}
i
}
#[verifier(external_body)]
fn gcd(a: u64, b: u64) -> u64 {
requires([a >= 0, b >= 0]);
ensures(|result: u64| [divides(a, result), divides(b, result)]);
gcd_external(a, b)
}
fn main3() {
let x = 42;
let y = 182;
let z = gcd(x, y);
assert(divides(x, z));
assert(divides(y, z));
// TODO assert(x % z == 0);
} | #[spec]
fn mul(a: u64, b: u64) -> u64 {
a * b |
pet_get_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package pet
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/go-swagger/go-swagger/examples/contributed-templates/stratoscale/models"
)
// PetGetReader is a Reader for the PetGet structure.
type PetGetReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *PetGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewPetGetOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewPetGetBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewPetGetNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewPetGetOK creates a PetGetOK with default headers values
func | () *PetGetOK {
return &PetGetOK{}
}
/* PetGetOK describes a response with status code 200, with default header values.
successful operation
*/
type PetGetOK struct {
Payload *models.Pet
}
func (o *PetGetOK) Error() string {
return fmt.Sprintf("[GET /pet/{petId}][%d] petGetOK %+v", 200, o.Payload)
}
func (o *PetGetOK) GetPayload() *models.Pet {
return o.Payload
}
func (o *PetGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Pet)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPetGetBadRequest creates a PetGetBadRequest with default headers values
func NewPetGetBadRequest() *PetGetBadRequest {
return &PetGetBadRequest{}
}
/* PetGetBadRequest describes a response with status code 400, with default header values.
Invalid ID supplied
*/
type PetGetBadRequest struct {
}
func (o *PetGetBadRequest) Error() string {
return fmt.Sprintf("[GET /pet/{petId}][%d] petGetBadRequest ", 400)
}
func (o *PetGetBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewPetGetNotFound creates a PetGetNotFound with default headers values
func NewPetGetNotFound() *PetGetNotFound {
return &PetGetNotFound{}
}
/* PetGetNotFound describes a response with status code 404, with default header values.
Pet not found
*/
type PetGetNotFound struct {
}
func (o *PetGetNotFound) Error() string {
return fmt.Sprintf("[GET /pet/{petId}][%d] petGetNotFound ", 404)
}
func (o *PetGetNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
| NewPetGetOK |
MapJsonDecoder.ts | /// <reference path="../Map.ts"/>
class MapJsonDecoder{
private rootMap:Map<any>;
private dataString:string;
constructor(dataString:string) {
this.rootMap = new Map<any>('rootMap');
this.dataString = dataString;
}
public decode():Map<any>{ | private parseStringToMap(dataString:string, parentMap:Map<any>):void{
var dataJson:any = '';
try{
dataJson = JSON.parse(dataString);
}
catch(error){
console.log('MapJsonDecoder. Not valid json.');
//throw new Error('Not valid json.');
}
this.parseObjectToMap(dataJson, parentMap);
}
private parseObjectToMap(dataObject:any, parentMap:Map<any>):Map<any>{
var id:string = dataObject["id"];
var type:string = dataObject["type"];
if(type=="Map"){
for(var key in dataObject){
var value:any = dataObject[key];
var valueId:string = value["id"];
var valueType:string = value["type"];
if(key!="id" && key!="type" && valueType=="Map"){
var subMap:Map<any> = new Map<any>(valueId);
parentMap.add(key, this.parseObjectToMap(value, subMap));
}
else{
if(key === "id"){
parentMap.setId(value);
}
else if(key != "type"){
parentMap.add(key, value);
}
}
}
}
return parentMap;
}
} | this.parseStringToMap(this.dataString, this.rootMap);
return this.rootMap;
}
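// Hedged usage sketch (not part of the original class); the accessor name
// on Map is an assumption:
// const decoded = new MapJsonDecoder('{"id":"rootMap","type":"Map","answer":42}').decode();
// decoded.get("answer"); // 42, assuming Map<any> exposes a get()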
|
spritespin.beh-hold.js | (function ($, SpriteSpin) {
"use strict";
function | (e, data) {
if (data.loading || data.dragging || !data.stage.is(':visible')) return;
SpriteSpin.updateInput(e, data);
data.dragging = true;
data.animate = true;
SpriteSpin.setAnimation(data);
}
function stop(e, data) {
data.dragging = false;
SpriteSpin.resetInput(data);
SpriteSpin.stopAnimation(data);
}
function update(e, data) {
if (!data.dragging) return;
SpriteSpin.updateInput(e, data);
var half, delta, target = data.target, offset = target.offset();
if (data.orientation === "horizontal") {
half = target.innerWidth() / 2;
delta = (data.currentX - offset.left - half) / half;
} else {
half = (data.height / 2);
delta = (data.currentY - offset().top - half) / half;
}
data.reverse = delta < 0;
delta = delta < 0 ? -delta : delta;
data.frameTime = 80 * (1 - delta) + 20;
if (((data.orientation === 'horizontal') && (data.dX < data.dY)) ||
((data.orientation === 'vertical') && (data.dX > data.dY))) {
e.preventDefault();
}
}
SpriteSpin.registerModule('hold', {
mousedown: start,
mousemove: update,
mouseup: stop,
mouseleave: stop,
touchstart: start,
touchmove: update,
touchend: stop,
touchcancel: stop,
onFrame: function () {
$(this).spritespin("api").startAnimation();
}
});
}(window.jQuery || window.Zepto || window.$, window.SpriteSpin));
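// Hedged usage sketch (not part of the plugin): enabling the module
// registered above; the option names are assumptions based on SpriteSpin's
// conventional API.
// $('#stage').spritespin({ source: frames, behavior: 'hold' });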
| start |
web.rs | //! Test suite for the Web and headless browsers.
#![cfg(target_arch = "wasm32")]
extern crate awful_idea_name;
extern crate regex; | extern crate wasm_bindgen_test;
use awful_idea_name::AwfulIdeaName;
use regex::Regex;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn it_returns_a_random_name() {
let a = AwfulIdeaName::new();
let name1 = a.generate();
let name2 = a.generate();
assert_ne!(name1, name2)
}
#[wasm_bindgen_test]
fn it_contains_four_digits_at_the_end() {
let a = AwfulIdeaName::new();
let name = a.generate();
let re = Regex::new(r"^\w+-\w+-[0-9]{4}$").unwrap();
assert!(re.is_match(&name));
} | |
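// Hedged note (not in the original file): suites like this are typically run
// with `wasm-pack test --headless --chrome` (or --firefox), which honors the
// wasm_bindgen_test_configure!(run_in_browser) directive above.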
wellcomeLibraryRedirect.ts | import { CloudFrontRequestEvent, Context } from 'aws-lambda';
import { | import { getBnumberFromPath } from './paths';
import {
createRedirect,
getSierraIdentifierRedirect,
wellcomeCollectionNotFoundRedirect,
wellcomeCollectionRedirect,
} from './redirectHelpers';
import { redirectToRoot } from './redirectToRoot';
import { lookupRedirect } from './lookupRedirect';
import { wlorgpLookup } from './wlorgpLookup';
import rawStaticRedirects from './staticRedirects.json';
const staticRedirects = rawStaticRedirects as Record<string, string>;
async function getWorksRedirect(
uri: string
): Promise<CloudFrontResultResponse> {
const sierraIdentifier = getBnumberFromPath(uri);
return getSierraIdentifierRedirect(sierraIdentifier);
}
async function getApiRedirects(
uri: string
): Promise<CloudFrontResultResponse | undefined> {
const apiRedirectUri = await wlorgpLookup(uri);
if (apiRedirectUri instanceof Error) {
console.error(apiRedirectUri);
return Promise.resolve(undefined);
}
return createRedirect(apiRedirectUri);
}
async function redirectRequestUri(
request: CloudFrontRequest
): Promise<undefined | CloudFrontResultResponse> {
let uri = request.uri;
if (request.querystring) {
uri = `${uri}?${request.querystring}`;
}
const itemPathRegExp: RegExp = /^\/(item|player)\/.*/;
const eventsPathRegExp: RegExp = /^\/events(\/)?.*/;
const apiPathRegExp: RegExp = /^\/(iiif|service|ddsconf|dds-static|annoservices)\/.*/;
const collectionsBrowseExp: RegExp = /^\/collections\/browse(\/)?.*/;
const staticRedirect = lookupRedirect(staticRedirects, uri);
if (staticRedirect) {
return staticRedirect;
} else if (uri.match(itemPathRegExp)) {
return getWorksRedirect(uri);
} else if (uri.match(collectionsBrowseExp)) {
return wellcomeCollectionRedirect('/collections');
} else if (uri.match(eventsPathRegExp)) {
return wellcomeCollectionRedirect('/whats-on');
} else if (uri.match(apiPathRegExp)) {
return getApiRedirects(uri);
}
}
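// Hedged routing examples (paths invented for illustration):
// "/item/b21293302"         -> getWorksRedirect (b-number lookup)
// "/collections/browse/art" -> "/collections"
// "/events/talks"           -> "/whats-on"
// "/iiif/image/..."         -> getApiRedirects (wlorgp lookup)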
export const requestHandler = async (
event: CloudFrontRequestEvent,
_: Context
) => {
const request: CloudFrontRequest = event.Records[0].cf.request;
const rootRedirect = redirectToRoot(request);
if (rootRedirect) {
return rootRedirect;
}
const requestRedirect = await redirectRequestUri(request);
if (requestRedirect) {
return requestRedirect;
}
// If we've matched nothing we redirect to wellcomecollection.org
console.warn(`Unable to redirect request ${JSON.stringify(event.Records[0].cf.request)}`);
return wellcomeCollectionRedirect('/');
}; | CloudFrontRequest,
CloudFrontResultResponse,
} from 'aws-lambda/common/cloudfront'; |
dota_clientmessages.pb.go | // Code generated by protoc-gen-go.
// source: dota_clientmessages.proto
// DO NOT EDIT!
package dota
import proto "github.com/golang/protobuf/proto"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = math.Inf
type EDotaClientMessages int32
const (
EDotaClientMessages_DOTA_CM_MapLine EDotaClientMessages = 1
EDotaClientMessages_DOTA_CM_AspectRatio EDotaClientMessages = 2
EDotaClientMessages_DOTA_CM_MapPing EDotaClientMessages = 3
EDotaClientMessages_DOTA_CM_UnitsAutoAttack EDotaClientMessages = 4
EDotaClientMessages_DOTA_CM_AutoPurchaseItems EDotaClientMessages = 5
EDotaClientMessages_DOTA_CM_TestItems EDotaClientMessages = 6
EDotaClientMessages_DOTA_CM_SearchString EDotaClientMessages = 7
EDotaClientMessages_DOTA_CM_Pause EDotaClientMessages = 8
EDotaClientMessages_DOTA_CM_ShopViewMode EDotaClientMessages = 9
EDotaClientMessages_DOTA_CM_SetUnitShareFlag EDotaClientMessages = 10
EDotaClientMessages_DOTA_CM_SwapRequest EDotaClientMessages = 11
EDotaClientMessages_DOTA_CM_SwapAccept EDotaClientMessages = 12
EDotaClientMessages_DOTA_CM_WorldLine EDotaClientMessages = 13
EDotaClientMessages_DOTA_CM_RequestGraphUpdate EDotaClientMessages = 14
EDotaClientMessages_DOTA_CM_ItemAlert EDotaClientMessages = 15
EDotaClientMessages_DOTA_CM_ChatWheel EDotaClientMessages = 16
EDotaClientMessages_DOTA_CM_SendStatPopup EDotaClientMessages = 17
EDotaClientMessages_DOTA_CM_BeginLastHitChallenge EDotaClientMessages = 18
EDotaClientMessages_DOTA_CM_UpdateQuickBuy EDotaClientMessages = 19
EDotaClientMessages_DOTA_CM_UpdateCoachListen EDotaClientMessages = 20
EDotaClientMessages_DOTA_CM_CoachHUDPing EDotaClientMessages = 21
EDotaClientMessages_DOTA_CM_RecordVote EDotaClientMessages = 22
EDotaClientMessages_DOTA_CM_UnitsAutoAttackAfterSpell EDotaClientMessages = 23
EDotaClientMessages_DOTA_CM_WillPurchaseAlert EDotaClientMessages = 24
EDotaClientMessages_DOTA_CM_PlayerShowCase EDotaClientMessages = 25
EDotaClientMessages_DOTA_CM_TeleportRequiresHalt EDotaClientMessages = 26
EDotaClientMessages_DOTA_CM_CameraZoomAmount EDotaClientMessages = 27
EDotaClientMessages_DOTA_CM_BroadcasterUsingCamerman EDotaClientMessages = 28
EDotaClientMessages_DOTA_CM_BroadcasterUsingAssistedCameraOperator EDotaClientMessages = 29
EDotaClientMessages_DOTA_CM_EnemyItemAlert EDotaClientMessages = 30
EDotaClientMessages_DOTA_CM_FreeInventory EDotaClientMessages = 31
EDotaClientMessages_DOTA_CM_BuyBackStateAlert EDotaClientMessages = 32
EDotaClientMessages_DOTA_CM_QuickBuyAlert EDotaClientMessages = 33
EDotaClientMessages_DOTA_CM_HeroStatueLike EDotaClientMessages = 34
EDotaClientMessages_DOTA_CM_ModifierAlert EDotaClientMessages = 35
EDotaClientMessages_DOTA_CM_TeamShowcaseEditor EDotaClientMessages = 36
EDotaClientMessages_DOTA_CM_HPManaAlert EDotaClientMessages = 37
EDotaClientMessages_DOTA_CM_GlyphAlert EDotaClientMessages = 38
EDotaClientMessages_DOTA_CM_TeamShowcaseClientData EDotaClientMessages = 39
EDotaClientMessages_DOTA_CM_PlayTeamShowcase EDotaClientMessages = 40
EDotaClientMessages_DOTA_CM_EventCNY2015Cmd EDotaClientMessages = 41
EDotaClientMessages_DOTA_CM_ChallengeSelect EDotaClientMessages = 42
EDotaClientMessages_DOTA_CM_ChallengeReroll EDotaClientMessages = 43
EDotaClientMessages_DOTA_CM_ClickedBuff EDotaClientMessages = 44
)
var EDotaClientMessages_name = map[int32]string{
1: "DOTA_CM_MapLine",
2: "DOTA_CM_AspectRatio",
3: "DOTA_CM_MapPing",
4: "DOTA_CM_UnitsAutoAttack",
5: "DOTA_CM_AutoPurchaseItems",
6: "DOTA_CM_TestItems",
7: "DOTA_CM_SearchString",
8: "DOTA_CM_Pause",
9: "DOTA_CM_ShopViewMode",
10: "DOTA_CM_SetUnitShareFlag",
11: "DOTA_CM_SwapRequest",
12: "DOTA_CM_SwapAccept",
13: "DOTA_CM_WorldLine",
14: "DOTA_CM_RequestGraphUpdate",
15: "DOTA_CM_ItemAlert",
16: "DOTA_CM_ChatWheel",
17: "DOTA_CM_SendStatPopup",
18: "DOTA_CM_BeginLastHitChallenge",
19: "DOTA_CM_UpdateQuickBuy",
20: "DOTA_CM_UpdateCoachListen",
21: "DOTA_CM_CoachHUDPing",
22: "DOTA_CM_RecordVote",
23: "DOTA_CM_UnitsAutoAttackAfterSpell",
24: "DOTA_CM_WillPurchaseAlert",
25: "DOTA_CM_PlayerShowCase",
26: "DOTA_CM_TeleportRequiresHalt",
27: "DOTA_CM_CameraZoomAmount",
28: "DOTA_CM_BroadcasterUsingCamerman",
29: "DOTA_CM_BroadcasterUsingAssistedCameraOperator",
30: "DOTA_CM_EnemyItemAlert",
31: "DOTA_CM_FreeInventory",
32: "DOTA_CM_BuyBackStateAlert",
33: "DOTA_CM_QuickBuyAlert",
34: "DOTA_CM_HeroStatueLike",
35: "DOTA_CM_ModifierAlert",
36: "DOTA_CM_TeamShowcaseEditor",
37: "DOTA_CM_HPManaAlert",
38: "DOTA_CM_GlyphAlert",
39: "DOTA_CM_TeamShowcaseClientData",
40: "DOTA_CM_PlayTeamShowcase",
41: "DOTA_CM_EventCNY2015Cmd",
42: "DOTA_CM_ChallengeSelect",
43: "DOTA_CM_ChallengeReroll",
44: "DOTA_CM_ClickedBuff",
}
var EDotaClientMessages_value = map[string]int32{
"DOTA_CM_MapLine": 1,
"DOTA_CM_AspectRatio": 2,
"DOTA_CM_MapPing": 3,
"DOTA_CM_UnitsAutoAttack": 4,
"DOTA_CM_AutoPurchaseItems": 5,
"DOTA_CM_TestItems": 6,
"DOTA_CM_SearchString": 7,
"DOTA_CM_Pause": 8,
"DOTA_CM_ShopViewMode": 9,
"DOTA_CM_SetUnitShareFlag": 10,
"DOTA_CM_SwapRequest": 11,
"DOTA_CM_SwapAccept": 12,
"DOTA_CM_WorldLine": 13,
"DOTA_CM_RequestGraphUpdate": 14,
"DOTA_CM_ItemAlert": 15,
"DOTA_CM_ChatWheel": 16,
"DOTA_CM_SendStatPopup": 17,
"DOTA_CM_BeginLastHitChallenge": 18,
"DOTA_CM_UpdateQuickBuy": 19,
"DOTA_CM_UpdateCoachListen": 20,
"DOTA_CM_CoachHUDPing": 21,
"DOTA_CM_RecordVote": 22,
"DOTA_CM_UnitsAutoAttackAfterSpell": 23,
"DOTA_CM_WillPurchaseAlert": 24,
"DOTA_CM_PlayerShowCase": 25,
"DOTA_CM_TeleportRequiresHalt": 26,
"DOTA_CM_CameraZoomAmount": 27,
"DOTA_CM_BroadcasterUsingCamerman": 28,
"DOTA_CM_BroadcasterUsingAssistedCameraOperator": 29,
"DOTA_CM_EnemyItemAlert": 30,
"DOTA_CM_FreeInventory": 31,
"DOTA_CM_BuyBackStateAlert": 32,
"DOTA_CM_QuickBuyAlert": 33,
"DOTA_CM_HeroStatueLike": 34,
"DOTA_CM_ModifierAlert": 35,
"DOTA_CM_TeamShowcaseEditor": 36,
"DOTA_CM_HPManaAlert": 37,
"DOTA_CM_GlyphAlert": 38,
"DOTA_CM_TeamShowcaseClientData": 39,
"DOTA_CM_PlayTeamShowcase": 40,
"DOTA_CM_EventCNY2015Cmd": 41,
"DOTA_CM_ChallengeSelect": 42,
"DOTA_CM_ChallengeReroll": 43,
"DOTA_CM_ClickedBuff": 44,
}
func (x EDotaClientMessages) Enum() *EDotaClientMessages {
p := new(EDotaClientMessages)
*p = x
return p
}
func (x EDotaClientMessages) String() string {
return proto.EnumName(EDotaClientMessages_name, int32(x))
}
func (x *EDotaClientMessages) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(EDotaClientMessages_value, data, "EDotaClientMessages")
if err != nil {
return err
}
*x = EDotaClientMessages(value)
return nil
}
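// Hedged usage note (not generated code): the name/value maps above back the
// String and UnmarshalJSON helpers, e.g. EDotaClientMessages_DOTA_CM_MapPing.String()
// yields "DOTA_CM_MapPing".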
type CDOTAClientMsg_MapPing struct {
LocationPing *CDOTAMsg_LocationPing `protobuf:"bytes,1,opt,name=location_ping" json:"location_ping,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_MapPing) Reset() { *m = CDOTAClientMsg_MapPing{} }
func (m *CDOTAClientMsg_MapPing) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_MapPing) ProtoMessage() {}
func (m *CDOTAClientMsg_MapPing) GetLocationPing() *CDOTAMsg_LocationPing {
if m != nil {
return m.LocationPing
}
return nil
}
type CDOTAClientMsg_ItemAlert struct {
ItemAlert *CDOTAMsg_ItemAlert `protobuf:"bytes,1,opt,name=item_alert" json:"item_alert,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ItemAlert) Reset() { *m = CDOTAClientMsg_ItemAlert{} }
func (m *CDOTAClientMsg_ItemAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ItemAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_ItemAlert) GetItemAlert() *CDOTAMsg_ItemAlert {
if m != nil {
return m.ItemAlert
}
return nil
}
type CDOTAClientMsg_EnemyItemAlert struct {
ItemEntindex *uint32 `protobuf:"varint,1,opt,name=item_entindex" json:"item_entindex,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_EnemyItemAlert) Reset() { *m = CDOTAClientMsg_EnemyItemAlert{} }
func (m *CDOTAClientMsg_EnemyItemAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_EnemyItemAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_EnemyItemAlert) GetItemEntindex() uint32 {
if m != nil && m.ItemEntindex != nil {
return *m.ItemEntindex
}
return 0
}
type CDOTAClientMsg_ModifierAlert struct {
BuffInternalIndex *int32 `protobuf:"varint,1,opt,name=buff_internal_index" json:"buff_internal_index,omitempty"`
TargetEntindex *uint32 `protobuf:"varint,2,opt,name=target_entindex" json:"target_entindex,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ModifierAlert) Reset() { *m = CDOTAClientMsg_ModifierAlert{} }
func (m *CDOTAClientMsg_ModifierAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ModifierAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_ModifierAlert) GetBuffInternalIndex() int32 {
if m != nil && m.BuffInternalIndex != nil {
return *m.BuffInternalIndex
}
return 0
}
func (m *CDOTAClientMsg_ModifierAlert) GetTargetEntindex() uint32 {
if m != nil && m.TargetEntindex != nil {
return *m.TargetEntindex
}
return 0
}
type CDOTAClientMsg_ClickedBuff struct {
BuffInternalIndex *int32 `protobuf:"varint,1,opt,name=buff_internal_index" json:"buff_internal_index,omitempty"`
TargetEntindex *uint32 `protobuf:"varint,2,opt,name=target_entindex" json:"target_entindex,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ClickedBuff) Reset() { *m = CDOTAClientMsg_ClickedBuff{} }
func (m *CDOTAClientMsg_ClickedBuff) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ClickedBuff) ProtoMessage() {}
func (m *CDOTAClientMsg_ClickedBuff) GetBuffInternalIndex() int32 {
if m != nil && m.BuffInternalIndex != nil {
return *m.BuffInternalIndex
}
return 0
}
func (m *CDOTAClientMsg_ClickedBuff) GetTargetEntindex() uint32 {
if m != nil && m.TargetEntindex != nil {
return *m.TargetEntindex
}
return 0
}
type CDOTAClientMsg_HPManaAlert struct {
TargetEntindex *uint32 `protobuf:"varint,1,opt,name=target_entindex" json:"target_entindex,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_HPManaAlert) Reset() { *m = CDOTAClientMsg_HPManaAlert{} }
func (m *CDOTAClientMsg_HPManaAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_HPManaAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_HPManaAlert) GetTargetEntindex() uint32 {
if m != nil && m.TargetEntindex != nil {
return *m.TargetEntindex
}
return 0
}
type CDOTAClientMsg_GlyphAlert struct {
Negative *bool `protobuf:"varint,1,opt,name=negative" json:"negative,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_GlyphAlert) Reset() { *m = CDOTAClientMsg_GlyphAlert{} }
func (m *CDOTAClientMsg_GlyphAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_GlyphAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_GlyphAlert) GetNegative() bool {
if m != nil && m.Negative != nil {
return *m.Negative
}
return false
}
type CDOTAClientMsg_MapLine struct {
Mapline *CDOTAMsg_MapLine `protobuf:"bytes,1,opt,name=mapline" json:"mapline,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_MapLine) Reset() { *m = CDOTAClientMsg_MapLine{} }
func (m *CDOTAClientMsg_MapLine) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_MapLine) ProtoMessage() {}
func (m *CDOTAClientMsg_MapLine) GetMapline() *CDOTAMsg_MapLine {
if m != nil {
return m.Mapline
}
return nil
}
type CDOTAClientMsg_AspectRatio struct {
Ratio *float32 `protobuf:"fixed32,1,opt,name=ratio" json:"ratio,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_AspectRatio) Reset() { *m = CDOTAClientMsg_AspectRatio{} }
func (m *CDOTAClientMsg_AspectRatio) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_AspectRatio) ProtoMessage() {}
func (m *CDOTAClientMsg_AspectRatio) GetRatio() float32 {
if m != nil && m.Ratio != nil {
return *m.Ratio
}
return 0
}
type CDOTAClientMsg_UnitsAutoAttack struct {
Enabled *bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_UnitsAutoAttack) Reset() { *m = CDOTAClientMsg_UnitsAutoAttack{} }
func (m *CDOTAClientMsg_UnitsAutoAttack) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_UnitsAutoAttack) ProtoMessage() {}
func (m *CDOTAClientMsg_UnitsAutoAttack) GetEnabled() bool {
if m != nil && m.Enabled != nil {
return *m.Enabled
}
return false
}
type CDOTAClientMsg_UnitsAutoAttackAfterSpell struct {
Enabled *bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_UnitsAutoAttackAfterSpell) Reset() {
*m = CDOTAClientMsg_UnitsAutoAttackAfterSpell{}
}
func (m *CDOTAClientMsg_UnitsAutoAttackAfterSpell) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_UnitsAutoAttackAfterSpell) ProtoMessage() {}
func (m *CDOTAClientMsg_UnitsAutoAttackAfterSpell) GetEnabled() bool {
if m != nil && m.Enabled != nil {
return *m.Enabled
}
return false
}
type CDOTAClientMsg_TeleportRequiresHalt struct {
Enabled *bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_TeleportRequiresHalt) Reset() { *m = CDOTAClientMsg_TeleportRequiresHalt{} }
func (m *CDOTAClientMsg_TeleportRequiresHalt) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_TeleportRequiresHalt) ProtoMessage() {}
func (m *CDOTAClientMsg_TeleportRequiresHalt) GetEnabled() bool {
if m != nil && m.Enabled != nil {
return *m.Enabled
}
return false
}
type CDOTAClientMsg_AutoPurchaseItems struct {
Enabled *bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_AutoPurchaseItems) Reset() { *m = CDOTAClientMsg_AutoPurchaseItems{} }
func (m *CDOTAClientMsg_AutoPurchaseItems) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_AutoPurchaseItems) ProtoMessage() {}
func (m *CDOTAClientMsg_AutoPurchaseItems) GetEnabled() bool {
if m != nil && m.Enabled != nil {
return *m.Enabled
}
return false
}
type CDOTAClientMsg_TestItems struct {
KeyValues *string `protobuf:"bytes,1,opt,name=key_values" json:"key_values,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_TestItems) Reset() { *m = CDOTAClientMsg_TestItems{} }
func (m *CDOTAClientMsg_TestItems) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_TestItems) ProtoMessage() {}
func (m *CDOTAClientMsg_TestItems) GetKeyValues() string {
if m != nil && m.KeyValues != nil {
return *m.KeyValues
}
return ""
}
type CDOTAClientMsg_SearchString struct {
Search *string `protobuf:"bytes,1,opt,name=search" json:"search,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_SearchString) Reset() { *m = CDOTAClientMsg_SearchString{} }
func (m *CDOTAClientMsg_SearchString) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_SearchString) ProtoMessage() {}
func (m *CDOTAClientMsg_SearchString) GetSearch() string {
if m != nil && m.Search != nil {
return *m.Search
}
return ""
}
type CDOTAClientMsg_Pause struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_Pause) Reset() { *m = CDOTAClientMsg_Pause{} }
func (m *CDOTAClientMsg_Pause) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_Pause) ProtoMessage() {}
type CDOTAClientMsg_ShopViewMode struct {
Mode *uint32 `protobuf:"varint,1,opt,name=mode" json:"mode,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ShopViewMode) Reset() { *m = CDOTAClientMsg_ShopViewMode{} }
func (m *CDOTAClientMsg_ShopViewMode) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ShopViewMode) ProtoMessage() {}
func (m *CDOTAClientMsg_ShopViewMode) GetMode() uint32 {
if m != nil && m.Mode != nil {
return *m.Mode
}
return 0
}
type CDOTAClientMsg_SetUnitShareFlag struct {
PlayerID *uint32 `protobuf:"varint,1,opt,name=playerID" json:"playerID,omitempty"`
Flag *uint32 `protobuf:"varint,2,opt,name=flag" json:"flag,omitempty"`
State *bool `protobuf:"varint,3,opt,name=state" json:"state,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_SetUnitShareFlag) Reset() { *m = CDOTAClientMsg_SetUnitShareFlag{} }
func (m *CDOTAClientMsg_SetUnitShareFlag) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_SetUnitShareFlag) ProtoMessage() {}
func (m *CDOTAClientMsg_SetUnitShareFlag) GetPlayerID() uint32 {
if m != nil && m.PlayerID != nil {
return *m.PlayerID
}
return 0
}
func (m *CDOTAClientMsg_SetUnitShareFlag) GetFlag() uint32 {
if m != nil && m.Flag != nil {
return *m.Flag
}
return 0
}
func (m *CDOTAClientMsg_SetUnitShareFlag) GetState() bool {
if m != nil && m.State != nil {
return *m.State
}
return false
}
type CDOTAClientMsg_SwapRequest struct {
PlayerId *uint32 `protobuf:"varint,1,opt,name=player_id" json:"player_id,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_SwapRequest) Reset() { *m = CDOTAClientMsg_SwapRequest{} }
func (m *CDOTAClientMsg_SwapRequest) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_SwapRequest) ProtoMessage() {}
func (m *CDOTAClientMsg_SwapRequest) GetPlayerId() uint32 {
if m != nil && m.PlayerId != nil {
return *m.PlayerId
}
return 0
}
type CDOTAClientMsg_SwapAccept struct {
PlayerId *uint32 `protobuf:"varint,1,opt,name=player_id" json:"player_id,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_SwapAccept) Reset() { *m = CDOTAClientMsg_SwapAccept{} }
func (m *CDOTAClientMsg_SwapAccept) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_SwapAccept) ProtoMessage() {}
func (m *CDOTAClientMsg_SwapAccept) GetPlayerId() uint32 {
if m != nil && m.PlayerId != nil {
return *m.PlayerId
}
return 0
}
type CDOTAClientMsg_WorldLine struct {
Worldline *CDOTAMsg_WorldLine `protobuf:"bytes,1,opt,name=worldline" json:"worldline,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_WorldLine) Reset() { *m = CDOTAClientMsg_WorldLine{} }
func (m *CDOTAClientMsg_WorldLine) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_WorldLine) ProtoMessage() {}
func (m *CDOTAClientMsg_WorldLine) GetWorldline() *CDOTAMsg_WorldLine {
if m != nil {
return m.Worldline
}
return nil
}
type CDOTAClientMsg_RequestGraphUpdate struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_RequestGraphUpdate) Reset() { *m = CDOTAClientMsg_RequestGraphUpdate{} }
func (m *CDOTAClientMsg_RequestGraphUpdate) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_RequestGraphUpdate) ProtoMessage() {}
type CDOTAClientMsg_ChatWheel struct {
ChatMessage *EDOTAChatWheelMessage `protobuf:"varint,1,opt,name=chat_message,enum=dota.EDOTAChatWheelMessage,def=0" json:"chat_message,omitempty"`
ParamHeroId *uint32 `protobuf:"varint,2,opt,name=param_hero_id" json:"param_hero_id,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ChatWheel) Reset() { *m = CDOTAClientMsg_ChatWheel{} }
func (m *CDOTAClientMsg_ChatWheel) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ChatWheel) ProtoMessage() {}
const Default_CDOTAClientMsg_ChatWheel_ChatMessage EDOTAChatWheelMessage = EDOTAChatWheelMessage_k_EDOTA_CW_Ok
func (m *CDOTAClientMsg_ChatWheel) GetChatMessage() EDOTAChatWheelMessage {
if m != nil && m.ChatMessage != nil {
return *m.ChatMessage
}
return Default_CDOTAClientMsg_ChatWheel_ChatMessage
}
func (m *CDOTAClientMsg_ChatWheel) GetParamHeroId() uint32 {
if m != nil && m.ParamHeroId != nil {
return *m.ParamHeroId
}
return 0
}
type CDOTAClientMsg_SendStatPopup struct {
Statpopup *CDOTAMsg_SendStatPopup `protobuf:"bytes,1,opt,name=statpopup" json:"statpopup,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_SendStatPopup) Reset() { *m = CDOTAClientMsg_SendStatPopup{} }
func (m *CDOTAClientMsg_SendStatPopup) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_SendStatPopup) ProtoMessage() {}
func (m *CDOTAClientMsg_SendStatPopup) GetStatpopup() *CDOTAMsg_SendStatPopup {
if m != nil {
return m.Statpopup
}
return nil
}
type CDOTAClientMsg_BeginLastHitChallenge struct {
ChosenLane *uint32 `protobuf:"varint,1,opt,name=chosen_lane" json:"chosen_lane,omitempty"`
HelperEnabled *bool `protobuf:"varint,2,opt,name=helper_enabled" json:"helper_enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_BeginLastHitChallenge) Reset() { *m = CDOTAClientMsg_BeginLastHitChallenge{} }
func (m *CDOTAClientMsg_BeginLastHitChallenge) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_BeginLastHitChallenge) ProtoMessage() {}
func (m *CDOTAClientMsg_BeginLastHitChallenge) GetChosenLane() uint32 {
if m != nil && m.ChosenLane != nil {
return *m.ChosenLane
}
return 0
}
func (m *CDOTAClientMsg_BeginLastHitChallenge) GetHelperEnabled() bool {
if m != nil && m.HelperEnabled != nil {
return *m.HelperEnabled
}
return false
}
type CDOTAClientMsg_UpdateQuickBuyItem struct {
ItemType *int32 `protobuf:"varint,1,opt,name=item_type" json:"item_type,omitempty"`
Purchasable *bool `protobuf:"varint,2,opt,name=purchasable" json:"purchasable,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_UpdateQuickBuyItem) Reset() { *m = CDOTAClientMsg_UpdateQuickBuyItem{} }
func (m *CDOTAClientMsg_UpdateQuickBuyItem) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_UpdateQuickBuyItem) ProtoMessage() {}
func (m *CDOTAClientMsg_UpdateQuickBuyItem) GetItemType() int32 {
if m != nil && m.ItemType != nil {
return *m.ItemType
}
return 0
}
func (m *CDOTAClientMsg_UpdateQuickBuyItem) GetPurchasable() bool {
if m != nil && m.Purchasable != nil {
return *m.Purchasable
}
return false
}
type CDOTAClientMsg_UpdateQuickBuy struct {
Items []*CDOTAClientMsg_UpdateQuickBuyItem `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_UpdateQuickBuy) Reset() { *m = CDOTAClientMsg_UpdateQuickBuy{} }
func (m *CDOTAClientMsg_UpdateQuickBuy) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_UpdateQuickBuy) ProtoMessage() {}
func (m *CDOTAClientMsg_UpdateQuickBuy) GetItems() []*CDOTAClientMsg_UpdateQuickBuyItem {
if m != nil {
return m.Items
}
return nil
}
type CDOTAClientMsg_UpdateCoachListen struct {
PlayerMask *uint32 `protobuf:"varint,1,opt,name=player_mask" json:"player_mask,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_UpdateCoachListen) Reset() { *m = CDOTAClientMsg_UpdateCoachListen{} }
func (m *CDOTAClientMsg_UpdateCoachListen) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_UpdateCoachListen) ProtoMessage() {}
func (m *CDOTAClientMsg_UpdateCoachListen) GetPlayerMask() uint32 {
if m != nil && m.PlayerMask != nil {
return *m.PlayerMask
}
return 0
}
type CDOTAClientMsg_CoachHUDPing struct {
HudPing *CDOTAMsg_CoachHUDPing `protobuf:"bytes,1,opt,name=hud_ping" json:"hud_ping,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_CoachHUDPing) Reset() { *m = CDOTAClientMsg_CoachHUDPing{} }
func (m *CDOTAClientMsg_CoachHUDPing) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_CoachHUDPing) ProtoMessage() {}
func (m *CDOTAClientMsg_CoachHUDPing) GetHudPing() *CDOTAMsg_CoachHUDPing {
if m != nil {
return m.HudPing
}
return nil
}
type CDOTAClientMsg_RecordVote struct {
ChoiceIndex *int32 `protobuf:"varint,1,opt,name=choice_index" json:"choice_index,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_RecordVote) Reset() { *m = CDOTAClientMsg_RecordVote{} }
func (m *CDOTAClientMsg_RecordVote) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_RecordVote) ProtoMessage() {}
func (m *CDOTAClientMsg_RecordVote) GetChoiceIndex() int32 {
if m != nil && m.ChoiceIndex != nil {
return *m.ChoiceIndex
}
return 0
}
type CDOTAClientMsg_WillPurchaseAlert struct {
Itemid *int32 `protobuf:"varint,1,opt,name=itemid" json:"itemid,omitempty"`
GoldRemaining *uint32 `protobuf:"varint,2,opt,name=gold_remaining" json:"gold_remaining,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_WillPurchaseAlert) Reset() { *m = CDOTAClientMsg_WillPurchaseAlert{} }
func (m *CDOTAClientMsg_WillPurchaseAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_WillPurchaseAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_WillPurchaseAlert) GetItemid() int32 {
if m != nil && m.Itemid != nil {
return *m.Itemid
}
return 0
}
func (m *CDOTAClientMsg_WillPurchaseAlert) GetGoldRemaining() uint32 {
if m != nil && m.GoldRemaining != nil {
return *m.GoldRemaining
}
return 0
}
type CDOTAClientMsg_BuyBackStateAlert struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_BuyBackStateAlert) Reset() { *m = CDOTAClientMsg_BuyBackStateAlert{} }
func (m *CDOTAClientMsg_BuyBackStateAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_BuyBackStateAlert) ProtoMessage() {}
type CDOTAClientMsg_QuickBuyAlert struct {
Itemid *int32 `protobuf:"varint,1,opt,name=itemid" json:"itemid,omitempty"`
GoldRequired *int32 `protobuf:"varint,2,opt,name=gold_required" json:"gold_required,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_QuickBuyAlert) Reset() { *m = CDOTAClientMsg_QuickBuyAlert{} }
func (m *CDOTAClientMsg_QuickBuyAlert) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_QuickBuyAlert) ProtoMessage() {}
func (m *CDOTAClientMsg_QuickBuyAlert) GetItemid() int32 {
if m != nil && m.Itemid != nil {
return *m.Itemid
}
return 0
}
func (m *CDOTAClientMsg_QuickBuyAlert) GetGoldRequired() int32 {
if m != nil && m.GoldRequired != nil {
return *m.GoldRequired
}
return 0
}
type CDOTAClientMsg_PlayerShowCase struct {
Showcase *bool `protobuf:"varint,1,opt,name=showcase" json:"showcase,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_PlayerShowCase) Reset() { *m = CDOTAClientMsg_PlayerShowCase{} }
func (m *CDOTAClientMsg_PlayerShowCase) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_PlayerShowCase) ProtoMessage() {}
func (m *CDOTAClientMsg_PlayerShowCase) GetShowcase() bool {
if m != nil && m.Showcase != nil {
return *m.Showcase
}
return false
}
type CDOTAClientMsg_CameraZoomAmount struct {
ZoomAmount *float32 `protobuf:"fixed32,1,opt,name=zoom_amount" json:"zoom_amount,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_CameraZoomAmount) Reset() { *m = CDOTAClientMsg_CameraZoomAmount{} }
func (m *CDOTAClientMsg_CameraZoomAmount) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_CameraZoomAmount) ProtoMessage() {}
func (m *CDOTAClientMsg_CameraZoomAmount) GetZoomAmount() float32 {
if m != nil && m.ZoomAmount != nil {
return *m.ZoomAmount
}
return 0
}
type CDOTAClientMsg_BroadcasterUsingCameraman struct {
Cameraman *bool `protobuf:"varint,1,opt,name=cameraman" json:"cameraman,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_BroadcasterUsingCameraman) Reset() {
*m = CDOTAClientMsg_BroadcasterUsingCameraman{}
}
func (m *CDOTAClientMsg_BroadcasterUsingCameraman) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_BroadcasterUsingCameraman) ProtoMessage() {}
func (m *CDOTAClientMsg_BroadcasterUsingCameraman) GetCameraman() bool {
if m != nil && m.Cameraman != nil {
return *m.Cameraman
}
return false
}
type CDOTAClientMsg_BroadcasterUsingAssistedCameraOperator struct {
Enabled *bool `protobuf:"varint,1,opt,name=enabled" json:"enabled,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_BroadcasterUsingAssistedCameraOperator) Reset() {
*m = CDOTAClientMsg_BroadcasterUsingAssistedCameraOperator{}
}
func (m *CDOTAClientMsg_BroadcasterUsingAssistedCameraOperator) String() string {
return proto.CompactTextString(m)
}
func (*CDOTAClientMsg_BroadcasterUsingAssistedCameraOperator) ProtoMessage() {}
func (m *CDOTAClientMsg_BroadcasterUsingAssistedCameraOperator) GetEnabled() bool {
if m != nil && m.Enabled != nil {
return *m.Enabled
}
return false
}
type CAdditionalEquipSlotClientMsg struct {
ClassId *uint32 `protobuf:"varint,1,opt,name=class_id" json:"class_id,omitempty"`
SlotId *uint32 `protobuf:"varint,2,opt,name=slot_id" json:"slot_id,omitempty"`
DefIndex *uint32 `protobuf:"varint,3,opt,name=def_index" json:"def_index,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CAdditionalEquipSlotClientMsg) Reset() { *m = CAdditionalEquipSlotClientMsg{} }
func (m *CAdditionalEquipSlotClientMsg) String() string { return proto.CompactTextString(m) }
func (*CAdditionalEquipSlotClientMsg) ProtoMessage() {}
func (m *CAdditionalEquipSlotClientMsg) GetClassId() uint32 {
if m != nil && m.ClassId != nil {
return *m.ClassId
}
return 0
}
func (m *CAdditionalEquipSlotClientMsg) GetSlotId() uint32 {
if m != nil && m.SlotId != nil {
return *m.SlotId
}
return 0
}
func (m *CAdditionalEquipSlotClientMsg) GetDefIndex() uint32 {
if m != nil && m.DefIndex != nil {
return *m.DefIndex
}
return 0
}
type CDOTAClientMsg_FreeInventory struct {
Equips []*CAdditionalEquipSlotClientMsg `protobuf:"bytes,1,rep,name=equips" json:"equips,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_FreeInventory) Reset() { *m = CDOTAClientMsg_FreeInventory{} }
func (m *CDOTAClientMsg_FreeInventory) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_FreeInventory) ProtoMessage() {}
func (m *CDOTAClientMsg_FreeInventory) GetEquips() []*CAdditionalEquipSlotClientMsg {
if m != nil {
return m.Equips
}
return nil
}
type CDOTAClientMsg_HeroStatueLike struct {
OwnerPlayerId *uint32 `protobuf:"varint,1,opt,name=owner_player_id" json:"owner_player_id,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_HeroStatueLike) Reset() { *m = CDOTAClientMsg_HeroStatueLike{} }
func (m *CDOTAClientMsg_HeroStatueLike) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_HeroStatueLike) ProtoMessage() {}
func (m *CDOTAClientMsg_HeroStatueLike) GetOwnerPlayerId() uint32 {
if m != nil && m.OwnerPlayerId != nil {
return *m.OwnerPlayerId
}
return 0
}
type CDOTAClientMsg_TeamShowcaseEditor struct {
Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_TeamShowcaseEditor) Reset() { *m = CDOTAClientMsg_TeamShowcaseEditor{} }
func (m *CDOTAClientMsg_TeamShowcaseEditor) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_TeamShowcaseEditor) ProtoMessage() {}
func (m *CDOTAClientMsg_TeamShowcaseEditor) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
type CDOTAClientMsg_TeamShowcaseClientData struct {
Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_TeamShowcaseClientData) Reset() { *m = CDOTAClientMsg_TeamShowcaseClientData{} }
func (m *CDOTAClientMsg_TeamShowcaseClientData) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_TeamShowcaseClientData) ProtoMessage() {}
func (m *CDOTAClientMsg_TeamShowcaseClientData) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
type CDOTAClientMsg_PlayTeamShowcase struct {
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_PlayTeamShowcase) Reset() { *m = CDOTAClientMsg_PlayTeamShowcase{} }
func (m *CDOTAClientMsg_PlayTeamShowcase) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_PlayTeamShowcase) ProtoMessage() {}
type CDOTAClientMsg_EventCNY2015Cmd struct {
Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_EventCNY2015Cmd) Reset() { *m = CDOTAClientMsg_EventCNY2015Cmd{} }
func (m *CDOTAClientMsg_EventCNY2015Cmd) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_EventCNY2015Cmd) ProtoMessage() {}
func (m *CDOTAClientMsg_EventCNY2015Cmd) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
type CDOTAClientMsg_ChallengeSelect struct {
EventId *uint32 `protobuf:"varint,1,opt,name=event_id" json:"event_id,omitempty"`
SlotId *uint32 `protobuf:"varint,2,opt,name=slot_id" json:"slot_id,omitempty"`
SequenceId *uint32 `protobuf:"varint,3,opt,name=sequence_id" json:"sequence_id,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ChallengeSelect) Reset() { *m = CDOTAClientMsg_ChallengeSelect{} }
func (m *CDOTAClientMsg_ChallengeSelect) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ChallengeSelect) ProtoMessage() {}
func (m *CDOTAClientMsg_ChallengeSelect) GetEventId() uint32 {
if m != nil && m.EventId != nil {
return *m.EventId
}
return 0
}
func (m *CDOTAClientMsg_ChallengeSelect) GetSlotId() uint32 {
if m != nil && m.SlotId != nil {
return *m.SlotId
}
return 0
}
func (m *CDOTAClientMsg_ChallengeSelect) GetSequenceId() uint32 {
if m != nil && m.SequenceId != nil {
return *m.SequenceId
}
return 0
}
type CDOTAClientMsg_ChallengeReroll struct {
EventId *uint32 `protobuf:"varint,1,opt,name=event_id" json:"event_id,omitempty"`
SlotId *uint32 `protobuf:"varint,2,opt,name=slot_id" json:"slot_id,omitempty"`
SequenceId *uint32 `protobuf:"varint,3,opt,name=sequence_id" json:"sequence_id,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *CDOTAClientMsg_ChallengeReroll) Reset() { *m = CDOTAClientMsg_ChallengeReroll{} }
func (m *CDOTAClientMsg_ChallengeReroll) String() string { return proto.CompactTextString(m) }
func (*CDOTAClientMsg_ChallengeReroll) ProtoMessage() {}
func (m *CDOTAClientMsg_ChallengeReroll) GetEventId() uint32 {
if m != nil && m.EventId != nil {
return *m.EventId
}
return 0
}
func (m *CDOTAClientMsg_ChallengeReroll) GetSlotId() uint32 {
if m != nil && m.SlotId != nil {
return *m.SlotId
}
return 0
}
func (m *CDOTAClientMsg_ChallengeReroll) GetSequenceId() uint32 {
if m != nil && m.SequenceId != nil {
return *m.SequenceId
}
return 0
}
func | () {
proto.RegisterEnum("dota.EDotaClientMessages", EDotaClientMessages_name, EDotaClientMessages_value)
}
| init |
alteromonadaceaebacteriumbs31.py | """
This file offers the methods to automatically retrieve the graph Alteromonadaceae bacterium Bs31.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def | (
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Alteromonadaceae bacterium Bs31 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Alteromonadaceae bacterium Bs31 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="AlteromonadaceaeBacteriumBs31",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
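# Hedged usage sketch (kept as comments; the call below assumes this module's
# retrieval function is invoked directly, and `get_name()` on the returned
# ensmallen Graph is an assumption, not confirmed by this file):
#
#     graph = AlteromonadaceaeBacteriumBs31(directed=False, version="links.v11.5")
#     print(graph.get_name())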
| AlteromonadaceaeBacteriumBs31 |
action_test.go | package towercli
import (
"io/ioutil"
"testing"
"get.porter.sh/porter/pkg/exec/builder"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
yaml "gopkg.in/yaml.v2"
)
func TestMixin_UnmarshalStep(t *testing.T) | {
b, err := ioutil.ReadFile("testdata/step-input.yaml")
require.NoError(t, err)
var action Action
err = yaml.Unmarshal(b, &action)
require.NoError(t, err)
assert.Equal(t, "install", action.Name)
require.Len(t, action.Steps, 1)
step := action.Steps[0]
assert.Equal(t, "Summon Minion", step.Description)
assert.NotEmpty(t, step.Outputs)
assert.Equal(t, Output{Name: "VICTORY", JsonPath: "$Id"}, step.Outputs[0])
require.Len(t, step.Arguments, 1)
assert.Equal(t, "man-e-faces", step.Arguments[0])
require.Len(t, step.Flags, 1)
assert.Equal(t, builder.NewFlag("species", "human"), step.Flags[0])
} |
|
23ae8758-5cc5-11e4-af55-00155d01fe08.py | #!/usr/bin/python
################################################################################
# 23ae8758-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "23ae8758-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Execute command and parse capture standard output
stdout = cli.system("ls -l /etc/gshadow")
# Split output lines
self.output = stdout.split('\n')
# Process standard output
lineNumber = 0
for line in self.output:
lineNumber += 1
if len(line.strip()) > 0:
subStrings = line.split(' ')
if "----------" in subStrings[0]:
self.is_compliant = True
return self.is_compliant
def | (self, cli):
cli.system("chmod 0000 /etc/gshadow")
| fix |
corrupt.go | package cmd
import (
"context"
"fmt"
"github.com/urfave/cli"
"github.com/alexei-led/pumba/pkg/chaos"
"github.com/alexei-led/pumba/pkg/chaos/netem"
)
type corruptContext struct {
context context.Context
}
// NewCorruptCLICommand initializes the CLI corrupt command and binds it to the corruptContext
func NewCorruptCLICommand(ctx context.Context) *cli.Command {
cmdContext := &corruptContext{context: ctx}
return &cli.Command{
Name: "corrupt",
Flags: []cli.Flag{
cli.Float64Flag{
Name: "percent, p",
Usage: "packet corrupt percentage",
Value: 0.0,
},
cli.Float64Flag{
Name: "correlation, c",
Usage: "corrupt correlation; in percentage",
Value: 0.0,
},
},
Usage: "adds packet corruption",
ArgsUsage: fmt.Sprintf("containers (name, list of names, or RE2 regex if prefixed with %q)", chaos.Re2Prefix),
Description: "adds packet corruption, based on independent (Bernoulli) probability model\n \tsee: http://www.voiptroubleshooter.com/indepth/burstloss.html",
Action: cmdContext.corrupt,
}
}
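// Example invocation (illustrative; the `corrupt` flags match the definitions
// above, while the parent `netem` flags are assumed from the lookups in the
// corrupt handler below):
//
//	pumba netem --duration 5s --interface eth0 corrupt --percent 10 --correlation 25 mycontainer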
// NETEM Corrupt Command - network emulation corrupt
func (cmd *corruptContext) corrupt(c *cli.Context) error {
// get random flag
random := c.GlobalBool("random")
// get dry-run mode
dryRun := c.GlobalBool("dry-run")
// get names or pattern
names, pattern := chaos.GetNamesOrPattern(c)
// get global chaos interval
interval := c.GlobalString("interval")
// get network interface from parent `netem` command
iface := c.Parent().String("interface")
// get ips list from parent `netem` command `target` flag
ips := c.Parent().StringSlice("target") | duration := c.Parent().String("duration")
// get traffic control image from parent `netem` command
image := c.Parent().String("tc-image")
// get pull tc image flag
pull := c.Parent().BoolT("pull-image")
// get limit for number of containers to netem
limit := c.Parent().Int("limit")
// get corrupt percentage
percent := c.Float64("percent")
// get delay variation
correlation := c.Float64("correlation")
// init netem corrupt command
corruptCommand, err := netem.NewCorruptCommand(chaos.DockerClient, names, pattern, iface, ips, duration, interval, percent, correlation, image, pull, limit, dryRun)
if err != nil {
return err
}
// run netem command
return chaos.RunChaosCommand(cmd.context, corruptCommand, interval, random)
} | // get duration from parent `netem`` command |
app-routing.module.ts | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { HomeComponent } from './home/home.component';
import { CowComponent } from './Cow/Cow.component';
import { SteakComponent } from './Steak/Steak.component';
import { ProductionComponent } from './Production/Production.component';
import { ProcessingComponent } from './Processing/Processing.component';
import { DistributionComponent } from './Distribution/Distribution.component';
import { RetailComponent } from './Retail/Retail.component';
import { RestaurantComponent } from './Restaurant/Restaurant.component';
import { InitTestDataComponent } from './InitTestData/InitTestData.component';
import { ClearDataComponent } from './ClearData/ClearData.component';
import { ProcessComponent } from './Process/Process.component';
import { ProduceComponent } from './Produce/Produce.component';
import { ConsumeComponent } from './Consume/Consume.component';
const routes: Routes = [
{ path: '', component: HomeComponent },
{ path: 'Cow', component: CowComponent },
{ path: 'Steak', component: SteakComponent },
{ path: 'Production', component: ProductionComponent },
{ path: 'Processing', component: ProcessingComponent },
{ path: 'Distribution', component: DistributionComponent },
{ path: 'Retail', component: RetailComponent },
{ path: 'Restaurant', component: RestaurantComponent },
{ path: 'InitTestData', component: InitTestDataComponent },
{ path: 'ClearData', component: ClearDataComponent },
{ path: 'Process', component: ProcessComponent },
{ path: 'Produce', component: ProduceComponent },
{ path: 'Consume', component: ConsumeComponent },
{ path: '**', redirectTo: '' }
];
@NgModule({
imports: [RouterModule.forRoot(routes)],
exports: [RouterModule],
providers: []
})
export class | { }
| AppRoutingModule |
equal.js | // Copyright (c) 2013 Adobe Systems Incorporated. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
Snap.plugin(function (Snap, Element, Paper, glob) {
var names = {},
reUnit = /[a-z]+$/i,
Str = String;
names.stroke = names.fill = "colour";
function getEmpty(item) {
var l = item[0];
switch (l.toLowerCase()) {
case "t": return [l, 0, 0];
case "m": return [l, 1, 0, 0, 1, 0, 0];
case "r": if (item.length == 4) {
return [l, 0, item[2], item[3]];
} else {
return [l, 0];
}
case "s": if (item.length == 5) {
return [l, 1, 1, item[3], item[4]];
} else if (item.length == 3) {
return [l, 1, 1];
} else {
return [l, 1];
}
}
}
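// Normalises two transform strings into comparable from/to arrays; when the
// operation sequences differ structurally, both sides fall back to a single
// matrix so they can still be interpolated.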
function equaliseTransform(t1, t2, getBBox) {
t2 = Str(t2).replace(/\.{3}|\u2026/g, t1);
t1 = Snap.parseTransformString(t1) || [];
t2 = Snap.parseTransformString(t2) || [];
var maxlength = Math.max(t1.length, t2.length),
from = [],
to = [],
i = 0, j, jj,
tt1, tt2;
for (; i < maxlength; i++) {
tt1 = t1[i] || getEmpty(t2[i]);
tt2 = t2[i] || getEmpty(tt1);
if ((tt1[0] != tt2[0]) ||
(tt1[0].toLowerCase() == "r" && (tt1[2] != tt2[2] || tt1[3] != tt2[3])) ||
(tt1[0].toLowerCase() == "s" && (tt1[3] != tt2[3] || tt1[4] != tt2[4]))
) {
t1 = Snap._.transform2matrix(t1, getBBox());
t2 = Snap._.transform2matrix(t2, getBBox());
from = [["m", t1.a, t1.b, t1.c, t1.d, t1.e, t1.f]];
to = [["m", t2.a, t2.b, t2.c, t2.d, t2.e, t2.f]];
break;
}
from[i] = [];
to[i] = [];
for (j = 0, jj = Math.max(tt1.length, tt2.length); j < jj; j++) {
j in tt1 && (from[i][j] = tt1[j]);
j in tt2 && (to[i][j] = tt2[j]);
}
}
return {
from: path2array(from),
to: path2array(to),
f: getPath(from)
};
}
function getNumber(val) {
return val;
}
function getUnit(unit) {
return function (val) {
return +val.toFixed(3) + unit;
};
}
function getViewBox(val) {
return val.join(" ");
}
function getColour(clr) {
return Snap.rgb(clr[0], clr[1], clr[2]);
}
function getPath(path) {
var k = 0, i, ii, j, jj, out, a, b = [];
for (i = 0, ii = path.length; i < ii; i++) {
out = "[";
a = ['"' + path[i][0] + '"'];
for (j = 1, jj = path[i].length; j < jj; j++) {
a[j] = "val[" + (k++) + "]";
}
out += a + "]";
b[i] = out;
}
return Function("val", "return Snap.path.toString.call([" + b + "])");
}
function path2array(path) {
var out = [];
for (var i = 0, ii = path.length; i < ii; i++) {
for (var j = 1, jj = path[i].length; j < jj; j++) {
out.push(path[i][j]);
}
}
return out;
}
function | (obj) {
return isFinite(parseFloat(obj));
}
function arrayEqual(arr1, arr2) {
if (!Snap.is(arr1, "array") || !Snap.is(arr2, "array")) {
return false;
}
return arr1.toString() == arr2.toString();
}
Element.prototype.equal = function (name, b) {
return eve("snap.util.equal", this, name, b).firstDefined();
};
eve.on("snap.util.equal", function (name, b) {
var A, B, a = Str(this.attr(name) || ""),
el = this;
if (isNumeric(a) && isNumeric(b)) {
return {
from: parseFloat(a),
to: parseFloat(b),
f: getNumber
};
}
if (names[name] == "colour") {
A = Snap.color(a);
B = Snap.color(b);
return {
from: [A.r, A.g, A.b, A.opacity],
to: [B.r, B.g, B.b, B.opacity],
f: getColour
};
}
if (name == "viewBox") {
A = this.attr(name).vb.split(" ").map(Number);
B = b.split(" ").map(Number);
return {
from: A,
to: B,
f: getViewBox
};
}
if (name == "transform" || name == "gradientTransform" || name == "patternTransform") {
if (b instanceof Snap.Matrix) {
b = b.toTransformString();
}
if (!Snap._.rgTransform.test(b)) {
b = Snap._.svgTransform2string(b);
}
return equaliseTransform(a, b, function () {
return el.getBBox(1);
});
}
if (name == "d" || name == "path") {
A = Snap.path.toCubic(a, b);
return {
from: path2array(A[0]),
to: path2array(A[1]),
f: getPath(A[0])
};
}
if (name == "points") {
A = Str(a).split(Snap._.separator);
B = Str(b).split(Snap._.separator);
return {
from: A,
to: B,
f: function (val) { return val; }
};
}
var aUnit = a.match(reUnit),
bUnit = Str(b).match(reUnit);
if (aUnit && arrayEqual(aUnit, bUnit)) {
return {
from: parseFloat(a),
to: parseFloat(b),
f: getUnit(aUnit)
};
} else {
return {
from: this.asPX(name),
to: this.asPX(name, b),
f: getNumber
};
}
});
});
| isNumeric |
main.rs | //! logteewoop is a work in progress thing that lets you tee stdout/stderr
//! to a remote logteewoop service.
mod actors;
mod cli;
mod server; | println!("error: {}", err);
std::process::exit(1);
}
} |
fn main() {
if let Err(err) = cli::run() { |
PsdImagePlugin.py | #
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
# __version__ is deprecated and will be removed in a future version. Use
# PIL.__version__ instead.
__version__ = "0.4"
import io
from . import Image, ImageFile, ImagePalette
from ._binary import i8, i16be as i16, i32be as i32
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3),
}
# --------------------------------------------------------------------
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
_close_exclusive_fp_after_loading = False
def _open(self):
read = self.fp.read
#
# header
s = read(26)
if s[:4] != b"8BPS" or i16(s[4:]) != 1:
raise SyntaxError("not a PSD file")
psd_bits = i16(s[22:])
psd_channels = i16(s[12:])
psd_mode = i16(s[24:])
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
raise IOError("not enough channels")
self.mode = mode
self._size = i32(s[18:]), i32(s[14:])
#
# color mode data
size = i32(read(4))
if size:
data = read(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(read(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
read(4) # signature
id = i16(read(2))
name = read(i8(read(1)))
if not (len(name) & 1):
read(1) # padding
data = read(i32(read(4)))
if len(data) & 1:
read(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(read(4))
if size:
end = self.fp.tell() + size
size = i32(read(4))
if size:
self.layers = _layerinfo(self.fp)
self.fp.seek(end)
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self.__fp = self.fp
self.frame = 1
self._min_frame = 1
@property
def n_frames(self):
return len(self.layers)
@property
def is_animated(self):
return len(self.layers) > 1
def seek(self, layer):
if not self._seek_check(layer):
return
# seek to given layer (1..max)
try:
name, mode, bbox, tile = self.layers[layer - 1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self.__fp
return name, bbox
except IndexError:
raise EOFError("no such layer")
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def load_prepare(self):
# create image memory if necessary
if not self.im or self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.fill(self.mode, self.size, 0)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def _close__fp(self):
try:
if self.__fp != self.fp:
self.__fp.close()
except AttributeError:
pass
finally:
self.__fp = None
def _layerinfo(file):
# read layerinfo block
layers = []
read = file.read
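# Note: a negative layer count in the PSD layer-info block means the first
# alpha channel stores the merged result's transparency, so only the
# absolute value is used as the number of layers here.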
for i in range(abs(i16(read(2)))):
# bounding box
y0 = i32(read(4))
x0 = i32(read(4))
y1 = i32(read(4))
x1 = i32(read(4))
# image info
info = []
mode = []
types = list(range(i16(read(2))))
if len(types) > 4:
continue
for i in types:
type = i16(read(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
size = i32(read(4))
info.append((m, size))
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
read(12) # filler
name = ""
size = i32(read(4)) # length of the extra data field
combined = 0
if size:
data_end = file.tell() + size
length = i32(read(4))
if length:
file.seek(length - 16, io.SEEK_CUR)
combined += length + 4
length = i32(read(4))
if length:
file.seek(length, io.SEEK_CUR)
combined += length + 4
length = i8(read(1))
if length:
# Don't know the proper encoding,
# Latin-1 should be a good guess
name = read(length).decode("latin-1", "replace")
combined += length + 1
file.seek(data_end)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
i = 0
for name, mode, bbox in layers:
tile = []
for m in mode:
t = _maketile(file, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
i += 1
return layers
def | (file, mode, bbox, channels):
tile = None
read = file.read
compression = i16(read(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize * ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = read(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer += ";I"
tile.append(("packbits", bbox, offset, layer))
for y in range(ysize):
offset = offset + i16(bytecount[i : i + 2])
i += 2
file.seek(offset)
if offset & 1:
read(1) # padding
return tile
# --------------------------------------------------------------------
# registry
Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
Image.register_extension(PsdImageFile.format, ".psd")
| _maketile |
main.rs | #![recursion_limit="128"]
extern crate strum;
#[macro_use]
extern crate strum_macros;
#[macro_use]
extern crate yew;
use strum::IntoEnumIterator;
use yew::html::*;
#[derive(EnumIter, ToString, Clone, PartialEq)]
enum Filter {
All,
Active,
Completed,
}
impl<'a> Into<Href> for &'a Filter {
fn into(self) -> Href {
match *self {
Filter::All => "#/".into(),
Filter::Active => "#/active".into(),
Filter::Completed => "#/completed".into(),
}
}
}
impl Filter {
fn fit(&self, entry: &Entry) -> bool {
match *self {
Filter::All => true,
Filter::Active => !entry.completed,
Filter::Completed => entry.completed,
}
}
}
struct Model {
entries: Vec<Entry>,
filter: Filter,
value: String,
}
struct Entry {
description: String,
completed: bool,
}
enum Msg {
Add,
Update(String),
Remove(usize),
SetFilter(Filter),
ToggleAll,
Toggle(usize),
ClearCompleted,
Nope,
}
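// Elm-style update function: each Msg mutates the model in place and the
// framework re-renders the view afterwards.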
fn update(_: &mut Context<Msg>, model: &mut Model, msg: Msg) {
match msg {
Msg::Add => {
let entry = Entry {
description: model.value.clone(),
completed: false,
};
model.entries.push(entry);
model.value = "".to_string();
}
Msg::Update(val) => {
println!("Input: {}", val);
model.value = val;
}
Msg::Remove(idx) => {
model.remove(idx);
}
Msg::SetFilter(filter) => {
model.filter = filter;
}
Msg::ToggleAll => {
let status = !model.is_all_completed();
model.toggle_all(status);
}
Msg::Toggle(idx) => {
model.toggle(idx);
}
Msg::ClearCompleted => {
model.clear_completed();
}
Msg::Nope => {}
}
}
fn view(model: &Model) -> Html<Msg> {
html! {
<div class="todomvc-wrapper",>
<section class="todoapp",>
<header class="header",>
<h1>{ "todos" }</h1>
{ view_input(&model) }
</header>
<section class="main",>
<input class="toggle-all", type="checkbox", checked=model.is_all_completed(), onclick=|_| Msg::ToggleAll, />
<ul class="todo-list",>
{ for model.entries.iter().filter(|e| model.filter.fit(e)).enumerate().map(view_entry) }
</ul>
</section>
<footer class="footer",>
<span class="todo-count",>
<strong>{ model.total() }</strong>
{ " item(s) left" }
</span>
<ul class="filters",>
{ for Filter::iter().map(|flt| view_filter(&model, flt)) }
</ul>
<button class="clear-completed", onclick=|_| Msg::ClearCompleted,>
{ format!("Clear completed ({})", model.total_completed()) }
</button>
</footer>
</section>
<footer class="info",>
<p>{ "Double-click to edit a todo" }</p>
<p>{ "Written by " }<a href="https://github.com/DenisKolodin/", target="_blank",>{ "Denis Kolodin" }</a></p>
<p>{ "Part of " }<a href="http://todomvc.com/", target="_blank",>{ "TodoMVC" }</a></p>
</footer>
</div>
}
}
fn view_filter(model: &Model, filter: Filter) -> Html<Msg> {
let flt = filter.clone();
html! {
<li>
<a class=if model.filter == flt { "selected" } else { "not-selected" },
href=&flt,
onclick=move |_| Msg::SetFilter(flt.clone()),>
{ filter }
</a>
</li>
}
}
fn view_input(model: &Model) -> Html<Msg> {
html! {
// You can use standard Rust comments. One line:
// <li></li>
<input class="new-todo",
placeholder="What needs to be done?",
value=&model.value,
oninput=|e: InputData| Msg::Update(e.value),
onkeypress=|e: KeyData| {
if e.key == "Enter" { Msg::Add } else { Msg::Nope }
}, />
/* Or multiline:
<ul>
<li></li>
</ul>
*/
}
}
fn view_entry((idx, entry): (usize, &Entry)) -> Html<Msg> {
html! {
<li>
<div class="view",>
<input class="toggle", type="checkbox", checked=entry.completed, onclick=move|_| Msg::Toggle(idx), />
<label>{ &entry.description }</label>
<button class="destroy", onclick=move |_| Msg::Remove(idx),></button>
</div>
</li>
}
}
fn main() {
let model = Model {
entries: Vec::new(),
filter: Filter::All,
value: "".into(),
};
program(model, update, view);
}
impl Model {
fn total(&self) -> usize {
self.entries.len()
}
fn total_completed(&self) -> usize {
self.entries.iter().filter(|e| Filter::Completed.fit(e)).count()
}
fn is_all_completed(&self) -> bool {
let entries = self.entries.iter()
.filter(|e| self.filter.fit(e))
.collect::<Vec<_>>();
if entries.len() == 0 {
false
} else {
entries.into_iter()
.fold(true, |status, entry| status && entry.completed)
}
}
fn toggle_all(&mut self, value: bool) {
for entry in self.entries.iter_mut() {
if self.filter.fit(entry) {
entry.completed = value;
}
}
}
fn clear_completed(&mut self) {
let entries = self.entries.drain(..)
.filter(|e| Filter::Active.fit(e))
.collect();
self.entries = entries;
}
fn toggle(&mut self, idx: usize) {
let filter = self.filter.clone();
let mut entries = self.entries
.iter_mut()
.filter(|e| filter.fit(e))
.collect::<Vec<_>>();
let entry = entries.get_mut(idx).unwrap();
entry.completed = !entry.completed;
}
fn | (&mut self, idx: usize) {
let idx = {
let filter = self.filter.clone();
let entries = self.entries
.iter()
.enumerate()
.filter(|&(_, e)| filter.fit(e))
.collect::<Vec<_>>();
let &(idx, _) = entries.get(idx).unwrap();
idx
};
self.entries.remove(idx);
}
}
| remove |
visualize_editor_page.ts | /*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*
* Any modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import expect from '@osd/expect/expect.js';
import { FtrProviderContext } from '../ftr_provider_context';
export function | ({ getService, getPageObjects }: FtrProviderContext) {
const find = getService('find');
const log = getService('log');
const retry = getService('retry');
const browser = getService('browser');
const testSubjects = getService('testSubjects');
const comboBox = getService('comboBox');
const { common, header, visChart } = getPageObjects(['common', 'header', 'visChart']);
interface IntervalOptions {
type?: 'default' | 'numeric' | 'custom';
aggNth?: number;
append?: boolean;
}
class VisualizeEditorPage {
public async clickDataTab() {
await testSubjects.click('visEditorTab__data');
}
public async clickOptionsTab() {
await testSubjects.click('visEditorTab__options');
}
public async clickMetricsAndAxes() {
await testSubjects.click('visEditorTab__advanced');
}
public async clickVisEditorTab(tabName: string) {
await testSubjects.click(`visEditorTab__${tabName}`);
await header.waitUntilLoadingHasFinished();
}
public async addInputControl(type?: string) {
if (type) {
const selectInput = await testSubjects.find('selectControlType');
await selectInput.type(type);
}
await testSubjects.click('inputControlEditorAddBtn');
await header.waitUntilLoadingHasFinished();
}
public async inputControlClear() {
await testSubjects.click('inputControlClearBtn');
await header.waitUntilLoadingHasFinished();
}
public async inputControlSubmit() {
await testSubjects.clickWhenNotDisabled('inputControlSubmitBtn');
await visChart.waitForVisualizationRenderingStabilized();
}
public async clickGo() {
const prevRenderingCount = await visChart.getVisualizationRenderingCount();
log.debug(`Before Rendering count ${prevRenderingCount}`);
await testSubjects.clickWhenNotDisabled('visualizeEditorRenderButton');
await visChart.waitForRenderingCount(prevRenderingCount + 1);
}
public async removeDimension(aggNth: number) {
await testSubjects.click(`visEditorAggAccordion${aggNth} > removeDimensionBtn`);
}
public async setFilterParams(aggNth: number, indexPattern: string, field: string) {
await comboBox.set(`indexPatternSelect-${aggNth}`, indexPattern);
await comboBox.set(`fieldSelect-${aggNth}`, field);
}
public async setFilterRange(aggNth: number, min: string, max: string) {
const control = await testSubjects.find(`inputControl${aggNth}`);
const inputMin = await control.findByCssSelector('[name$="minValue"]');
await inputMin.type(min);
const inputMax = await control.findByCssSelector('[name$="maxValue"]');
await inputMax.type(max);
}
public async clickSplitDirection(direction: string) {
const radioBtn = await find.byCssSelector(
`[data-test-subj="visEditorSplitBy"][title="${direction}"]`
);
await radioBtn.click();
}
public async clickAddDateRange() {
await testSubjects.click(`visEditorAddDateRange`);
}
public async setDateRangeByIndex(index: string, from: string, to: string) {
await testSubjects.setValue(`visEditorDateRange${index}__from`, from);
await testSubjects.setValue(`visEditorDateRange${index}__to`, to);
}
/**
* Adds new bucket
* @param bucketName bucket name, like 'X-axis', 'Split rows', 'Split series'
* @param type aggregation type, like 'buckets', 'metrics'
*/
public async clickBucket(bucketName: string, type = 'buckets') {
await testSubjects.click(`visEditorAdd_${type}`);
await testSubjects.click(`visEditorAdd_${type}_${bucketName}`);
}
public async clickEnableCustomRanges() {
await testSubjects.click('heatmapUseCustomRanges');
}
public async clickAddRange() {
await testSubjects.click(`heatmapColorRange__addRangeButton`);
}
public async setCustomRangeByIndex(index: string, from: string, to: string) {
await testSubjects.setValue(`heatmapColorRange${index}__from`, from);
await testSubjects.setValue(`heatmapColorRange${index}__to`, to);
}
public async changeHeatmapColorNumbers(value = 6) {
const input = await testSubjects.find(`heatmapColorsNumber`);
await input.clearValueWithKeyboard();
await input.type(`${value}`);
}
public async getBucketErrorMessage() {
const error = await find.byCssSelector(
'[data-test-subj="bucketsAggGroup"] [data-test-subj="defaultEditorAggSelect"] + .euiFormErrorText'
);
const errorMessage = await error.getAttribute('innerText');
log.debug(errorMessage);
return errorMessage;
}
public async addNewFilterAggregation() {
await testSubjects.click('visEditorAddFilterButton');
}
public async selectField(
fieldValue: string,
groupName = 'buckets',
childAggregationType = false
) {
log.debug(`selectField ${fieldValue}`);
const selector = `
[data-test-subj="${groupName}AggGroup"]
[data-test-subj^="visEditorAggAccordion"].euiAccordion-isOpen
[data-test-subj="visAggEditorParams"]
${childAggregationType ? '.visEditorAgg__subAgg' : ''}
[data-test-subj="visDefaultEditorField"]
`;
const fieldEl = await find.byCssSelector(selector);
await comboBox.setElement(fieldEl, fieldValue);
}
public async selectOrderByMetric(aggNth: number, metric: string) {
const sortSelect = await testSubjects.find(`visEditorOrderBy${aggNth}`);
const sortMetric = await sortSelect.findByCssSelector(`option[value="${metric}"]`);
await sortMetric.click();
}
public async selectCustomSortMetric(aggNth: number, metric: string, field: string) {
await this.selectOrderByMetric(aggNth, 'custom');
await this.selectAggregation(metric, 'buckets', true);
await this.selectField(field, 'buckets', true);
}
public async selectAggregation(
aggValue: string,
groupName = 'buckets',
childAggregationType = false
) {
const comboBoxElement = await find.byCssSelector(`
[data-test-subj="${groupName}AggGroup"]
[data-test-subj^="visEditorAggAccordion"].euiAccordion-isOpen
${childAggregationType ? '.visEditorAgg__subAgg' : ''}
[data-test-subj="defaultEditorAggSelect"]
`);
await comboBox.setElement(comboBoxElement, aggValue);
await common.sleep(500);
}
/**
* Set the test for a filter aggregation.
* @param {*} filterValue the string value of the filter
* @param {*} filterIndex used when multiple filters are configured on the same aggregation
* @param {*} aggregationId the ID if the aggregation. On Tests, it start at from 2
*/
public async setFilterAggregationValue(
filterValue: string,
filterIndex = 0,
aggregationId = 2
) {
await testSubjects.setValue(
`visEditorFilterInput_${aggregationId}_${filterIndex}`,
filterValue
);
}
public async setValue(newValue: string) {
const input = await find.byCssSelector('[data-test-subj="visEditorPercentileRanks"] input');
await input.clearValue();
await input.type(newValue);
}
public async clickEditorSidebarCollapse() {
await testSubjects.click('collapseSideBarButton');
}
public async clickDropPartialBuckets() {
await testSubjects.click('dropPartialBucketsCheckbox');
}
public async expectMarkdownTextArea() {
await testSubjects.existOrFail('markdownTextarea');
}
public async setMarkdownTxt(markdownTxt: string) {
const input = await testSubjects.find('markdownTextarea');
await input.clearValue();
await input.type(markdownTxt);
}
public async isSwitchChecked(selector: string) {
const checkbox = await testSubjects.find(selector);
const isChecked = await checkbox.getAttribute('aria-checked');
return isChecked === 'true';
}
public async checkSwitch(selector: string) {
const isChecked = await this.isSwitchChecked(selector);
if (!isChecked) {
log.debug(`checking switch ${selector}`);
await testSubjects.click(selector);
}
}
public async uncheckSwitch(selector: string) {
const isChecked = await this.isSwitchChecked(selector);
if (isChecked) {
log.debug(`unchecking switch ${selector}`);
await testSubjects.click(selector);
}
}
public async setIsFilteredByCollarCheckbox(value = true) {
await retry.try(async () => {
const isChecked = await this.isSwitchChecked('isFilteredByCollarCheckbox');
if (isChecked !== value) {
await testSubjects.click('isFilteredByCollarCheckbox');
throw new Error('isFilteredByCollar not set correctly');
}
});
}
public async setCustomLabel(label: string, index = 1) {
const customLabel = await testSubjects.find(`visEditorStringInput${index}customLabel`);
await customLabel.type(label);
}
public async selectYAxisAggregation(agg: string, field: string, label: string, index = 1) {
// index starts on the first "count" metric at 1
// Each new metric or aggregation added to a visualization gets the next index.
// So to modify a metric or aggregation tests need to keep track of the
// order they are added.
await this.toggleOpenEditor(index);
// select our agg
const aggSelect = await find.byCssSelector(
`#visEditorAggAccordion${index} [data-test-subj="defaultEditorAggSelect"]`
);
await comboBox.setElement(aggSelect, agg);
const fieldSelect = await find.byCssSelector(
`#visEditorAggAccordion${index} [data-test-subj="visDefaultEditorField"]`
);
// select our field
await comboBox.setElement(fieldSelect, field);
// enter custom label
await this.setCustomLabel(label, index);
}
public async getField() {
return await comboBox.getComboBoxSelectedOptions('visDefaultEditorField');
}
public async sizeUpEditor() {
const resizerPanel = await testSubjects.find('splitPanelResizer');
// Drag panel 100 px left
await browser.dragAndDrop({ location: resizerPanel }, { location: { x: -100, y: 0 } });
}
public async toggleDisabledAgg(agg: string) {
await testSubjects.click(`visEditorAggAccordion${agg} > ~toggleDisableAggregationBtn`);
await header.waitUntilLoadingHasFinished();
}
public async toggleAggregationEditor(agg: string) {
await find.clickByCssSelector(
`[data-test-subj="visEditorAggAccordion${agg}"] .euiAccordion__button`
);
await header.waitUntilLoadingHasFinished();
}
public async toggleOtherBucket(agg = 2) {
await testSubjects.click(`visEditorAggAccordion${agg} > otherBucketSwitch`);
}
public async toggleMissingBucket(agg = 2) {
await testSubjects.click(`visEditorAggAccordion${agg} > missingBucketSwitch`);
}
public async toggleScaleMetrics() {
await testSubjects.click('scaleMetricsSwitch');
}
public async toggleAutoMode() {
// this is a temporary solution; it should be replaced with the initial approach
// once EuiToggleButton is fixed to pass the data-test-subj attribute to its checkbox
await find.clickByCssSelector('.visEditorSidebar__controls input[type="checkbox"]');
// await testSubjects.click('visualizeEditorAutoButton');
}
public async isApplyEnabled() {
const applyButton = await testSubjects.find('visualizeEditorRenderButton');
return await applyButton.isEnabled();
}
public async toggleAccordion(id: string, toState = 'true') {
const toggle = await find.byCssSelector(`button[aria-controls="${id}"]`);
const toggleOpen = await toggle.getAttribute('aria-expanded');
log.debug(`toggle ${id} expand = ${toggleOpen}`);
if (toggleOpen !== toState) {
log.debug(`toggle ${id} click()`);
await toggle.click();
}
}
public async toggleOpenEditor(index: number, toState = 'true') {
// index, see selectYAxisAggregation
await this.toggleAccordion(`visEditorAggAccordion${index}`, toState);
}
public async toggleAdvancedParams(aggId: string) {
const accordion = await testSubjects.find(`advancedParams-${aggId}`);
const accordionButton = await find.descendantDisplayedByCssSelector('button', accordion);
await accordionButton.click();
}
public async clickReset() {
await testSubjects.click('visualizeEditorResetButton');
await visChart.waitForVisualization();
}
public async clickYAxisOptions(axisId: string) {
await testSubjects.click(`toggleYAxisOptions-${axisId}`);
}
public async changeYAxisShowCheckbox(axisId: string, enabled: boolean) {
const selector = `valueAxisShow-${axisId}`;
const button = await testSubjects.find(selector);
const isEnabled = (await button.getAttribute('aria-checked')) === 'true';
if (enabled !== isEnabled) {
await button.click();
}
}
public async changeYAxisFilterLabelsCheckbox(axisId: string, enabled: boolean) {
const selector = `yAxisFilterLabelsCheckbox-${axisId}`;
const button = await testSubjects.find(selector);
const isEnabled = (await button.getAttribute('aria-checked')) === 'true';
if (enabled !== isEnabled) {
await button.click();
}
}
public async setSize(newValue: string, aggId: string) {
const dataTestSubj = aggId
? `visEditorAggAccordion${aggId} > sizeParamEditor`
: 'sizeParamEditor';
await testSubjects.setValue(dataTestSubj, String(newValue));
}
public async selectChartMode(mode: string) {
const selector = await find.byCssSelector(`#seriesMode0 > option[value="${mode}"]`);
await selector.click();
}
public async selectYAxisScaleType(axisId: string, scaleType: string) {
const selector = await find.byCssSelector(
`#scaleSelectYAxis-${axisId} > option[value="${scaleType}"]`
);
await selector.click();
}
public async selectXAxisPosition(position: string) {
const option = await (await testSubjects.find('categoryAxisPosition')).findByCssSelector(
`option[value="${position}"]`
);
await option.click();
}
public async selectYAxisMode(mode: string) {
const selector = await find.byCssSelector(`#valueAxisMode0 > option[value="${mode}"]`);
await selector.click();
}
public async setAxisExtents(min: string, max: string, axisId = 'ValueAxis-1') {
await this.toggleAccordion(`yAxisAccordion${axisId}`);
await this.toggleAccordion(`yAxisOptionsAccordion${axisId}`);
await testSubjects.click('yAxisSetYExtents');
await testSubjects.setValue('yAxisYExtentsMax', max);
await testSubjects.setValue('yAxisYExtentsMin', min);
}
public async selectAggregateWith(fieldValue: string) {
await testSubjects.selectValue('visDefaultEditorAggregateWith', fieldValue);
}
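// Sets the bucket interval. 'default' picks an existing combo-box option,
// 'custom' types a free-form value, and 'numeric' first switches the agg
// out of auto mode before writing into the numeric interval input.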
public async setInterval(newValue: string, options: IntervalOptions = {}) {
const { type = 'default', aggNth = 2, append = false } = options;
log.debug(`visEditor.setInterval(${newValue}, {${type}, ${aggNth}, ${append}})`);
if (type === 'default') {
await comboBox.set('visEditorInterval', newValue);
} else if (type === 'custom') {
await comboBox.setCustom('visEditorInterval', newValue);
} else {
if (type === 'numeric') {
const autoMode = await testSubjects.getAttribute(
`visEditorIntervalSwitch${aggNth}`,
'aria-checked'
);
if (autoMode === 'true') {
await testSubjects.click(`visEditorIntervalSwitch${aggNth}`);
}
}
if (append) {
await testSubjects.append(`visEditorInterval${aggNth}`, String(newValue));
} else {
await testSubjects.setValue(`visEditorInterval${aggNth}`, String(newValue));
}
}
}
public async getInterval() {
return await comboBox.getComboBoxSelectedOptions('visEditorInterval');
}
public async getNumericInterval(aggNth = 2) {
return await testSubjects.getAttribute(`visEditorInterval${aggNth}`, 'value');
}
public async clickMetricEditor() {
await find.clickByCssSelector('[data-test-subj="metricsAggGroup"] .euiAccordion__button');
}
public async clickMetricByIndex(index: number) {
const metrics = await find.allByCssSelector(
'[data-test-subj="visualizationLoader"] .mtrVis .mtrVis__container'
);
expect(metrics.length).greaterThan(index);
await metrics[index].click();
}
public async setSelectByOptionText(selectId: string, optionText: string) {
const selectField = await find.byCssSelector(`#${selectId}`);
const options = await find.allByCssSelector(`#${selectId} > option`);
const $ = await selectField.parseDomContent();
const optionsText = $('option')
.toArray()
.map((option) => $(option).text());
const optionIndex = optionsText.indexOf(optionText);
if (optionIndex === -1) {
throw new Error(
`Unable to find option '${optionText}' in select ${selectId}. Available options: ${optionsText.join(
','
)}`
);
}
await options[optionIndex].click();
}
// point series
public async clickAddAxis() {
return await testSubjects.click('visualizeAddYAxisButton');
}
public async setAxisTitle(title: string, aggNth = 0) {
return await testSubjects.setValue(`valueAxisTitle${aggNth}`, title);
}
public async toggleGridCategoryLines() {
return await testSubjects.click('showCategoryLines');
}
public async toggleValuesOnChart() {
return await testSubjects.click('showValuesOnChart');
}
public async setGridValueAxis(axis: string) {
log.debug(`setGridValueAxis(${axis})`);
await find.selectValue('select#gridAxis', axis);
}
public async setSeriesAxis(seriesNth: number, axis: string) {
await find.selectValue(`select#seriesValueAxis${seriesNth}`, axis);
}
public async setSeriesType(seriesNth: number, type: string) {
await find.selectValue(`select#seriesType${seriesNth}`, type);
}
}
return new VisualizeEditorPage();
}
| VisualizeEditorPageProvider |
tests.py | from __future__ import absolute_import
| import shutil
from . import punc
from .punc import Punctuator, download_model
class Tests(unittest.TestCase):
samples = [
(
'mary had a little lamb its fleece was white as snow and anywhere that mary went the lamb was sure to go',
'Mary had a little lamb, its fleece was white as snow and anywhere that mary went, the lamb was sure to go.'
),
(
"they say it's only as cold as it feels in your mind i don't buy into that theory much what do you think",
"They say it's only as cold as it feels in your mind. I don't buy into that theory much. What do you think."
),
(
"he's a do me a favor go home to your wife",
"He's a do me: a favor go home to your wife.",
),
(
"they'll even negotiate your rate with the insurance company",
"They'll even negotiate your rate with the insurance company.",
),
(
"for me i wanted to get into commentary some sort of way i didn't know how to do that so i left the firm and i started a business",
"For me, I wanted to get into commentary some sort of way. I didn't know how to do that. So I left the firm and I started a business."
),
]
def test_punctuate(self):
# Create temp directory for downloading data.
d = tempfile.mkdtemp()
os.makedirs(punc.PUNCTUATOR_DATA_DIR, exist_ok=True)
model_file = os.path.join(punc.PUNCTUATOR_DATA_DIR, 'Demo-Europarl-EN.pcl')
print('Temp dir:', d)
os.chdir(d)
try:
# Download pre-trained model.
if not os.path.isfile(model_file):
model_file = download_model()
print('Model file:', model_file)
# Create punctuator.
t0 = time.time()
p = Punctuator(model_file=model_file)
td = time.time() - t0
print('Loaded in %s seconds from path.' % td)
# Add punctuation.
for input_text, expect_output_text in self.samples:
actual_output_text = p.punctuate(input_text)
print('expect_output_text:', expect_output_text)
print('actual_output_text:', actual_output_text)
self.assertEqual(actual_output_text, expect_output_text)
# Serialize the entire punctuator, not just the model.
print('Writing...')
t0 = time.time()
fn = 'data.pickle'
p.save(fn)
td = time.time() - t0
print('Wrote in %s seconds.' % td)
# Load punctuator.
print('Loading...')
t0 = time.time()
p2 = Punctuator.load(fn)
td = time.time() - t0
print('Loaded in %s seconds.' % td)
# Confirm punctuations match previous.
for input_text, expect_output_text in self.samples:
actual_output_text = p2.punctuate(input_text)
print('expect_output_text:', expect_output_text)
print('actual_output_text:', actual_output_text)
self.assertEqual(actual_output_text, expect_output_text)
finally:
shutil.rmtree(d)
def test_punctuate_stream(self):
# Create temp directory for downloading data.
d = tempfile.mkdtemp()
os.makedirs(punc.PUNCTUATOR_DATA_DIR, exist_ok=True)
model_file = os.path.join(punc.PUNCTUATOR_DATA_DIR, 'Demo-Europarl-EN.pcl')
print('Temp dir:', d)
os.chdir(d)
try:
# Download pre-trained model.
if not os.path.isfile(model_file):
model_file = download_model()
print('Model file:', model_file)
# Check if file can be read in as bytes
with open(model_file, 'rb') as infile:
    data = infile.read()
t0 = time.time()
p = Punctuator(data)
td = time.time() - t0
print('Loaded in %s seconds as bytes.' % td)
# Add punctuation.
for input_text, expect_output_text in self.samples:
actual_output_text = p.punctuate(input_text)
print('expect_output_text:', expect_output_text)
print('actual_output_text:', actual_output_text)
self.assertEqual(actual_output_text, expect_output_text)
# Serialize the entire punctuator, not just the model.
print('Writing...')
t0 = time.time()
fn = 'data.pickle'
p.save(fn)
td = time.time() - t0
print('Wrote in %s seconds.' % td)
# Load punctuator.
print('Loading...')
t0 = time.time()
p2 = Punctuator.load(fn)
td = time.time() - t0
print('Loaded in %s seconds.' % td)
# Confirm punctuations match previous.
for input_text, expect_output_text in self.samples:
actual_output_text = p2.punctuate(input_text)
print('expect_output_text:', expect_output_text)
print('actual_output_text:', actual_output_text)
self.assertEqual(actual_output_text, expect_output_text)
finally:
shutil.rmtree(d)
if __name__ == '__main__':
unittest.main() | import time
import os
import unittest
import tempfile |
application.ts | /*!
* Omelox -- proto
* Copyright(c) 2012 xiechengchao <[email protected]>
* MIT Licensed
*/
/**
* Module dependencies.
*/
import * as utils from './util/utils';
import { getLogger, ILogger } from 'omelox-logger';
import { EventEmitter } from 'events';
import { AppEvents, default as events } from './util/events';
import * as appUtil from './util/appUtil';
import { ServerStartArgs } from './util/appUtil';
import * as Constants from './util/constants';
import { FRONTENDID, ServerInfo } from './util/constants';
import * as appManager from './common/manager/appManager';
import { TransactionCondictionFunction, TransactionHandlerFunction } from './common/manager/appManager';
import * as fs from 'fs';
import * as path from 'path';
import { isFunction } from 'util';
import { IComponent } from './interfaces/IComponent';
import { DictionaryComponent } from './components/dictionary';
import { PushSchedulerComponent } from './components/pushScheduler';
import { BackendSessionService } from './common/service/backendSessionService';
import { ChannelService, ChannelServiceOptions } from './common/service/channelService';
import { SessionComponent } from './components/session';
import { ServerComponent } from './components/server';
import { RemoteComponent } from './components/remote';
import { ProxyComponent, RouteFunction, RouteMaps } from './components/proxy';
import { ProtobufComponent, ProtobufComponentOptions } from './components/protobuf';
import { MonitorComponent } from './components/monitor';
import { MasterComponent } from './components/master';
import { ConnectorComponent, ConnectorComponentOptions } from './components/connector';
import { ConnectionComponent } from './components/connection';
import { SessionService } from './common/service/sessionService';
import { ObjectType } from './interfaces/define';
import { IModule, IModuleFactory } from 'omelox-admin';
import { ChannelComponent } from './components/channel';
import { BackendSessionComponent } from './components/backendSession';
import { AfterHandlerFilter, BeforeHandlerFilter, IHandlerFilter } from './interfaces/IHandlerFilter';
import { MailStationErrorHandler, RpcFilter, RpcMsg } from 'omelox-rpc';
import { ModuleRecord } from './util/moduleUtil';
import { ApplicationEventContructor, IPlugin } from './interfaces/IPlugin';
import { Cron, ResponseErrorHandler } from './server/server';
import { RemoterProxy } from './util/remoterHelper';
import { FrontendOrBackendSession, ISession, ScheduleOptions, SID, UID } from './index';
let logger = getLogger('omelox', path.basename(__filename));
export type ConfigureCallback = () => void;
export type AConfigureFunc1 = () => Promise<void>;
export type AConfigureFunc2 = (env: string) => Promise<void>;
export type AConfigureFunc3 = (env: string, type: string) => Promise<void>;
export interface ApplicationOptions {
base?: string;
}
export type BeforeStopHookFunction = (app: Application, shutDown: () => void, cancelShutDownTimer: () => void) => void;
declare global {
// Defines the base interface for user RPC
interface UserRpc {
test(): void;
}
// channelRemote functions
type pushMessageFunction = (route: string, msg: any, uids: UID[], opts: ScheduleOptions) => Promise<UID[]>;
type broadcastFunction = (route: string, msg: any, opts: ScheduleOptions) => Promise<UID[]>;
// sessionRemote functions
type bindFunction = (sid: SID, uid: UID) => Promise<void>;
type unbindFunction = (sid: SID, uid: UID) => Promise<void>;
type pushFunction = (sid: SID, key: string, value: any) => Promise<void>;
type pushAllFunction = (sid: SID, settings: { [key: string]: any }) => Promise<void>;
type getBackendSessionBySidFunction = (sid: SID) => Promise<ISession>;
type getBackendSessionsByUidFunction = (uid: UID) => Promise<ISession[]>;
type kickBySidFunction = (sid: SID, reason: string) => Promise<void>;
type kickByUidFunction = (uid: UID, reason: string) => Promise<void>;
interface SysRpc {
[serverType: string]: {
/**
* Forwards handler messages sent by clients to the frontend on to backend servers
*/
msgRemote: {
forwardMessage: (routeParam: FrontendOrBackendSession, msg: any, session: ISession) => Promise<void>;
},
/**
* Tells frontend servers to push messages to clients
*/
channelRemote: {
pushMessage: RemoterProxy<pushMessageFunction>;
broadcast: RemoterProxy<broadcastFunction>;
}
/**
* Session-related services for getting or setting sessions on frontend servers
*/
sessionRemote: {
bind: RemoterProxy<bindFunction>;
unbind: RemoterProxy<unbindFunction>;
push: RemoterProxy<pushFunction>;
pushAll: RemoterProxy<pushAllFunction>;
getBackendSessionBySid: RemoterProxy<getBackendSessionBySidFunction>;
getBackendSessionsByUid: RemoterProxy<getBackendSessionsByUidFunction>;
kickBySid: RemoterProxy<kickBySidFunction>;
kickByUid: RemoterProxy<kickByUidFunction>;
}
};
}
}
/**
* Application states
*/
let STATE_INITED = 1; // app has been initialized
let STATE_BEFORE_START = 2; // app before start
let STATE_START = 3; // app start
let STATE_STARTED = 4; // app has started
let STATE_STOPED = 5; // app has stopped
export class Application {
loaded: IComponent[] = []; // loaded component list
components: {
__backendSession__?: | ionComponent,
__channel__?: ChannelComponent,
__connection__?: ConnectionComponent,
__connector__?: ConnectorComponent,
__dictionary__?: DictionaryComponent,
__master__?: MasterComponent,
__monitor__?: MonitorComponent,
__protobuf__?: ProtobufComponent,
__proxy__?: ProxyComponent,
__remote__?: RemoteComponent,
__server__?: ServerComponent,
__session__?: SessionComponent,
__pushScheduler__?: PushSchedulerComponent,
[key: string]: IComponent
} = {}; // name -> component map
sessionService ?: SessionService;
backendSessionService ?: BackendSessionService;
channelService ?: ChannelService;
settings: { [key: string]: any } = {}; // collection keep set/get
event = new EventEmitter(); // event object to sub/pub events
// current server info
serverId: string; // current server id
serverType: string; // current server type
curServer: ServerInfo; // current server info
startTime: number; // current server start time
// global server infos
master: ServerStartArgs = null; // master server info
servers: { [id: string]: ServerInfo } = {}; // current global server info maps, id -> info
serverTypeMaps: { [type: string]: ServerInfo[] } = {}; // current global type maps, type -> [info]
serverTypes: string[] = []; // current global server type list
usedPlugins: IPlugin[] = []; // current server custom lifecycle callbacks
clusterSeq: { [serverType: string]: number } = {}; // cluster id seqence
state: number;
base: string;
startId: string;
type: string;
stopTimer: any;
/**
* Initialize the server.
*
* - setup default configuration
*/
init(opts ?: ApplicationOptions) {
opts = opts || {};
let base = opts.base || path.dirname(require.main.filename);
this.set(Constants.RESERVED.BASE, base);
this.base = base;
appUtil.defaultConfiguration(this);
this.state = STATE_INITED;
logger.info('application inited: %j', this.getServerId());
}
/**
* Get application base path
*
* // cwd: /home/game/
* omelox start
* // app.getBase() -> /home/game
*
* @return {String} application base path
*
* @memberOf Application
*/
getBase() {
return this.get(Constants.RESERVED.BASE);
}
/**
* Override require method in application
*
* @param {String} relative path of file
*
* @memberOf Application
*/
require(ph: string) {
return require(path.join(this.getBase(), ph));
}
/**
* Configure logger with {$base}/config/log4js.json
*
* @param {Object} logger omelox-logger instance without configuration
*
* @memberOf Application
*/
configureLogger(logger: ILogger) {
if (process.env.POMELO_LOGGER !== 'off') {
let serverId = this.getServerId();
let base = this.getBase();
let env = this.get(Constants.RESERVED.ENV);
let originPath = path.join(base, Constants.FILEPATH.LOG);
let presentPath = path.join(base, Constants.FILEPATH.CONFIG_DIR, env, path.basename(Constants.FILEPATH.LOG));
if (this._checkCanRequire(originPath)) {
logger.configure(originPath, { serverId: serverId, base: base });
} else if (this._checkCanRequire(presentPath)) {
logger.configure(presentPath, { serverId: serverId, base: base });
} else {
console.error('logger file path configuration is invalid.');
}
}
}
/**
* add a filter to before and after filter
*
* @param {Object} filter provide before and after filter method.
* A filter should have two methods: before and after.
* @memberOf Application
*/
filter(filter: IHandlerFilter): void {
this.before(filter);
this.after(filter);
}
/**
* Add before filter.
*
* @param {Object|Function} bf before fileter, bf(msg, session, next)
* @memberOf Application
*/
before(bf: BeforeHandlerFilter): void {
addFilter(this, Constants.KEYWORDS.BEFORE_FILTER, bf);
}
/**
* Add after filter.
*
* @param {Object|Function} af after filter, `af(err, msg, session, resp, next)`
* @memberOf Application
*/
after(af: AfterHandlerFilter): void {
addFilter(this, Constants.KEYWORDS.AFTER_FILTER, af);
}
/**
* add a global filter to before and after global filter
*
* @param {Object} filter provide before and after filter method.
* A filter should have two methods: before and after.
* @memberOf Application
*/
globalFilter(filter: IHandlerFilter) {
this.globalBefore(filter);
this.globalAfter(filter);
}
/**
* Add global before filter.
*
* @param {Object|Function} bf before fileter, bf(msg, session, next)
* @memberOf Application
*/
globalBefore(bf: BeforeHandlerFilter) {
addFilter(this, Constants.KEYWORDS.GLOBAL_BEFORE_FILTER, bf);
}
/**
* Add global after filter.
*
* @param {Object|Function} af after filter, `af(err, msg, session, resp, next)`
* @memberOf Application
*/
globalAfter(af: AfterHandlerFilter) {
addFilter(this, Constants.KEYWORDS.GLOBAL_AFTER_FILTER, af);
}
/**
* Add rpc before filter.
*
* @param {Object|Function} bf before fileter, bf(serverId, msg, opts, next)
* @memberOf Application
*/
rpcBefore(bf: RpcFilter | RpcFilter[]) {
addFilter(this, Constants.KEYWORDS.RPC_BEFORE_FILTER, bf);
}
/**
* Add rpc after filter.
*
* @param {Object|Function} af after filter, `af(serverId, msg, opts, next)`
* @memberOf Application
*/
rpcAfter(af: RpcFilter | RpcFilter[]) {
addFilter(this, Constants.KEYWORDS.RPC_AFTER_FILTER, af);
}
/**
* add a rpc filter to before and after rpc filter
*
* @param {Object} filter provide before and after filter method.
* A filter should have two methods: before and after.
* @memberOf Application
*/
rpcFilter(filter: RpcFilter) {
this.rpcBefore(filter);
this.rpcAfter(filter);
}
/**
* Load component
*
* @param {String} name (optional) name of the component
* @param {Object} component component instance or factory function of the component
* @param {[type]} opts (optional) construct parameters for the factory function
* @return {Object} app instance for chain invoke
* @memberOf Application
*/
load<T extends IComponent>(component: ObjectType<T>, opts ?: any): T;
load<T extends IComponent>(name: string, component: ObjectType<T>, opts ?: any): T;
load<T extends IComponent>(component: T, opts ?: any): T;
load<T extends IComponent>(name: string, component: T, opts ?: any): T;
load<T extends IComponent>(name: string | ObjectType<T>, component ?: ObjectType<T> | any | T, opts ?: any): T {
if (typeof name !== 'string') {
opts = component;
component = name;
name = null;
}
if (isFunction(component)) {
component = new component(this, opts);
}
if (!name && typeof component.name === 'string') {
name = component.name;
}
if (name && this.components[name as string]) {
// ignore duplicate component
logger.warn('ignore duplicate component: %j', name);
return;
}
this.loaded.push(component);
if (name) {
// components with a name can be retrieved by name through app.components later.
this.components[name as string] = component;
}
return component;
}
_checkCanRequire(path: string) {
try {
path = require.resolve(path);
} catch (err) {
return null;
}
return path;
}
/**
* Load Configure json file to settings. (Supports different environment directories and stays compatible with the old path.)
*
* @param {String} key environment key
* @param {String} val environment value
* @param {Boolean} reload whether reload after change default false
* @return {Server|Mixed} for chaining, or the setting value
* @memberOf Application
*/
loadConfigBaseApp(key: string, val: string, reload = false) {
let self = this;
let env = this.get(Constants.RESERVED.ENV);
let originPath = path.join(this.getBase(), val);
let presentPath = path.join(this.getBase(), Constants.FILEPATH.CONFIG_DIR, env, path.basename(val));
let realPath: string;
if (self._checkCanRequire(originPath)) {
realPath = require.resolve(originPath);
let file = require(originPath);
if (file[env]) {
file = file[env];
}
this.set(key, file);
} else if (self._checkCanRequire(presentPath)) {
realPath = require.resolve(presentPath);
let pfile = require(presentPath);
this.set(key, pfile);
} else {
logger.error('invalid configuration with file path: %s', key);
}
if (!!realPath && !!reload) {
const watcher = fs.watch(realPath, function (event, filename) {
if (event === 'change') {
self.clearRequireCache(require.resolve(realPath));
watcher.close();
self.loadConfigBaseApp(key, val, reload);
}
});
}
}
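    // Example (hypothetical file layout): with env = 'development',
    //   app.loadConfigBaseApp('dbConfig', 'config/database.json', true);
    // first tries <base>/config/database.json (using its 'development'
    // section if present), then falls back to
    // <base>/config/development/database.json, and reloads the setting
    // whenever the resolved file changes because reload is true.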
clearRequireCache(path: string) {
const moduleObj = require.cache[path];
if (!moduleObj) {
            logger.warn('cannot find module at path', path);
return;
}
if (moduleObj.parent) {
// console.log('has parent ',moduleObj.parent);
moduleObj.parent.children.splice(moduleObj.parent.children.indexOf(moduleObj), 1);
}
delete require.cache[path];
}
/**
* Load Configure json file to settings.
*
* @param {String} key environment key
* @param {String} val environment value
* @return {Server|Mixed} for chaining, or the setting value
* @memberOf Application
*/
loadConfig(key: string, val: string) {
let env = this.get(Constants.RESERVED.ENV);
let cfg = require(val);
if (cfg[env]) {
cfg = cfg[env];
}
this.set(key, cfg);
}
/**
* Set the route function for the specified server type.
*
* Examples:
*
* app.route('area', routeFunc);
*
* let routeFunc = function(session, msg, app, cb) {
* // all request to area would be route to the first area server
* let areas = app.getServersByType('area');
* cb(null, areas[0].id);
* };
*
* @param {String} serverType server type string
* @param {Function} routeFunc route function. routeFunc(session, msg, app, cb)
* @return {Object} current application instance for chain invoking
* @memberOf Application
*/
route(serverType: string, routeFunc: RouteFunction) {
let routes = this.get(Constants.KEYWORDS.ROUTE);
if (!routes) {
routes = {};
this.set(Constants.KEYWORDS.ROUTE, routes);
}
routes[serverType] = routeFunc;
return this;
}
/**
* Set before stop function. It would perform before servers stop.
*
* @param {Function} fun before close function
* @return {Void}
* @memberOf Application
*/
beforeStopHook(fun: BeforeStopHookFunction) {
logger.warn('this method was deprecated in omelox 0.8');
if (!!fun && typeof fun === 'function') {
this.set(Constants.KEYWORDS.BEFORE_STOP_HOOK, fun);
}
}
/**
* Start application. It would load the default components and start all the loaded components.
*
* @param {Function} cb callback function
* @memberOf Application
*/
start(cb ?: (err ?: Error, result ?: void) => void) {
this.startTime = Date.now();
if (this.state > STATE_INITED) {
            utils.invokeCallback(cb, new Error('application has already started.'));
return;
}
let self = this;
appUtil.startByType(self, function () {
appUtil.loadDefaultComponents(self);
let startUp = function () {
self.state = STATE_BEFORE_START;
logger.info('%j enter before start...', self.getServerId());
appUtil.optComponents(self.loaded, Constants.RESERVED.BEFORE_START, function (err) {
if (err) {
utils.invokeCallback(cb, err);
} else {
logger.info('%j enter start...', self.getServerId());
appUtil.optComponents(self.loaded, Constants.RESERVED.START, function (err) {
self.state = STATE_START;
if (err) {
utils.invokeCallback(cb, err);
} else {
logger.info('%j enter after start...', self.getServerId());
self.afterStart(cb);
}
});
}
});
};
appUtil.optLifecycles(self.usedPlugins, Constants.LIFECYCLE.BEFORE_STARTUP, self, function (err) {
if (err) {
utils.invokeCallback(cb, err);
} else {
startUp();
}
});
});
}
/**
* Lifecycle callback for after start.
*
* @param {Function} cb callback function
* @return {Void}
*/
afterStart(cb ?: (err?: Error) => void) {
if (this.state !== STATE_START) {
utils.invokeCallback(cb, new Error('application is not running now.'));
return;
}
let self = this;
appUtil.optComponents(this.loaded, Constants.RESERVED.AFTER_START, function (err) {
self.state = STATE_STARTED;
let id = self.getServerId();
if (!err) {
logger.info('%j finish start', id);
}
appUtil.optLifecycles(self.usedPlugins, Constants.LIFECYCLE.AFTER_STARTUP, self, function (err?: Error) {
let usedTime = Date.now() - self.startTime;
logger.info('%j startup in %s ms', id, usedTime);
self.event.emit(events.START_SERVER, id);
cb && cb(err);
});
});
}
/**
* Stop components.
*
* @param {Boolean} force whether stop the app immediately
*/
stop(force: boolean) {
if (this.state > STATE_STARTED) {
logger.warn('[omelox application] application is not running now.');
return;
}
this.state = STATE_STOPED;
let self = this;
this.stopTimer = setTimeout(function () {
process.exit(0);
}, Constants.TIME.TIME_WAIT_STOP);
let cancelShutDownTimer = function () {
if (!!self.stopTimer) {
clearTimeout(self.stopTimer);
}
};
let shutDown = function () {
appUtil.stopComps(self.loaded, 0, force, function () {
cancelShutDownTimer();
if (force) {
process.exit(0);
}
});
};
let fun = this.get(Constants.KEYWORDS.BEFORE_STOP_HOOK);
appUtil.optLifecycles(self.usedPlugins, Constants.LIFECYCLE.BEFORE_SHUTDOWN, self, function (err) {
if (err) {
console.error(`throw err when beforeShutdown `, err.stack);
} else {
if (!!fun) {
utils.invokeCallback(fun, self, shutDown, cancelShutDownTimer);
} else {
shutDown();
}
}
}, cancelShutDownTimer);
}
/**
* Assign `setting` to `val`, or return `setting`'s value.
*
* Example:
*
* app.set('key1', 'value1');
* app.get('key1'); // 'value1'
* app.key1; // undefined
*
* app.set('key2', 'value2', true);
* app.get('key2'); // 'value2'
* app.key2; // 'value2'
*
* @param {String} setting the setting of application
* @param {String} val the setting's value
* @param {Boolean} attach whether attach the settings to application
* @return {Server|Mixed} for chaining, or the setting value
* @memberOf Application
*/
set(setting: 'channelService', val: ChannelService, attach?: boolean): Application;
set(setting: 'sessionService', val: SessionService, attach?: boolean): Application;
set(setting: 'channelConfig', val: ChannelServiceOptions, attach?: boolean): Application;
set(setting: 'backendSessionService', val: BackendSessionComponent, attach?: boolean): Application;
set(setting: 'protobufConfig', val: ProtobufComponentOptions, attach?: boolean): Application;
set(setting: 'connectorConfig', val: ConnectorComponentOptions, attach?: boolean): Application;
set(setting: Constants.KEYWORDS.BEFORE_FILTER, val: BeforeHandlerFilter[], attach?: boolean): Application;
set(setting: Constants.KEYWORDS.AFTER_FILTER, val: AfterHandlerFilter[], attach?: boolean): Application;
set(setting: Constants.KEYWORDS.GLOBAL_BEFORE_FILTER, val: BeforeHandlerFilter[], attach?: boolean): Application;
set(setting: Constants.KEYWORDS.GLOBAL_AFTER_FILTER, val: AfterHandlerFilter[], attach?: boolean): Application;
set(setting: Constants.KEYWORDS.RPC_BEFORE_FILTER, val: RpcFilter | RpcFilter[], attach?: boolean): Application;
set(setting: Constants.KEYWORDS.RPC_AFTER_FILTER, val: RpcFilter | RpcFilter[], attach?: boolean): Application;
set(setting: Constants.RESERVED.RPC_ERROR_HANDLER, val: MailStationErrorHandler, attach?: boolean): Application;
set(setting: Constants.KEYWORDS.ROUTE, val: RouteMaps, attach?: boolean): Application;
set(setting: Constants.KEYWORDS.BEFORE_STOP_HOOK, val: BeforeStopHookFunction, attach?: boolean): Application;
set(setting: Constants.RESERVED.BASE, val: string, attach?: boolean): Application;
set(setting: Constants.RESERVED.ENV, val: string, attach?: boolean): Application;
set(setting: Constants.RESERVED.GLOBAL_ERROR_HANDLER, val: ResponseErrorHandler, attach?: boolean): Application;
set(setting: Constants.RESERVED.ERROR_HANDLER, val: ResponseErrorHandler, attach?: boolean): Application;
set(setting: Constants.KEYWORDS.MODULE, val: { [key: string]: ModuleRecord }, attach?: boolean): Application;
set(setting: string, val: string | any, attach?: boolean): Application;
set(setting: string, val: string | any, attach?: boolean): Application {
this.settings[setting] = val;
if (attach) {
(this as any)[setting] = val;
}
return this;
}
/**
* Get property from setting
*
* @param {String} setting application setting
* @return {String} val
* @memberOf Application
*/
get(setting: 'channelService'): ChannelService;
get(setting: 'sessionService'): SessionService;
get(setting: 'channelConfig'): ChannelServiceOptions;
get(setting: 'backendSessionService'): BackendSessionComponent;
get(setting: Constants.KEYWORDS.BEFORE_FILTER): BeforeHandlerFilter[];
get(setting: Constants.KEYWORDS.AFTER_FILTER): AfterHandlerFilter[];
get(setting: Constants.KEYWORDS.GLOBAL_BEFORE_FILTER): BeforeHandlerFilter[];
get(setting: Constants.KEYWORDS.GLOBAL_AFTER_FILTER): AfterHandlerFilter[];
get(setting: Constants.KEYWORDS.RPC_BEFORE_FILTER): RpcFilter | RpcFilter[];
get(setting: Constants.KEYWORDS.RPC_AFTER_FILTER): RpcFilter | RpcFilter[];
get(setting: Constants.RESERVED.RPC_ERROR_HANDLER): MailStationErrorHandler;
get(setting: Constants.KEYWORDS.ROUTE): RouteMaps;
get(setting: Constants.KEYWORDS.BEFORE_STOP_HOOK): BeforeStopHookFunction;
get(setting: Constants.RESERVED.BASE): string;
get(setting: Constants.RESERVED.ENV): string;
get(setting: Constants.RESERVED.GLOBAL_ERROR_HANDLER): ResponseErrorHandler;
get(setting: Constants.RESERVED.ERROR_HANDLER): ResponseErrorHandler;
get(setting: Constants.KEYWORDS.MODULE): { [key: string]: ModuleRecord };
get(setting: string): string | any;
get(setting: string): string | any {
return this.settings[setting];
}
/**
* Check if `setting` is enabled.
*
* @param {String} setting application setting
* @return {Boolean}
* @memberOf Application
*/
enabled(setting: string) {
return !!this.get(setting);
}
/**
* Check if `setting` is disabled.
*
* @param {String} setting application setting
* @return {Boolean}
* @memberOf Application
*/
disabled(setting: string) {
return !this.get(setting);
}
/**
* Enable `setting`.
*
* @param {String} setting application setting
* @return {app} for chaining
* @memberOf Application
*/
enable(setting: string) {
return this.set(setting, true);
}
/**
* Disable `setting`.
*
* @param {String} setting application setting
* @return {app} for chaining
* @memberOf Application
*/
disable(setting: string) {
return this.set(setting, false);
}
/**
* Configure callback for the specified env and server type.
     * When no env is specified, the callback will be invoked for all
     * environments; when no type is specified, it will be invoked for all
     * server types.
*
* Examples:
*
* app.configure(function(){
* // executed for all envs and server types
* });
*
* app.configure('development', function(){
     *    // executed for development env
* });
*
* app.configure('development', 'connector', function(){
* // executed for development env and connector server type
* });
*
* @param {String} env application environment
* @param {Function} fn callback function
* @param {String} type server type
* @return {Application} for chaining
* @memberOf Application
*/
configure(fn: ConfigureCallback): Application;
configure(env: string, fn: ConfigureCallback): Application;
configure(env: string, type: string, fn: ConfigureCallback): Application;
configure(env: string | ConfigureCallback, type ?: string | ConfigureCallback, fn ?: ConfigureCallback): Application {
let args = [].slice.call(arguments);
fn = args.pop();
env = type = Constants.RESERVED.ALL;
if (args.length > 0) {
env = args[0];
}
if (args.length > 1) {
type = args[1];
}
if (env === Constants.RESERVED.ALL || contains(this.settings.env, env as string)) {
if (type === Constants.RESERVED.ALL || contains(this.settings.serverType, type as string)) {
fn.call(this);
}
}
return this;
}
/**
* Register admin modules. Admin modules is the extends point of the monitor system.
*
     * @param {String} module (optional) module id, or provided by module.moduleId
* @param {Object} module module object or factory function for module
* @param {Object} opts construct parameter for module
* @memberOf Application
*/
registerAdmin(module: IModule, opts ?: any): void;
registerAdmin(moduleId: string, module ?: IModule, opts ?: any): void;
registerAdmin(module: IModuleFactory, opts ?: any): void;
registerAdmin(moduleId: string, module ?: IModuleFactory, opts ?: any): void;
registerAdmin(moduleId: string | IModule | IModuleFactory, module ?: IModule | IModuleFactory, opts ?: any) {
let modules = this.get(Constants.KEYWORDS.MODULE);
if (!modules) {
modules = {};
this.set(Constants.KEYWORDS.MODULE, modules);
}
if (typeof moduleId !== 'string') {
opts = module;
module = moduleId;
if (module) {
moduleId = ((module as IModuleFactory).moduleId);
if (!moduleId)
moduleId = (module as IModule).constructor.name;
}
}
if (!moduleId) {
return;
}
modules[moduleId as string] = {
moduleId: moduleId as string,
module: module,
opts: opts
};
}
/**
* Use plugin.
*
* @param {Object} plugin plugin instance
* @param {[type]} opts (optional) construct parameters for the factory function
* @memberOf Application
*/
use(plugin: IPlugin, opts ?: any) {
opts = opts || {};
        if (!plugin) {
            throw new Error(`plugin is null!`);
        }
        if (this.usedPlugins.indexOf(plugin) >= 0) {
            throw new Error(`plugin [${ plugin.name }] was used already!`);
        }
if (plugin.components) {
for (let componentCtor of plugin.components) {
this.load(componentCtor, opts);
}
}
if (plugin.events) {
for (let eventCtor of plugin.events) {
this.loadEvent(eventCtor, opts);
}
}
this.usedPlugins.push(plugin);
console.warn(`used Plugin : ${ plugin.name }`);
}
/**
     * Application transaction. A transaction includes conditions and handlers: if the conditions are satisfied, the handlers will be executed.
     * You can also set retry times for executing the handlers. The transaction log is in the file logs/transaction.log.
*
* @param {String} name transaction name
* @param {Object} conditions functions which are called before transaction
* @param {Object} handlers functions which are called during transaction
* @param {Number} retry retry times to execute handlers if conditions are successfully executed
* @memberOf Application
*/
transaction(name: string, conditions: { [key: string]: TransactionCondictionFunction }, handlers: { [key: string]: TransactionHandlerFunction }, retry?: number) {
appManager.transaction(name, conditions, handlers, retry);
}
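    // Hedged sketch (names are made up; conditions and handlers are assumed
    // to be node-style functions that call cb(err)):
    //
    //   app.transaction('bindRedis', {
    //     redisAlive: (cb: Function) => cb(null)    // condition
    //   }, {
    //     bindChannel: (cb: Function) => cb(null)   // handler, retried up to 3 times
    //   }, 3);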
/**
* Get master server info.
*
* @return {Object} master server info, {id, host, port}
* @memberOf Application
*/
getMaster() {
return this.master;
}
/**
* Get current server info.
*
* @return {Object} current server info, {id, serverType, host, port}
* @memberOf Application
*/
getCurServer() {
return this.curServer;
}
/**
* Get current server id.
*
* @return {String|Number} current server id from servers.json
* @memberOf Application
*/
getServerId() {
return this.serverId;
}
/**
* Get current server
* @returns ServerInfo
*/
getCurrentServer() {
return this.curServer;
}
/**
* Get current server type.
*
* @return {String|Number} current server type from servers.json
* @memberOf Application
*/
getServerType() {
return this.serverType;
}
/**
* Get all the current server infos.
*
* @return {Object} server info map, key: server id, value: server info
* @memberOf Application
*/
getServers() {
return this.servers;
}
/**
* Get all server infos from servers.json.
*
* @return {Object} server info map, key: server id, value: server info
* @memberOf Application
*/
getServersFromConfig() {
return this.get(Constants.KEYWORDS.SERVER_MAP);
}
/**
* Get all the server type.
*
* @return {Array} server type list
* @memberOf Application
*/
getServerTypes() {
return this.serverTypes;
}
/**
* Get server info by server id from current server cluster.
*
* @param {String} serverId server id
* @return {Object} server info or undefined
* @memberOf Application
*/
getServerById(serverId: string) {
return this.servers[serverId];
}
/**
* Get server info by server id from servers.json.
*
* @param {String} serverId server id
* @return {Object} server info or undefined
* @memberOf Application
*/
getServerFromConfig(serverId: string) {
return this.get(Constants.KEYWORDS.SERVER_MAP)[serverId];
}
/**
* Get server infos by server type.
*
* @param {String} serverType server type
* @return {Array} server info list
* @memberOf Application
*/
getServersByType(serverType: string) {
return this.serverTypeMaps[serverType];
}
/**
* Check the server whether is a frontend server
*
     * @param {server} server server info; checks the current server
     * if no server is specified
* @return {Boolean}
*
* @memberOf Application
*/
isFrontend(server ?: any) {
server = server || this.getCurServer();
return !!server && server.frontend === 'true';
}
/**
* Check the server whether is a backend server
*
     * @param {server} server server info; checks the current server
     * if no server is specified
* @return {Boolean}
* @memberOf Application
*/
    isBackend(server ?: ServerInfo) {
server = server || this.getCurServer();
return !!server && !server.frontend;
}
/**
* Check whether current server is a master server
*
* @return {Boolean}
* @memberOf Application
*/
isMaster() {
return this.serverType === Constants.RESERVED.MASTER;
}
/**
* Add new server info to current application in runtime.
*
* @param {Array} servers new server info list
* @memberOf Application
*/
addServers(servers: ServerInfo[]) {
if (!servers || !servers.length) {
return;
}
let item: ServerInfo, slist: ServerInfo[];
for (let i = 0, l = servers.length; i < l; i++) {
item = servers[i];
// update global server map
this.servers[item.id] = item;
// update global server type map
slist = this.serverTypeMaps[item.serverType];
if (!slist) {
this.serverTypeMaps[item.serverType] = slist = [];
}
replaceServer(slist, item);
// update global server type list
if (this.serverTypes.indexOf(item.serverType) < 0) {
this.serverTypes.push(item.serverType);
}
}
this.event.emit(events.ADD_SERVERS, servers);
}
/**
* Remove server info from current application at runtime.
*
* @param {Array} ids server id list
* @memberOf Application
*/
removeServers(ids: string[]) {
if (!ids || !ids.length) {
return;
}
let id, item, slist;
for (let i = 0, l = ids.length; i < l; i++) {
id = ids[i];
item = this.servers[id];
if (!item) {
continue;
}
// clean global server map
delete this.servers[id];
// clean global server type map
slist = this.serverTypeMaps[item.serverType];
removeServer(slist, id);
// TODO: should remove the server type if the slist is empty?
}
this.event.emit(events.REMOVE_SERVERS, ids);
}
/**
* Replace server info from current application at runtime.
*
* @param {Object} server id map
* @memberOf Application
*/
replaceServers(servers: { [serverId: string]: ServerInfo }) {
if (!servers) {
return;
}
this.servers = servers;
this.serverTypeMaps = {};
this.serverTypes = [];
let serverArray = [];
for (let id in servers) {
let server = servers[id];
let serverType = server[Constants.RESERVED.SERVER_TYPE];
let slist = this.serverTypeMaps[serverType];
if (!slist) {
this.serverTypeMaps[serverType] = slist = [];
}
this.serverTypeMaps[serverType].push(server);
// update global server type list
if (this.serverTypes.indexOf(serverType) < 0) {
this.serverTypes.push(serverType);
}
serverArray.push(server);
}
this.event.emit(events.REPLACE_SERVERS, serverArray);
}
/**
* Add crons from current application at runtime.
*
* @param {Array} crons new crons would be added in application
* @memberOf Application
*/
addCrons(crons: Cron[]) {
if (!crons || !crons.length) {
logger.warn('crons is not defined.');
return;
}
this.event.emit(events.ADD_CRONS, crons);
}
/**
* Remove crons from current application at runtime.
*
* @param {Array} crons old crons would be removed in application
* @memberOf Application
*/
removeCrons(crons: Cron[]) {
if (!crons || !crons.length) {
            logger.warn('crons is not defined.');
return;
}
this.event.emit(events.REMOVE_CRONS, crons);
}
astart = utils.promisify(this.start);
aconfigure: AConfigureFunc1 | AConfigureFunc2 | AConfigureFunc3 = utils.promisify(this.configure) as any;
rpc ?: UserRpc;
sysrpc ?: SysRpc;
/**
* Proxy for rpc client rpcInvoke.
*
* @param {String} serverId remote server id
* @param {Object} msg rpc message: {serverType: serverType, service: serviceName, method: methodName, args: arguments}
* @param {Function} cb callback function
*/
rpcInvoke ?: (serverId: FRONTENDID, msg: RpcMsg, cb: Function) => void;
/**
     * Load an event listener.
     * @param Event the event class to instantiate
     * @param opts construct options passed to the event constructor
*/
loadEvent(Event: ApplicationEventContructor, opts: any) {
let eventInstance = new Event(opts);
for (let evt in AppEvents) {
let name = (AppEvents as any)[evt];
let method = (eventInstance as any)[name];
if (method) {
this.event.on(name, method.bind(eventInstance));
}
}
}
}
let replaceServer = function (slist: ServerInfo[], serverInfo: ServerInfo) {
for (let i = 0, l = slist.length; i < l; i++) {
if (slist[i].id === serverInfo.id) {
slist[i] = serverInfo;
return;
}
}
slist.push(serverInfo);
};
let removeServer = function (slist: ServerInfo[], id: string) {
if (!slist || !slist.length) {
return;
}
for (let i = 0, l = slist.length; i < l; i++) {
if (slist[i].id === id) {
slist.splice(i, 1);
return;
}
}
};
let contains = function (str: string, settings: string) {
if (!settings) {
return false;
}
let ts = settings.split('|');
for (let i = 0, l = ts.length; i < l; i++) {
if (str === ts[i]) {
return true;
}
}
return false;
};
let addFilter = function <T>(app: Application, type: string, filter: T) {
let filters = app.get(type);
if (!filters) {
filters = [];
app.set(type, filters);
}
filters.push(filter);
};
| BackendSess |
memory_copier.rs | use crate::core::worker::Worker;
use crate::host::memory_manager::page_size;
use crate::host::syscall_types::TypedPluginPtr;
use crate::utility::pod;
use crate::utility::pod::Pod;
use log::*;
use nix::{errno::Errno, unistd::Pid};
use std::fmt::Debug;
/// A utility for copying data to and from a process's memory.
#[derive(Debug, Clone)]
pub struct MemoryCopier {
pid: Pid,
}
impl MemoryCopier {
pub fn new(pid: Pid) -> Self {
Self { pid }
}
/// Copy the region.
/// SAFETY: A mutable reference to the process memory must not exist.
pub unsafe fn clone_mem<T: Pod + Debug>(
&self,
ptr: TypedPluginPtr<T>,
) -> Result<Vec<T>, Errno> |
/// Copy the readable prefix of the region.
/// SAFETY: A mutable reference to the process memory must not exist.
pub unsafe fn clone_mem_prefix<T: Pod + Debug>(
&self,
ptr: TypedPluginPtr<T>,
) -> Result<Vec<T>, Errno> {
let mut v = Vec::with_capacity(ptr.len());
unsafe { v.set_len(v.capacity()) };
let copied = unsafe { self.copy_prefix_from_ptr(&mut v, ptr)? };
v.truncate(copied);
Ok(v)
}
    /// Read as much of `ptr` as is accessible into `dst`.
/// SAFETY: A mutable reference to the process memory must not exist.
pub unsafe fn copy_prefix_from_ptr<T>(
&self,
dst: &mut [T],
src: TypedPluginPtr<T>,
) -> Result<usize, Errno>
where
T: Pod + Debug,
{
// Convert to u8
let mut buf = pod::to_u8_slice_mut(dst);
let ptr = src.cast_u8();
// Split at page boundaries to allow partial reads.
let mut slices = Vec::with_capacity((buf.len() + page_size() - 1) / page_size() + 1);
let mut total_bytes_toread = std::cmp::min(buf.len(), ptr.len());
// First chunk to read is from pointer to beginning of next page.
let prev_page_boundary = usize::from(ptr.ptr()) / page_size() * page_size();
let next_page_boundary = prev_page_boundary + page_size();
let mut next_bytes_toread = std::cmp::min(
next_page_boundary - usize::from(ptr.ptr()),
total_bytes_toread,
);
while next_bytes_toread > 0 {
// Add the next chunk to read.
let (prefix, suffix) = buf.split_at_mut(next_bytes_toread);
buf = suffix;
slices.push(prefix);
total_bytes_toread -= next_bytes_toread;
// Reads should now be page-aligned. Read a whole page at a time,
// up to however much is left.
next_bytes_toread = std::cmp::min(total_bytes_toread, page_size());
}
let bytes_read = unsafe { self.readv_ptrs(&mut slices, &[ptr])? };
Ok(bytes_read / std::mem::size_of::<T>())
}
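    // Worked example of the chunking above (numbers are hypothetical): with a
    // 4 KiB page size, a 10_000-byte read starting at address 0x1f00 is
    // issued as chunks of 256 bytes (up to the 0x2000 boundary), then 4096,
    // 4096, and finally 1552 bytes, so a fault on an unmapped page only
    // truncates the tail of the read instead of failing the whole call.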
    /// Copy `src` into `dst`.
/// SAFETY: A mutable reference to the process memory must not exist.
pub unsafe fn copy_from_ptr<T: Pod + Debug>(
&self,
dst: &mut [T],
src: TypedPluginPtr<T>,
) -> Result<(), Errno> {
assert_eq!(dst.len(), src.len());
let buf = pod::to_u8_slice_mut(dst);
let ptr = src.cast_u8();
let bytes_read = unsafe { self.readv_ptrs(&mut [buf], &[ptr])? };
if bytes_read != buf.len() {
warn!(
"Tried to read {} bytes but only got {}",
buf.len(),
bytes_read
);
return Err(Errno::EFAULT);
}
Ok(())
}
    /// Low level helper for reading directly from `srcs` to `dsts`.
    /// Returns the number of bytes read. Panics if the
    /// MemoryManager's process isn't currently active.
/// SAFETY: A mutable reference to the process memory must not exist.
unsafe fn readv_ptrs(
&self,
dsts: &mut [&mut [u8]],
srcs: &[TypedPluginPtr<u8>],
) -> Result<usize, Errno> {
let srcs: Vec<_> = srcs
.iter()
.map(|src| nix::sys::uio::RemoteIoVec {
base: usize::from(src.ptr()),
len: src.len(),
})
.collect();
let dsts: Vec<_> = dsts
.iter_mut()
.map(|dst: &mut &mut [u8]| -> nix::sys::uio::IoVec<&mut [u8]> {
nix::sys::uio::IoVec::from_mut_slice(*dst)
})
.collect();
unsafe { self.readv_iovecs(&dsts, &srcs) }
}
    /// Low level helper for reading directly from `srcs` to `dsts`.
    /// Returns the number of bytes read. Panics if the
    /// MemoryManager's process isn't currently active.
/// SAFETY: A mutable reference to the process memory must not exist.
unsafe fn readv_iovecs(
&self,
dsts: &[nix::sys::uio::IoVec<&mut [u8]>],
srcs: &[nix::sys::uio::RemoteIoVec],
) -> Result<usize, Errno> {
trace!(
"Reading from srcs of len {}",
srcs.iter().map(|s| s.len).sum::<usize>()
);
trace!(
"Reading to dsts of len {}",
dsts.iter().map(|d| d.as_slice().len()).sum::<usize>()
);
// While the documentation for process_vm_readv says to use the pid, in
// practice it needs to be the tid of a still-running thread. i.e. using the
// pid after the thread group leader has exited will fail.
let active_tid = Worker::active_thread_native_tid().unwrap();
let active_pid = Worker::active_process_native_pid().unwrap();
// Don't access another process's memory.
assert_eq!(active_pid, self.pid);
let nread = nix::sys::uio::process_vm_readv(active_tid, &dsts, &srcs)?;
Ok(nread)
}
    /// Low level helper for writing directly to `dst`. Panics if the
    /// MemoryManager's process isn't currently active.
/// SAFETY: A reference to the process memory must not exist.
pub unsafe fn copy_to_ptr<T: Pod + Debug>(
&self,
dst: TypedPluginPtr<T>,
src: &[T],
) -> Result<(), Errno> {
let dst = dst.cast_u8();
let src = pod::to_u8_slice(src);
assert_eq!(src.len(), dst.len());
let towrite = src.len();
trace!("write_ptr writing {} bytes", towrite);
let local = [nix::sys::uio::IoVec::from_slice(src)];
let remote = [nix::sys::uio::RemoteIoVec {
base: usize::from(dst.ptr()),
len: towrite,
}];
// While the documentation for process_vm_writev says to use the pid, in
// practice it needs to be the tid of a still-running thread. i.e. using the
// pid after the thread group leader has exited will fail.
let active_tid = Worker::active_thread_native_tid().unwrap();
let active_pid = Worker::active_process_native_pid().unwrap();
// Don't access another process's memory.
assert_eq!(active_pid, self.pid);
let nwritten = nix::sys::uio::process_vm_writev(active_tid, &local, &remote)?;
// There shouldn't be any partial writes with a single remote iovec.
assert_eq!(nwritten, towrite);
Ok(())
}
}
| {
let mut v = Vec::with_capacity(ptr.len());
unsafe { v.set_len(v.capacity()) };
unsafe { self.copy_from_ptr(&mut v, ptr)? };
Ok(v)
} |
dissolve_context.go | package game
import "diceserver/protocol"
// dissolveContext tracks room-dissolve (exit) voting and player online status
type dissolveContext struct {
	desk   *Desk            // the desk (table) this context belongs to
	status map[int64]bool   // per-player exit votes
	desc   map[int64]string // per-player exit descriptions
	pause  map[int64]bool   // per-player offline status
}
func newDissolveContext(desk *Desk) *dissolveContext {
return &dissolveContext{
desk: de | int64) bool {
return !d.pause[uid]
}
func (d *dissolveContext) updateOnlineStatus(uid int64, online bool) {
if online {
delete(d.pause, uid)
} else {
d.pause[uid] = true
}
	d.desk.logger.Debugf("player online status: %+v", d.pause)
d.desk.group.Broadcast("onPlayerOfflineStatus", &protocol.PlayerOfflineStatus{Uid: uid, Offline: !online})
}
func (d *dissolveContext) offlineCount() int {
return len(d.pause)
}
| sk,
status: map[int64]bool{},
desc: map[int64]string{},
pause: map[int64]bool{},
}
}
func (d *dissolveContext) isOnline(uid |
__init__.py | from . import ingredients
from . import items
from . import recipes
from . import tags
MODULES = (
ingredients,
items,
recipes,
tags,
)
| def register_blueprints(api):
"""Initialize application with all modules"""
for module in MODULES:
api.register_blueprint(module.blp) | |
generic.py | from six import iteritems, itervalues
from collections import OrderedDict, MutableMapping, Iterable
from functools import wraps
import anvil.config as cfg
def to_list(query):
if isinstance(query, list):
return query
elif isinstance(query, str):
return [query]
elif isinstance(query, dict):
return [query]
elif not query:
return list()
try:
return list(query)
except TypeError:
return [query]
def to_size_list(query, desired_length):
query_list = to_list(query) if query else [None]
if len(query_list) > desired_length:
return query_list[:desired_length]
else:
return query_list + [query_list[-1]] * (desired_length - len(query_list))
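# e.g. to_size_list('a', 3) -> ['a', 'a', 'a']; to_size_list([1, 2], 3) -> [1, 2, 2]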
def to_camel_case(input_string):
tokens = input_string.split('_')
return tokens[0] + ''.join([token.capitalize() for token in tokens[1:]])
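# e.g. to_camel_case('plot_factor') -> 'plotFactor'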
def gen_flatten_dict_depth_two(d):
    """Taken from:
    https://stackoverflow.com/questions/3835192/flatten-a-dictionary-of-dictionaries-2-levels-deep-of-lists-in-python
    Given a dict whose values are dicts of lists, lists or scalars (at most
    two levels deep), return an iterator over all the leaf nodes within.
    """
    for d_inner in itervalues(d):
        if isinstance(d_inner, dict):
            for nodes in itervalues(d_inner):
                for node in to_list(nodes):
                    yield node
        else:
            for node in to_list(d_inner):
                yield node
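# e.g. list(gen_flatten_dict_depth_two({'a': {'x': [1, 2]}, 'b': 3})) -> [1, 2, 3]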
def get_dict_depth(d=None, level=0):
"""Returns maximum depth of the hierarchy"""
if not isinstance(d, dict) or not d:
return level
return max(get_dict_depth(d[k], level=level + 1) for k in d)
def get_dict_key_matches(key, dictionary):
for k, v in iteritems(dictionary):
if k == key:
return {k: v}
elif isinstance(v, dict):
return get_dict_key_matches(key, v)
def dict_to_keys_list(d, keys=None):
keys = keys if keys is not None else []
if isinstance(d, dict):
for k, v in iteritems(d):
keys.append(k)
dict_to_keys_list(v, keys)
else:
keys.append(d)
return keys
def dict_deep_sort(obj):
    """Recursively sort a dict or nested lists.
    Taken from: http://goo.gl/tQfDP6
    """
    if isinstance(obj, dict):
        _sorted = OrderedDict()
        for key in sorted(list(obj)):
            _sorted[key] = dict_deep_sort(obj[key])
    elif isinstance(obj, list):
        new_list = []
        for val in obj:
            new_list.append(dict_deep_sort(val))
        _sorted = sorted(new_list)
    else:
        _sorted = obj
    return _sorted
def to_str_dict(d):
data = {}
for k, v in iteritems(d):
try:
data.update({str(k): str(v)})
except TypeError:
pass
return data
def pop_dict_keys(d, keys):
popped = []
for key in keys:
try:
popped.append(d.pop(key))
except KeyError:
pass
return popped
def merge_dicts(*args, **kwargs):
"""Outputs a merged dictionary from inputs. Overwrites data if there are conflicts from left to right.
:param args: (dict), tuple of input dictionaries
:param kwargs: dict, input kwargs to merge
:return: dict, combined data.
"""
data = {}
for input_dict in [arg for arg in args if isinstance(arg, dict)] + [kwargs]:
data.update(input_dict)
return data
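# e.g. merge_dicts({'a': 1}, {'a': 2, 'b': 3}, c=4) -> {'a': 2, 'b': 3, 'c': 4}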
def dict_compare(d1, d2):
|
def dict_to_flat_dict(d, full_path=True, parent_key='', sep='_'):
"""Got from https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys
:param d: dict, input dictionary
:param full_path: bool, whether to store the full path as the key or the final key for that dictionary item.
:param parent_key: str, keeps track of the dictionary path taken, do not set.
    :param sep: str, arbitrary separator to delineate path separation in the parent_key string.
:return: dict, flat dictionary with all keys as full path keys.
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key and full_path else k
if isinstance(v, MutableMapping):
            items.extend(dict_to_flat_dict(v, full_path=full_path, parent_key=new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
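# e.g. dict_to_flat_dict({'a': {'b': 1}, 'c': 2}) -> {'a_b': 1, 'c': 2}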
merge_value_LUT = {
dict: lambda d1, d2: merge_dicts(d2),
list: lambda l1, l2: l1 + to_list(l2),
str: lambda s1, s2: s1 + str(s2),
'replace': lambda e1, e2: e2,
}
class Map(dict):
"""A dot notation accessible dictionary class extension.
Taken from: https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in iteritems(arg):
self[k] = v
if kwargs:
for k, v in iteritems(kwargs):
self[k] = v
def deep_update(self, d, path=None):
if path is None:
path = []
for k, v in iteritems(d):
if isinstance(v, dict):
self.deep_update(v, path=path + [k])
else:
self._merge_value(path + [k], v)
def flatten(self):
return gen_flatten_dict_depth_two(self)
def to_flat_dict(self, full_path=False):
return dict_to_flat_dict(self, full_path=full_path)
    def to_value_list(self):
        result = []
        # a plain loop, not a lazy map(), so the extends actually run on Python 3
        for n in itervalues(self.to_flat_dict()):
            result.extend(n if isinstance(n, Iterable) else to_list(n))
        return result
def _merge_value(self, path, v):
"""Stably merge values without overwriting or messing up Map object.
This is used since we have a slightly customized way of adding entries and do not want the base Map object
to start getting stale data. If a path does not exist, we will add a default Map object in that place
unless it is the final path, in which case we merge with the existing (or not) value.
:param path: list, list of keys we will traverse down.
:param v: object, any type of object we are adding to that nested/base dict.
"""
current_map = self
for p in path[:-1]:
current_map = current_map.setdefault(p, self.__class__())
current_v = current_map.setdefault(path[-1], None)
current_map[path[-1]] = merge_value_LUT.get(type(current_v), merge_value_LUT['replace'])(current_v, v)
def __getattr__(self, attr):
"""Passthrough function for dictionary.get"""
return self.get(attr)
def __setattr__(self, key, value):
"""Passthrough function for dictionary item setter"""
self.__setitem__(key, value)
def __setitem__(self, key, value):
"""Updates both setitem and instance dictionary key value"""
super(Map, self).__setitem__(key, value)
self.__dict__[key] = value
def __delattr__(self, item):
"""Passthrough for dictionary delete item."""
self.__delitem__(item)
def __delitem__(self, key):
"""Deletes both the attribute and the instance dictionary"""
super(Map, self).__delitem__(key)
del self.__dict__[key]
def __eq__(self, other):
"""Determines if the dictionary is equivalent to the other dictionary."""
return dict_compare(self.__dict__, other)
def extend_parent_kwarg(number_of_parents):
def inner(f):
@wraps(f)
def wrapper(abstract_grouping, *args, **kwargs):
kwargs[cfg.PARENT] = iter(to_size_list(kwargs.get(cfg.PARENT), number_of_parents))
return f(abstract_grouping, *args, **kwargs)
return wrapper
return inner
| """Taken from: https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python"""
d1_keys = set(list(d1))
d2_keys = set(list(d2))
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o: (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]}
same = set(o for o in intersect_keys if d1[o] == d2[o])
return added, removed, modified, same |
draft.go | package draft
import (
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"
"strings"
"github.com/Azure/draft/pkg/draft/draftpath"
"github.com/Azure/draft/pkg/draft/pack/repo"
"github.com/Azure/draft/pkg/linguist"
"github.com/jenkins-x/jx/pkg/jx/cmd/log"
)
// copied from draft so we can change the $DRAFT_HOME to ~/.jx/draft and lookup jx draft packs
// credit original from: https://github.com/Azure/draft/blob/8e1a459/cmd/draft/create.go#L163
// doPackDetection performs pack detection across all the packs available in $(draft home)/packs in
// alphabetical order, returning the pack dirpath and any errors that occurred during the pack detection.
func DoPackDetection(home draftpath.Home, out io.Writer, dir string) (string, error) {
log.Infof("perforning pack detection in folder %s\n", dir)
langs, err := linguist.ProcessDir(dir)
if err != nil |
if len(langs) == 0 {
return "", fmt.Errorf("there was an error detecting the language")
}
for _, lang := range langs {
detectedLang := linguist.Alias(lang)
fmt.Fprintf(out, "--> Draft detected %s (%f%%)\n", detectedLang.Language, detectedLang.Percent)
for _, repository := range repo.FindRepositories(home.Packs()) {
packDir := path.Join(repository.Dir, repo.PackDirName)
packs, err := ioutil.ReadDir(packDir)
if err != nil {
return "", fmt.Errorf("there was an error reading %s: %v", packDir, err)
}
for _, file := range packs {
if file.IsDir() {
if strings.Compare(strings.ToLower(detectedLang.Language), strings.ToLower(file.Name())) == 0 {
packPath := filepath.Join(packDir, file.Name())
return packPath, nil
}
}
}
}
fmt.Fprintf(out, "--> Could not find a pack for %s. Trying to find the next likely language match...\n", detectedLang.Language)
}
return "", fmt.Errorf("there was an error detecting the language using packs from %s", home.Packs())
}
| {
return "", fmt.Errorf("there was an error detecting the language: %s", err)
} |
render.ts | import {
ActionType,
EditorContextValue,
findNextNode,
findPreviousNode,
getFocusTree,
PluginState,
State,
StateType
} from '@edtr-io/core'
import * as React from 'react'
import { rowsState, rowState } from '..'
export default function({
row,
rows,
index,
store,
getDocument,
renderIntoExtendedSettings,
PrimarySettingsWrapper
}: {
row: StateType.StateDescriptorReturnType<typeof rowState>
rows: StateType.StateDescriptorReturnType<typeof rowsState>
index: number
store: EditorContextValue
getDocument: (state: State, id: string) => PluginState | null
renderIntoExtendedSettings: (children: React.ReactChild) => React.ReactNode
PrimarySettingsWrapper: React.ComponentType
}) {
const { state, dispatch } = store
return row.render({
renderIntoExtendedSettings,
PrimarySettingsWrapper,
insert: (options?: { plugin: string; state?: unknown }) =>
rows.insert(index + 1, options),
replace: (options?: { plugin: string; state?: unknown }) => {
rows.remove(index)
rows.insert(index, options)
},
remove: () => {
rows.remove(index)
},
mergeWithPrevious: (merge: (statePrevious: unknown) => unknown) => {
if (index - 1 < 0) return
const current = getDocument(state, row.id)
let previous = getDocument(state, rows()[index - 1].id)
if (!previous || !current) return
if (previous.plugin !== current.plugin) {
// check if previous focus plugin is the same type
const root = getFocusTree(state)
if (!root) return | const previousFocusId = findPreviousNode(root, row.id)
if (!previousFocusId) return
previous = getDocument(state, previousFocusId)
if (!previous || previous.plugin !== current.plugin) return
const merged = merge(previous.state)
dispatch({
type: ActionType.Change,
payload: {
id: previousFocusId,
state: () => merged
},
commit: undefined
})
rows.remove(index)
} else {
merge(previous.state)
setTimeout(() => rows.remove(index - 1))
}
},
mergeWithNext: (merge: (statePrevious: unknown) => unknown) => {
if (index + 1 === rows().length) return
const current = getDocument(state, row.id)
let next = getDocument(state, rows()[index + 1].id)
if (!next || !current) return
if (next.plugin !== current.plugin) {
// check if next focus plugin is the same type
const root = getFocusTree(state)
if (!root) return
const nextFocusId = findNextNode(root, row.id)
if (!nextFocusId) return
// use that plugin for merge
next = getDocument(state, nextFocusId)
if (!next || next.plugin !== current.plugin) return
}
merge(next.state)
setTimeout(() => {
rows.remove(index + 1)
})
}
})
} | |
base64_to_pic.py | # -*- coding:utf-8 -*-
import base64
bs='iVBORw0KGgoAAAANSUhEUg....'
imgdata=base64.b64decode(bs)
file=open('2.jpg','wb') | file.close() | file.write(imgdata) |
test_coll.py | """
Reads queries from test collections
@author: Faegheh Hasibi ([email protected])
"""
import csv
from nordlys.tagme import config
def read_yerd_queries(y_erd_file=config.Y_ERD):
"""
Reads queries from Erd query file.
:return dictionary {query_id : query_content}
"""
queries = {}
with open(y_erd_file, 'rb') as y_erd:
reader = csv.DictReader(y_erd, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in reader:
qid = line['qid']
query = line['query']
queries[qid] = query.strip()
print "Number of queries:", len(queries)
return queries
def read_erd_queries(erd_q_file=config.ERD_QUERY):
|
def read_tagme_queries(dataset_file):
"""
Reads queries from snippet file.
:return dictionary {qid : query}
"""
queries = {}
q_file = open(dataset_file, "r")
for line in q_file:
line = line.strip().split("\t")
query_id = line[0].strip()
query = line[1].strip()
queries[query_id] = query
q_file.close()
print "Number of queries:", len(queries)
return queries | """
Reads queries from Erd query file.
:return dictionary {qid : query}
"""
queries = {}
q_file = open(erd_q_file, "r")
for line in q_file:
line = line.split("\t")
query_id = line[0].strip()
query = line[-1].strip()
queries[query_id] = query
q_file.close()
print "Number of queries:", len(queries)
return queries |
stand.py | from os import (
startfile,
getcwd
)
from os.path import join
from io import BytesIO
from csv import (
writer,
excel
)
from openpyxl import (
Workbook,
load_workbook
)
from statistics import (
mean,
variance,
stdev
)
from treetopper.plot import Plot
from treetopper.timber import (
TimberQuick,
TimberFull
)
from treetopper.log import Log
from treetopper.thin import (
ThinTPA,
ThinBA,
ThinRD
)
from treetopper._exceptions import TargetDensityError
from treetopper.fvs import FVS
from treetopper._constants import (
math,
ALL_SPECIES_NAMES,
GRADE_SORT,
LOG_LENGTHS,
SORTED_HEADS
)
from treetopper._utils import (
format_comma,
format_pct,
extension_check,
reorder_dict,
check_date,
add_logs_to_table_heads
)
from treetopper._import_from_sheets import import_from_sheet
from treetopper._print_console import (
print_stand_species,
print_stand_logs,
print_stand_stats
)
from treetopper._print_pdf import PDF
class Stand(object):
"""The Stand Class represents a stand of timber that has had an inventory conducted on it. It should made up of plots (Plot Class)
which contain trees (Timber Classes).
The Stand class will run calculations and statistics of the current stand conditions and it will run calculations of the log
merchantabilty for three metrics: logs per acre, log board feet per acre, and log cubic feet per acre, based on log grades,
log length ranges and species.
"""
def __init__(self, name: str, plot_factor: float, acres: float = None, inventory_date: str = None):
self.name = name.upper()
self.plot_factor = plot_factor
self.plots = []
self.plot_count = 0
self.tpa = 0
self.ba_ac = 0
self.qmd = 0
self.rd_ac = 0
self.bf_ac = 0
self.cf_ac = 0
self.avg_hgt = 0
self.hdr = 0
self.vbar = 0
self.tpa_stats = {}
self.ba_ac_stats = {}
self.rd_ac_stats = {}
self.bf_ac_stats = {}
self.cf_ac_stats = {}
self.species = {}
self.species_gross = {}
self.species_stats = {}
self.logs = {}
self.table_data = []
self.summary_stand = []
self.summary_logs = {}
self.summary_stats = []
self.metrics = ['tpa', 'ba_ac', 'rd_ac', 'bf_ac', 'cf_ac']
self.attrs = ['_gross', '_stats', '']
self.acres = acres
if inventory_date:
self.inv_date = check_date(inventory_date)
else:
self.inv_date = inventory_date
def __getitem__(self, attribute: str):
return self.__dict__[attribute]
def get_stand_table_text(self):
"""Returns a console-formatted string of current stand conditions"""
return print_stand_species(self.summary_stand)
def get_logs_table_text(self):
"""Returns a console-formatted string of stand logs data"""
return print_stand_logs(self.summary_logs)
def get_stats_table_text(self):
"""Returns and console-formatted string of stand stand statistics"""
return print_stand_stats(self.summary_stats)
def get_console_report_text(self):
"""Returns a console-formatted string of the complete stand report"""
return self._compile_report_text()
def console_report(self):
"""Prints a console-formatted string of the complete stand report"""
print(self._compile_report_text())
    def get_pdf_report_bytes_io(self):
        """Returns the complete stand report PDF as a BytesIO object"""
        pdf = self._compile_pdf_report()
return BytesIO(pdf.output(dest='S').encode('latin-1'))
def pdf_report(self, filename: str, directory: str = None, start_file_upon_creation: bool = False):
"""Exports a pdf of the complete stand report to a user specified directory or if directory is None,
to the current working directory. Will open the created pdf report if start_file_upon_creation is True"""
check = extension_check(filename, '.pdf')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
pdf = self._compile_pdf_report()
pdf.output(file, 'F')
if start_file_upon_creation:
startfile(file)
def add_plot(self, plot: Plot):
"""Adds a plot to the stand's plots list and re-runs the calculations and statistics of the stand.
plot argument needs to be the a Plot Class"""
self.plots.append(plot)
self.plot_count += 1
for met in self.metrics:
self._update_metrics(met)
self.qmd = math.sqrt((self.ba_ac / self.tpa) / .005454)
self.vbar = self.bf_ac / self.ba_ac
self._update_species(plot)
self._update_logs(plot)
self.table_data = self._update_table_data()
self.summary_stand = self._update_summary_stand()
self.summary_logs = self._update_summary_logs()
self.summary_stats = self._update_summary_stats()
def import_sheet_quick(self, file_path: str):
"""Imports tree and plot data from a CSV or XLSX file for a quick cruise and adds that data to the stand"""
plots = import_from_sheet(file_path, self.name, 'q')
for plot_num in plots:
plot = Plot()
for tree in plots[plot_num]:
plot.add_tree(TimberQuick(self.plot_factor, *tree))
self.add_plot(plot)
def import_sheet_full(self, file_path: str):
"""Imports tree and plot data from a CSV or XLSX file for a full cruise and adds that data to the stand"""
plots = import_from_sheet(file_path, self.name, 'f')
for plot_num in plots:
plot = Plot()
for tree_data in plots[plot_num]:
args = tree_data[: -1]
logs = tree_data[-1]
tree = TimberFull(self.plot_factor, *args)
for log in logs:
tree.add_log(*log)
plot.add_tree(tree)
self.add_plot(plot)
def table_to_csv(self, filename: str, directory: str = None):
"""Creates or appends a CSV file with tree data from self.table_data"""
check = extension_check(filename, '.csv')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
if isfile(file):
allow = 'a'
start = 1
else:
allow = 'w'
start = 0
with open(file, allow, newline='') as csv_file:
csv_write = writer(csv_file, dialect=excel)
for i in self.table_data[start:]:
csv_write.writerow(i)
def table_to_excel(self, filename: str, directory: str = None):
"""Creates or appends an Excel file with tree data from self.table_data"""
check = extension_check(filename, '.xlsx')
if directory:
file = join(directory, check)
else:
file = join(getcwd(), check)
if isfile(file):
wb = load_workbook(file)
ws = wb.active
for i in self.table_data[1:]:
ws.append(i)
wb.save(file)
else:
wb = Workbook()
ws = wb.active
for i in self.table_data:
ws.append(i)
wb.save(file)
def _update_metrics(self, metric: str):
"""Updates stand metrics based on the metric entered in the argument, used internally"""
metric_list = [plot[metric] for plot in self.plots]
stats = self._get_stats(metric_list)
setattr(self, metric, stats['mean'])
setattr(self, f'{metric}_stats', stats)
def _update_species(self, plot):
"""Re-runs stand conditions calculations and statistics, used internally"""
update_after = ['qmd', 'vbar', 'avg_hgt', 'hdr']
if self.plot_count == 0:
return
else:
for species in plot.species:
if species not in self.species_gross:
for attr in self.attrs:
if attr == '_gross':
getattr(self, f'species{attr}')[species] = {met: [] for met in self.metrics}
else:
getattr(self, f'species{attr}')[species] = {met: 0 for met in self.metrics}
for key in plot.species[species]:
if key not in update_after:
self.species_gross[species][key].append(plot.species[species][key])
for species in self.species_gross:
for key in self.species_gross[species]:
if key not in update_after:
data = self.species_gross[species][key]
if len(data) < self.plot_count:
data += ([0] * (self.plot_count - len(data)))
stats = self._get_stats(data)
self.species[species][key] = stats['mean']
self.species_stats[species][key] = stats
self.species[species]['qmd'] = math.sqrt((self.species[species]['ba_ac'] / self.species[species]['tpa']) / 0.005454)
self.species[species]['vbar'] = self.species[species]['bf_ac'] / self.species[species]['ba_ac']
if species == 'totals_all':
self.species[species]['avg_hgt'] = mean([p.avg_hgt for p in self.plots])
self.species[species]['hdr'] = mean([p.hdr for p in self.plots])
else:
trees = []
for p in self.plots:
for t in p.trees:
trees.append(t)
self.species[species]['avg_hgt'] = mean([t.height for t in trees if t.species == species])
self.species[species]['hdr'] = mean([t.hdr for t in trees if t.species == species])
def _update_logs(self, plot):
"""Re-runs stand logs calculations, used internally"""
if self.plot_count == 0:
return
else:
subs = ['lpa', 'bf_ac', 'cf_ac']
for species in plot.logs:
if species not in self.logs:
self.logs[species] = {}
for grade in plot.logs[species]:
if grade not in self.logs[species]:
self.logs[species][grade] = {rng: {sub: {'gross': [], 'mean': 0} for sub in subs} for rng in LOG_LENGTHS}
self.logs[species][grade]['totals_by_grade'] = {sub: {'gross': [], 'mean': 0} for sub in subs}
for rng in plot.logs[species][grade]:
if rng != 'display':
for sub in subs:
self.logs[species][grade][rng][sub]['gross'].append(plot.logs[species][grade][rng][sub])
for species in self.logs:
for grade in self.logs[species]:
for rng in self.logs[species][grade]:
for sub in subs:
gross = self.logs[species][grade][rng][sub]['gross']
if len(gross) < self.plot_count:
gross += ([0] * (self.plot_count - len(gross)))
self.logs[species][grade][rng][sub]['mean'] = mean(gross)
def _update_table_data(self):
"""Converts stand data to plot/tree inventory data table layout, used internally"""
heads = ['Stand', 'Plot Number', 'Tree Number', 'Species', 'DBH', 'Height',
'Stump Height', 'Log 1 Length', 'Log 1 Grade', 'Log 1 Defect', 'Between Logs Feet']
master = []
max_logs = []
for i, plot in enumerate(self.plots):
for j, tree in enumerate(plot.trees):
temp = [self.name, i + 1, j + 1]
for key in ['species', 'dbh', 'height']:
temp.append(tree[key])
len_logs = len(tree.logs)
max_logs.append(len_logs)
for k, lnum in enumerate(tree.logs):
log = tree.logs[lnum]
if lnum == 1:
temp.append(log.stem_height - log.length - 1)
for lkey in ['length', 'grade', 'defect']:
temp.append(log[lkey])
if k < len(tree.logs) - 1:
between = tree.logs[lnum+1].stem_height - log.stem_height - tree.logs[lnum+1].length - 1
if between < 0:
temp.append(0)
else:
temp.append(between)
master.append(temp)
heads += add_logs_to_table_heads(max(max_logs))
len_heads = len(heads)
for i in master:
len_i = len(i)
if len_i < len_heads:
i += ['' for j in range(len_heads - len_i)]
master.insert(0, heads)
return master
def _update_summary_stand(self):
"""Updates the current stand conditions list of stand.summary_stand, used internally"""
heads = ['SPECIES'] + [head[1] for head in SORTED_HEADS]
body_data = []
for key in self.species:
if key == 'totals_all':
show = 'TOTALS'
else:
show = key
temp = [str(show)] + [format_comma(self.species[key][i[0]]) for i in SORTED_HEADS]
body_data.append(temp)
body_data.append(body_data.pop(0))
body_data.insert(0, heads)
return body_data
def _update_summary_logs(self):
"""Updates the stand logs summary dict, data-tables are broken down by metric type --> species, used internally.
Example: self.summary_logs['BOARD FEET PER ACRE']['DF'] --> data table"""
table_data = {}
tables = [['bf_ac', 'BOARD FEET PER ACRE'], ['cf_ac', 'CUBIC FEET PER ACRE'], ['lpa', 'LOGS PER ACRE']]
for table in tables:
metric_key = table[0]
key = table[1]
table_data[key] = {}
for species in self.logs:
if species == 'totals_all':
show = 'TOTALS'
else:
show = ALL_SPECIES_NAMES[species]
table_data[key][show] = [['LOG GRADES'] + [rng.upper() for rng in LOG_LENGTHS] + ['TOTALS']]
grade_sort = []
for grade in self.logs[species]:
values = [self.logs[species][grade][rng][metric_key]['mean'] for rng in self.logs[species][grade]]
if sum(values) > 0:
if grade == 'totals_by_length':
col_text = 'TOTALS'
else:
col_text = grade
grade_sort.append([col_text] + [format_comma(z) for z in values])
grade_sort = sorted(grade_sort, key=lambda x: GRADE_SORT[x[0]])
for g in grade_sort:
table_data[key][show].append(g)
table_data[key] = reorder_dict(table_data[key])
return table_data
def _update_summary_stats(self):
"""Updates the stand statistics dict, stats-tables are broken down by species, used internally.
Example: self.summary_stats['DF'] --> stats-table"""
tables = {}
for spp in self.species_stats:
if spp == 'totals_all':
show = 'TOTALS'
else:
show = ALL_SPECIES_NAMES[spp]
tables[show] = [['METRIC'] + [head.upper() for head in self.species_stats[spp]['tpa'] if head != 'low_avg_high'] + ['LOW',
'AVERAGE',
'HIGH']]
for key in self.species_stats[spp]:
temp = [key.upper()]
not_enough_data = False
for sub in self.species_stats[spp][key]:
x = self.species_stats[spp][key][sub]
if not_enough_data:
if x == 'Not enough data':
if sub == 'low_avg_high':
for i in range(3):
temp.append('-')
else:
temp.append('-')
else:
if x == 'Not enough data':
temp.append(x)
not_enough_data = True
else:
if sub == 'low_avg_high':
for i in x:
temp.append(format_comma(i))
elif sub == 'stderr_pct':
temp.append(format_pct(x))
else:
temp.append(format_comma(x))
tables[show].append(temp)
return reorder_dict(tables)
def _get_stats(self, data):
"""Runs the statistical calculations on a set of the stand conditions data, returns an updated sub dict, used internally"""
m = mean(data)
if len(data) >= 2:
std = stdev(data)
ste = std / math.sqrt(self.plot_count)
low_avg_high = [max(round(m - ste, 1), 0), m, m + ste]
d = {'mean': m,
'variance': variance(data),
'stdev': std,
'stderr': ste,
'stderr_pct': (ste / m) * 100,
'low_avg_high': low_avg_high}
else:
d = {'mean': m,
'variance': 'Not enough data',
'stdev': 'Not enough data',
'stderr': 'Not enough data',
'stderr_pct': 'Not enough data',
'low_avg_high': 'Not enough data'}
return d
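    # e.g. with three plots and data = [100, 120, 140]: mean = 120, stdev = 20,
    # stderr = 20 / sqrt(3) ~= 11.5 and low_avg_high ~= [108.5, 120, 131.5]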
def _compile_report_text(self):
"""Compiles the console-formatted report of all stand data and stats, used internally"""
n = '\n' * 4
console_text = f'{print_stand_species(self.summary_stand)}{n}'
console_text += f'{print_stand_logs(self.summary_logs)}{n}'
console_text += f'{print_stand_stats(self.summary_stats)}'
return console_text
def _compile_pdf_report(self):
pdf = PDF()
pdf.alias_nb_pages()
pdf.add_page()
pdf.compile_stand_report(self)
return pdf
if __name__ == '__main__':
import argparse
import traceback
import sys
from os import mkdir, getcwd
from os.path import join, isfile, isdir, expanduser
from treetopper._utils import get_desktop_path
def make_dir_and_subdir(workflow_num):
desktop = get_desktop_path()
tt_dir = join(desktop, 'treetopper_outputs')
if not isdir(tt_dir):
mkdir(tt_dir)
wf_dir = join(tt_dir, f'workflow_{workflow_num}')
if not isdir(wf_dir):
mkdir(wf_dir)
return wf_dir
def get_package_path(filename):
path = None
for i in sys.path:
if 'AppData' in i and i[-13:] == 'site-packages':
path = i
break
tt_path = join(path, 'treetopper')
sheet_path = join(tt_path, 'example_csv_and_xlsx')
final = join(sheet_path, filename)
return final
parser = argparse.ArgumentParser(description='treetopper Example Workflows')
    parser.add_argument('workflow_number', help='Enter the number of the workflow to run.\n Valid workflow numbers: 1, 2, 3, 4, 5, 6')
args = parser.parse_args()
wf = args.workflow_number
while True:
if wf not in ['1', '2', '3', '4', '5', '6']:
print('Please enter a workflow number 1, 2, 3, 4, 5, or 6')
wf = input('Workflow #: ')
else:
break
wf = int(wf)
def workflow_1(workflow_number):
stand = Stand('WF1', -20)
plot_factor = stand.plot_factor
tree_data = [
# Plot 1
[TimberQuick(plot_factor, 'DF', 29.5, 119), TimberQuick(plot_factor, 'WH', 18.9, 102),
TimberQuick(plot_factor, 'WH', 20.2, 101), TimberQuick(plot_factor, 'WH', 19.9, 100),
TimberQuick(plot_factor, 'DF', 20.6, 112)],
# Plot 2
[TimberQuick(plot_factor, 'DF', 25.0, 117), TimberQuick(plot_factor, 'DF', 14.3, 105),
TimberQuick(plot_factor, 'DF', 20.4, 119), TimberQuick(plot_factor, 'DF', 16.0, 108),
TimberQuick(plot_factor, 'RC', 20.2, 124), TimberQuick(plot_factor, 'RC', 19.5, 116),
TimberQuick(plot_factor, 'RC', 23.4, 121), TimberQuick(plot_factor, 'DF', 17.8, 116),
TimberQuick(plot_factor, 'DF', 22.3, 125)]
]
for trees in tree_data:
plot = Plot()
for tree in trees:
plot.add_tree(tree)
stand.add_plot(plot)
path = make_dir_and_subdir(workflow_number)
stand.console_report()
stand.table_to_csv(join(path, 'example_csv_export.csv'))
thin80tpa = ThinTPA(stand, 80)
thin80tpa.console_report()
end_message = """**WORKFLOW 1 created a QUICK CRUISE stand from manually entered tree data.
It then ran a thinning scenario with a target density of 80 Trees per Acre considering all species and diameter ranges.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .csv "example_csv_export.csv" in desktop/treetopper_outputs/workflow_1/
"""
print(f'\n\n{end_message}')
def workflow_2(workflow_number):
stand = Stand('WF2', 33.3)
plot_factor = stand.plot_factor
tree_data = [
# Plot 1
[[TimberFull(plot_factor, 'DF', 29.5, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 0], [102, 18, 'S4', 10]]],
[TimberFull(plot_factor, 'WH', 18.9, 102), [[42, 40, 'S2', 0], [79, 36, 'S4', 5]]],
[TimberFull(plot_factor, 'WH', 20.2, 101), [[42, 40, 'S2', 5], [83, 40, 'S4', 0]]],
[TimberFull(plot_factor, 'WH', 19.9, 100), [[42, 40, 'S2', 0], [83, 40, 'S4', 15]]],
[TimberFull(plot_factor, 'DF', 20.6, 112), [[42, 40, 'S2', 0], [83, 40, 'S3', 5], [100, 16, 'UT', 10]]]],
# Plot 2
[[TimberFull(plot_factor, 'DF', 25.0, 117), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [100, 16, 'S4', 0]]],
[TimberFull(plot_factor, 'DF', 14.3, 105), [[42, 40, 'S3', 0], [79, 36, 'S4', 0]]],
[TimberFull(plot_factor, 'DF', 20.4, 119), [[42, 40, 'S2', 5], [83, 40, 'S3', 5], [100, 16, 'S4', 5]]],
[TimberFull(plot_factor, 'DF', 16.0, 108), [[42, 40, 'S3', 5], [83, 40, 'S3', 10]]],
[TimberFull(plot_factor, 'RC', 20.2, 124), [[42, 40, 'CR', 5], [83, 40, 'CR', 5], [104, 20, 'CR', 5]]],
[TimberFull(plot_factor, 'RC', 19.5, 116), [[42, 40, 'CR', 10], [83, 40, 'CR', 5], [100, 16, 'CR', 0]]],
[TimberFull(plot_factor, 'RC', 23.4, 121), [[42, 40, 'CR', 0], [83, 40, 'CR', 0], [106, 22, 'CR', 5]]],
[TimberFull(plot_factor, 'DF', 17.8, 116), [[42, 40, 'S2', 0], [83, 40, 'S3', 0], [100, 16, 'S4', 10]]],
[TimberFull(plot_factor, 'DF', 22.3, 125), [[42, 40, 'SM', 0], [83, 40, 'S3', 5], [108, 24, 'S4', 0]]]]
]
for trees in tree_data:
plot = Plot()
for tree, logs in trees:
for log in logs:
tree.add_log(*log)
plot.add_tree(tree)
stand.add_plot(plot)
path = make_dir_and_subdir(workflow_number)
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin120ba = ThinBA(stand, 120, species_to_cut=['DF', 'WH'])
thin120ba.console_report()
end_message = """**WORKFLOW 2 created a FULL CRUISE stand from manually entered tree data.
It then ran a thinning scenario with a target density of 120 Basal Area per Acre harvesting only DF and WH considering all diameter ranges.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_2/
"""
print(f'\n\n{end_message}')
def workflow_3(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('EX4', -30)
stand.import_sheet_quick(get_package_path('Example_Excel_quick.xlsx'))
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin25rd = ThinRD(stand, 25, species_to_cut=['DF', 'WH'], min_dbh_to_cut=10, max_dbh_to_cut=18)
thin25rd.console_report()
end_message = """**WORKFLOW 3 created a QUICK CRUISE stand from importing plot data from an excel sheet.
It then ran a thinning scenario with a target density of 25 Relative Density per Acre harvesting only DF and WH, with a
minimum dbh of 10 inches and a maximum dbh of 18 inches. ** Note: this target density cannot be fully achieved
because the parameters do not allow for the needed harvest density; the example illustrates that the thinning
report tells the user how much density was removed and how much more is needed to reach the desired target
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_3/
"""
print(f'\n\n{end_message}')
def workflow_4(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('OK2', 46.94)
stand.import_sheet_full(get_package_path('Example_CSV_full.csv'))
stand.console_report()
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
try:
thin100tpa = ThinTPA(stand, 100)
thin100tpa.console_report()
except TargetDensityError as e:
print(traceback.format_exc())
end_message = """**WORKFLOW 4 created a FULL CRUISE stand from importing plot data from an csv sheet.
It then ran a thinning scenario with a target density of 100 Trees per Acre considering all species and diameter ranges.
** Note this thinning density is greater than the current stand density and the Thin Class will throw a TargetDensityError exception
which will explain what went wrong.
Outputs:
Stand console report in terminal [print(stand_class.console_report)] ^above^
Thinning console report in terminal [print(thin_class.console_report)] ^above^
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_4/
"""
print(f'\n\n{end_message}')
def workflow_5(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('EX3', 33.3)
stand.import_sheet_quick(get_package_path('Example_CSV_quick.csv'))
stand.pdf_report(join(path, 'stand_report.pdf'))
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
thin140ba = ThinBA(stand, 140, species_to_cut=['DF', 'WH', 'RA'], max_dbh_to_cut=24)
thin140ba.pdf_report(join(path, 'thin_report.pdf'))
end_message = """**WORKFLOW 5 created a QUICK CRUISE stand from importing plot data from an csv sheet.
It then ran a thinning scenario with a target density of 140 Basal Area per Acre harvesting only DF, WH and RA with a maximum diameter of 24 inches.
Outputs:
Stand PDF report "stand_report.pdf" from [stand_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/
Thinning PDF report "thin_report.pdf" from [thin_class.pdf_report()] in desktop/treetopper_outputs/workflow_5/
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_5/
"""
print(f'\n\n{end_message}')
def workflow_6(workflow_number):
path = make_dir_and_subdir(workflow_number)
stand = Stand('OK1', -30)
stand.import_sheet_full(get_package_path('Example_Excel_full.xlsx'))
stand.table_to_excel(join(path, 'example_xlsx_export.xlsx'))
fvs = FVS()
fvs.set_stand(stand, 'PN', 612, 6, 45, 'DF', 110)
fvs.access_db('access_db', directory=path)
fvs.sqlite_db('sqlite_db', directory=path)
fvs.excel_db('excel_db', directory=path)
end_message = """**WORKFLOW 6 created a FULL CRUISE stand from importing plot data from an excel sheet.
It then ran the FVS module to create FVS formatted databases from the stand data. FVS is the US Forest Service's Forest Vegetation Simulator.
Outputs:
FVS Access database "access_db.db" from [fvs_class.access_db()] in desktop/treetopper_outputs/workflow_6/
FVS Suppose file "Suppose.loc" in desktop/treetopper_outputs/workflow_6/. ** FVS Legacy needs a .loc file along with the database.
FVS SQLite database "sqlite_db.db" from [fvs_class.sqlite_db()] in desktop/treetopper_outputs/workflow_6/
FVS Excel database "excel_db.db" from [fvs_class.excel_db()] in desktop/treetopper_outputs/workflow_6/
Plot data .xlsx "example_xlsx_export.xlsx" in desktop/treetopper_outputs/workflow_6/
"""
print(f'\n\n{end_message}')
    def main(workflow_number):
        opts = {
            1: workflow_1,
            2: workflow_2,
            3: workflow_3,
            4: workflow_4,
            5: workflow_5,
            6: workflow_6
        }
        opts[workflow_number](workflow_number)

    print(f"\n\n{'-' * 200}\n\n")
    main(wf)
    print(f"\n\n{'-' * 200}\n\n")
stack-blur.ts | /* eslint-disable no-bitwise -- used for calculations */
/* eslint-disable unicorn/prefer-query-selector -- aiming at backward-compatibility */
/**
* StackBlur - a fast almost Gaussian Blur For Canvas
*
* In case you find this class useful - especially in commercial projects -
* I am not totally unhappy for a small donation to my PayPal account
* [email protected]
*
* Or support me on flattr:
* {@link https://flattr.com/thing/72791/StackBlur-a-fast-almost-Gaussian-Blur-Effect-for-CanvasJavascript}.
*
* @module StackBlur
* @author Mario Klingemann
* Contact: [email protected]
* Website: {@link http://www.quasimondo.com/StackBlurForCanvas/StackBlurDemo.html}
* Twitter: @quasimondo
*
* @copyright (c) 2010 Mario Klingemann
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
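
// Usage sketch (illustrative, not part of the original module): the exported
// `stackBlurImage` helper below blurs raw ImageData in place; a hypothetical
// canvas caller might look like:
//
//   const ctx = canvas.getContext('2d')!;
//   const img = ctx.getImageData(0, 0, canvas.width, canvas.height);
//   ctx.putImageData(stackBlurImage(img, canvas.width, canvas.height, 8, 1), 0, 0);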
const mulTable = [
512, 512, 456, 512, 328, 456, 335, 512, 405, 328, 271, 456, 388, 335, 292, 512, 454, 405, 364, 328, 298, 271, 496,
456, 420, 388, 360, 335, 312, 292, 273, 512, 482, 454, 428, 405, 383, 364, 345, 328, 312, 298, 284, 271, 259, 496,
475, 456, 437, 420, 404, 388, 374, 360, 347, 335, 323, 312, 302, 292, 282, 273, 265, 512, 497, 482, 468, 454, 441,
428, 417, 405, 394, 383, 373, 364, 354, 345, 337, 328, 320, 312, 305, 298, 291, 284, 278, 271, 265, 259, 507, 496,
485, 475, 465, 456, 446, 437, 428, 420, 412, 404, 396, 388, 381, 374, 367, 360, 354, 347, 341, 335, 329, 323, 318,
312, 307, 302, 297, 292, 287, 282, 278, 273, 269, 265, 261, 512, 505, 497, 489, 482, 475, 468, 461, 454, 447, 441,
435, 428, 422, 417, 411, 405, 399, 394, 389, 383, 378, 373, 368, 364, 359, 354, 350, 345, 341, 337, 332, 328, 324,
320, 316, 312, 309, 305, 301, 298, 294, 291, 287, 284, 281, 278, 274, 271, 268, 265, 262, 259, 257, 507, 501, 496,
491, 485, 480, 475, 470, 465, 460, 456, 451, 446, 442, 437, 433, 428, 424, 420, 416, 412, 408, 404, 400, 396, 392,
388, 385, 381, 377, 374, 370, 367, 363, 360, 357, 354, 350, 347, 344, 341, 338, 335, 332, 329, 326, 323, 320, 318,
315, 312, 310, 307, 304, 302, 299, 297, 294, 292, 289, 287, 285, 282, 280, 278, 275, 273, 271, 269, 267, 265, 263,
261, 259
];
const shgTable = [
9, 11, 12, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18,
18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24
];
/**
* @param {ImageData} imageData
* @param {Integer} width
* @param {Integer} height
* @param {Float} radius
* @returns {ImageData}
*/
function processImageDataRGBA(imageData: ImageData, width: number, height: number, radius: number) {
const pixels = imageData.data;
const div = 2 * radius + 1;
// const w4 = width << 2;
const widthMinus1 = width - 1;
const heightMinus1 = height - 1;
const radiusPlus1 = radius + 1;
const sumFactor = (radiusPlus1 * (radiusPlus1 + 1)) / 2;
const stackStart: BlurStack = new BlurStack();
let stack = stackStart;
let stackEnd;
for (let i = 1; i < div; i++) {
stack = stack.next = new BlurStack();
if (i === radiusPlus1) {
stackEnd = stack;
}
}
stack.next = stackStart;
let stackIn: BlurStack,
stackOut = null,
yw = 0,
yi = 0;
const mulSum = mulTable[radius];
const shgSum = shgTable[radius];
for (let y = 0; y < height; y++) {
stack = stackStart;
const pr = pixels[yi],
pg = pixels[yi + 1],
pb = pixels[yi + 2],
pa = pixels[yi + 3];
for (let i = 0; i < radiusPlus1; i++) {
stack.r = pr;
stack.g = pg;
stack.b = pb;
stack.a = pa;
if (stack.next) stack = stack.next;
}
let rInSum = 0,
gInSum = 0,
bInSum = 0,
aInSum = 0,
rOutSum = radiusPlus1 * pr,
gOutSum = radiusPlus1 * pg,
bOutSum = radiusPlus1 * pb,
aOutSum = radiusPlus1 * pa,
rSum = sumFactor * pr,
gSum = sumFactor * pg,
bSum = sumFactor * pb,
aSum = sumFactor * pa;
for (let i = 1; i < radiusPlus1; i++) {
const p = yi + ((widthMinus1 < i ? widthMinus1 : i) << 2);
const r = pixels[p],
g = pixels[p + 1],
b = pixels[p + 2],
a = pixels[p + 3];
const rbs = radiusPlus1 - i;
rSum += (stack.r = r) * rbs;
gSum += (stack.g = g) * rbs;
bSum += (stack.b = b) * rbs;
aSum += (stack.a = a) * rbs;
rInSum += r;
gInSum += g;
bInSum += b;
aInSum += a;
if (stack.next) stack = stack.next;
}
stackIn = stackStart;
stackOut = stackEnd;
for (let x = 0; x < width; x++) {
const paInitial = (aSum * mulSum) >> shgSum;
pixels[yi + 3] = paInitial;
if (paInitial !== 0) {
const a = 255 / paInitial;
pixels[yi] = ((rSum * mulSum) >> shgSum) * a;
pixels[yi + 1] = ((gSum * mulSum) >> shgSum) * a;
pixels[yi + 2] = ((bSum * mulSum) >> shgSum) * a;
} else {
pixels[yi] = pixels[yi + 1] = pixels[yi + 2] = 0;
}
rSum -= rOutSum;
gSum -= gOutSum;
bSum -= bOutSum;
aSum -= aOutSum;
rOutSum -= stackIn.r;
gOutSum -= stackIn.g;
bOutSum -= stackIn.b;
aOutSum -= stackIn.a;
let p = x + radius + 1;
p = (yw + (p < widthMinus1 ? p : widthMinus1)) << 2;
rInSum += stackIn.r = pixels[p];
gInSum += stackIn.g = pixels[p + 1];
bInSum += stackIn.b = pixels[p + 2];
aInSum += stackIn.a = pixels[p + 3];
rSum += rInSum;
gSum += gInSum;
bSum += bInSum;
aSum += aInSum;
if (stackIn.next) stackIn = stackIn.next;
if (stackOut) {
const {r, g, b, a} = stackOut;
rOutSum += r;
gOutSum += g;
bOutSum += b;
aOutSum += a;
rInSum -= r;
gInSum -= g;
bInSum -= b;
aInSum -= a;
stackOut = stackOut.next;
}
yi += 4;
}
yw += width;
}
for (let x = 0; x < width; x++) {
yi = x << 2;
let pr = pixels[yi],
pg = pixels[yi + 1],
pb = pixels[yi + 2],
pa = pixels[yi + 3],
rOutSum = radiusPlus1 * pr,
gOutSum = radiusPlus1 * pg,
bOutSum = radiusPlus1 * pb,
aOutSum = radiusPlus1 * pa,
rSum = sumFactor * pr,
gSum = sumFactor * pg,
bSum = sumFactor * pb,
aSum = sumFactor * pa;
stack = stackStart;
for (let i = 0; i < radiusPlus1; i++) {
stack.r = pr;
stack.g = pg;
stack.b = pb;
stack.a = pa;
if (stack.next) stack = stack.next;
}
let yp = width;
let gInSum = 0,
bInSum = 0,
aInSum = 0,
rInSum = 0;
for (let i = 1; i <= radius; i++) {
yi = (yp + x) << 2;
const rbs = radiusPlus1 - i;
rSum += (stack.r = pr = pixels[yi]) * rbs;
gSum += (stack.g = pg = pixels[yi + 1]) * rbs;
bSum += (stack.b = pb = pixels[yi + 2]) * rbs;
aSum += (stack.a = pa = pixels[yi + 3]) * rbs;
rInSum += pr;
gInSum += pg;
bInSum += pb;
aInSum += pa;
if (stack.next) stack = stack.next;
if (i < heightMinus1) {
yp += width;
}
}
yi = x;
stackIn = stackStart;
stackOut = stackEnd;
for (let y = 0; y < height; y++) {
let p = yi << 2;
pixels[p + 3] = pa = (aSum * mulSum) >> shgSum;
if (pa > 0) {
pa = 255 / pa;
pixels[p] = ((rSum * mulSum) >> shgSum) * pa;
pixels[p + 1] = ((gSum * mulSum) >> shgSum) * pa;
pixels[p + 2] = ((bSum * mulSum) >> shgSum) * pa;
} else {
pixels[p] = pixels[p + 1] = pixels[p + 2] = 0;
}
rSum -= rOutSum;
gSum -= gOutSum;
bSum -= bOutSum;
aSum -= aOutSum;
rOutSum -= stackIn.r;
gOutSum -= stackIn.g;
bOutSum -= stackIn.b;
aOutSum -= stackIn.a;
p = (x + ((p = y + radiusPlus1) < heightMinus1 ? p : heightMinus1) * width) << 2;
rSum += rInSum += stackIn.r = pixels[p];
gSum += gInSum += stackIn.g = pixels[p + 1];
bSum += bInSum += stackIn.b = pixels[p + 2];
aSum += aInSum += stackIn.a = pixels[p + 3];
if (stackIn.next) stackIn = stackIn.next;
if (stackOut) {
rOutSum += pr = stackOut.r;
gOutSum += pg = stackOut.g;
bOutSum += pb = stackOut.b;
aOutSum += pa = stackOut.a;
rInSum -= pr;
gInSum -= pg;
bInSum -= pb;
aInSum -= pa;
stackOut = stackOut.next;
}
yi += width;
}
}
return imageData;
}
/**
* @param {ImageData} imageData
* @param {Integer} topX
* @param {Integer} topY
* @param {Integer} width
* @param {Integer} height
* @param {Float} radius
* @returns {ImageData}
*/
function processImageDataRGB(imageData: ImageData, width: number, height: number, radius: number) {
const pixels = imageData.data;
const div = 2 * radius + 1;
// const w4 = width << 2;
const widthMinus1 = width - 1;
const heightMinus1 = height - 1;
const radiusPlus1 = radius + 1;
const sumFactor = (radiusPlus1 * (radiusPlus1 + 1)) / 2;
const stackStart = new BlurStack();
let stack = stackStart;
let stackEnd;
for (let i = 1; i < div; i++) {
stack = stack.next = new BlurStack();
if (i === radiusPlus1) {
stackEnd = stack;
}
}
stack.next = stackStart;
let stackIn = null;
let stackOut = null;
const mulSum = mulTable[radius];
const shgSum = shgTable[radius];
let p, rbs;
let yw = 0,
yi = 0;
for (let y = 0; y < height; y++) {
let pr = pixels[yi],
pg = pixels[yi + 1],
pb = pixels[yi + 2],
rOutSum = radiusPlus1 * pr,
gOutSum = radiusPlus1 * pg,
bOutSum = radiusPlus1 * pb,
rSum = sumFactor * pr,
gSum = sumFactor * pg,
bSum = sumFactor * pb;
stack = stackStart;
for (let i = 0; i < radiusPlus1; i++) {
stack.r = pr;
stack.g = pg;
stack.b = pb;
if (stack.next) stack = stack.next;
}
let rInSum = 0,
gInSum = 0,
bInSum = 0;
for (let i = 1; i < radiusPlus1; i++) {
p = yi + ((widthMinus1 < i ? widthMinus1 : i) << 2);
rSum += (stack.r = pr = pixels[p]) * (rbs = radiusPlus1 - i);
gSum += (stack.g = pg = pixels[p + 1]) * rbs;
bSum += (stack.b = pb = pixels[p + 2]) * rbs;
rInSum += pr;
gInSum += pg;
bInSum += pb;
if (stack.next) stack = stack.next;
}
stackIn = stackStart;
stackOut = stackEnd;
for (let x = 0; x < width; x++) {
pixels[yi] = (rSum * mulSum) >> shgSum;
pixels[yi + 1] = (gSum * mulSum) >> shgSum;
pixels[yi + 2] = (bSum * mulSum) >> shgSum;
rSum -= rOutSum;
gSum -= gOutSum;
bSum -= bOutSum;
if (stackIn && stackOut) {
rOutSum -= stackIn.r;
gOutSum -= stackIn.g;
bOutSum -= stackIn.b;
p = (yw + ((p = x + radius + 1) < widthMinus1 ? p : widthMinus1)) << 2;
rInSum += stackIn.r = pixels[p];
gInSum += stackIn.g = pixels[p + 1];
bInSum += stackIn.b = pixels[p + 2];
rSum += rInSum;
gSum += gInSum;
bSum += bInSum;
stackIn = stackIn.next;
rOutSum += pr = stackOut.r;
gOutSum += pg = stackOut.g;
bOutSum += pb = stackOut.b;
rInSum -= pr;
gInSum -= pg;
bInSum -= pb;
stackOut = stackOut.next;
}
yi += 4;
}
yw += width;
}
for (let x = 0; x < width; x++) {
yi = x << 2;
let pr = pixels[yi],
pg = pixels[yi + 1],
pb = pixels[yi + 2],
rOutSum = radiusPlus1 * pr,
gOutSum = radiusPlus1 * pg,
bOutSum = radiusPlus1 * pb,
rSum = sumFactor * pr,
gSum = sumFactor * pg,
bSum = sumFactor * pb;
stack = stackStart;
for (let i = 0; i < radiusPlus1; i++) {
stack.r = pr;
stack.g = pg;
stack.b = pb;
if (stack.next) stack = stack.next;
}
let rInSum = 0,
gInSum = 0,
bInSum = 0;
for (let i = 1, yp = width; i <= radius; i++) {
yi = (yp + x) << 2;
rSum += (stack.r = pr = pixels[yi]) * (rbs = radiusPlus1 - i);
gSum += (stack.g = pg = pixels[yi + 1]) * rbs;
bSum += (stack.b = pb = pixels[yi + 2]) * rbs;
rInSum += pr;
gInSum += pg;
bInSum += pb;
if (stack.next) stack = stack.next;
if (i < heightMinus1) {
yp += width;
}
}
yi = x;
stackIn = stackStart;
stackOut = stackEnd;
for (let y = 0; y < height; y++) {
p = yi << 2;
pixels[p] = (rSum * mulSum) >> shgSum;
pixels[p + 1] = (gSum * mulSum) >> shgSum;
pixels[p + 2] = (bSum * mulSum) >> shgSum;
rSum -= rOutSum;
gSum -= gOutSum;
bSum -= bOutSum;
if (stackIn && stackOut) {
rOutSum -= stackIn.r;
gOutSum -= stackIn.g;
bOutSum -= stackIn.b;
p = (x + ((p = y + radiusPlus1) < heightMinus1 ? p : heightMinus1) * width) << 2;
rSum += rInSum += stackIn.r = pixels[p];
gSum += gInSum += stackIn.g = pixels[p + 1];
bSum += bInSum += stackIn.b = pixels[p + 2];
stackIn = stackIn.next;
rOutSum += pr = stackOut.r;
gOutSum += pg = stackOut.g;
bOutSum += pb = stackOut.b;
rInSum -= pr;
gInSum -= pg;
bInSum -= pb;
stackOut = stackOut.next;
}
yi += width;
}
}
return imageData;
}
/**
*
*/
export class BlurStack {
/**
* Set properties.
*/
r: number;
g: number;
b: number;
a: number;
next?: BlurStack;
constructor() {
this.r = 0;
this.g = 0;
this.b = 0;
this.a = 0;
}
}
export const stackBlurImage = (
imageData: ImageData,
width: number,
height: number,
radius: number,
blurAlphaChannel: number
): ImageData => {
if (isNaN(radius) || radius < 1) {
return imageData;
}
radius |= 0;
if (blurAlphaChannel) {
return processImageDataRGBA(imageData, width, height, radius);
} else {
return processImageDataRGB(imageData, width, height, radius);
}
};
Experimenter.rs |
pub use problem::problem::ProblemTrait;
pub use ExperimentResults::results::experimentResults;
pub mod experimenter{
extern crate time;
use experimenter::experimentResults;
pub fn run_experiment<Trait: super::ProblemTrait>(problem: Trait) -> Box<experimentResults> {
println!("Running experiments");
let mut measurements = vec![];
for _ in 0..100{
let now = time::precise_time_ns();
let validity = problem.calculate();
let dur = time::precise_time_ns()-now;
measurements.push(dur);
if !validity{
println!("Algorithm failed!")
}
}
        return Box::from(experimentResults {
            average: average(measurements.as_mut_slice()),
            median: median(measurements.as_mut_slice()),
            high: high(measurements.as_mut_slice()),
            low: low(measurements.as_mut_slice()),
        });
//printall(measurements.as_mut_slice());
}
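    // Usage sketch (assumes some hypothetical `MyProblem` type implementing ProblemTrait):
    //     let results = run_experiment(MyProblem::new());
    //     results.PrintMe();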
fn average(nums: &mut [u64]) -> f32{
let mut total : u64 = 0;
for x in nums.iter(){
total = total + x
}
let avg = total as f32 / nums.len() as f32;
return avg;
}
fn median(numbs: &mut [u64]) -> u64{
numbs.sort();
let mid = numbs.len()/2;
return numbs[mid];
}
fn high(numbs: &mut [u64]) -> u64 {
numbs.sort();
let high = numbs.len()-1;
return numbs[high];
}
fn low (numbs: &mut [u64]) -> u64 {
numbs.sort();
let low = 0;
return numbs[low];
}
fn printall(numbs: &mut [u64]){
for x in numbs.iter(){
println!("{}", x)
}
}
    pub fn print_indi(results: &[Box<experimentResults>]) {
for x in results.iter(){
x.PrintMe();
}
}
pub fn print_avg(results: &[Box<experimentResults>]){
println!("Printing Averages:");
println!("Average-Average: {}ns", avg_avg(results));
println!("Average-Median: {}ns", avg_med(results));
println!("Average-High: {}ns", avg_high(results));
println!("Average-Low: {}ns", avg_low(results));
}
fn avg_avg(results: &[Box<experimentResults>]) -> f32{
let mut total : u64 = 0;
for x in results{
total = total + x.average as u64
}
let avg = total as f32 / results.len() as f32;
return avg;
}
fn avg_med(results: &[Box<experimentResults>]) -> f32{
let mut total : u64 = 0;
for x in results{
total = total + x.median
}
let avg = total as f32 / results.len() as f32;
return avg;
}
fn avg_high(results: &[Box<experimentResults>]) -> f32{
let mut total : u64 = 0;
for x in results{
total = total + x.high
}
let avg = total as f32 / results.len() as f32;
return avg;
}
fn avg_low(results: &[Box<experimentResults>]) -> f32{
let mut total : u64 = 0;
for x in results{
total = total + x.low
}
let avg = total as f32 / results.len() as f32;
return avg;
}
}
hosted.rs | use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::TryInto;
use std::io::{Read, Write};
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, ToSocketAddrs};
use std::sync::{Arc, Mutex};
use std::thread_local;
use crate::{Result, PID, TID};
mod mem;
pub use mem::*;
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct ProcessKey([u8; 16]);
impl ProcessKey {
pub fn new(key: [u8; 16]) -> ProcessKey {
ProcessKey(key)
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct ThreadInit {}
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct ProcessInit {
pub key: ProcessKey,
}
pub struct ProcessArgsAsThread<F: FnOnce()> {
main: F,
name: String,
}
impl<F> ProcessArgsAsThread<F>
where
F: FnOnce(),
{
pub fn new(name: &str, main: F) -> ProcessArgsAsThread<F> {
ProcessArgsAsThread {
main,
name: name.to_owned(),
}
}
}
pub struct ProcessHandleAsThread(std::thread::JoinHandle<()>);
/// If no connection exists, create a new connection to the server. This means
/// our parent PID will be PID1. Otherwise, reuse the same connection.
pub fn create_process_pre_as_thread<F>(
_args: &ProcessArgsAsThread<F>,
) -> core::result::Result<ProcessInit, crate::Error>
where
F: FnOnce(),
{
ensure_connection()?;
// Ensure there is a connection, because after this function returns
// we'll make a syscall with CreateProcess(). This should only need
// to happen for PID1.
Ok(ProcessInit {
key: PROCESS_KEY
.with(|pk| *pk.borrow())
.unwrap_or_else(default_process_key),
})
}
pub fn create_process_post_as_thread<F>(
args: ProcessArgsAsThread<F>,
init: ProcessInit,
pid: PID,
) -> core::result::Result<ProcessHandleAsThread, crate::Error>
where
F: FnOnce() + Send + 'static,
{
let server_address = xous_address();
let f = args.main;
let thread_main = std::thread::Builder::new()
.name(args.name)
.spawn(move || {
set_xous_address(server_address);
THREAD_ID.with(|tid| *tid.borrow_mut() = 1);
PROCESS_ID.with(|p| *p.borrow_mut() = pid);
XOUS_SERVER_CONNECTION.with(|xsc| {
let mut xsc = xsc.borrow_mut();
match xous_connect_impl(server_address, &init.key) {
Ok(a) => {
*xsc = Some(a);
Ok(())
}
Err(_) => Err(crate::Error::InternalError),
}
})?;
crate::create_thread(f)
})
.map_err(|_| crate::Error::InternalError)?
.join()
.unwrap()
.unwrap();
Ok(ProcessHandleAsThread(thread_main.0))
}
pub fn wait_process_as_thread(joiner: ProcessHandleAsThread) -> crate::SysCallResult {
    joiner.0.join().map(|_| Result::Ok).map_err(|_x| {
        // panic!("wait error: {:?}", x);
        crate::Error::InternalError
    })
}
pub struct ProcessArgs {
command: String,
name: String,
}
impl ProcessArgs {
pub fn new(name: &str, command: String) -> ProcessArgs {
ProcessArgs {
command,
name: name.to_owned(),
}
}
}
#[derive(Debug)]
pub struct ProcessHandle(std::process::Child);
/// If no connection exists, create a new connection to the server. This means
/// our parent PID will be PID1. Otherwise, reuse the same connection.
pub fn create_process_pre(_args: &ProcessArgs) -> core::result::Result<ProcessInit, crate::Error> {
ensure_connection()?;
// Ensure there is a connection, because after this function returns
// we'll make a syscall with CreateProcess(). This should only need
// to happen for PID1.
Ok(ProcessInit {
key: PROCESS_KEY
.with(|pk| *pk.borrow())
.unwrap_or_else(default_process_key),
})
}
pub fn create_process_post(
args: ProcessArgs,
init: ProcessInit,
pid: PID,
) -> core::result::Result<ProcessHandle, crate::Error> {
use std::process::Command;
let server_env = format!("{}", xous_address());
let pid_env = format!("{}", pid);
let process_name_env = args.name.to_string();
let process_key_env = hex::encode(&init.key.0);
let (shell, args) = if cfg!(windows) {
("cmd", ["/C", &args.command])
} else if cfg!(unix) {
("sh", ["-c", &args.command])
} else {
panic!("unrecognized platform -- don't know how to shell out");
};
// println!("Launching process...");
Command::new(shell)
.args(&args)
.env("XOUS_SERVER", server_env)
.env("XOUS_PID", pid_env)
.env("XOUS_PROCESS_NAME", process_name_env)
.env("XOUS_PROCESS_KEY", process_key_env)
.spawn()
.map(ProcessHandle)
.map_err(|_| {
// eprintln!("couldn't start command: {}", e);
crate::Error::InternalError
})
}
pub fn wait_process(mut joiner: ProcessHandle) -> crate::SysCallResult {
joiner
.0
.wait()
.or(Err(crate::Error::InternalError))
.and_then(|e| {
if e.success() {
Ok(crate::Result::Ok)
} else {
Err(crate::Error::UnknownError)
}
})
}
pub struct WaitHandle<T>(std::thread::JoinHandle<T>);
#[derive(Clone)]
struct ServerConnection {
send: Arc<Mutex<TcpStream>>,
recv: Arc<Mutex<TcpStream>>,
mailbox: Arc<Mutex<HashMap<TID, Result>>>,
}
pub fn thread_to_args(call: usize, _init: &ThreadInit) -> [usize; 8] {
[call, 0, 0, 0, 0, 0, 0, 0]
}
pub fn process_to_args(call: usize, init: &ProcessInit) -> [usize; 8] {
[
call,
u32::from_le_bytes(init.key.0[0..4].try_into().unwrap()) as _,
u32::from_le_bytes(init.key.0[4..8].try_into().unwrap()) as _,
u32::from_le_bytes(init.key.0[8..12].try_into().unwrap()) as _,
u32::from_le_bytes(init.key.0[12..16].try_into().unwrap()) as _,
0,
0,
0,
]
}
pub fn args_to_thread(
_a1: usize,
_a2: usize,
_a3: usize,
_a4: usize,
_a5: usize,
_a6: usize,
_a7: usize,
) -> core::result::Result<ThreadInit, crate::Error> {
Ok(ThreadInit {})
}
pub fn args_to_process(
a1: usize,
a2: usize,
a3: usize,
a4: usize,
_a5: usize,
_a6: usize,
_a7: usize,
) -> core::result::Result<ProcessInit, crate::Error> {
let mut v = vec![];
v.extend_from_slice(&(a1 as u32).to_le_bytes());
v.extend_from_slice(&(a2 as u32).to_le_bytes());
v.extend_from_slice(&(a3 as u32).to_le_bytes());
v.extend_from_slice(&(a4 as u32).to_le_bytes());
let mut key = [0u8; 16];
key.copy_from_slice(&v);
Ok(ProcessInit {
key: ProcessKey(key),
})
}
thread_local!(static NETWORK_CONNECT_ADDRESS: RefCell<Option<SocketAddr>> = RefCell::new(None));
thread_local!(static XOUS_SERVER_CONNECTION: RefCell<Option<ServerConnection>> = RefCell::new(None));
thread_local!(static THREAD_ID: RefCell<TID> = RefCell::new(1));
thread_local!(static PROCESS_ID: RefCell<PID> = RefCell::new(PID::new(1).unwrap()));
thread_local!(static PROCESS_KEY: RefCell<Option<ProcessKey>> = RefCell::new(None));
thread_local!(static CALL_FOR_THREAD: RefCell<Arc<Mutex<HashMap<TID, crate::SysCall>>>> = RefCell::new(Arc::new(Mutex::new(HashMap::new()))));
fn default_xous_address() -> SocketAddr {
std::env::var("XOUS_SERVER")
.map(|s| {
s.to_socket_addrs()
.expect("invalid server address")
.next()
.expect("unable to resolve server address")
})
.unwrap_or_else(|_| SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0))
}
fn default_process_key() -> ProcessKey {
std::env::var("XOUS_PROCESS_KEY")
.map(|s| {
let mut base = ProcessKey([0u8; 16]);
hex::decode_to_slice(s, &mut base.0).unwrap();
base
})
.unwrap_or(ProcessKey([0u8; 16]))
}
pub fn set_process_key(new_key: &[u8; 16]) {
PROCESS_KEY.with(|pk| *pk.borrow_mut() = Some(ProcessKey(*new_key)));
}
/// Set the network address for this particular thread.
pub fn set_xous_address(new_address: SocketAddr) {
NETWORK_CONNECT_ADDRESS.with(|nca| {
let mut address = nca.borrow_mut();
*address = Some(new_address);
XOUS_SERVER_CONNECTION.with(|xsc| *xsc.borrow_mut() = None);
});
}
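
// Usage sketch (the address is illustrative): point this thread at a local
// Xous server before any syscalls are made:
//     set_xous_address("127.0.0.1:1234".parse().unwrap());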
/// Get the network address for this particular thread.
fn xous_address() -> SocketAddr {
NETWORK_CONNECT_ADDRESS
.with(|nca| *nca.borrow())
.unwrap_or_else(default_xous_address)
}
pub fn create_thread_0_pre<U>(_f: &fn() -> U) -> core::result::Result<ThreadInit, crate::Error>
where
U: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_1_pre<U>(
_f: &fn(usize) -> U,
_arg1: &usize,
) -> core::result::Result<ThreadInit, crate::Error>
where
U: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_2_pre<U>(
_f: &fn(usize, usize) -> U,
_arg1: &usize,
_arg2: &usize,
) -> core::result::Result<ThreadInit, crate::Error>
where
U: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_3_pre<U>(
_f: &fn(usize, usize, usize) -> U,
_arg1: &usize,
_arg2: &usize,
_arg3: &usize,
) -> core::result::Result<ThreadInit, crate::Error>
where
U: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_4_pre<U>(
_f: &fn(usize, usize, usize, usize) -> U,
_arg1: &usize,
_arg2: &usize,
_arg3: &usize,
_arg4: &usize,
) -> core::result::Result<ThreadInit, crate::Error>
where
U: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_0_post<U>(
f: fn() -> U,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
U: Send + 'static,
{
create_thread_post(move || f(), thread_id)
}
pub fn create_thread_1_post<U>(
f: fn(usize) -> U,
arg1: usize,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
U: Send + 'static,
{
create_thread_post(move || f(arg1), thread_id)
}
pub fn create_thread_2_post<U>(
f: fn(usize, usize) -> U,
arg1: usize,
arg2: usize,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
U: Send + 'static,
{
create_thread_post(move || f(arg1, arg2), thread_id)
}
pub fn create_thread_3_post<U>(
f: fn(usize, usize, usize) -> U,
arg1: usize,
arg2: usize,
arg3: usize,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
U: Send + 'static,
{
create_thread_post(move || f(arg1, arg2, arg3), thread_id)
}
pub fn create_thread_4_post<U>(
f: fn(usize, usize, usize, usize) -> U,
arg1: usize,
arg2: usize,
arg3: usize,
arg4: usize,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
U: Send + 'static,
{
create_thread_post(move || f(arg1, arg2, arg3, arg4), thread_id)
}
pub fn create_thread_simple_pre<T, U>(
_f: &fn(T) -> U,
_arg: &T,
) -> core::result::Result<ThreadInit, crate::Error>
where
T: Send + 'static,
U: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_simple_post<T, U>(
f: fn(T) -> U,
arg: T,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
T: Send + 'static,
U: Send + 'static,
{
create_thread_post(move || f(arg), thread_id)
}
pub fn create_thread_pre<F, T>(_f: &F) -> core::result::Result<ThreadInit, crate::Error>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
Ok(ThreadInit {})
}
pub fn create_thread_post<F, U>(
f: F,
thread_id: TID,
) -> core::result::Result<WaitHandle<U>, crate::Error>
where
F: FnOnce() -> U,
F: Send + 'static,
U: Send + 'static,
{
let server_address = xous_address();
let server_connection =
XOUS_SERVER_CONNECTION.with(|xsc| xsc.borrow().as_ref().unwrap().clone());
let process_id = PROCESS_ID.with(|pid| *pid.borrow());
let call_for_thread = CALL_FOR_THREAD.with(|cft| cft.borrow().clone());
Ok(std::thread::Builder::new()
.spawn(move || {
set_xous_address(server_address);
THREAD_ID.with(|tid| *tid.borrow_mut() = thread_id);
PROCESS_ID.with(|pid| *pid.borrow_mut() = process_id);
XOUS_SERVER_CONNECTION.with(|xsc| *xsc.borrow_mut() = Some(server_connection));
CALL_FOR_THREAD.with(|cft| *cft.borrow_mut() = call_for_thread);
f()
})
.map(WaitHandle)
.map_err(|_| crate::Error::InternalError)?)
}
pub fn wait_thread<T>(joiner: WaitHandle<T>) -> crate::SysCallResult {
joiner
.0
.join()
.map(|_| Result::Ok)
.map_err(|_| crate::Error::InternalError)
}
pub fn ensure_connection() -> core::result::Result<(), crate::Error> {
XOUS_SERVER_CONNECTION.with(|xsc| {
let mut xsc = xsc.borrow_mut();
if xsc.is_none() {
NETWORK_CONNECT_ADDRESS.with(|nca| {
let addr = nca.borrow().unwrap_or_else(default_xous_address);
let pid1_key = PROCESS_KEY
.with(|pk| *pk.borrow())
.unwrap_or_else(default_process_key);
match xous_connect_impl(addr, &pid1_key) {
Ok(a) => {
*xsc = Some(a);
Ok(())
}
Err(_) => Err(crate::Error::InternalError),
}
})
} else {
Ok(())
}
})
}
fn xous_connect_impl(
addr: SocketAddr,
key: &ProcessKey,
) -> core::result::Result<ServerConnection, ()> {
// eprintln!("Opening connection to Xous server @ {} with key {:?}...", addr, key);
assert_ne!(&key.0, &[0u8; 16]);
match TcpStream::connect(addr) {
Ok(mut conn) => {
conn.write_all(&key.0).unwrap(); // Send key to authenticate us as PID 1
conn.flush().unwrap();
conn.set_nodelay(true).unwrap();
let mut pid = [0u8];
conn.read_exact(&mut pid).unwrap();
PROCESS_ID.with(|process_id| *process_id.borrow_mut() = PID::new(pid[0]).unwrap());
Ok(ServerConnection {
send: Arc::new(Mutex::new(conn.try_clone().unwrap())),
recv: Arc::new(Mutex::new(conn)),
mailbox: Arc::new(Mutex::new(HashMap::new())),
})
}
Err(_e) => {
// eprintln!("Unable to connect to Xous server: {}", _e);
// eprintln!(
// "Ensure Xous is running, or specify this process as an argument to the kernel"
// );
Err(())
}
}
}
#[allow(clippy::too_many_arguments)]
#[no_mangle]
pub fn _xous_syscall(
nr: usize,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
a5: usize,
a6: usize,
a7: usize,
ret: &mut Result,
) {
XOUS_SERVER_CONNECTION.with(|xsc| {
THREAD_ID.with(|tid| {
let call = crate::SysCall::from_args(nr, a1, a2, a3, a4, a5, a6, a7).unwrap();
{
CALL_FOR_THREAD.with(|cft| {
let cft_rc = cft.borrow();
let mut cft_mtx = cft_rc.lock().unwrap();
let tid = *tid.borrow();
assert!(cft_mtx.get(&tid).is_none());
cft_mtx.insert(tid, call)
});
}
let call = crate::SysCall::from_args(nr, a1, a2, a3, a4, a5, a6, a7).unwrap();
let mut xsc_borrowed = xsc.borrow_mut();
let xsc_asmut = xsc_borrowed.as_mut().expect("not connected to server (did you forget to create a thread with xous::create_thread()?)");
loop {
_xous_syscall_to(
nr,
a1,
a2,
a3,
a4,
a5,
a6,
a7,
&call,
xsc_asmut
);
_xous_syscall_result(ret, *tid.borrow(), xsc_asmut);
if *ret != Result::WouldBlock {
return;
}
std::thread::sleep(std::time::Duration::from_millis(50));
}
})
});
}
fn _xous_syscall_result(ret: &mut Result, thread_id: TID, server_connection: &ServerConnection) {
// Check to see if this thread id has an entry in the mailbox already.
// This will block until the hashmap is free.
{
let mut mailbox = server_connection.mailbox.lock().unwrap();
if let Some(entry) = mailbox.get(&thread_id) {
if &Result::BlockedProcess != entry {
*ret = mailbox.remove(&thread_id).unwrap();
return;
}
}
}
// Receive the packet back
loop {
// Now that we have the Stream mutex, temporarily take the Mailbox mutex to see if
// this thread ID is there. If it is, there's no need to read via the network.
// Note that the mailbox mutex is released if it isn't found.
{
let mut mailbox = server_connection.mailbox.lock().unwrap();
if let Some(entry) = mailbox.get(&thread_id) {
if &Result::BlockedProcess != entry {
*ret = mailbox.remove(&thread_id).unwrap();
return;
}
}
}
let mut stream = match server_connection.recv.try_lock() {
Ok(lk) => lk,
Err(std::sync::TryLockError::WouldBlock) => {
std::thread::sleep(std::time::Duration::from_millis(10));
continue;
}
Err(e) => panic!("Receive error: {}", e),
};
// One more check, in case something came in while we waited for the receiver above.
{
let mut mailbox = server_connection.mailbox.lock().unwrap();
if let Some(entry) = mailbox.get(&thread_id) {
if &Result::BlockedProcess != entry {
*ret = mailbox.remove(&thread_id).unwrap();
return;
}
}
}
// This thread_id doesn't exist in the mailbox, so read additional data.
let mut pkt = [0usize; 8];
let mut raw_bytes = [0u8; size_of::<usize>() * 9];
if let Err(e) = stream.read_exact(&mut raw_bytes) {
eprintln!("Server shut down: {}", e);
std::process::exit(0);
}
let mut raw_bytes_chunks = raw_bytes.chunks(size_of::<usize>());
// Read the Thread ID, which comes across first, followed by the 8 words of
// the message data.
let msg_thread_id =
usize::from_le_bytes(raw_bytes_chunks.next().unwrap().try_into().unwrap());
for (pkt_word, word) in pkt.iter_mut().zip(raw_bytes_chunks) {
*pkt_word = usize::from_le_bytes(word.try_into().unwrap());
}
let mut response = Result::from_args(pkt);
// If we got a `WouldBlock`, then we need to retry the whole call
// again. Return and retry.
if response == Result::WouldBlock {
// If the incoming message was for this thread, return it directly.
if msg_thread_id == thread_id {
*ret = response;
return;
}
// Otherwise, add it to the mailbox and try again.
let mut mailbox = server_connection.mailbox.lock().unwrap();
mailbox.insert(msg_thread_id, response);
continue;
}
if response == Result::BlockedProcess {
// println!(" Waiting again");
continue;
}
// Determine if this thread will have a memory packet following it.
let call = CALL_FOR_THREAD.with(|cft| {
let cft_borrowed = cft.borrow();
let mut cft_mtx = cft_borrowed.lock().unwrap();
cft_mtx
.remove(&msg_thread_id)
.expect("thread didn't declare whether it has data")
});
// If the client is passing us memory, remap the array to our own space.
if let Result::Message(msg) = &mut response {
match &mut msg.body {
crate::Message::Move(ref mut memory_message)
| crate::Message::Borrow(ref mut memory_message)
| crate::Message::MutableBorrow(ref mut memory_message) => {
let data = vec![0u8; memory_message.buf.len()];
let mut data = std::mem::ManuallyDrop::new(data);
if let Err(e) = stream.read_exact(&mut data) {
eprintln!("Server shut down: {}", e);
std::process::exit(0);
}
data.shrink_to_fit();
assert_eq!(data.len(), data.capacity());
let len = data.len();
let addr = data.as_mut_ptr();
memory_message.buf = unsafe { crate::MemoryRange::new(addr as _, len).unwrap() };
}
_ => (),
}
}
// If the original call contained memory, then ensure the memory we get back is correct.
if let Some(mem) = call.memory() {
if call.is_borrow() || call.is_mutableborrow() {
// Read the buffer back from the remote host.
use core::slice;
let mut data = unsafe { slice::from_raw_parts_mut(mem.as_mut_ptr(), mem.len()) };
// If it's a Borrow, verify the contents haven't changed.
let previous_data = if call.is_borrow() {
Some(data.to_vec())
} else {
None
};
if let Err(e) = stream.read_exact(&mut data) {
eprintln!("Server shut down: {}", e);
std::process::exit(0);
}
// If it is an immutable borrow, verify the contents haven't changed somehow
if let Some(previous_data) = previous_data {
assert_eq!(data, previous_data.as_slice());
}
}
if call.is_move() {
// In a hosted environment, the message contents are leaked when
// it gets converted into a MemoryMessage. Now that the call is
// complete, free the memory.
mem::unmap_memory_post(mem).unwrap();
}
// If we're returning memory to the Server, then reconstitute the buffer we just passed,
// and Drop it so it can be freed.
if call.is_return_memory() {
let rebuilt =
unsafe { Vec::from_raw_parts(mem.as_mut_ptr(), mem.len(), mem.len()) };
drop(rebuilt);
}
}
// Now that we have the Stream mutex, temporarily take the Mailbox mutex to see if
// this thread ID is there. If it is, there's no need to read via the network.
// Note that the mailbox mutex is released if it isn't found.
{
// If the incoming message was for this thread, return it directly.
if msg_thread_id == thread_id {
*ret = response;
return;
}
// Otherwise, add it to the mailbox and try again.
let mut mailbox = server_connection.mailbox.lock().unwrap();
mailbox.insert(msg_thread_id, response);
}
}
}
#[allow(clippy::too_many_arguments)]
#[no_mangle]
fn _xous_syscall_to(
nr: usize,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
a5: usize,
a6: usize,
a7: usize,
call: &crate::SysCall,
xsc: &mut ServerConnection,
) {
// println!(
// "Making Syscall: {:?}",
// crate::SysCall::from_args(nr, a1, a2, a3, a4, a5, a6, a7).unwrap()
// );
// Send the packet to the server
let mut capacity = 9 * core::mem::size_of::<usize>();
if let Some(mem) = call.memory() {
capacity += mem.len();
}
let mut pkt = Vec::with_capacity(capacity);
THREAD_ID.with(|tid| pkt.extend_from_slice(&tid.borrow().to_le_bytes()));
for word in &[nr, a1, a2, a3, a4, a5, a6, a7] {
pkt.extend_from_slice(&word.to_le_bytes());
}
// Also send memory, if it's present.
if let Some(memory) = call.memory() {
use core::slice;
let data: &[u8] = unsafe { slice::from_raw_parts(memory.as_ptr(), memory.len()) };
pkt.extend_from_slice(data);
}
let mut stream = xsc.send.lock().unwrap();
if let Err(e) = stream.write_all(&pkt) {
eprintln!("Server shut down: {}", e);
std::process::exit(0);
}
// stream.flush().unwrap();
}
App.js | // @flow
import React, { Component, PropTypes } from 'react';
export default class App extends Component {
static propTypes = {
children: PropTypes.element.isRequired,
};
render() {
return (
<div>
{this.props.children}
</div>
);
}
}
process_create_token_governance.rs | #![cfg(feature = "test-bpf")]
mod program_test;
use solana_program_test::*;
use program_test::*;
use solana_sdk::{signature::Keypair, signer::Signer};
use spl_governance::error::GovernanceError;
use spl_token::error::TokenError;
#[tokio::test]
async fn test_create_token_governance() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let governed_token_cookie = governance_test.with_governed_token().await;
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
// Act
let token_governance_cookie = governance_test
.with_token_governance(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
)
.await
.unwrap();
// Assert
let token_governance_account = governance_test
.get_governance_account(&token_governance_cookie.address)
.await;
assert_eq!(token_governance_cookie.account, token_governance_account);
let token_account = governance_test
.get_token_account(&governed_token_cookie.address)
.await;
assert_eq!(token_governance_cookie.address, token_account.owner);
}
#[tokio::test]
async fn test_create_token_governance_without_transferring_token_owner() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let mut governed_token_cookie = governance_test.with_governed_token().await;
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
governed_token_cookie.transfer_token_owner = false;
// Act
let token_governance_cookie = governance_test
.with_token_governance(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
)
.await
.unwrap();
// Assert
let token_governance_account = governance_test
.get_governance_account(&token_governance_cookie.address)
.await;
assert_eq!(token_governance_cookie.account, token_governance_account);
let token_account = governance_test
.get_token_account(&governed_token_cookie.address)
.await;
assert_eq!(
governed_token_cookie.token_owner.pubkey(),
token_account.owner
);
}
#[tokio::test]
async fn test_create_token_governance_without_transferring_token_owner_with_invalid_token_owner_error(
) {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let mut governed_token_cookie = governance_test.with_governed_token().await;
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
governed_token_cookie.transfer_token_owner = false;
governed_token_cookie.token_owner = Keypair::new();
// Act
let err = governance_test
.with_token_governance(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
)
.await
.err()
.unwrap();
// Assert
assert_eq!(err, GovernanceError::InvalidTokenOwner.into());
}
#[tokio::test]
async fn test_create_token_governance_without_transferring_token_owner_with_owner_not_signed_error()
{
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let mut governed_token_cookie = governance_test.with_governed_token().await;
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
governed_token_cookie.transfer_token_owner = false;
// Act
let err = governance_test
.with_token_governance_using_instruction(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
|i| {
i.accounts[3].is_signer = false; // governed_token_owner
},
Some(&[]),
)
.await
.err()
.unwrap();
// Assert
assert_eq!(err, GovernanceError::TokenOwnerMustSign.into());
}
#[tokio::test]
async fn test_create_token_governance_with_invalid_token_owner_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let realm_cookie = governance_test.with_realm().await;
let mut governed_token_cookie = governance_test.with_governed_token().await;
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
governed_token_cookie.token_owner = Keypair::new();
// Act
let err = governance_test
.with_token_governance(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
)
.await
.err()
.unwrap();
// Assert
assert_eq!(err, TokenError::OwnerMismatch.into());
}
#[tokio::test]
async fn test_create_token_governance_with_invalid_realm_error() {
// Arrange
let mut governance_test = GovernanceProgramTest::start_new().await;
let mut realm_cookie = governance_test.with_realm().await;
let governed_token_cookie = governance_test.with_governed_token().await;
let token_owner_record_cookie = governance_test
.with_community_token_deposit(&realm_cookie)
.await;
let token_governance_cookie = governance_test
.with_token_governance(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
)
.await
.unwrap();
// try to use Governance account other than Realm as realm
realm_cookie.address = token_governance_cookie.address;
// Act
let err = governance_test
.with_token_governance(
&realm_cookie,
&governed_token_cookie,
&token_owner_record_cookie,
)
.await
.err()
.unwrap();
// Assert
assert_eq!(err, GovernanceError::InvalidAccountType.into());
}
bot.py | import os
import sys
import time
import shlex
import shutil
import random
import inspect
import logging
import asyncio
import pathlib
import traceback
import math
import re
import aiohttp
import discord
import colorlog
from io import BytesIO, StringIO
from functools import wraps
from textwrap import dedent
from datetime import timedelta
from collections import defaultdict
from discord.enums import ChannelType
from discord.ext.commands.bot import _get_variable
from . import exceptions
from . import downloader
from .playlist import Playlist
from .player import MusicPlayer
from .entry import StreamPlaylistEntry
from .opus_loader import load_opus_lib
from .config import Config, ConfigDefaults
from .permissions import Permissions, PermissionsDefaults
from .constructs import SkipState, Response, VoiceStateUpdate
from .utils import load_file, write_file, fixg, ftimedelta, _func_
from .constants import VERSION as BOTVERSION
from .constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
load_opus_lib()
log = logging.getLogger(__name__)
class MusicBot(discord.Client):
def __init__(self, config_file=None, perms_file=None):
try:
sys.stdout.write("\x1b]2;MusicBot {}\x07".format(BOTVERSION))
except:
pass
if config_file is None:
config_file = ConfigDefaults.options_file
if perms_file is None:
perms_file = PermissionsDefaults.perms_file
self.players = {}
self.exit_signal = None
self.init_ok = False
self.cached_app_info = None
self.last_status = None
self.config = Config(config_file)
self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])
self.blacklist = set(load_file(self.config.blacklist_file))
self.autoplaylist = load_file(self.config.auto_playlist_file)
self.autoplaylist_session = self.autoplaylist[:]
self.aiolocks = defaultdict(asyncio.Lock)
self.downloader = downloader.Downloader(download_folder='audio_cache')
self._setup_logging()
log.info(' MusicBot (version {}) '.format(BOTVERSION).center(50, '='))
if not self.autoplaylist:
log.warning("Autoplaylist is empty, disabling.")
self.config.auto_playlist = False
else:
log.info("Loaded autoplaylist with {} entries".format(len(self.autoplaylist)))
if self.blacklist:
log.debug("Loaded blacklist with {} entries".format(len(self.blacklist)))
# TODO: Do these properly
ssd_defaults = {
'last_np_msg': None,
'auto_paused': False,
'availability_paused': False
}
self.server_specific_data = defaultdict(ssd_defaults.copy)
super().__init__()
self.aiosession = aiohttp.ClientSession(loop=self.loop)
self.http.user_agent += ' MusicBot/%s' % BOTVERSION
def __del__(self):
# These functions return futures but it doesn't matter
try: self.http.session.close()
except: pass
try: self.aiosession.close()
except: pass
# TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
def owner_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
# Only allow the owner to use these commands
orig_msg = _get_variable('message')
if not orig_msg or orig_msg.author.id == self.config.owner_id:
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError("only the owner can use this command", expire_in=30)
return wrapper
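    # Usage sketch (command name is illustrative): applied to a command coroutine so
    # only the configured owner may invoke it, e.g.
    #     @owner_only
    #     async def cmd_shutdown(self, channel): ...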
def dev_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
orig_msg = _get_variable('message')
if orig_msg.author.id in self.config.dev_ids:
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError("only dev users can use this command", expire_in=30)
wrapper.dev_cmd = True
return wrapper
def ensure_appinfo(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
await self._cache_app_info()
# noinspection PyCallingNonCallable
return await func(self, *args, **kwargs)
return wrapper
def _get_owner(self, *, server=None, voice=False):
return discord.utils.find(
lambda m: m.id == self.config.owner_id and (m.voice_channel if voice else True),
server.members if server else self.get_all_members()
)
def _delete_old_audiocache(self, path=AUDIO_CACHE_PATH):
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + '__')
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + '__', path)
return False
return True
def _setup_logging(self):
if len(logging.getLogger(__package__).handlers) > 1:
log.debug("Skipping logger setup, already set up")
return
shandler = logging.StreamHandler(stream=sys.stdout)
shandler.setFormatter(colorlog.LevelFormatter(
fmt = {
'DEBUG': '{log_color}[{levelname}:{module}] {message}',
'INFO': '{log_color}{message}',
'WARNING': '{log_color}{levelname}: {message}',
'ERROR': '{log_color}[{levelname}:{module}] {message}',
'CRITICAL': '{log_color}[{levelname}:{module}] {message}',
'EVERYTHING': '{log_color}[{levelname}:{module}] {message}',
'NOISY': '{log_color}[{levelname}:{module}] {message}',
'VOICEDEBUG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}',
'FFMPEG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}'
},
log_colors = {
'DEBUG': 'cyan',
'INFO': 'white',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red',
'EVERYTHING': 'white',
'NOISY': 'white',
'FFMPEG': 'bold_purple',
'VOICEDEBUG': 'purple',
},
style = '{',
datefmt = ''
))
shandler.setLevel(self.config.debug_level)
logging.getLogger(__package__).addHandler(shandler)
log.debug("Set logging level to {}".format(self.config.debug_level_str))
if self.config.debug_mode:
dlogger = logging.getLogger('discord')
dlogger.setLevel(logging.DEBUG)
dhandler = logging.FileHandler(filename='logs/discord.log', encoding='utf-8', mode='w')
dhandler.setFormatter(logging.Formatter('{asctime}:{levelname}:{name}: {message}', style='{'))
dlogger.addHandler(dhandler)
@staticmethod
def _check_if_empty(vchannel: discord.Channel, *, excluding_me=True, excluding_deaf=False):
def check(member):
if excluding_me and member == vchannel.server.me:
return False
if excluding_deaf and any([member.deaf, member.self_deaf]):
return False
return True
return not sum(1 for m in vchannel.voice_members if check(m))
async def _join_startup_channels(self, channels, *, autosummon=True):
joined_servers = set()
channel_map = {c.server: c for c in channels}
def _autopause(player):
if self._check_if_empty(player.voice_client.channel):
log.info("Initial autopause in empty channel")
player.pause()
self.server_specific_data[player.voice_client.channel.server]['auto_paused'] = True
for server in self.servers:
if server.unavailable or server in channel_map:
continue
if server.me.voice_channel:
log.info("Found resumable voice channel {0.server.name}/{0.name}".format(server.me.voice_channel))
channel_map[server] = server.me.voice_channel
if autosummon:
owner = self._get_owner(server=server, voice=True)
if owner:
log.info("Found owner in \"{}\"".format(owner.voice_channel.name))
channel_map[server] = owner.voice_channel
for server, channel in channel_map.items():
if server in joined_servers:
log.info("Already joined a channel in \"{}\", skipping".format(server.name))
continue
if channel and channel.type == discord.ChannelType.voice:
log.info("Attempting to join {0.server.name}/{0.name}".format(channel))
chperms = channel.permissions_for(server.me)
if not chperms.connect:
log.info("Cannot join channel \"{}\", no permission.".format(channel.name))
continue
elif not chperms.speak:
log.info("Will not join channel \"{}\", no permission to speak.".format(channel.name))
continue
try:
player = await self.get_player(channel, create=True, deserialize=self.config.persistent_queue)
joined_servers.add(server)
log.info("Joined {0.server.name}/{0.name}".format(channel))
if player.is_stopped:
player.play()
if self.config.auto_playlist and not player.playlist.entries:
await self.on_player_finished_playing(player)
if self.config.auto_pause:
player.once('play', lambda player, **_: _autopause(player))
except Exception:
log.debug("Error joining {0.server.name}/{0.name}".format(channel), exc_info=True)
log.error("Failed to join {0.server.name}/{0.name}".format(channel))
elif channel:
log.warning("Not joining {0.server.name}/{0.name}, that's a text channel.".format(channel))
else:
log.warning("Invalid channel thing: {}".format(channel))
async def _wait_delete_msg(self, message, after):
await asyncio.sleep(after)
await self.safe_delete_message(message, quiet=True)
# TODO: Check to see if I can just move this to on_message after the response check
async def _manual_delete_check(self, message, *, quiet=False):
if self.config.delete_invoking:
await self.safe_delete_message(message, quiet=quiet)
async def _check_ignore_non_voice(self, msg):
vc = msg.server.me.voice_channel
# If we've connected to a voice chat and we're in the same voice channel
if not vc or vc == msg.author.voice_channel:
return True
else:
raise exceptions.PermissionsError(
"you cannot use this command when not in the voice channel (%s)" % vc.name, expire_in=30)
async def _cache_app_info(self, *, update=False):
        if (not self.cached_app_info or update) and self.user.bot:
log.debug("Caching app info")
self.cached_app_info = await self.application_info()
return self.cached_app_info
async def remove_from_autoplaylist(self, song_url:str, *, ex:Exception=None, delete_from_ap=False):
if song_url not in self.autoplaylist:
log.debug("URL \"{}\" not in autoplaylist, ignoring".format(song_url))
return
async with self.aiolocks[_func_()]:
self.autoplaylist.remove(song_url)
log.info("Removing unplayable song from autoplaylist: %s" % song_url)
with open(self.config.auto_playlist_removed_file, 'a', encoding='utf8') as f:
f.write(
'# Entry removed {ctime}\n'
'# Reason: {ex}\n'
'{url}\n\n{sep}\n\n'.format(
ctime=time.ctime(),
ex=str(ex).replace('\n', '\n#' + ' ' * 10), # 10 spaces to line up with # Reason:
url=song_url,
sep='#' * 32
))
if delete_from_ap:
log.info("Updating autoplaylist")
write_file(self.config.auto_playlist_file, self.autoplaylist)
@ensure_appinfo
async def generate_invite_link(self, *, permissions=discord.Permissions(70380544), server=None):
return discord.utils.oauth_url(self.cached_app_info.id, permissions=permissions, server=server)
async def join_voice_channel(self, channel):
if isinstance(channel, discord.Object):
channel = self.get_channel(channel.id)
if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
raise discord.InvalidArgument('Channel passed must be a voice channel')
server = channel.server
if self.is_voice_connected(server):
raise discord.ClientException('Already connected to a voice channel in this server')
def session_id_found(data):
user_id = data.get('user_id')
guild_id = data.get('guild_id')
return user_id == self.user.id and guild_id == server.id
log.voicedebug("(%s) creating futures", _func_())
# register the futures for waiting
session_id_future = self.ws.wait_for('VOICE_STATE_UPDATE', session_id_found)
voice_data_future = self.ws.wait_for('VOICE_SERVER_UPDATE', lambda d: d.get('guild_id') == server.id)
# "join" the voice channel
log.voicedebug("(%s) setting voice state", _func_())
await self.ws.voice_state(server.id, channel.id)
log.voicedebug("(%s) waiting for session id", _func_())
session_id_data = await asyncio.wait_for(session_id_future, timeout=15, loop=self.loop)
# sometimes it gets stuck on this step. Jake said to wait indefinitely. To hell with that.
log.voicedebug("(%s) waiting for voice data", _func_())
data = await asyncio.wait_for(voice_data_future, timeout=15, loop=self.loop)
kwargs = {
'user': self.user,
'channel': channel,
'data': data,
'loop': self.loop,
'session_id': session_id_data.get('session_id'),
'main_ws': self.ws
}
voice = discord.VoiceClient(**kwargs)
try:
log.voicedebug("(%s) connecting...", _func_())
with aiohttp.Timeout(15):
await voice.connect()
except asyncio.TimeoutError as e:
log.voicedebug("(%s) connection failed, disconnecting", _func_())
try:
await voice.disconnect()
except:
pass
raise e
log.voicedebug("(%s) connection successful", _func_())
self.connection._add_voice_client(server.id, voice)
return voice
async def get_voice_client(self, channel: discord.Channel):
if isinstance(channel, discord.Object):
channel = self.get_channel(channel.id)
if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
raise AttributeError('Channel passed must be a voice channel')
async with self.aiolocks[_func_() + ':' + channel.server.id]:
if self.is_voice_connected(channel.server):
return self.voice_client_in(channel.server)
vc = None
t0 = t1 = 0
tries = 5
for attempt in range(1, tries+1):
log.debug("Connection attempt {} to {}".format(attempt, channel.name))
t0 = time.time()
try:
vc = await self.join_voice_channel(channel)
t1 = time.time()
break
except asyncio.TimeoutError:
log.warning("Failed to connect, retrying ({}/{})".format(attempt, tries))
# TODO: figure out if I need this or not
# try:
# await self.ws.voice_state(channel.server.id, None)
# except:
# pass
except:
log.exception("Unknown error attempting to connect to voice")
await asyncio.sleep(0.5)
if not vc:
log.critical("Voice client is unable to connect, restarting...")
await self.restart()
log.debug("Connected in {:0.1f}s".format(t1-t0))
log.info("Connected to {}/{}".format(channel.server, channel))
vc.ws._keep_alive.name = 'VoiceClient Keepalive'
return vc
async def reconnect_voice_client(self, server, *, sleep=0.1, channel=None):
log.debug("Reconnecting voice client on \"{}\"{}".format(
server, ' to "{}"'.format(channel.name) if channel else ''))
async with self.aiolocks[_func_() + ':' + server.id]:
vc = self.voice_client_in(server)
if not (vc or channel):
return
_paused = False
player = self.get_player_in(server)
if player and player.is_playing:
log.voicedebug("(%s) Pausing", _func_())
player.pause()
_paused = True
log.voicedebug("(%s) Disconnecting", _func_())
try:
await vc.disconnect()
except:
pass
if sleep:
log.voicedebug("(%s) Sleeping for %s", _func_(), sleep)
await asyncio.sleep(sleep)
if player:
log.voicedebug("(%s) Getting voice client", _func_())
if not channel:
new_vc = await self.get_voice_client(vc.channel)
else:
new_vc = await self.get_voice_client(channel)
log.voicedebug("(%s) Swapping voice client", _func_())
await player.reload_voice(new_vc)
if player.is_paused and _paused:
log.voicedebug("Resuming")
player.resume()
log.debug("Reconnected voice client on \"{}\"{}".format(
server, ' to "{}"'.format(channel.name) if channel else ''))
async def disconnect_voice_client(self, server):
vc = self.voice_client_in(server)
if not vc:
return
if server.id in self.players:
self.players.pop(server.id).kill()
await vc.disconnect()
async def disconnect_all_voice_clients(self):
        for vc in list(self.voice_clients):
await self.disconnect_voice_client(vc.channel.server)
async def set_voice_state(self, vchannel, *, mute=False, deaf=False):
if isinstance(vchannel, discord.Object):
vchannel = self.get_channel(vchannel.id)
if getattr(vchannel, 'type', ChannelType.text) != ChannelType.voice:
raise AttributeError('Channel passed must be a voice channel')
await self.ws.voice_state(vchannel.server.id, vchannel.id, mute, deaf)
# I hope I don't have to set the channel here
# instead of waiting for the event to update it
def get_player_in(self, server: discord.Server) -> MusicPlayer:
return self.players.get(server.id)
async def get_player(self, channel, create=False, *, deserialize=False) -> MusicPlayer:
server = channel.server
async with self.aiolocks[_func_() + ':' + server.id]:
if deserialize:
voice_client = await self.get_voice_client(channel)
player = await self.deserialize_queue(server, voice_client)
if player:
log.debug("Created player via deserialization for server %s with %s entries", server.id, len(player.playlist))
# Since deserializing only happens when the bot starts, I should never need to reconnect
return self._init_player(player, server=server)
if server.id not in self.players:
if not create:
raise exceptions.CommandError(
'The bot is not in a voice channel. '
'Use %ssummon to summon it to your voice channel.' % self.config.command_prefix)
voice_client = await self.get_voice_client(channel)
playlist = Playlist(self)
player = MusicPlayer(self, voice_client, playlist)
self._init_player(player, server=server)
async with self.aiolocks[self.reconnect_voice_client.__name__ + ':' + server.id]:
if self.players[server.id].voice_client not in self.voice_clients:
log.debug("Reconnect required for voice client in {}".format(server.name))
await self.reconnect_voice_client(server, channel=channel)
return self.players[server.id]
def _init_player(self, player, *, server=None):
player = player.on('play', self.on_player_play) \
.on('resume', self.on_player_resume) \
.on('pause', self.on_player_pause) \
.on('stop', self.on_player_stop) \
.on('finished-playing', self.on_player_finished_playing) \
.on('entry-added', self.on_player_entry_added) \
.on('error', self.on_player_error)
player.skip_state = SkipState()
if server:
self.players[server.id] = player
return player
async def on_player_play(self, player, entry):
await self.update_now_playing_status(entry)
player.skip_state.reset()
# This is the one event where its ok to serialize autoplaylist entries
await self.serialize_queue(player.voice_client.channel.server)
channel = entry.meta.get('channel', None)
author = entry.meta.get('author', None)
if channel and author:
last_np_msg = self.server_specific_data[channel.server]['last_np_msg']
if last_np_msg and last_np_msg.channel == channel:
async for lmsg in self.logs_from(channel, limit=1):
if lmsg != last_np_msg and last_np_msg:
await self.safe_delete_message(last_np_msg)
self.server_specific_data[channel.server]['last_np_msg'] = None
break # This is probably redundant
if self.config.now_playing_mentions:
newmsg = '%s - your song **%s** is now playing in %s!' % (
entry.meta['author'].mention, entry.title, player.voice_client.channel.name)
else:
newmsg = 'Now playing in %s: **%s**' % (
player.voice_client.channel.name, entry.title)
if self.server_specific_data[channel.server]['last_np_msg']:
self.server_specific_data[channel.server]['last_np_msg'] = await self.safe_edit_message(last_np_msg, newmsg, send_if_fail=True)
else:
self.server_specific_data[channel.server]['last_np_msg'] = await self.safe_send_message(channel, newmsg)
# TODO: Check channel voice state?
async def on_player_resume(self, player, entry, **_):
await self.update_now_playing_status(entry)
async def on_player_pause(self, player, entry, **_):
await self.update_now_playing_status(entry, True)
# await self.serialize_queue(player.voice_client.channel.server)
async def on_player_stop(self, player, **_):
await self.update_now_playing_status()
async def on_player_finished_playing(self, player, **_):
if not player.playlist.entries and not player.current_entry and self.config.auto_playlist:
if not self.autoplaylist_session:
log.info("Autoplaylist session empty. Re-populating with entries...")
self.autoplaylist_session = self.autoplaylist[:]
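            # Draw from the session copy so each URL plays at most once before
            # the session is re-populated from the full autoplaylist.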
while self.autoplaylist_session:
random.shuffle(self.autoplaylist_session)
song_url = random.choice(self.autoplaylist_session)
self.autoplaylist_session.remove(song_url)
info = {}
try:
info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
except downloader.youtube_dl.utils.DownloadError as e:
if 'YouTube said:' in e.args[0]:
# url is bork, remove from list and put in removed list
log.error("Error processing youtube url:\n{}".format(e.args[0]))
else:
# Probably an error from a different extractor, but I've only seen youtube's
log.error("Error processing \"{url}\": {ex}".format(url=song_url, ex=e))
await self.remove_from_autoplaylist(song_url, ex=e, delete_from_ap=True)
continue
except Exception as e:
log.error("Error processing \"{url}\": {ex}".format(url=song_url, ex=e))
log.exception()
self.autoplaylist.remove(song_url)
continue
if info.get('entries', None): # or .get('_type', '') == 'playlist'
log.debug("Playlist found but is unsupported at this time, skipping.")
# TODO: Playlist expansion
# Do I check the initial conditions again?
# not (not player.playlist.entries and not player.current_entry and self.config.auto_playlist)
try:
await player.playlist.add_entry(song_url, channel=None, author=None)
except exceptions.ExtractionError as e:
log.error("Error adding song from autoplaylist: {}".format(e))
log.debug('', exc_info=True)
continue
break
if not self.autoplaylist:
# TODO: When I add playlist expansion, make sure that's not happening during this check
log.warning("No playable songs in the autoplaylist, disabling.")
self.config.auto_playlist = False
else: # Don't serialize for autoplaylist events
await self.serialize_queue(player.voice_client.channel.server)
async def on_player_entry_added(self, player, playlist, entry, **_):
if entry.meta.get('author') and entry.meta.get('channel'):
await self.serialize_queue(player.voice_client.channel.server)
async def on_player_error(self, player, entry, ex, **_):
if 'channel' in entry.meta:
await self.safe_send_message(
entry.meta['channel'],
"```\nError from FFmpeg:\n{}\n```".format(ex)
)
else:
log.exception("Player error", exc_info=ex)
async def update_now_playing_status(self, entry=None, is_paused=False):
game = None
if not self.config.status_message:
if self.user.bot:
activeplayers = sum(1 for p in self.players.values() if p.is_playing)
if activeplayers > 1:
game = discord.Game(type=0, name="music on %s servers" % activeplayers)
entry = None
elif activeplayers == 1:
player = discord.utils.get(self.players.values(), is_playing=True)
entry = player.current_entry
if entry:
prefix = u'\u275A\u275A ' if is_paused else ''
name = u'{}{}'.format(prefix, entry.title)[:128]
game = discord.Game(type=0, name=name)
else:
game = discord.Game(type=0, name=self.config.status_message.strip()[:128])
async with self.aiolocks[_func_()]:
if game != self.last_status:
await self.change_presence(game=game)
self.last_status = game
async def update_now_playing_message(self, server, message, *, channel=None):
lnp = self.server_specific_data[server]['last_np_msg']
m = None
if message is None and lnp:
await self.safe_delete_message(lnp, quiet=True)
elif lnp: # If there was a previous lp message
oldchannel = lnp.channel
            if channel == oldchannel: # If we have a channel to update it in
async for lmsg in self.logs_from(channel, limit=1):
if lmsg != lnp and lnp: # If we need to resend it
await self.safe_delete_message(lnp, quiet=True)
m = await self.safe_send_message(channel, message, quiet=True)
else:
m = await self.safe_edit_message(lnp, message, send_if_fail=True, quiet=False)
elif channel: # If we have a new channel to send it to
await self.safe_delete_message(lnp, quiet=True)
m = await self.safe_send_message(channel, message, quiet=True)
else: # we just resend it in the old channel
await self.safe_delete_message(lnp, quiet=True)
m = await self.safe_send_message(oldchannel, message, quiet=True)
elif channel: # No previous message
m = await self.safe_send_message(channel, message, quiet=True)
self.server_specific_data[server]['last_np_msg'] = m
async def serialize_queue(self, server, *, dir=None):
"""
Serialize the current queue for a server's player to json.
"""
player = self.get_player_in(server)
if not player:
return
if dir is None:
dir = 'data/%s/queue.json' % server.id
async with self.aiolocks['queue_serialization'+':'+server.id]:
log.debug("Serializing queue for %s", server.id)
with open(dir, 'w', encoding='utf8') as f:
f.write(player.serialize(sort_keys=True))
async def serialize_all_queues(self, *, dir=None):
coros = [self.serialize_queue(s, dir=dir) for s in self.servers]
await asyncio.gather(*coros, return_exceptions=True)
async def deserialize_queue(self, server, voice_client, playlist=None, *, dir=None) -> MusicPlayer:
"""
Deserialize a saved queue for a server into a MusicPlayer. If no queue is saved, returns None.
"""
if playlist is None:
playlist = Playlist(self)
if dir is None:
dir = 'data/%s/queue.json' % server.id
async with self.aiolocks['queue_serialization' + ':' + server.id]:
if not os.path.isfile(dir):
return None
log.debug("Deserializing queue for %s", server.id)
with open(dir, 'r', encoding='utf8') as f:
data = f.read()
return MusicPlayer.from_json(data, self, voice_client, playlist)
@ensure_appinfo
async def _on_ready_sanity_checks(self):
# Ensure folders exist
await self._scheck_ensure_env()
# Server permissions check
await self._scheck_server_permissions()
# playlists in autoplaylist
await self._scheck_autoplaylist()
# config/permissions async validate?
await self._scheck_configs()
async def _scheck_ensure_env(self):
log.debug("Ensuring data folders exist")
for server in self.servers:
pathlib.Path('data/%s/' % server.id).mkdir(exist_ok=True)
with open('data/server_names.txt', 'w', encoding='utf8') as f:
for server in sorted(self.servers, key=lambda s:int(s.id)):
f.write('{:<22} {}\n'.format(server.id, server.name))
if not self.config.save_videos and os.path.isdir(AUDIO_CACHE_PATH):
if self._delete_old_audiocache():
log.debug("Deleted old audio cache")
else:
log.debug("Could not delete old audio cache, moving on.")
async def _scheck_server_permissions(self):
log.debug("Checking server permissions")
pass # TODO
async def _scheck_autoplaylist(self):
log.debug("Auditing autoplaylist")
pass # TODO
async def _scheck_configs(self):
log.debug("Validating config")
await self.config.async_validate(self)
log.debug("Validating permissions config")
await self.permissions.async_validate(self)
#######################################################################################################################
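    # The safe_* helpers below wrap discord.py calls and turn the common failure
    # modes (Forbidden, NotFound, HTTPException) into log messages instead of crashes.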
async def safe_send_message(self, dest, content, **kwargs):
tts = kwargs.pop('tts', False)
quiet = kwargs.pop('quiet', False)
expire_in = kwargs.pop('expire_in', 0)
allow_none = kwargs.pop('allow_none', True)
also_delete = kwargs.pop('also_delete', None)
msg = None
lfunc = log.debug if quiet else log.warning
try:
if content is not None or allow_none:
msg = await self.send_message(dest, content, tts=tts)
except discord.Forbidden:
lfunc("Cannot send message to \"%s\", no permission", dest.name)
except discord.NotFound:
lfunc("Cannot send message to \"%s\", invalid channel?", dest.name)
except discord.HTTPException:
if len(content) > DISCORD_MSG_CHAR_LIMIT:
lfunc("Message is over the message size limit (%s)", DISCORD_MSG_CHAR_LIMIT)
else:
lfunc("Failed to send message")
log.noise("Got HTTPException trying to send message to %s: %s", dest, content)
finally:
if msg and expire_in:
asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))
if also_delete and isinstance(also_delete, discord.Message):
asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))
return msg
async def safe_delete_message(self, message, *, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await self.delete_message(message)
except discord.Forbidden:
lfunc("Cannot delete message \"{}\", no permission".format(message.clean_content))
except discord.NotFound:
lfunc("Cannot delete message \"{}\", message not found".format(message.clean_content))
async def safe_edit_message(self, message, new, *, send_if_fail=False, quiet=False):
lfunc = log.debug if quiet else log.warning
try:
return await self.edit_message(message, new)
except discord.NotFound:
lfunc("Cannot edit message \"{}\", message not found".format(message.clean_content))
if send_if_fail:
lfunc("Sending message instead")
return await self.safe_send_message(message.channel, new)
async def send_typing(self, destination):
try:
return await super().send_typing(destination)
except discord.Forbidden:
log.warning("Could not send typing to {}, no permission".format(destination))
async def edit_profile(self, **fields):
if self.user.bot:
return await super().edit_profile(**fields)
else:
return await super().edit_profile(self.config._password,**fields)
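    # restart() logs out with a RestartSignal set; run() re-raises it on exit so
    # the caller can relaunch the bot.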
async def restart(self):
self.exit_signal = exceptions.RestartSignal()
await self.logout()
def restart_threadsafe(self):
asyncio.run_coroutine_threadsafe(self.restart(), self.loop)
def _cleanup(self):
try:
self.loop.run_until_complete(self.logout())
except: pass
pending = asyncio.Task.all_tasks()
gathered = asyncio.gather(*pending)
try:
gathered.cancel()
self.loop.run_until_complete(gathered)
gathered.exception()
except: pass
# noinspection PyMethodOverriding
def run(self):
try:
self.loop.run_until_complete(self.start(*self.config.auth))
except discord.errors.LoginFailure:
# Add if token, else
raise exceptions.HelpfulError(
"Bot cannot login, bad credentials.",
"Fix your %s in the options file. "
"Remember that each field should be on their own line."
% ['shit', 'Token', 'Email/Password', 'Credentials'][len(self.config.auth)]
) # ^^^^ In theory self.config.auth should never have no items
finally:
try:
self._cleanup()
except Exception:
log.error("Error in cleanup", exc_info=True)
self.loop.close()
if self.exit_signal:
raise self.exit_signal
async def logout(self):
await self.disconnect_all_voice_clients()
return await super().logout()
async def on_error(self, event, *args, **kwargs):
ex_type, ex, stack = sys.exc_info()
if ex_type == exceptions.HelpfulError:
log.error("Exception in {}:\n{}".format(event, ex.message))
await asyncio.sleep(2) # don't ask
await self.logout()
elif issubclass(ex_type, exceptions.Signal):
self.exit_signal = ex_type
await self.logout()
else:
log.error("Exception in {}".format(event), exc_info=True)
async def on_resumed(self):
log.info("\nReconnected to discord.\n")
async def on_ready(self):
dlogger = logging.getLogger('discord')
for h in dlogger.handlers:
if getattr(h, 'terminator', None) == '':
dlogger.removeHandler(h)
print()
log.debug("Connection established, ready to go.")
self.ws._keep_alive.name = 'Gateway Keepalive'
if self.init_ok:
log.debug("Received additional READY event, may have failed to resume")
return
await self._on_ready_sanity_checks()
print()
log.info('Connected to Discord!')
self.init_ok = True
################################
log.info("Bot: {0}/{1}#{2}{3}".format(
self.user.id,
self.user.name,
self.user.discriminator,
' [BOT]' if self.user.bot else ' [Userbot]'
))
owner = self._get_owner(voice=True) or self._get_owner()
if owner and self.servers:
log.info("Owner: {0}/{1}#{2}\n".format(
owner.id,
owner.name,
owner.discriminator
))
log.info('Server List:')
[log.info(' - ' + s.name) for s in self.servers]
elif self.servers:
log.warning("Owner could not be found on any server (id: %s)\n" % self.config.owner_id)
log.info('Server List:')
[log.info(' - ' + s.name) for s in self.servers]
else:
log.warning("Owner unknown, bot is not on any servers.")
if self.user.bot:
log.warning(
"To make the bot join a server, paste this link in your browser. \n"
"Note: You should be logged into your main account and have \n"
"manage server permissions on the server you want the bot to join.\n"
" " + await self.generate_invite_link()
)
print(flush=True)
if self.config.bound_channels:
chlist = set(self.get_channel(i) for i in self.config.bound_channels if i)
chlist.discard(None)
invalids = set()
invalids.update(c for c in chlist if c.type == discord.ChannelType.voice)
chlist.difference_update(invalids)
self.config.bound_channels.difference_update(invalids)
if chlist:
log.info("Bound to text channels:")
[log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
else:
print("Not bound to any text channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Not binding to voice channels:")
[log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]
print(flush=True)
else:
log.info("Not bound to any text channels")
if self.config.autojoin_channels:
chlist = set(self.get_channel(i) for i in self.config.autojoin_channels if i)
chlist.discard(None)
invalids = set()
invalids.update(c for c in chlist if c.type == discord.ChannelType.text)
chlist.difference_update(invalids)
self.config.autojoin_channels.difference_update(invalids)
if chlist:
log.info("Autojoining voice chanels:")
[log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
else:
log.info("Not autojoining any voice channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Cannot autojoin text channels:")
[log.info(' - {}/{}'.format(ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]
autojoin_channels = chlist
else:
log.info("Not autojoining any voice channels")
autojoin_channels = set()
print(flush=True)
log.info("Options:")
log.info(" Command prefix: " + self.config.command_prefix)
log.info(" Default volume: {}%".format(int(self.config.default_volume * 100)))
log.info(" Skip threshold: {} votes or {}%".format(
self.config.skips_required, fixg(self.config.skip_ratio_required * 100)))
log.info(" Now Playing @mentions: " + ['Disabled', 'Enabled'][self.config.now_playing_mentions])
log.info(" Auto-Summon: " + ['Disabled', 'Enabled'][self.config.auto_summon])
log.info(" Auto-Playlist: " + ['Disabled', 'Enabled'][self.config.auto_playlist])
log.info(" Auto-Pause: " + ['Disabled', 'Enabled'][self.config.auto_pause])
log.info(" Delete Messages: " + ['Disabled', 'Enabled'][self.config.delete_messages])
if self.config.delete_messages:
log.info(" Delete Invoking: " + ['Disabled', 'Enabled'][self.config.delete_invoking])
log.info(" Debug Mode: " + ['Disabled', 'Enabled'][self.config.debug_mode])
log.info(" Downloaded songs will be " + ['deleted', 'saved'][self.config.save_videos])
if self.config.status_message:
log.info(" Status message: " + self.config.status_message)
print(flush=True)
await self.update_now_playing_status()
# maybe option to leave the ownerid blank and generate a random command for the owner to use
# wait_for_message is pretty neato
await self._join_startup_channels(autojoin_channels, autosummon=self.config.auto_summon)
# t-t-th-th-that's all folks!
async def cmd_help(self, command=None):
"""
Usage:
{command_prefix}help [command]
Prints a help message.
If a command is specified, it prints a help message for that command.
Otherwise, it lists the available commands.
"""
if command:
cmd = getattr(self, 'cmd_' + command, None)
if cmd and not hasattr(cmd, 'dev_cmd'):
return Response(
"```\n{}```".format(
dedent(cmd.__doc__)
).format(command_prefix=self.config.command_prefix),
delete_after=60
)
else:
return Response("No such command", delete_after=10)
else:
helpmsg = "**Available commands**\n```"
commands = []
for att in dir(self):
if att.startswith('cmd_') and att != 'cmd_help' and not hasattr(getattr(self, att), 'dev_cmd'):
command_name = att.replace('cmd_', '').lower()
commands.append("{}{}".format(self.config.command_prefix, command_name))
helpmsg += ", ".join(commands)
helpmsg += "```\nMessage Alec with any other concerns!"
return Response(helpmsg, reply=True, delete_after=60)
async def cmd_blacklist(self, message, user_mentions, option, something):
"""
Usage:
{command_prefix}blacklist [ + | - | add | remove ] @UserName [@UserName2 ...]
Add or remove users to the blacklist.
Blacklisted users are forbidden from using bot commands.
"""
if not user_mentions:
raise exceptions.CommandError("No users listed.", expire_in=20)
if option not in ['+', '-', 'add', 'remove']:
raise exceptions.CommandError(
'Invalid option "%s" specified, use +, -, add, or remove' % option, expire_in=20
)
for user in user_mentions.copy():
if user.id == self.config.owner_id:
print("[Commands:Blacklist] The owner cannot be blacklisted.")
user_mentions.remove(user)
old_len = len(self.blacklist)
if option in ['+', 'add']:
self.blacklist.update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
'%s users have been added to the blacklist' % (len(self.blacklist) - old_len),
reply=True, delete_after=10
)
else:
if self.blacklist.isdisjoint(user.id for user in user_mentions):
return Response('none of those users are in the blacklist.', reply=True, delete_after=10)
else:
self.blacklist.difference_update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
'%s users have been removed from the blacklist' % (old_len - len(self.blacklist)),
reply=True, delete_after=10
)
async def cmd_id(self, author, user_mentions):
"""
Usage:
{command_prefix}id [@user]
Tells the user their id or the id of another user.
"""
if not user_mentions:
return Response('your id is `%s`' % author.id, reply=True, delete_after=35)
else:
usr = user_mentions[0]
return Response("%s's id is `%s`" % (usr.name, usr.id), reply=True, delete_after=35)
async def cmd_save(self, player):
"""
Usage:
{command_prefix}save
Saves the current song to the autoplaylist.
"""
if player.current_entry and not isinstance(player.current_entry, StreamPlaylistEntry):
url = player.current_entry.url
if url not in self.autoplaylist:
self.autoplaylist.append(url)
write_file(self.config.auto_playlist_file, self.autoplaylist)
log.debug("Appended {} to autoplaylist".format(url))
return Response('\N{THUMBS UP SIGN}')
else:
raise exceptions.CommandError('This song is already in the autoplaylist.')
else:
raise exceptions.CommandError('There is no valid song playing.')
@owner_only
async def cmd_joinserver(self, message, server_link=None):
"""
Usage:
{command_prefix}joinserver invite_link
Asks the bot to join a server. Note: Bot accounts cannot use invite links.
"""
if self.user.bot:
url = await self.generate_invite_link()
return Response(
"Click here to add me to a server: \n{}".format(url),
reply=True, delete_after=30
)
try:
if server_link:
await self.accept_invite(server_link)
return Response("\N{THUMBS UP SIGN}")
except:
raise exceptions.CommandError('Invalid URL provided:\n{}\n'.format(server_link), expire_in=30)
async def cmd_play(self, player, channel, author, permissions, leftover_args, song_url):
"""
Usage:
{command_prefix}play song_link
{command_prefix}play text to search for
Adds the song to the playlist. If a link is not provided, the first
result from a youtube search is added to the queue.
"""
song_url = song_url.strip('<>')
await self.send_typing(channel)
if leftover_args:
song_url = ' '.join([song_url, *leftover_args])
linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
pattern = re.compile(linksRegex)
matchUrl = pattern.match(song_url)
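        # Not a URL: escape slashes so the remaining text is treated as a search query.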
if matchUrl is None:
song_url = song_url.replace('/', '%2F')
async with self.aiolocks[_func_() + ':' + author.id]:
if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
raise exceptions.PermissionsError(
"You have reached your enqueued song limit (%s)" % permissions.max_songs, expire_in=30
)
try:
info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
except Exception as e:
raise exceptions.CommandError(e, expire_in=30)
if not info:
raise exceptions.CommandError(
"That video cannot be played. Try using the {}stream command.".format(self.config.command_prefix),
expire_in=30
)
# abstract the search handling away from the user
# our ytdl options allow us to use search strings as input urls
if info.get('url', '').startswith('ytsearch'):
# print("[Command:play] Searching for \"%s\"" % song_url)
info = await self.downloader.extract_info(
player.playlist.loop,
song_url,
download=False,
process=True, # ASYNC LAMBDAS WHEN
on_error=lambda e: asyncio.ensure_future(
self.safe_send_message(channel, "```\n%s\n```" % e, expire_in=120), loop=self.loop),
retry_on_error=True
)
if not info:
raise exceptions.CommandError(
"Error extracting info from search string, youtubedl returned no data. "
"You may need to restart the bot if this continues to happen.", expire_in=30
)
if not all(info.get('entries', [])):
# empty list, no data
log.debug("Got empty list, no data")
return
# TODO: handle 'webpage_url' being 'ytsearch:...' or extractor type
song_url = info['entries'][0]['webpage_url']
info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
# Now I could just do: return await self.cmd_play(player, channel, author, song_url)
# But this is probably fine
# TODO: Possibly add another check here to see about things like the bandcamp issue
# TODO: Where ytdl gets the generic extractor version with no processing, but finds two different urls
if 'entries' in info:
            # I have to do extra checks anyways because you can request an arbitrary number of search results
if not permissions.allow_playlists and ':search' in info['extractor'] and len(info['entries']) > 1:
raise exceptions.PermissionsError("You are not allowed to request playlists", expire_in=30)
# The only reason we would use this over `len(info['entries'])` is if we add `if _` to this one
num_songs = sum(1 for _ in info['entries'])
if permissions.max_playlist_length and num_songs > permissions.max_playlist_length:
raise exceptions.PermissionsError(
"Playlist has too many entries (%s > %s)" % (num_songs, permissions.max_playlist_length),
expire_in=30
)
# This is a little bit weird when it says (x + 0 > y), I might add the other check back in
if permissions.max_songs and player.playlist.count_for_user(author) + num_songs > permissions.max_songs:
raise exceptions.PermissionsError(
"Playlist entries + your already queued songs reached limit (%s + %s > %s)" % (
num_songs, player.playlist.count_for_user(author), permissions.max_songs),
expire_in=30
)
if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:
try:
return await self._cmd_play_playlist_async(player, channel, author, permissions, song_url, info['extractor'])
except exceptions.CommandError:
raise
except Exception as e:
log.error("Error queuing playlist", exc_info=True)
raise exceptions.CommandError("Error queuing playlist:\n%s" % e, expire_in=30)
t0 = time.time()
# My test was 1.2 seconds per song, but we maybe should fudge it a bit, unless we can
# monitor it and edit the message with the estimated time, but that's some ADVANCED SHIT
# I don't think we can hook into it anyways, so this will have to do.
# It would probably be a thread to check a few playlists and get the speed from that
# Different playlists might download at different speeds though
wait_per_song = 1.2
procmesg = await self.safe_send_message(
channel,
'Gathering playlist information for {} songs{}'.format(
num_songs,
', ETA: {} seconds'.format(fixg(
num_songs * wait_per_song)) if num_songs >= 10 else '.'))
# We don't have a pretty way of doing this yet. We need either a loop
# that sends these every 10 seconds or a nice context manager.
await self.send_typing(channel)
# TODO: I can create an event emitter object instead, add event functions, and every play list might be asyncified
# Also have a "verify_entry" hook with the entry as an arg and returns the entry if its ok
entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)
tnow = time.time()
ttime = tnow - t0
listlen = len(entry_list)
drop_count = 0
if permissions.max_song_length:
for e in entry_list.copy():
if e.duration > permissions.max_song_length:
player.playlist.entries.remove(e)
entry_list.remove(e)
drop_count += 1
# Im pretty sure there's no situation where this would ever break
# Unless the first entry starts being played, which would make this a race condition
if drop_count:
print("Dropped %s songs" % drop_count)
log.info("Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
listlen,
fixg(ttime),
ttime / listlen if listlen else 0,
                ttime / listlen - wait_per_song if listlen else 0,
fixg(wait_per_song * num_songs))
)
await self.safe_delete_message(procmesg)
if not listlen - drop_count:
raise exceptions.CommandError(
"No songs were added, all songs were over max duration (%ss)" % permissions.max_song_length,
expire_in=30
)
reply_text = "Enqueued **%s** songs to be played. Position in queue: %s"
btext = str(listlen - drop_count)
else:
if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:
raise exceptions.PermissionsError(
"Song duration exceeds limit (%s > %s)" % (info['duration'], permissions.max_song_length),
expire_in=30
)
try:
entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author)
except exceptions.WrongEntryTypeError as e:
if e.use_url == song_url:
log.warning("Determined incorrect entry type, but suggested url is the same. Help.")
log.debug("Assumed url \"%s\" was a single entry, was actually a playlist" % song_url)
log.debug("Using \"%s\" instead" % e.use_url)
return await self.cmd_play(player, channel, author, permissions, leftover_args, e.use_url)
reply_text = "Enqueued **%s** to be played. Position in queue: %s"
btext = entry.title
if position == 1 and player.is_stopped:
position = 'Up next!'
reply_text %= (btext, position)
else:
                try:
                    time_until = await player.playlist.estimate_time_until(position, player)
                    reply_text += ' - estimated time until playing: %s'
                    reply_text %= (btext, position, ftimedelta(time_until))
                except:
                    traceback.print_exc()
                    reply_text %= (btext, position)
return Response(reply_text, delete_after=30)
async def _cmd_play_playlist_async(self, player, channel, author, permissions, playlist_url, extractor_type):
"""
Secret handler to use the async wizardry to make playlist queuing non-"blocking"
"""
await self.send_typing(channel)
info = await self.downloader.extract_info(player.playlist.loop, playlist_url, download=False, process=False)
if not info:
raise exceptions.CommandError("That playlist cannot be played.")
num_songs = sum(1 for _ in info['entries'])
t0 = time.time()
busymsg = await self.safe_send_message(
channel, "Processing %s songs..." % num_songs) # TODO: From playlist_title
await self.send_typing(channel)
entries_added = 0
if extractor_type == 'youtube:playlist':
try:
entries_added = await player.playlist.async_process_youtube_playlist(
playlist_url, channel=channel, author=author)
# TODO: Add hook to be called after each song
# TODO: Add permissions
except Exception:
log.error("Error processing playlist", exc_info=True)
raise exceptions.CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)
elif extractor_type.lower() in ['soundcloud:set', 'bandcamp:album']:
try:
entries_added = await player.playlist.async_process_sc_bc_playlist(
playlist_url, channel=channel, author=author)
# TODO: Add hook to be called after each song
# TODO: Add permissions
except Exception:
log.error("Error processing playlist", exc_info=True)
raise exceptions.CommandError('Error handling playlist %s queuing.' % playlist_url, expire_in=30)
songs_processed = len(entries_added)
drop_count = 0
skipped = False
if permissions.max_song_length:
for e in entries_added.copy():
if e.duration > permissions.max_song_length:
try:
player.playlist.entries.remove(e)
entries_added.remove(e)
drop_count += 1
except:
pass
if drop_count:
log.debug("Dropped %s songs" % drop_count)
if player.current_entry and player.current_entry.duration > permissions.max_song_length:
await self.safe_delete_message(self.server_specific_data[channel.server]['last_np_msg'])
self.server_specific_data[channel.server]['last_np_msg'] = None
skipped = True
player.skip()
entries_added.pop()
await self.safe_delete_message(busymsg)
songs_added = len(entries_added)
tnow = time.time()
ttime = tnow - t0
wait_per_song = 1.2
# TODO: actually calculate wait per song in the process function and return that too
# This is technically inaccurate since bad songs are ignored but still take up time
log.info("Processed {}/{} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
songs_processed,
num_songs,
fixg(ttime),
ttime / num_songs if num_songs else 0,
            ttime / num_songs - wait_per_song if num_songs else 0,
fixg(wait_per_song * num_songs))
)
if not songs_added:
basetext = "No songs were added, all songs were over max duration (%ss)" % permissions.max_song_length
if skipped:
basetext += "\nAdditionally, the current song was skipped for being too long."
raise exceptions.CommandError(basetext, expire_in=30)
return Response("Enqueued {} songs to be played in {} seconds".format(
songs_added, fixg(ttime, 1)), delete_after=30)
async def cmd_stream(self, player, channel, author, permissions, song_url):
"""
Usage:
{command_prefix}stream song_link
Enqueue a media stream.
This could mean an actual stream like Twitch or shoutcast, or simply streaming
media without predownloading it. Note: FFmpeg is notoriously bad at handling
streams, especially on poor connections. You have been warned.
"""
song_url = song_url.strip('<>')
if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
raise exceptions.PermissionsError(
"You have reached your enqueued song limit (%s)" % permissions.max_songs, expire_in=30
)
await self.send_typing(channel)
await player.playlist.add_stream_entry(song_url, channel=channel, author=author)
return Response(":+1:", delete_after=6)
async def cmd_search(self, player, channel, author, permissions, leftover_args):
"""
Usage:
{command_prefix}search [service] [number] query
Searches a service for a video and adds it to the queue.
- service: any one of the following services:
- youtube (yt) (default if unspecified)
- soundcloud (sc)
- yahoo (yh)
- number: return a number of video results and waits for user to choose one
- defaults to 3 if unspecified
- note: If your search query starts with a number,
you must put your query in quotes
- ex: {command_prefix}search 2 "I ran seagulls"
The command issuer can use reactions to indicate their response to each result.
"""
if permissions.max_songs and player.playlist.count_for_user(author) > permissions.max_songs:
raise exceptions.PermissionsError(
"You have reached your playlist item limit (%s)" % permissions.max_songs,
expire_in=30
)
def argcheck():
if not leftover_args:
# noinspection PyUnresolvedReferences
raise exceptions.CommandError(
"Please specify a search query.\n%s" % dedent(
self.cmd_search.__doc__.format(command_prefix=self.config.command_prefix)),
expire_in=60
)
argcheck()
try:
leftover_args = shlex.split(' '.join(leftover_args))
except ValueError:
raise exceptions.CommandError("Please quote your search query properly.", expire_in=30)
service = 'youtube'
items_requested = 3
max_items = 10 # this can be whatever, but since ytdl uses about 1000, a small number might be better
services = {
'youtube': 'ytsearch',
'soundcloud': 'scsearch',
'yahoo': 'yvsearch',
'yt': 'ytsearch',
'sc': 'scsearch',
'yh': 'yvsearch'
}
if leftover_args[0] in services:
service = leftover_args.pop(0)
argcheck()
if leftover_args[0].isdigit():
items_requested = int(leftover_args.pop(0))
argcheck()
if items_requested > max_items:
raise exceptions.CommandError("You cannot search for more than %s videos" % max_items)
# Look jake, if you see this and go "what the fuck are you doing"
# and have a better idea on how to do this, i'd be delighted to know.
# I don't want to just do ' '.join(leftover_args).strip("\"'")
# Because that eats both quotes if they're there
# where I only want to eat the outermost ones
if leftover_args[0][0] in '\'"':
lchar = leftover_args[0][0]
leftover_args[0] = leftover_args[0].lstrip(lchar)
leftover_args[-1] = leftover_args[-1].rstrip(lchar)
search_query = '%s%s:%s' % (services[service], items_requested, ' '.join(leftover_args))
search_msg = await self.send_message(channel, "Searching for videos...")
await self.send_typing(channel)
try:
info = await self.downloader.extract_info(player.playlist.loop, search_query, download=False, process=True)
except Exception as e:
await self.safe_edit_message(search_msg, str(e), send_if_fail=True)
return
else:
await self.safe_delete_message(search_msg)
if not info:
return Response("No videos found.", delete_after=30)
for e in info['entries']:
result_message = await self.safe_send_message(channel, "Result %s/%s: %s" % (
| for r in reactions:
await self.add_reaction(result_message, r)
res = await self.wait_for_reaction(reactions, user=author, timeout=30, message=result_message)
if not res:
await self.safe_delete_message(result_message)
return
if res.reaction.emoji == '\u2705': # check
await self.safe_delete_message(result_message)
await self.cmd_play(player, channel, author, permissions, [], e['webpage_url'])
return Response("Alright, coming right up!", delete_after=30)
elif res.reaction.emoji == '\U0001F6AB': # cross
await self.safe_delete_message(result_message)
continue
else:
await self.safe_delete_message(result_message)
break
return Response("Oh well \N{SLIGHTLY FROWNING FACE}", delete_after=30)
async def cmd_np(self, player, channel, server, message):
"""
Usage:
{command_prefix}np
Displays the current song in chat.
"""
if player.current_entry:
if self.server_specific_data[server]['last_np_msg']:
await self.safe_delete_message(self.server_specific_data[server]['last_np_msg'])
self.server_specific_data[server]['last_np_msg'] = None
# TODO: Fix timedelta garbage with util function
song_progress = ftimedelta(timedelta(seconds=player.progress))
song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))
streaming = isinstance(player.current_entry, StreamPlaylistEntry)
prog_str = ('`[{progress}]`' if streaming else '`[{progress}/{total}]`').format(
progress=song_progress, total=song_total
)
action_text = 'Streaming' if streaming else 'Playing'
if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
np_text = "Now {action}: **{title}** added by **{author}** {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>".format(
action=action_text,
title=player.current_entry.title,
author=player.current_entry.meta['author'].name,
progress=prog_str,
url=player.current_entry.url
)
else:
np_text = "Now {action}: **{title}** {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>".format(
action=action_text,
title=player.current_entry.title,
progress=prog_str,
url=player.current_entry.url
)
self.server_specific_data[server]['last_np_msg'] = await self.safe_send_message(channel, np_text)
await self._manual_delete_check(message)
else:
return Response(
'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix),
delete_after=30
)
async def cmd_summon(self, channel, server, author, voice_channel):
"""
Usage:
{command_prefix}summon
Call the bot to the summoner's voice channel.
"""
if not author.voice_channel:
raise exceptions.CommandError('You are not in a voice channel!')
voice_client = self.voice_client_in(server)
if voice_client and server == author.voice_channel.server:
await voice_client.move_to(author.voice_channel)
return
# move to _verify_vc_perms?
chperms = author.voice_channel.permissions_for(server.me)
if not chperms.connect:
log.warning("Cannot join channel \"{}\", no permission.".format(author.voice_channel.name))
return Response(
"```Cannot join channel \"{}\", no permission.```".format(author.voice_channel.name),
delete_after=25
)
elif not chperms.speak:
log.warning("Will not join channel \"{}\", no permission to speak.".format(author.voice_channel.name))
return Response(
"```Will not join channel \"{}\", no permission to speak.```".format(author.voice_channel.name),
delete_after=25
)
log.info("Joining {0.server.name}/{0.name}".format(author.voice_channel))
player = await self.get_player(author.voice_channel, create=True, deserialize=self.config.persistent_queue)
if player.is_stopped:
player.play()
if self.config.auto_playlist:
await self.on_player_finished_playing(player)
async def cmd_pause(self, player):
"""
Usage:
{command_prefix}pause
Pauses playback of the current song.
"""
if player.is_playing:
player.pause()
else:
raise exceptions.CommandError('Player is not playing.', expire_in=30)
async def cmd_resume(self, player):
"""
Usage:
{command_prefix}resume
Resumes playback of a paused song.
"""
if player.is_paused:
player.resume()
else:
raise exceptions.CommandError('Player is not paused.', expire_in=30)
async def cmd_shuffle(self, channel, player):
"""
Usage:
{command_prefix}shuffle
Shuffles the playlist.
"""
player.playlist.shuffle()
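        # Play a brief card-shuffle animation while confirming the shuffle.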
cards = ['\N{BLACK SPADE SUIT}', '\N{BLACK CLUB SUIT}', '\N{BLACK HEART SUIT}', '\N{BLACK DIAMOND SUIT}']
random.shuffle(cards)
hand = await self.send_message(channel, ' '.join(cards))
await asyncio.sleep(0.6)
for x in range(4):
random.shuffle(cards)
await self.safe_edit_message(hand, ' '.join(cards))
await asyncio.sleep(0.6)
await self.safe_delete_message(hand, quiet=True)
return Response("\N{OK HAND SIGN}", delete_after=15)
async def cmd_clear(self, player, author):
"""
Usage:
{command_prefix}clear
Clears the playlist.
"""
player.playlist.clear()
return Response('\N{PUT LITTER IN ITS PLACE SYMBOL}', delete_after=20)
async def cmd_skip(self, player, channel, author, message, permissions, voice_channel):
"""
Usage:
{command_prefix}skip
Skips the current song when enough votes are cast, or by the bot owner.
"""
if player.is_stopped:
raise exceptions.CommandError("Can't skip! The player is not playing!", expire_in=20)
if not player.current_entry:
if player.playlist.peek():
if player.playlist.peek()._is_downloading:
return Response("The next song (%s) is downloading, please wait." % player.playlist.peek().title)
elif player.playlist.peek().is_downloaded:
print("The next song will be played shortly. Please wait.")
else:
print("Something odd is happening. "
"You might want to restart the bot if it doesn't start working.")
else:
print("Something strange is happening. "
"You might want to restart the bot if it doesn't start working.")
if author.id == self.config.owner_id \
or permissions.instaskip \
or author == player.current_entry.meta.get('author', None):
player.skip() # check autopause stuff here
await self._manual_delete_check(message)
return
# TODO: ignore person if they're deaf or take them out of the list or something?
# Currently is recounted if they vote, deafen, then vote
num_voice = sum(1 for m in voice_channel.voice_members if not (
m.deaf or m.self_deaf or m.id in [self.config.owner_id, self.user.id]))
num_skips = player.skip_state.add_skipper(author.id, message)
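        # Votes needed: the smaller of the configured absolute count and
        # ceil(ratio * eligible listeners), minus the votes already cast.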
skips_remaining = min(
self.config.skips_required,
math.ceil(self.config.skip_ratio_required / (1 / num_voice)) # Number of skips from config ratio
) - num_skips
if skips_remaining <= 0:
player.skip() # check autopause stuff here
return Response(
'your skip for **{}** was acknowledged.'
'\nThe vote to skip has been passed.{}'.format(
player.current_entry.title,
' Next song coming up!' if player.playlist.peek() else ''
),
reply=True,
delete_after=20
)
else:
# TODO: When a song gets skipped, delete the old x needed to skip messages
return Response(
'your skip for **{}** was acknowledged.'
'\n**{}** more {} required to vote to skip this song.'.format(
player.current_entry.title,
skips_remaining,
'person is' if skips_remaining == 1 else 'people are'
),
reply=True,
delete_after=20
)
async def cmd_volume(self, message, player, new_volume=None):
"""
Usage:
{command_prefix}volume (+/-)[volume]
Sets the playback volume. Accepted values are from 1 to 100.
Putting + or - before the volume will make the volume change relative to the current volume.
"""
if not new_volume:
return Response('Current volume: `%s%%`' % int(player.volume * 100), reply=True, delete_after=20)
relative = False
if new_volume[0] in '+-':
relative = True
try:
new_volume = int(new_volume)
except ValueError:
raise exceptions.CommandError('{} is not a valid number'.format(new_volume), expire_in=20)
vol_change = None
if relative:
vol_change = new_volume
new_volume += (player.volume * 100)
old_volume = int(player.volume * 100)
if 0 < new_volume <= 100:
player.volume = new_volume / 100.0
return Response('updated volume from %d to %d' % (old_volume, new_volume), reply=True, delete_after=20)
else:
if relative:
raise exceptions.CommandError(
'Unreasonable volume change provided: {}{:+} -> {}%. Provide a change between {} and {:+}.'.format(
old_volume, vol_change, old_volume + vol_change, 1 - old_volume, 100 - old_volume), expire_in=20)
else:
raise exceptions.CommandError(
'Unreasonable volume provided: {}%. Provide a value between 1 and 100.'.format(new_volume), expire_in=20)
async def cmd_queue(self, channel, player):
"""
Usage:
{command_prefix}queue
Prints the current song queue.
"""
lines = []
unlisted = 0
andmoretext = '* ... and %s more*' % ('x' * len(player.playlist.entries))
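        # andmoretext reserves worst-case room for the trailing "... and N more"
        # line when sizing the queue listing against Discord's message limit.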
if player.current_entry:
# TODO: Fix timedelta garbage with util function
song_progress = ftimedelta(timedelta(seconds=player.progress))
song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))
prog_str = '`[%s/%s]`' % (song_progress, song_total)
if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
lines.append("Currently Playing: **%s** added by **%s** %s\n" % (
player.current_entry.title, player.current_entry.meta['author'].name, prog_str))
else:
lines.append("Now Playing: **%s** %s\n" % (player.current_entry.title, prog_str))
for i, item in enumerate(player.playlist, 1):
if item.meta.get('channel', False) and item.meta.get('author', False):
nextline = '`{}.` **{}** added by **{}**'.format(i, item.title, item.meta['author'].name).strip()
else:
nextline = '`{}.` **{}**'.format(i, item.title).strip()
currentlinesum = sum(len(x) + 1 for x in lines) # +1 is for newline char
            if currentlinesum + len(nextline) + len(andmoretext) > DISCORD_MSG_CHAR_LIMIT:
                unlisted += 1
                continue
lines.append(nextline)
if unlisted:
lines.append('\n*... and %s more*' % unlisted)
if not lines:
lines.append(
'There are no songs queued! Queue something with {}play.'.format(self.config.command_prefix))
message = '\n'.join(lines)
return Response(message, delete_after=30)
async def cmd_clean(self, message, channel, server, author, search_range=50):
"""
Usage:
{command_prefix}clean [range]
Removes up to [range] messages the bot has posted in chat. Default: 50, Max: 1000
"""
try:
float(search_range) # lazy check
search_range = min(int(search_range), 1000)
except:
return Response("enter a number. NUMBER. That means digits. `15`. Etc.", reply=True, delete_after=8)
await self.safe_delete_message(message, quiet=True)
def is_possible_command_invoke(entry):
valid_call = any(
entry.content.startswith(prefix) for prefix in [self.config.command_prefix]) # can be expanded
return valid_call and not entry.content[1:2].isspace()
delete_invokes = True
delete_all = channel.permissions_for(author).manage_messages or self.config.owner_id == author.id
def check(message):
if is_possible_command_invoke(message) and delete_invokes:
return delete_all or message.author == author
return message.author == self.user
if self.user.bot:
if channel.permissions_for(server.me).manage_messages:
deleted = await self.purge_from(channel, check=check, limit=search_range, before=message)
return Response('Cleaned up {} message{}.'.format(len(deleted), 's' * bool(deleted)), delete_after=15)
deleted = 0
async for entry in self.logs_from(channel, search_range, before=message):
if entry == self.server_specific_data[channel.server]['last_np_msg']:
continue
if entry.author == self.user:
await self.safe_delete_message(entry)
deleted += 1
await asyncio.sleep(0.21)
if is_possible_command_invoke(entry) and delete_invokes:
if delete_all or entry.author == author:
try:
await self.delete_message(entry)
await asyncio.sleep(0.21)
deleted += 1
except discord.Forbidden:
delete_invokes = False
except discord.HTTPException:
pass
return Response('Cleaned up {} message{}.'.format(deleted, 's' * bool(deleted)), delete_after=6)
async def cmd_pldump(self, channel, song_url):
"""
Usage:
{command_prefix}pldump url
Dumps the individual urls of a playlist
"""
try:
info = await self.downloader.extract_info(self.loop, song_url.strip('<>'), download=False, process=False)
except Exception as e:
raise exceptions.CommandError("Could not extract info from input url\n%s\n" % e, expire_in=25)
if not info:
raise exceptions.CommandError("Could not extract info from input url, no data.", expire_in=25)
if not info.get('entries', None):
            # TODO: Smarter playlist checking
            # set(url, webpageurl).difference(set(url))
            if info.get('url', None) != info.get('webpage_url', info.get('url', None)):
                raise exceptions.CommandError("This does not seem to be a playlist.", expire_in=25)
            else:
                return await self.cmd_pldump(channel, info.get('webpage_url'))
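        # Map extractor family -> function that builds a canonical URL per entry.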
linegens = defaultdict(lambda: None, **{
"youtube": lambda d: 'https://www.youtube.com/watch?v=%s' % d['id'],
"soundcloud": lambda d: d['url'],
"bandcamp": lambda d: d['url']
})
exfunc = linegens[info['extractor'].split(':')[0]]
if not exfunc:
raise exceptions.CommandError("Could not extract info from input url, unsupported playlist type.", expire_in=25)
with BytesIO() as fcontent:
for item in info['entries']:
fcontent.write(exfunc(item).encode('utf8') + b'\n')
fcontent.seek(0)
await self.send_file(channel, fcontent, filename='playlist.txt', content="Here's the url dump for <%s>" % song_url)
return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
async def cmd_listids(self, server, author, leftover_args, cat='all'):
"""
Usage:
{command_prefix}listids [categories]
Lists the ids for various things. Categories are:
all, users, roles, channels
"""
cats = ['channels', 'roles', 'users']
if cat not in cats and cat != 'all':
return Response(
"Valid categories: " + ' '.join(['`%s`' % c for c in cats]),
reply=True,
delete_after=25
)
if cat == 'all':
requested_cats = cats
else:
requested_cats = [cat] + [c.strip(',') for c in leftover_args]
data = ['Your ID: %s' % author.id]
for cur_cat in requested_cats:
rawudata = None
if cur_cat == 'users':
data.append("\nUser IDs:")
rawudata = ['%s #%s: %s' % (m.name, m.discriminator, m.id) for m in server.members]
elif cur_cat == 'roles':
data.append("\nRole IDs:")
rawudata = ['%s: %s' % (r.name, r.id) for r in server.roles]
elif cur_cat == 'channels':
data.append("\nText Channel IDs:")
tchans = [c for c in server.channels if c.type == discord.ChannelType.text]
rawudata = ['%s: %s' % (c.name, c.id) for c in tchans]
rawudata.append("\nVoice Channel IDs:")
vchans = [c for c in server.channels if c.type == discord.ChannelType.voice]
rawudata.extend('%s: %s' % (c.name, c.id) for c in vchans)
if rawudata:
data.extend(rawudata)
with BytesIO() as sdata:
sdata.writelines(d.encode('utf8') + b'\n' for d in data)
sdata.seek(0)
# TODO: Fix naming (Discord20API-ids.txt)
await self.send_file(author, sdata, filename='%s-ids-%s.txt' % (server.name.replace(' ', '_'), cat))
return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
async def cmd_perms(self, author, channel, server, permissions):
"""
Usage:
{command_prefix}perms
Sends the user a list of their permissions.
"""
lines = ['Command permissions in %s\n' % server.name, '```', '```']
for perm in permissions.__dict__:
if perm in ['user_list'] or permissions.__dict__[perm] == set():
continue
lines.insert(len(lines) - 1, "%s: %s" % (perm, permissions.__dict__[perm]))
await self.send_message(author, '\n'.join(lines))
return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
@owner_only
async def cmd_setname(self, leftover_args, name):
"""
Usage:
{command_prefix}setname name
Changes the bot's username.
Note: This operation is limited by discord to twice per hour.
"""
name = ' '.join([name, *leftover_args])
try:
await self.edit_profile(username=name)
except discord.HTTPException:
raise exceptions.CommandError(
"Failed to change name. Did you change names too many times? "
"Remember name changes are limited to twice per hour.")
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response("\N{OK HAND SIGN}", delete_after=20)
async def cmd_setnick(self, server, channel, leftover_args, nick):
"""
Usage:
{command_prefix}setnick nick
Changes the bot's nickname.
"""
if not channel.permissions_for(server.me).change_nickname:
raise exceptions.CommandError("Unable to change nickname: no permission.")
nick = ' '.join([nick, *leftover_args])
try:
await self.change_nickname(server.me, nick)
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response("\N{OK HAND SIGN}", delete_after=20)
@owner_only
async def cmd_setavatar(self, message, url=None):
"""
Usage:
{command_prefix}setavatar [url]
Changes the bot's avatar.
Attaching a file and leaving the url parameter blank also works.
"""
if message.attachments:
thing = message.attachments[0]['url']
elif url:
thing = url.strip('<>')
else:
raise exceptions.CommandError("You must provide a URL or attach a file.", expire_in=20)
try:
with aiohttp.Timeout(10):
async with self.aiosession.get(thing) as res:
await self.edit_profile(avatar=await res.read())
except Exception as e:
raise exceptions.CommandError("Unable to change avatar: {}".format(e), expire_in=20)
return Response("\N{OK HAND SIGN}", delete_after=20)
async def cmd_disconnect(self, server):
await self.disconnect_voice_client(server)
return Response("\N{DASH SYMBOL}", delete_after=20)
async def cmd_restart(self, channel):
await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
await self.disconnect_all_voice_clients()
raise exceptions.RestartSignal()
async def cmd_shutdown(self, channel):
await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
await self.disconnect_all_voice_clients()
raise exceptions.TerminateSignal()
@dev_only
async def cmd_breakpoint(self, message):
log.critical("Activating debug breakpoint")
return
@dev_only
async def cmd_objgraph(self, channel, func='most_common_types()'):
import objgraph
await self.send_typing(channel)
if func == 'growth':
f = StringIO()
objgraph.show_growth(limit=10, file=f)
f.seek(0)
data = f.read()
f.close()
elif func == 'leaks':
f = StringIO()
objgraph.show_most_common_types(objects=objgraph.get_leaking_objects(), file=f)
f.seek(0)
data = f.read()
f.close()
elif func == 'leakstats':
data = objgraph.typestats(objects=objgraph.get_leaking_objects())
else:
data = eval('objgraph.' + func)
return Response(data, codeblock='py')
@dev_only
async def cmd_debug(self, message, _player, *, data):
codeblock = "```py\n{}\n```"
result = None
if data.startswith('```') and data.endswith('```'):
data = '\n'.join(data.rstrip('`\n').split('\n')[1:])
code = data.strip('` \n')
try:
result = eval(code)
except:
try:
exec(code)
except Exception as e:
traceback.print_exc(chain=False)
return Response("{}: {}".format(type(e).__name__, e))
if asyncio.iscoroutine(result):
result = await result
return Response(codeblock.format(result))
async def on_message(self, message):
await self.wait_until_ready()
message_content = message.content.strip()
if not message_content.startswith(self.config.command_prefix):
return
if message.author == self.user:
log.warning("Ignoring command from myself ({})".format(message.content))
return
if self.config.bound_channels and message.channel.id not in self.config.bound_channels and not message.channel.is_private:
return # if I want to log this I just move it under the prefix check
command, *args = message_content.split(' ') # Uh, doesn't this break prefixes with spaces in them (it doesn't, config parser already breaks them)
command = command[len(self.config.command_prefix):].lower().strip()
handler = getattr(self, 'cmd_' + command, None)
if not handler:
return
if message.channel.is_private:
if not (message.author.id == self.config.owner_id and command == 'joinserver'):
await self.send_message(message.channel, 'You cannot use this bot in private messages.')
return
if message.author.id in self.blacklist and message.author.id != self.config.owner_id:
log.warning("User blacklisted: {0.id}/{0!s} ({1})".format(message.author, command))
return
else:
log.info("{0.id}/{0!s}: {1}".format(message.author, message_content.replace('\n', '\n... ')))
user_permissions = self.permissions.for_user(message.author)
argspec = inspect.signature(handler)
params = argspec.parameters.copy()
sentmsg = response = None
# noinspection PyBroadException
try:
if user_permissions.ignore_non_voice and command in user_permissions.ignore_non_voice:
await self._check_ignore_non_voice(message)
handler_kwargs = {}
if params.pop('message', None):
handler_kwargs['message'] = message
if params.pop('channel', None):
handler_kwargs['channel'] = message.channel
if params.pop('author', None):
handler_kwargs['author'] = message.author
if params.pop('server', None):
handler_kwargs['server'] = message.server
if params.pop('player', None):
handler_kwargs['player'] = await self.get_player(message.channel)
if params.pop('_player', None):
handler_kwargs['_player'] = self.get_player_in(message.server)
if params.pop('permissions', None):
handler_kwargs['permissions'] = user_permissions
if params.pop('user_mentions', None):
handler_kwargs['user_mentions'] = list(map(message.server.get_member, message.raw_mentions))
if params.pop('channel_mentions', None):
handler_kwargs['channel_mentions'] = list(map(message.server.get_channel, message.raw_channel_mentions))
if params.pop('voice_channel', None):
handler_kwargs['voice_channel'] = message.server.me.voice_channel
if params.pop('leftover_args', None):
handler_kwargs['leftover_args'] = args
args_expected = []
for key, param in list(params.items()):
# parse (*args) as a list of args
if param.kind == param.VAR_POSITIONAL:
handler_kwargs[key] = args
params.pop(key)
continue
# parse (*, args) as args rejoined as a string
# multiple of these arguments will have the same value
if param.kind == param.KEYWORD_ONLY and param.default == param.empty:
handler_kwargs[key] = ' '.join(args)
params.pop(key)
continue
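# Illustrative example (hypothetical command, not defined in this file): for
#     async def cmd_say(self, channel, *, quote):
# invoked as "!say hello there", this branch sets quote to "hello there",
# while ordinary positional parameters are popped from args one by one below.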
doc_key = '[{}={}]'.format(key, param.default) if param.default is not param.empty else key
args_expected.append(doc_key)
# Ignore keyword args with default values when the command had no arguments
if not args and param.default is not param.empty:
params.pop(key)
continue
# Assign given values to positional arguments
if args:
arg_value = args.pop(0)
handler_kwargs[key] = arg_value
params.pop(key)
if message.author.id != self.config.owner_id:
if user_permissions.command_whitelist and command not in user_permissions.command_whitelist:
raise exceptions.PermissionsError(
"This command is not enabled for your group ({}).".format(user_permissions.name),
expire_in=20)
elif user_permissions.command_blacklist and command in user_permissions.command_blacklist:
raise exceptions.PermissionsError(
"This command is disabled for your group ({}).".format(user_permissions.name),
expire_in=20)
# Invalid usage, return docstring
if params:
docs = getattr(handler, '__doc__', None)
if not docs:
docs = 'Usage: {}{} {}'.format(
self.config.command_prefix,
command,
' '.join(args_expected)
)
docs = dedent(docs)
await self.safe_send_message(
message.channel,
'```\n{}\n```'.format(docs.format(command_prefix=self.config.command_prefix)),
expire_in=60
)
return
response = await handler(**handler_kwargs)
if response and isinstance(response, Response):
content = response.content
if response.reply:
content = '{}, {}'.format(message.author.mention, content)
sentmsg = await self.safe_send_message(
message.channel, content,
expire_in=response.delete_after if self.config.delete_messages else 0,
also_delete=message if self.config.delete_invoking else None
)
except (exceptions.CommandError, exceptions.HelpfulError, exceptions.ExtractionError) as e:
log.error("Error in {0}: {1.__class__.__name__}: {1.message}".format(command, e), exc_info=True)
expirein = e.expire_in if self.config.delete_messages else None
alsodelete = message if self.config.delete_invoking else None
await self.safe_send_message(
message.channel,
'```\n{}\n```'.format(e.message),
expire_in=expirein,
also_delete=alsodelete
)
except exceptions.Signal:
raise
except Exception:
log.error("Exception in on_message", exc_info=True)
if self.config.debug_mode:
await self.safe_send_message(message.channel, '```\n{}\n```'.format(traceback.format_exc()))
finally:
if not sentmsg and not response and self.config.delete_invoking:
await asyncio.sleep(5)
await self.safe_delete_message(message, quiet=True)
async def on_voice_state_update(self, before, after):
if not self.init_ok:
return # Ignore stuff before ready
state = VoiceStateUpdate(before, after)
if state.broken:
log.voicedebug("Broken voice state update")
return
if state.resuming:
log.debug("Resumed voice connection to {0.server.name}/{0.name}".format(state.voice_channel))
if not state.changes:
log.voicedebug("Empty voice state update, likely a session id change")
return # Session id change, pointless event
################################
log.voicedebug("Voice state update for {mem.id}/{mem!s} on {ser.name}/{vch.name} -> {dif}".format(
mem = state.member,
ser = state.server,
vch = state.voice_channel,
dif = state.changes
))
if not state.is_about_my_voice_channel:
return # Irrelevant channel
if state.joining or state.leaving:
log.info("{0.id}/{0!s} has {1} {2}/{3}".format(
state.member,
'joined' if state.joining else 'left',
state.server,
state.my_voice_channel
))
if not self.config.auto_pause:
return
autopause_msg = "{state} in {channel.server.name}/{channel.name} {reason}"
auto_paused = self.server_specific_data[after.server]['auto_paused']
player = await self.get_player(state.my_voice_channel)
if state.joining and state.empty() and player.is_playing:
log.info(autopause_msg.format(
state = "Pausing",
channel = state.my_voice_channel,
reason = "(joining empty channel)"
).strip())
self.server_specific_data[after.server]['auto_paused'] = True
player.pause()
return
if not state.is_about_me:
if not state.empty(old_channel=state.leaving):
if auto_paused and player.is_paused:
log.info(autopause_msg.format(
state = "Unpausing",
channel = state.my_voice_channel,
reason = ""
).strip())
self.server_specific_data[after.server]['auto_paused'] = False
player.resume()
else:
if not auto_paused and player.is_playing:
log.info(autopause_msg.format(
state = "Pausing",
channel = state.my_voice_channel,
reason = "(empty channel)"
).strip())
self.server_specific_data[after.server]['auto_paused'] = True
player.pause()
async def on_server_update(self, before:discord.Server, after:discord.Server):
if before.region != after.region:
log.warning("Server \"%s\" changed regions: %s -> %s" % (after.name, before.region, after.region))
await self.reconnect_voice_client(after)
async def on_server_join(self, server:discord.Server):
log.info("Bot has been joined server: {}".format(server.name))
if not self.user.bot:
alertmsg = "<@{uid}> Hi I'm a musicbot please mute me."
if server.id == "81384788765712384" and not server.unavailable: # Discord API
playground = server.get_channel("94831883505905664") or discord.utils.get(server.channels, name='playground') or server
await self.safe_send_message(playground, alertmsg.format(uid="98295630480314368")) # fake abal
elif server.id == "129489631539494912" and not server.unavailable: # Rhino Bot Help
bot_testing = server.get_channel("134771894292316160") or discord.utils.get(server.channels, name='bot-testing') or server
await self.safe_send_message(bot_testing, alertmsg.format(uid="98295630480314368")) # also fake abal
log.debug("Creating data folder for server %s", server.id)
pathlib.Path('data/%s/' % server.id).mkdir(exist_ok=True)
async def on_server_remove(self, server: discord.Server):
log.info("Bot has been removed from server: {}".format(server.name))
log.debug('Updated server list:')
[log.debug(' - ' + s.name) for s in self.servers]
if server.id in self.players:
self.players.pop(server.id).kill()
async def on_server_available(self, server: discord.Server):
if not self.init_ok:
return # Ignore pre-ready events
log.debug("Server \"{}\" has become available.".format(server.name))
player = self.get_player_in(server)
if player and player.is_paused:
av_paused = self.server_specific_data[server]['availability_paused']
if av_paused:
log.debug("Resuming player in \"{}\" due to availability.".format(server.name))
self.server_specific_data[server]['availability_paused'] = False
player.resume()
async def on_server_unavailable(self, server: discord.Server):
log.debug("Server \"{}\" has become unavailable.".format(server.name))
player = self.get_player_in(server)
if player and player.is_playing:
log.debug("Pausing player in \"{}\" due to unavailability.".format(server.name))
self.server_specific_data[server]['availability_paused'] = True
player.pause()
artifactorypermissiontarget_test.go | package tests
import (
"fmt"
"testing"
"github.com/jfrog/jfrog-client-go/artifactory/services"
"github.com/stretchr/testify/assert"
)
const (
PermissionTargetNamePrefix = "client-go-tests-target"
)
func TestPermissionTarget(t *testing.T) {
initArtifactoryTest(t)
params := services.NewPermissionTargetParams()
params.Name = fmt.Sprintf("%s-%s", PermissionTargetNamePrefix, getRunId())
params.Repo = &services.PermissionTargetSection{}
params.Repo.Repositories = []string{"ANY"}
params.Repo.ExcludePatterns = []string{"dir/*"}
params.Repo.Actions = &services.Actions{}
params.Repo.Actions.Users = map[string][]string{
"anonymous": {"read"},
}
params.Build = &services.PermissionTargetSection{}
params.Build.Repositories = []string{"artifactory-build-info"}
params.Build.Actions = &services.Actions{}
params.Build.Actions.Users = map[string][]string{
"anonymous": {"annotate"},
}
err := testsPermissionTargetService.Create(params)
assert.NoError(t, err)
// Fill in default values before validation
params.Repo.IncludePatterns = []string{"**"}
params.Build.Repositories = []string{"artifactory-build-info"}
params.Build.IncludePatterns = []string{"**"}
params.Build.ExcludePatterns = []string{}
validatePermissionTarget(t, params)
params.Repo.Actions.Users = nil
params.Repo.Repositories = []string{"ANY REMOTE"}
err = testsPermissionTargetService.Update(params)
assert.NoError(t, err)
validatePermissionTarget(t, params)
err = testsPermissionTargetService.Delete(params.Name)
assert.NoError(t, err)
targetParams, err := getPermissionTarget(params.Name)
assert.NoError(t, err)
assert.Nil(t, targetParams)
}
func validatePermissionTarget(t *testing.T, params services.PermissionTargetParams) {
targetConfig, err := getPermissionTarget(params.Name)
assert.NoError(t, err)
assert.NotNil(t, targetConfig)
if targetConfig == nil {
return
}
assert.Equal(t, params.Name, targetConfig.Name)
assert.Equal(t, params.Repo, targetConfig.Repo)
assert.Equal(t, params.Build, targetConfig.Build)
assert.Equal(t, params.ReleaseBundle, targetConfig.ReleaseBundle)
}
func getPermissionTarget(targetName string) (targetParams *services.PermissionTargetParams, err error) {
return testsPermissionTargetService.Get(targetName)
}
// Assert empty inner structs remain nil unless explicitly set.
func TestPermissionTargetEmptyFields(t *testing.T) {
initArtifactoryTest(t)
params := services.NewPermissionTargetParams()
params.Name = fmt.Sprintf("%s-%s", PermissionTargetNamePrefix, getRunId())
assert.Nil(t, params.Repo)
params.Repo = &services.PermissionTargetSection{}
params.Repo.Repositories = []string{"ANY"}
params.Repo.IncludePatterns = []string{"**"}
params.Repo.ExcludePatterns = []string{"dir/*"}
params.Repo.Actions = &services.Actions{}
params.Repo.Actions.Users = map[string][]string{
"anonymous": {"read"},
}
assert.Nil(t, params.Build)
assert.Nil(t, params.ReleaseBundle)
assert.NoError(t, testsPermissionTargetService.Create(params))
validatePermissionTarget(t, params)
err := testsPermissionTargetService.Delete(params.Name)
assert.NoError(t, err)
}
config-encrypted.go | /*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"errors"
"fmt" | "github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/madmin"
)
func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
if !server {
return nil
}
// If it's server mode or NAS gateway, migrate the backend.
doneCh := make(chan struct{})
var encrypted bool
var err error
// Migrating Config backend needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
retryTimerCh := newRetryTimerSimple(doneCh)
var stop bool
rquorum := InsufficientReadQuorum{}
wquorum := InsufficientWriteQuorum{}
bucketNotFound := BucketNotFound{}
for !stop {
select {
case <-retryTimerCh:
if encrypted, err = checkBackendEncrypted(objAPI); err != nil {
if errors.Is(err, errDiskNotFound) ||
errors.As(err, &rquorum) ||
errors.As(err, &bucketNotFound) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
close(doneCh)
return err
}
stop = true
case <-globalOSSignalCh:
close(doneCh)
return fmt.Errorf("Config encryption process stopped gracefully")
}
}
close(doneCh)
if encrypted {
// The backend is encrypted but credentials are not specified;
// fail right here. Otherwise, proceed forward.
if !globalConfigEncrypted || !globalActiveCred.IsValid() {
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
} else {
// The backend is not yet encrypted; check whether encryption of
// the backend was requested. If not, return nil and proceed
// forward.
if !globalConfigEncrypted {
return nil
}
if !globalActiveCred.IsValid() {
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
}
doneCh = make(chan struct{})
defer close(doneCh)
retryTimerCh = newRetryTimerSimple(doneCh)
// Migrating Config backend needs a retry mechanism for
// the following reasons:
// - Read quorum is lost just after the initialization
// of the object layer.
for {
select {
case <-retryTimerCh:
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, globalOldCred, encrypted); err != nil {
if errors.Is(err, errDiskNotFound) ||
errors.As(err, &rquorum) ||
errors.As(err, &wquorum) ||
errors.As(err, &bucketNotFound) {
logger.Info("Waiting for config backend to be encrypted..")
continue
}
return err
}
return nil
case <-globalOSSignalCh:
return fmt.Errorf("Config encryption process stopped gracefully")
}
}
}
const (
backendEncryptedFile = "backend-encrypted"
)
var (
backendEncryptedMigrationIncomplete = []byte("incomplete")
backendEncryptedMigrationComplete = []byte("encrypted")
)
func checkBackendEtcdEncrypted(ctx context.Context, client *etcd.Client) (bool, error) {
data, err := readKeyEtcd(ctx, client, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
return err == nil && bytes.Equal(data, backendEncryptedMigrationComplete), nil
}
func checkBackendEncrypted(objAPI ObjectLayer) (bool, error) {
data, err := readConfig(context.Background(), objAPI, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
return err == nil && bytes.Equal(data, backendEncryptedMigrationComplete), nil
}
// decryptData - decrypts input data with more than one credential,
func decryptData(edata []byte, creds ...auth.Credentials) ([]byte, error) {
var err error
var data []byte
for _, cred := range creds {
data, err = madmin.DecryptData(cred.String(), bytes.NewReader(edata))
if err != nil {
if err == madmin.ErrMaliciousData {
continue
}
return nil, err
}
break
}
return data, err
}
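// Illustrative usage (an assumption, not part of the original file): try the
// previous credentials first and fall back to the currently active ones:
//
//	plain, err := decryptData(edata, globalOldCred, globalActiveCred)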
func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client) error {
ctx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
encrypted, err := checkBackendEtcdEncrypted(ctx, client)
if err != nil {
return err
}
if encrypted {
// The backend is encrypted but credentials are not specified;
// fail right here. Otherwise, proceed forward.
if !globalConfigEncrypted || !globalActiveCred.IsValid() {
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
} else {
// The backend is not yet encrypted; check whether encryption of
// the backend was requested. If not, return nil and proceed
// forward.
if !globalConfigEncrypted {
return nil
}
if !globalActiveCred.IsValid() {
return errInvalidArgument
}
}
if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !globalOldCred.IsValid() {
return nil
}
// No real reason to rotate if old and new creds are same.
if globalOldCred.Equal(globalActiveCred) {
return nil
}
logger.Info("Attempting rotation of encrypted IAM users and policies on etcd with newly supplied credentials")
} else {
logger.Info("Attempting encryption of all IAM users and policies on etcd")
}
r, err := client.Get(ctx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
if err != nil {
return err
}
if err = saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationIncomplete); err != nil {
return err
}
for _, kv := range r.Kvs {
var (
cdata []byte
cencdata []byte
)
cdata, err = readKeyEtcd(ctx, client, string(kv.Key))
if err != nil {
switch err {
case errConfigNotFound:
// Perhaps not present or someone deleted it.
continue
}
return err
}
var data []byte
// Is rotating of creds requested?
if globalOldCred.IsValid() {
data, err = decryptData(cdata, globalOldCred, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
}
return err
}
} else {
data = cdata
}
if !utf8.Valid(data) {
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}
cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
if err != nil {
return err
}
if err = saveKeyEtcd(ctx, client, string(kv.Key), cencdata); err != nil {
return err
}
}
if encrypted && globalActiveCred.IsValid() && globalOldCred.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationComplete)
}
func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Credentials, encrypted bool) error {
if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !activeCredOld.IsValid() {
return nil
}
// No real reason to rotate if old and new creds are same.
if activeCredOld.Equal(globalActiveCred) {
return nil
}
logger.Info("Attempting rotation of encrypted config, IAM users and policies on MinIO with newly supplied credentials")
} else {
logger.Info("Attempting encryption of all config, IAM users and policies on MinIO backend")
}
err := saveConfig(context.Background(), objAPI, backendEncryptedFile, backendEncryptedMigrationIncomplete)
if err != nil {
return err
}
var marker string
for {
res, err := objAPI.ListObjects(context.Background(), minioMetaBucket,
minioConfigPrefix, marker, "", maxObjectList)
if err != nil {
return err
}
for _, obj := range res.Objects {
var (
cdata []byte
cencdata []byte
)
cdata, err = readConfig(context.Background(), objAPI, obj.Name)
if err != nil {
return err
}
var data []byte
// Is rotating of creds requested?
if activeCredOld.IsValid() {
data, err = decryptData(cdata, activeCredOld, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
}
return err
}
} else {
data = cdata
}
if !utf8.Valid(data) {
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}
cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
if err != nil {
return err
}
if err = saveConfig(context.Background(), objAPI, obj.Name, cencdata); err != nil {
return err
}
}
if !res.IsTruncated {
break
}
marker = res.NextMarker
}
if encrypted && globalActiveCred.IsValid() && activeCredOld.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveConfig(context.Background(), objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
index.js | import React from 'react'
import TestRenderer from 'react-test-renderer'
import Hide from '../src'
const renderJSON = el => TestRenderer.create(el).toJSON()
describe('@rebass/hide', () => {
test.skip('Mapped renders', () => {
const json = renderJSON(
<Mapped
xsmall
small
/>
)
expect(json.props.display).toEqual([
'none',
'none',
'block',
'block',
'block',
])
})
test('Hide renders', () => {
const json = renderJSON(
<Hide
xsmall
small
medium
large
xlarge
/>
)
expect(json).toMatchSnapshot()
expect(json).toHaveStyleRule('display', 'none', {
// media: 'screen and (min-width:40em)'
})
})
})
models.py | from run import db
from flask_login import UserMixin
class Post(db.Model):
__tablename__ = "posts"
id = db.Column(db.Integer, primary_key=True)
image = db.Column(db.Text)
location = db.Column(db.String(255))
title = db.Column(db.String(255))
description = db.Column(db.String)
price = db.Column(db.Integer)
owner = db.Column(db.String(255))
def __init__(self, image, location, title,description, price, owner):
self.image = image
self.location = location
self.title = title
self.description = description
self.price = price
self.owner = owner
class User(UserMixin, db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
full_names = db.Column(db.String(255), nullable=False)
email = db.Column(db.String(255), nullable=False)
mobile_number = db.Column(db.Integer, nullable=False)
member_since = db.Column(db.Date)
password = db.Column(db.String(255), nullable=False)
def __init__(self, full_names, email, mobile_number, member_since, password):
self.full_names = full_names
self.email = email
self.mobile_number = mobile_number
self.member_since = member_since
self.password = password
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
name = db.Column(db.String(255))
desc = db.Column(db.String(255))
def __init__(self, post_id, name, desc):
self.post_id = post_id
self.name = name
self.desc = desc
wire.go | //go:build wireinject
// +build wireinject

package main
import (
"github.com/google/wire"
"github.com/weplanx/schedule/app"
"github.com/weplanx/schedule/bootstrap"
"github.com/weplanx/schedule/common"
)
func App(value *common.Values) (*app.App, error) {
wire.Build(
wire.Struct(new(common.Inject), "*"),
bootstrap.Provides,
app.Provides,
)
return &app.App{}, nil
}
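// Illustrative note (assumption, not in the original file): running the
// `wire` code generator against this package produces wire_gen.go, which
// replaces the wire.Build call above with the concrete constructor chain
// for app.App.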
event_informer.rs | #[macro_use] extern crate log;
use k8s_openapi::api::core::v1::Event;
use kube::{
api::{ListParams, Resource, WatchEvent},
client::APIClient,
config,
runtime::Informer,
};
use futures::{StreamExt, TryStreamExt};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
std::env::set_var("RUST_LOG", "info,kube=debug");
env_logger::init();
let config = config::load_kube_config().await?;
let client = APIClient::new(config);
let events = Resource::all::<Event>();
let lp = ListParams::default();
let ei = Informer::new(client, lp, events);
loop {
let mut events = ei.poll().await?.boxed();
while let Some(event) = events.try_next().await? {
handle_event(event)?;
}
}
}
// This function lets the app handle an event from kube
fn handle_event(ev: WatchEvent<Event>) -> anyhow::Result<()> {
match ev {
WatchEvent::Added(o) => {
info!(
"New Event: {} (via {} {})",
o.message.unwrap(),
o.involved_object.kind.unwrap(),
o.involved_object.name.unwrap()
);
}
WatchEvent::Modified(o) => {
info!("Modified Event: {}", o.reason.unwrap());
}
WatchEvent::Deleted(o) => {
info!("Deleted Event: {}", o.message.unwrap());
}
WatchEvent::Error(e) => {
warn!("Error event: {:?}", e);
}
}
Ok(())
}
app.component.ts | import {Component} from '@angular/core';
@Component({
selector: 'atc-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.scss'],
})
export class AppComponent {
public title: string = 'angular-testing-course';
}
trade.rs | use bitmex::models::BinSize;
use bitmex::models::{GetTradeBucketedRequest, GetTradeRequest};
use bitmex::BitMEX;
use failure::Fallible;
use std::env::var;
use tokio::runtime::Runtime;
#[test]
fn get_trade() -> Fallible<()> {
let _ = dotenv::dotenv();
let _ = env_logger::try_init();
let mut rt = Runtime::new()?;
let bm = BitMEX::with_credential(&var("BITMEX_KEY")?, &var("BITMEX_SECRET")?);
let _ = rt.block_on(bm.request(GetTradeRequest {
..Default::default()
}))?;
Ok(())
}
#[test]
fn get_trade_bucketed() -> Fallible<()> {
let _ = dotenv::dotenv();
let _ = env_logger::try_init();
let mut rt = Runtime::new()?;
let bm = BitMEX::with_credential(&var("BITMEX_KEY")?, &var("BITMEX_SECRET")?);
let _ = rt.block_on(bm.request(GetTradeBucketedRequest {
partial: Some(false),
bin_size: Some(BinSize::D1),
count: Some(10),
..Default::default()
}))?;
Ok(())
}
collision.py | """Nif User Interface, custom nif properties store for collisions settings"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2014, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
from bpy.props import (PointerProperty,
IntProperty,
BoolProperty,
EnumProperty,
FloatProperty,
)
from bpy.types import PropertyGroup
from pyffi.formats.nif import NifFormat
class CollisionProperty(PropertyGroup):
"""Group of Havok related properties, which gets attached to objects through a property pointer."""
motion_system: EnumProperty(
name='Motion System',
description='Havok Motion System settings for bhkRigidBody(t)',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.MotionSystem._enumkeys)],
# default = 'MO_SYS_FIXED',
)
oblivion_layer: EnumProperty(
name='Oblivion Layer',
description='Mesh color, used in Editor',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.OblivionLayer._enumkeys)],
# default = 'OL_STATIC',
)
deactivator_type: EnumProperty(
name='Deactivator Type',
description='Motion deactivation setting',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.DeactivatorType._enumkeys)],
)
solver_deactivation: EnumProperty(
name='Solver Deactivation',
description='Motion deactivation setting',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.SolverDeactivation._enumkeys)],
)
quality_type: EnumProperty(
name='Quality Type',
description='Determines quality of motion',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.MotionQuality._enumkeys)],
# default = 'MO_QUAL_FIXED',
)
col_filter: IntProperty(
name='Col Filter',
description='Flags for bhkRigidBody(t)',
default=0
)
max_linear_velocity: FloatProperty(
name='Max Linear Velocity',
description='Linear velocity limit for bhkRigidBody(t)',
default=0
)
max_angular_velocity: FloatProperty(
name='Max Angular Velocity',
description='Angular velocity limit for bhkRigidBody(t)',
default=0
)
export_bhklist: BoolProperty(
name='Export BHKList',
description='None',
default=False
)
use_blender_properties: BoolProperty(
name='Use Blender Properties',
description='Whether or not to export collision settings via blender properties',
default=False,
)
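# Illustrative usage sketch (assumption - typical Blender add-on registration,
# not shown in this file):
#   bpy.utils.register_class(CollisionProperty)
#   bpy.types.Object.nifcollision = PointerProperty(type=CollisionProperty)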
route.go | // Copyright 2013-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bufio"
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/url"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/nats-io/gnatsd/util"
)
// RouteType designates the router type
type RouteType int
// Type of Route
const (
// This route we learned from speaking to other routes.
Implicit RouteType = iota
// This route was explicitly configured.
Explicit
)
type route struct {
remoteID string
didSolicit bool
retry bool
routeType RouteType
url *url.URL
authRequired bool
tlsRequired bool
closed bool
connectURLs []string
}
type connectInfo struct {
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
TLS bool `json:"tls_required"`
Name string `json:"name"`
}
// Route protocol constants
const (
ConProto = "CONNECT %s" + _CRLF_
InfoProto = "INFO %s" + _CRLF_
)
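// Illustrative example (not in the original source): with the connectInfo
// struct above marshaled to JSON, a soliciting route would send
//	CONNECT {"verbose":false,"pedantic":false,"tls_required":false,"name":"<server id>"}\r\n
// over the wire, terminated by CRLF as ConProto specifies.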
// Lock should be held entering here.
func (c *client) sendConnect(tlsRequired bool) {
var user, pass string
if userInfo := c.route.url.User; userInfo != nil {
user = userInfo.Username()
pass, _ = userInfo.Password()
}
cinfo := connectInfo{
Verbose: false,
Pedantic: false,
User: user,
Pass: pass,
TLS: tlsRequired,
Name: c.srv.info.ID,
}
b, err := json.Marshal(cinfo)
if err != nil {
c.Errorf("Error marshaling CONNECT to route: %v\n", err)
c.closeConnection()
return
}
c.sendProto([]byte(fmt.Sprintf(ConProto, b)), true)
}
// Process the info message if we are a route.
func (c *client) processRouteInfo(info *Info) {
c.mu.Lock()
// Connection can be closed at any time (by auth timeout, etc).
// Does not make sense to continue here if connection is gone.
if c.route == nil || c.nc == nil {
c.mu.Unlock()
return
}
s := c.srv
remoteID := c.route.remoteID
// We receive an INFO from a server that informs us about another server,
// so the info.ID in the INFO protocol does not match the ID of this route.
if remoteID != "" && remoteID != info.ID {
c.mu.Unlock()
// Process this implicit route. We will check that it is not an explicit
// route and/or that it has not been connected already.
s.processImplicitRoute(info)
return
}
// Need to set this for the detection of the route to self to work
// in closeConnection().
c.route.remoteID = info.ID
// Detect route to self.
if c.route.remoteID == s.info.ID {
c.mu.Unlock()
c.closeConnection()
return
}
// Copy over important information.
c.route.authRequired = info.AuthRequired
c.route.tlsRequired = info.TLSRequired
// If we do not know this route's URL, construct one on the fly
// from the information provided.
if c.route.url == nil {
// Add in the URL from host and port
hp := net.JoinHostPort(info.Host, strconv.Itoa(info.Port))
url, err := url.Parse(fmt.Sprintf("nats-route://%s/", hp))
if err != nil {
c.Errorf("Error parsing URL from INFO: %v\n", err)
c.mu.Unlock()
c.closeConnection()
return
}
c.route.url = url
}
// Check to see if we have this remote already registered.
// This can happen when both servers have routes to each other.
c.mu.Unlock()
if added, sendInfo := s.addRoute(c, info); added {
c.Debugf("Registering remote route %q", info.ID)
// Send our local subscriptions to this route.
s.sendLocalSubsToRoute(c)
// sendInfo will be false if the route that we just accepted
// is the only route there is.
if sendInfo {
// The incoming INFO from the route will have IP set
// if it has Cluster.Advertise. In that case, use that
// otherwise construct it from the remote TCP address.
if info.IP == "" {
// Need to get the remote IP address.
c.mu.Lock()
switch conn := c.nc.(type) {
case *net.TCPConn, *tls.Conn:
addr := conn.RemoteAddr().(*net.TCPAddr)
info.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(addr.IP.String(),
strconv.Itoa(info.Port)))
default:
info.IP = c.route.url.String()
}
c.mu.Unlock()
}
// Now let the known servers know about this new route
s.forwardNewRouteInfoToKnownServers(info)
}
// Unless disabled, possibly update the server's INFO protocol
// and send to clients that know how to handle async INFOs.
if !s.getOpts().Cluster.NoAdvertise {
s.addClientConnectURLsAndSendINFOToClients(info.ClientConnectURLs)
}
} else {
c.Debugf("Detected duplicate remote route %q", info.ID)
c.closeConnection()
}
}
// sendAsyncInfoToClients sends an INFO protocol to all
// connected clients that accept async INFO updates.
// The server lock is held on entry.
func (s *Server) sendAsyncInfoToClients() {
// If there are no clients supporting async INFO protocols, we are done.
// Also don't send if we are shutting down...
if s.cproto == 0 || s.shutdown {
return
}
for _, c := range s.clients {
c.mu.Lock()
// Here, we are going to send only to the clients that are fully
// registered (server has received CONNECT and first PING). For
// clients that are not at this stage, this will happen in the
// processing of the first PING (see client.processPing)
if c.opts.Protocol >= ClientProtoInfo && c.flags.isSet(firstPongSent) {
// sendInfo takes care of checking if the connection is still
// valid or not, so don't duplicate tests here.
c.sendInfo(s.infoJSON)
}
c.mu.Unlock()
}
}
// This will process implicit route information received from another server.
// We will check to see if we have configured or are already connected,
// and if so we will ignore. Otherwise we will attempt to connect.
func (s *Server) processImplicitRoute(info *Info) {
remoteID := info.ID
s.mu.Lock()
defer s.mu.Unlock()
// Don't connect to ourself
if remoteID == s.info.ID {
return
}
// Check if this route already exists
if _, exists := s.remotes[remoteID]; exists {
return
}
// Check if we have this route as a configured route
if s.hasThisRouteConfigured(info) {
return
}
// Initiate the connection, using info.IP instead of info.URL here...
r, err := url.Parse(info.IP)
if err != nil {
s.Errorf("Error parsing URL from INFO: %v\n", err)
return
}
// Snapshot server options.
opts := s.getOpts()
if info.AuthRequired {
r.User = url.UserPassword(opts.Cluster.Username, opts.Cluster.Password)
}
s.startGoRoutine(func() { s.connectToRoute(r, false) })
}
// hasThisRouteConfigured returns true if info.Host:info.Port is present
// in the server's opts.Routes, false otherwise.
// Server lock is assumed to be held by caller.
func (s *Server) hasThisRouteConfigured(info *Info) bool {
urlToCheckExplicit := strings.ToLower(net.JoinHostPort(info.Host, strconv.Itoa(info.Port)))
for _, ri := range s.getOpts().Routes {
if strings.ToLower(ri.Host) == urlToCheckExplicit {
return true
}
}
return false
}
// forwardNewRouteInfoToKnownServers sends the INFO protocol of the new route
// to all routes known by this server. In turn, each server will contact this
// new route.
func (s *Server) forwardNewRouteInfoToKnownServers(info *Info) {
s.mu.Lock()
defer s.mu.Unlock()
b, _ := json.Marshal(info)
infoJSON := []byte(fmt.Sprintf(InfoProto, b))
for _, r := range s.routes {
r.mu.Lock()
if r.route.remoteID != info.ID {
r.sendInfo(infoJSON)
}
r.mu.Unlock()
}
}
// This will send local subscription state to a new route connection.
// FIXME(dlc) - This could be a DOS or perf issue with many clients
// and large subscription space. Plus buffering in place not a good idea.
func (s *Server) sendLocalSubsToRoute(route *client) {
b := bytes.Buffer{}
s.mu.Lock()
for _, client := range s.clients {
client.mu.Lock()
subs := make([]*subscription, 0, len(client.subs))
for _, sub := range client.subs {
subs = append(subs, sub)
}
client.mu.Unlock()
for _, sub := range subs {
rsid := routeSid(sub)
proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
b.WriteString(proto)
}
}
s.mu.Unlock()
route.mu.Lock()
defer route.mu.Unlock()
route.sendProto(b.Bytes(), true)
route.Debugf("Route sent local subscriptions")
}
func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client {
// Snapshot server options.
opts := s.getOpts()
didSolicit := rURL != nil
r := &route{didSolicit: didSolicit}
for _, route := range opts.Routes {
if rURL != nil && (strings.ToLower(rURL.Host) == strings.ToLower(route.Host)) {
r.routeType = Explicit
}
}
c := &client{srv: s, nc: conn, opts: clientOpts{}, typ: ROUTER, route: r}
// Grab server variables
s.mu.Lock()
infoJSON := s.routeInfoJSON
authRequired := s.routeInfo.AuthRequired
tlsRequired := s.routeInfo.TLSRequired
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
if didSolicit {
// Do this before the TLS code, otherwise, in case of failure
// and if route is explicit, it would try to reconnect to 'nil'...
r.url = rURL
}
// Check for TLS
if tlsRequired {
// Copy off the config to add in ServerName if we need to.
tlsConfig := util.CloneTLSConfig(opts.Cluster.TLSConfig)
// If we solicited, we will act like the client, otherwise the server.
if didSolicit {
c.Debugf("Starting TLS route client handshake")
// Specify the ServerName we are expecting.
host, _, _ := net.SplitHostPort(rURL.Host)
tlsConfig.ServerName = host
c.nc = tls.Client(c.nc, tlsConfig)
} else {
c.Debugf("Starting TLS route server handshake")
c.nc = tls.Server(c.nc, tlsConfig)
}
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.Cluster.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS route handshake error: %v", err)
c.sendErr("Secure Connection - TLS Required")
c.closeConnection()
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Verify that the connection did not go away while we released the lock.
if c.nc == nil {
c.mu.Unlock()
return nil
}
// Rewrap bw
c.bw = bufio.NewWriterSize(c.nc, startBufSize)
}
// Do final client initialization
// Set the Ping timer
c.setPingTimer()
// For routes, the "client" is added to s.routes only when processing
// the INFO protocol, that is much later.
// In the meantime, if the server shuts down, there would be no reference
// to the client (connection) to be closed, leaving this readLoop
// uninterrupted, causing the Shutdown() to wait indefinitely.
// We need to store the client in a special map, under a special lock.
s.grMu.Lock()
running := s.grRunning
if running {
s.grTmpClients[c.cid] = c
}
s.grMu.Unlock()
if !running {
c.mu.Unlock()
c.setRouteNoReconnectOnClose()
c.closeConnection()
return nil
}
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
// Queue Connect proto if we solicited the connection.
if didSolicit {
c.Debugf("Route connect msg sent")
c.sendConnect(tlsRequired)
}
// Send our info to the other side.
c.sendInfo(infoJSON)
// Check for Auth required state for incoming connections.
if authRequired && !didSolicit {
ttl := secondsToDuration(opts.Cluster.AuthTimeout)
c.setAuthTimer(ttl)
}
c.mu.Unlock()
c.Noticef("Route connection created")
return c
}
const (
_CRLF_ = "\r\n"
_EMPTY_ = ""
)
const (
subProto = "SUB %s %s %s" + _CRLF_
unsubProto = "UNSUB %s%s" + _CRLF_
)
// FIXME(dlc) - Make these reserved and reject if they come in as a sid
// from a client connection.
// Route constants
const (
RSID = "RSID"
QRSID = "QRSID"
QRSID_LEN = len(QRSID)
)
// Parse the given rsid. If the protocol does not start with QRSID,
// returns false and no subscription nor error.
// If it does start with QRSID, returns true and possibly a subscription
// or an error if the QRSID protocol is malformed.
func (s *Server) routeSidQueueSubscriber(rsid []byte) (bool, *subscription, error) {
if !bytes.HasPrefix(rsid, []byte(QRSID)) {
return false, nil, nil
}
cid, sid, err := parseRouteQueueSid(rsid)
if err != nil {
return true, nil, err
}
s.mu.Lock()
client := s.clients[cid]
s.mu.Unlock()
if client == nil {
return true, nil, nil
}
client.mu.Lock()
sub, ok := client.subs[string(sid)]
client.mu.Unlock()
if ok {
return true, sub, nil
}
return true, nil, nil
}
func routeSid(sub *subscription) string {
var qi string
if len(sub.queue) > 0 {
qi = "Q"
}
return fmt.Sprintf("%s%s:%d:%s", qi, RSID, sub.client.cid, sub.sid)
}
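// Illustrative example (not part of the original source): a queue
// subscription from client cid=5 with sid "2" yields the route sid
// "QRSID:5:2", which parseRouteQueueSid below decomposes back into
// cid=5 and sid="2".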
// Parse the given `rsid` knowing that it starts with `QRSID`.
// Returns the cid and sid, or an error if not a valid QRSID.
func parseRouteQueueSid(rsid []byte) (uint64, []byte, error) {
var (
cid uint64
sid []byte
cidFound bool
sidFound bool
)
// A valid QRSID needs to be at least QRSID:x:y
// First character here should be `:`
if len(rsid) >= QRSID_LEN+4 {
if rsid[QRSID_LEN] == ':' {
for i, count := QRSID_LEN+1, len(rsid); i < count; i++ {
switch rsid[i] {
case ':':
cid = uint64(parseInt64(rsid[QRSID_LEN+1 : i]))
cidFound = true
sid = rsid[i+1:]
}
}
if cidFound {
// We can't assume the content of sid, so as long
// as it is not len 0, we have to say it is a valid one.
if len(sid) > 0 {
sidFound = true
}
}
}
}
if cidFound && sidFound {
return cid, sid, nil
}
return 0, nil, fmt.Errorf("invalid QRSID: %s", rsid)
}
func (s *Server) addRoute(c *client, info *Info) (bool, bool) {
id := c.route.remoteID
sendInfo := false
s.mu.Lock()
if !s.running {
s.mu.Unlock()
return false, false
}
remote, exists := s.remotes[id]
if !exists {
// Remove from the temporary map
s.grMu.Lock()
delete(s.grTmpClients, c.cid)
s.grMu.Unlock()
s.routes[c.cid] = c
s.remotes[id] = c
c.mu.Lock()
c.route.connectURLs = info.ClientConnectURLs
c.mu.Unlock()
// we don't need to send if the only route is the one we just accepted.
sendInfo = len(s.routes) > 1
}
s.mu.Unlock()
if exists {
var r *route
c.mu.Lock()
// upgrade to solicited?
if c.route.didSolicit {
// Make a copy
rs := *c.route
r = &rs
}
c.mu.Unlock()
remote.mu.Lock()
// r will be not nil if c.route.didSolicit was true
if r != nil {
// If we upgrade to solicited, we still want to keep the remote's
// connectURLs. So transfer those.
r.connectURLs = remote.route.connectURLs
remote.route = r
}
// This is to mitigate the issue where both sides add the route
// on the opposite connection, and therefore end-up with both
// connections being dropped.
remote.route.retry = true
remote.mu.Unlock()
}
return !exists, sendInfo
}
func (s *Server) broadcastInterestToRoutes(proto string) {
var arg []byte
if atomic.LoadInt32(&s.logging.trace) == 1 {
arg = []byte(proto[:len(proto)-LEN_CR_LF])
}
protoAsBytes := []byte(proto)
s.mu.Lock()
for _, route := range s.routes {
// FIXME(dlc) - Make same logic as deliverMsg
route.mu.Lock()
route.sendProto(protoAsBytes, true)
route.mu.Unlock()
route.traceOutOp("", arg)
}
s.mu.Unlock()
}
// broadcastSubscribe will forward a client subscription
// to all active routes.
func (s *Server) broadcastSubscribe(sub *subscription) {
if s.numRoutes() == 0 {
return
}
rsid := routeSid(sub)
proto := fmt.Sprintf(subProto, sub.subject, sub.queue, rsid)
s.broadcastInterestToRoutes(proto)
}
// broadcastUnSubscribe will forward a client unsubscribe
// action to all active routes.
func (s *Server) broadcastUnSubscribe(sub *subscription) {
if s.numRoutes() == 0 {
return
}
rsid := routeSid(sub)
maxStr := _EMPTY_
sub.client.mu.Lock()
// Set max if we have it set and have not tripped auto-unsubscribe
if sub.max > 0 && sub.nm < sub.max {
maxStr = fmt.Sprintf(" %d", sub.max)
}
sub.client.mu.Unlock()
proto := fmt.Sprintf(unsubProto, rsid, maxStr)
s.broadcastInterestToRoutes(proto)
}
func (s *Server) routeAcceptLoop(ch chan struct{}) {
defer func() {
if ch != nil {
close(ch)
}
}()
// Snapshot server options.
opts := s.getOpts()
// Snapshot server options.
port := opts.Cluster.Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Cluster.Host, strconv.Itoa(port))
s.Noticef("Listening for route connections on %s", hp)
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on router port: %d - %v", opts.Cluster.Port, e)
return
}
s.mu.Lock()
// Check for TLSConfig
tlsReq := opts.Cluster.TLSConfig != nil
info := Info{
ID: s.info.ID,
Version: s.info.Version,
AuthRequired: false,
TLSRequired: tlsReq,
TLSVerify: tlsReq,
MaxPayload: s.info.MaxPayload,
}
// Set this if only if advertise is not disabled
if !opts.Cluster.NoAdvertise {
info.ClientConnectURLs = s.clientConnectURLs
}
// If we have selected a random port...
if port == 0 {
// Write resolved port back to options.
opts.Cluster.Port = l.Addr().(*net.TCPAddr).Port
}
// Keep track of actual listen port. This will be needed in case of
// config reload.
s.clusterActualPort = opts.Cluster.Port
// Check for Auth items
if opts.Cluster.Username != "" {
info.AuthRequired = true
}
s.routeInfo = info
// Possibly override Host/Port and set IP based on Cluster.Advertise
if err := s.setRouteInfoHostPortAndIP(); err != nil {
s.Fatalf("Error setting route INFO with Cluster.Advertise value of %s, err=%v", s.opts.Cluster.Advertise, err)
l.Close()
s.mu.Unlock()
return
}
// Setup state that can enable shutdown
s.routeListener = l
s.mu.Unlock()
// Let them know we are up
close(ch)
ch = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
s.Debugf("Temporary Route Accept Errorf(%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else if s.isRunning() {
s.Noticef("Accept error: %v", err)
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createRoute(conn, nil)
s.grWG.Done()
})
}
s.Debugf("Router accept loop exiting..")
s.done <- true
}
// Similar to setInfoHostPortAndGenerateJSON, but for routeInfo.
func (s *Server) setRouteInfoHostPortAndIP() error {
if s.opts.Cluster.Advertise != "" {
advHost, advPort, err := parseHostPort(s.opts.Cluster.Advertise, s.opts.Cluster.Port)
if err != nil {
return err
}
s.routeInfo.Host = advHost
s.routeInfo.Port = advPort
s.routeInfo.IP = fmt.Sprintf("nats-route://%s/", net.JoinHostPort(advHost, strconv.Itoa(advPort)))
} else {
s.routeInfo.Host = s.opts.Cluster.Host
s.routeInfo.Port = s.opts.Cluster.Port
s.routeInfo.IP = ""
}
// (re)generate the routeInfoJSON byte array
s.generateRouteInfoJSON()
return nil
}
// StartRouting will start the accept loop on the cluster host:port
// and will actively try to connect to listed routes.
func (s *Server) StartRouting(clientListenReady chan struct{}) {
defer s.grWG.Done()
// Wait for the client listen port to be opened, and
// the possible ephemeral port to be selected.
<-clientListenReady
// Spin up the accept loop
ch := make(chan struct{})
go s.routeAcceptLoop(ch)
<-ch
// Solicit Routes if needed.
s.solicitRoutes(s.getOpts().Routes)
}
func (s *Server) reConnectToRoute(rURL *url.URL, rtype RouteType) {
tryForEver := rtype == Explicit
// If A connects to B, and B to A (regardless if explicit or
// implicit - due to auto-discovery), and if each server first
// registers the route on the opposite TCP connection, the
// two connections will end-up being closed.
// Add some random delay to reduce risk of repeated failures.
delay := time.Duration(rand.Intn(100)) * time.Millisecond
if tryForEver {
delay += DEFAULT_ROUTE_RECONNECT
}
time.Sleep(delay)
s.connectToRoute(rURL, tryForEver)
}
func (s *Server) connectToRoute(rURL *url.URL, tryForEver bool) {
// Snapshot server options.
opts := s.getOpts()
defer s.grWG.Done()
attempts := 0
for s.isRunning() && rURL != nil {
s.Debugf("Trying to connect to route on %s", rURL.Host)
conn, err := net.DialTimeout("tcp", rURL.Host, DEFAULT_ROUTE_DIAL)
if err != nil {
s.Errorf("Error trying to connect to route: %v", err)
if !tryForEver {
if opts.Cluster.ConnectRetries <= 0 {
return
}
attempts++
if attempts > opts.Cluster.ConnectRetries {
return
}
}
select {
case <-s.rcQuit:
return
case <-time.After(DEFAULT_ROUTE_CONNECT):
continue
}
}
// We have a route connection here.
// Go ahead and create it and exit this func.
s.createRoute(conn, rURL)
return
}
}
func (c *client) isSolicitedRoute() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.typ == ROUTER && c.route != nil && c.route.didSolicit
}
func (s *Server) solicitRoutes(routes []*url.URL) {
for _, r := range routes {
route := r
s.startGoRoutine(func() { s.connectToRoute(route, true) })
}
}
func (s *Server) numRoutes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.routes)
}
mask_image.py | import sly_globals as g |
def get_mask_from_clicks(image_np, clicks_list):
g.CONTROLLER.set_image(image_np)
for click in clicks_list:
g.CONTROLLER.add_click(click.coords[1], click.coords[0], click.is_positive)
try:
res_mask = g.CONTROLLER.result_mask
except Exception(f"Couldn't process image"):
res_mask = None
return res_mask | |
test_xmlstream.py | # Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.xish import xmlstream
class XmlStreamTest(unittest.TestCase):
def setUp(self):
self.errorOccurred = False
self.streamStarted = False
self.streamEnded = False
self.outlist = []
self.xmlstream = xmlstream.XmlStream()
self.xmlstream.transport = self
self.xmlstream.transport.write = self.outlist.append
# Auxiliary methods
def loseConnection(self):
self.xmlstream.connectionLost("no reason")
def streamStartEvent(self, rootelem):
self.streamStarted = True
def streamErrorEvent(self, errelem):
self.errorOccurred = True
def | (self, _):
self.streamEnded = True
def testBasicOp(self):
xs = self.xmlstream
xs.addObserver(xmlstream.STREAM_START_EVENT,
self.streamStartEvent)
xs.addObserver(xmlstream.STREAM_ERROR_EVENT,
self.streamErrorEvent)
xs.addObserver(xmlstream.STREAM_END_EVENT,
self.streamEndEvent)
# Go...
xs.connectionMade()
xs.send("<root>")
self.assertEquals(self.outlist[0], "<root>")
xs.dataReceived("<root>")
self.assertEquals(self.streamStarted, True)
self.assertEquals(self.errorOccurred, False)
self.assertEquals(self.streamEnded, False)
xs.dataReceived("<child><unclosed></child>")
self.assertEquals(self.errorOccurred, True)
self.assertEquals(self.streamEnded, True)
| streamEndEvent |
picklepersistence.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2020
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the PicklePersistence class."""
import pickle
from collections import defaultdict
from copy import deepcopy
from typing import Any, DefaultDict, Dict, Optional, Tuple
from telegram.ext import BasePersistence
from telegram.utils.types import ConversationDict
class PicklePersistence(BasePersistence):
"""Using python's builtin pickle for making you bot persistent.
Warning:
:class:`PicklePersistence` will try to replace :class:`telegram.Bot` instances by
:attr:`REPLACED_BOT` and insert the bot set with
:meth:`telegram.ext.BasePersistence.set_bot` upon loading of the data. This is to ensure
that changes to the bot apply to the saved objects, too. If you change the bot's token, this
may lead to e.g. ``Chat not found`` errors. For the limitations on replacing bots see
:meth:`telegram.ext.BasePersistence.replace_bot` and
:meth:`telegram.ext.BasePersistence.insert_bot`.
Attributes:
filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
is :obj:`False` this will be used as a prefix.
store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this
persistence class.
store_chat_data (:obj:`bool`): Optional. Whether chat_data should be saved by this
persistence class.
store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this
persistence class.
single_file (:obj:`bool`): Optional. When :obj:`False` will store 3 separate files of
`filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is
:obj:`True`.
on_flush (:obj:`bool`, optional): When :obj:`True` will only save to file when
:meth:`flush` is called and keep data in memory until that happens. When
:obj:`False` will store data on any transaction *and* on call to :meth:`flush`.
Default is :obj:`False`.
Args:
filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
is :obj:`False` this will be used as a prefix.
store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this
persistence class. Default is :obj:`True`.
store_chat_data (:obj:`bool`, optional): Whether chat_data should be saved by this
persistence class. Default is :obj:`True`.
store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this
persistence class. Default is :obj:`True`.
single_file (:obj:`bool`, optional): When :obj:`False` will store 3 separate files of
`filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is
:obj:`True`.
on_flush (:obj:`bool`, optional): When :obj:`True` will only save to file when
:meth:`flush` is called and keep data in memory until that happens. When
:obj:`False` will store data on any transaction *and* on call to :meth:`flush`.
Default is :obj:`False`.
"""
def __init__(
self,
filename: str,
store_user_data: bool = True,
store_chat_data: bool = True,
store_bot_data: bool = True,
single_file: bool = True,
on_flush: bool = False,
):
super().__init__(
store_user_data=store_user_data,
store_chat_data=store_chat_data,
store_bot_data=store_bot_data,
)
self.filename = filename
self.single_file = single_file
self.on_flush = on_flush
self.user_data: Optional[DefaultDict[int, Dict]] = None
self.chat_data: Optional[DefaultDict[int, Dict]] = None
self.bot_data: Optional[Dict] = None
self.conversations: Optional[Dict[str, Dict[Tuple, Any]]] = None
def load_singlefile(self) -> None:
try:
filename = self.filename
with open(self.filename, "rb") as file:
data = pickle.load(file)
self.user_data = defaultdict(dict, data['user_data'])
self.chat_data = defaultdict(dict, data['chat_data'])
# For backwards compatibility with files not containing bot data
self.bot_data = data.get('bot_data', {})
self.conversations = data['conversations']
except IOError:
self.conversations = dict()
self.user_data = defaultdict(dict)
self.chat_data = defaultdict(dict)
self.bot_data = {}
except pickle.UnpicklingError as exc:
raise TypeError(f"File {filename} does not contain valid pickle data") from exc
except Exception as exc:
raise TypeError(f"Something went wrong unpickling {filename}") from exc
@staticmethod
def load_file(filename: str) -> Any:
try:
with open(filename, "rb") as file:
return pickle.load(file)
except IOError:
return None
except pickle.UnpicklingError as exc:
raise TypeError(f"File {filename} does not contain valid pickle data") from exc
except Exception as exc:
raise TypeError(f"Something went wrong unpickling {filename}") from exc
def dump_singlefile(self) -> None:
with open(self.filename, "wb") as file:
data = {
'conversations': self.conversations,
'user_data': self.user_data,
'chat_data': self.chat_data,
'bot_data': self.bot_data,
}
pickle.dump(data, file)
@staticmethod
def dump_file(filename: str, data: Any) -> None:
with open(filename, "wb") as file:
pickle.dump(data, file)
def get_user_data(self) -> DefaultDict[int, Dict[Any, Any]]:
"""Returns the user_data from the pickle file if it exists or an empty :obj:`defaultdict`.
Returns:
:obj:`defaultdict`: The restored user data.
"""
if self.user_data:
pass
elif not self.single_file:
filename = f"{self.filename}_user_data"
data = self.load_file(filename)
if not data:
data = defaultdict(dict)
else:
data = defaultdict(dict, data)
self.user_data = data
else:
self.load_singlefile()
return deepcopy(self.user_data) # type: ignore[arg-type]
def get_chat_data(self) -> DefaultDict[int, Dict[Any, Any]]:
"""Returns the chat_data from the pickle file if it exists or an empty :obj:`defaultdict`.
Returns:
:obj:`defaultdict`: The restored chat data.
"""
if self.chat_data:
pass
elif not self.single_file:
filename = f"{self.filename}_chat_data"
data = self.load_file(filename)
if not data:
data = defaultdict(dict)
else:
data = defaultdict(dict, data)
self.chat_data = data
else:
self.load_singlefile()
return deepcopy(self.chat_data) # type: ignore[arg-type]
def get_bot_data(self) -> Dict[Any, Any]:
"""Returns the bot_data from the pickle file if it exists or an empty :obj:`dict`.
Returns:
:obj:`dict`: The restored bot data.
"""
if self.bot_data:
pass
elif not self.single_file:
filename = f"{self.filename}_bot_data"
data = self.load_file(filename)
if not data:
data = {}
self.bot_data = data
else:
self.load_singlefile()
return deepcopy(self.bot_data) # type: ignore[arg-type]
def get_conversations(self, name: str) -> ConversationDict:
"""Returns the conversations from the pickle file if it exsists or an empty dict.
Args:
name (:obj:`str`): The handlers name.
Returns:
:obj:`dict`: The restored conversations for the handler.
"""
if self.conversations:
pass
elif not self.single_file:
filename = f"{self.filename}_conversations"
data = self.load_file(filename)
if not data:
data = {name: {}}
self.conversations = data
else:
self.load_singlefile()
return self.conversations.get(name, {}).copy() # type: ignore[union-attr]
def update_conversation(
self, name: str, key: Tuple[int, ...], new_state: Optional[object]
) -> None:
"""Will update the conversations for the given handler and depending on :attr:`on_flush`
save the pickle file.
Args:
name (:obj:`str`): The handler's name.
key (:obj:`tuple`): The key the state is changed for.
new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.
"""
if not self.conversations:
self.conversations = dict()
if self.conversations.setdefault(name, {}).get(key) == new_state:
return
self.conversations[name][key] = new_state
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_conversations"
self.dump_file(filename, self.conversations)
else:
self.dump_singlefile()
def update_user_data(self, user_id: int, data: Dict) -> None:
"""Will update the user_data and depending on :attr:`on_flush` save the pickle file.
Args:
user_id (:obj:`int`): The user the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].
"""
if self.user_data is None:
self.user_data = defaultdict(dict)
if self.user_data.get(user_id) == data:
return
self.user_data[user_id] = data
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_user_data"
self.dump_file(filename, self.user_data)
else:
self.dump_singlefile()
def update_chat_data(self, chat_id: int, data: Dict) -> None:
"""Will update the chat_data and depending on :attr:`on_flush` save the pickle file.
Args:
chat_id (:obj:`int`): The chat the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].
"""
if self.chat_data is None:
self.chat_data = defaultdict(dict)
if self.chat_data.get(chat_id) == data:
return
self.chat_data[chat_id] = data
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_chat_data"
self.dump_file(filename, self.chat_data)
else:
self.dump_singlefile()
def update_bot_data(self, data: Dict) -> None:
"""Will update the bot_data and depending on :attr:`on_flush` save the pickle file.
Args:
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.
"""
if self.bot_data == data:
return
self.bot_data = data.copy()
if not self.on_flush:
if not self.single_file:
filename = f"{self.filename}_bot_data"
self.dump_file(filename, self.bot_data)
else:
self.dump_singlefile()
def flush(self) -> None:
| """Will save all data in memory to pickle file(s)."""
if self.single_file:
if self.user_data or self.chat_data or self.bot_data or self.conversations:
self.dump_singlefile()
else:
if self.user_data:
self.dump_file(f"{self.filename}_user_data", self.user_data)
if self.chat_data:
self.dump_file(f"{self.filename}_chat_data", self.chat_data)
if self.bot_data:
self.dump_file(f"{self.filename}_bot_data", self.bot_data)
if self.conversations:
self.dump_file(f"{self.filename}_conversations", self.conversations) |
|
sendmail.py | # standard library
import threading
# django
from django.core.mail import send_mail
# local django
from user import constants
class SendMail(threading.Thread):
"""
Responsible to send email in background.
"""
def | (self, email, HealthProfessional, SendInvitationProfile):
self.email = email
self.HealthProfessional = HealthProfessional
self.SendInvitationProfile = SendInvitationProfile
threading.Thread.__init__(self)
def run(self):
email_subject = constants.INVITATION_EMAIL_SUBJECT
email_body = constants.INVITATION_EMAIL_BODY
send_mail(email_subject, email_body % (self.HealthProfessional.name,
self.SendInvitationProfile.activation_key),
'[email protected]', [self.email], fail_silently=False)
| __init__ |
main.go | package main
import (
"flag"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/prometheus/common/log"
"github.com/ryancurrah/pcap_exporter/dns"
"github.com/ryancurrah/pcap_exporter/opt"
"github.com/ryancurrah/pcap_exporter/pcap"
"github.com/ryancurrah/pcap_exporter/prom"
)
const (
version = 0.2
helpText = ` Name: pcap_exporter
Version: %v
License: Copyright (c) 2017 Philip Griesbacher
Source: https://github.com/Griesbacher/pcap_exporter
Source: https://github.com/ryancurrah/pcap_exporter
`
usageText = `Notes:
- If 'l-sa' or 'l-da' is used but no address can be determined, '%s' will be set as the label value.
- If 'l-sp' or 'l-dp' is used but no port can be determined, '%s' will be set as the label value.
- If any protocol is used but no protocol can be determined, '' will be set as the label value.
Usage of %s:
`
)
var (
device = flag.String("i", "any", "Interface name to listen to. 'any' listens to all.")
filter = flag.String("f", "", "A pcap filter string. See http://www.tcpdump.org/manpages/pcap-filter.7.html for usage")
snaplen = flag.Int("s", 65536, "Number of bytes max to read per packet.")
address = flag.String("listen-address", ":9999", "Listen address with the port for the exporter.")
promiscuous = flag.Bool("p", false, "Use promiscuous mode.")
resolve = flag.Bool("r", false, "Resolve ip addresses with their DNS names.")
listVersion = flag.Bool("v", false, "Print exporter version.")
listInterfaces = flag.Bool("list-interfaces", false, "Prints available interfaces and quits.")
//logLevel = flag.String("log-level", "error", "Log level.")
sa = flag.Bool("l-sa", true, printLabel(opt.SourceAddress))
sp = flag.Bool("l-sp", false, printLabel(opt.SourcePort))
da = flag.Bool("l-da", true, printLabel(opt.DestinationAddress))
dp = flag.Bool("l-dp", false, printLabel(opt.DestinationPort))
lp = flag.Bool("l-lp", false, printLabel(opt.LinkProtocol))
np = flag.Bool("l-np", false, printLabel(opt.NetworkProtocol))
tp = flag.Bool("l-tp", false, printLabel(opt.TransportProtocol))
ap = flag.Bool("l-ap", false, printLabel(opt.ApplicationProtocol))
// labelFlags is a map of flag.Bool pointer options with the label name
labelFlags = map[*bool]string{
sa: opt.SourceAddress,
sp: opt.SourcePort,
da: opt.DestinationAddress,
dp: opt.DestinationPort,
lp: opt.LinkProtocol,
np: opt.NetworkProtocol,
tp: opt.TransportProtocol,
ap: opt.ApplicationProtocol,
}
)
func printLabel(label string) string {
return fmt.Sprintf("Add %s to labels.", label)
}
func parseFlags() opt.Options {
// parse user input
flag.Parse()
// determine which flag labels were set
labelNames := []string{}
for labelFlag, labelName := range labelFlags {
if labelFlag != nil && *labelFlag {
labelNames = append(labelNames, labelName)
}
}
options := opt.Options{
LabelNames: labelNames,
Device: *device,
Filter: *filter,
Snaplen: *snaplen,
Promiscuous: *promiscuous,
ResolveDNS: *resolve,
}
return options
}
func main() {
// handle args
flag.Usage = func() {
fmt.Printf(helpText, version)
fmt.Printf(usageText, pcap.UnknownIP, pcap.UnknownPort, os.Args[0])
flag.PrintDefaults()
}
options := parseFlags()
// list version
if *listVersion |
// list all interfaces
if *listInterfaces {
interfacesText, err := pcap.ListAvailableInterfaces()
if err != nil {
log.Fatalf("unable to get a list of all available interfaces: %s", err)
}
fmt.Printf("available interfaces:\n\n%s", interfacesText)
os.Exit(0)
}
// listen for exit signal
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM)
// register prometheus metrics
prom.RegisterMetrics(options.GetLabelNames())
// start dns storage cache
dns.Start()
defer dns.Stop()
// start pcap capture and analysis
err := pcap.StartCapture(*device, *filter, *snaplen, *promiscuous, options)
if err != nil {
log.Fatal(err)
}
defer pcap.StopCapture()
// start prometheus exporter
prom.StartExporter(address, options)
PcapExporterLoop:
for {
select {
case sig := <-signals:
log.Warnf("received exit signal %s, quitting now...", sig)
break PcapExporterLoop
}
}
}
| {
fmt.Print(version)
os.Exit(0)
} |
__init__.py | from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List
import numpy as np
import cv2
import imutils
from gi.repository import GLib, Gst
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected
class OpenCVDetectionSession(DetectionSession):
cap: cv2.VideoCapture
previous_frame: Any
def __init__(self) -> None:
super().__init__()
self.previous_frame = None
self.cap = None
defaultThreshold = 25
defaultArea = 2000
defaultInterval = 250
class OpenCVPlugin(DetectPlugin):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.color2Gray = None
self.pixelFormat = "I420"
self.pixelFormatChannelCount = 1
if True:
self.retainAspectRatio = False
self.color2Gray = None
self.pixelFormat = "I420"
self.pixelFormatChannelCount = 1
else:
self.retainAspectRatio = True
self.color2Gray = cv2.COLOR_BGRA2GRAY
self.pixelFormat = "BGRA"
self.pixelFormatChannelCount = 4
async def getDetectionModel(self) -> ObjectDetectionModel:
d: ObjectDetectionModel = {
'name': '@scrypted/opencv',
'classes': ['motion'],
}
settings = [
{
'title': "Motion Area",
'description': "The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.",
'value': defaultArea,
'key': 'area',
'placeholder': defaultArea,
'type': 'number',
},
{
'title': "Motion Threshold",
'description': "The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.",
'value': defaultThreshold,
'key': 'threshold', | 'title': "Frame Analysis Interval",
'description': "The number of milliseconds to wait between motion analysis.",
'value': defaultInterval,
'key': 'interval',
'placeholder': defaultInterval,
'type': 'number',
},
]
d['settings'] = settings
return d
def get_pixel_format(self):
return self.pixelFormat
def parse_settings(self, settings: Any):
area = defaultArea
threshold = defaultThreshold
interval = defaultInterval
if settings:
area = float(settings.get('area', area))
threshold = int(settings.get('threshold', threshold))
interval = float(settings.get('interval', interval))
return area, threshold, interval
def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
area, threshold, interval = self.parse_settings(settings)
# see get_detection_input_size on undocumented size requirements for GRAY8
if self.color2Gray != None:
gray = cv2.cvtColor(frame, self.color2Gray)
else:
gray = frame
curFrame = cv2.GaussianBlur(gray, (21,21), 0)
if detection_session.previous_frame is None:
detection_session.previous_frame = curFrame
return
frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)
detection_session.previous_frame = curFrame
_, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=2)
fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(fcontours)
detections: List[ObjectDetectionResult] = []
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = src_size
for c in contours:
x, y, w, h = cv2.boundingRect(c)
# if w * h != contour_area:
# print("mismatch w/h", contour_area - w * h)
x2, y2 = convert_to_src_size((x + w, y + h))
x, y = convert_to_src_size((x, y))
w = x2 - x + 1
h = y2 - y + 1
contour_area = w * h
if not area or contour_area > area:
detection: ObjectDetectionResult = {}
detection['boundingBox'] = (x, y, w, h)
detection['className'] = 'motion'
detection['score'] = 1 if area else contour_area
detections.append(detection)
return detection_result
def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:
raise Exception('can not run motion detection on jpeg')
def get_detection_input_size(self, src_size):
# The initial implementation of this plugin used BGRA
# because it seemed impossible to pull the Y frame out of I420 without corruption.
# This is because while 318x174 is aspect ratio correct,
# it seems to cause strange issues with stride and the image is skewed.
# By using 300x300, this seems to avoid some undocumented minimum size
# requirement in gst-videoscale or opencv. Unclear which.
# This is the same input size as tensorflow-lite. Allows for better pipelining.
if not self.retainAspectRatio:
return (300, 300)
width, height = src_size
if (width > height):
if (width > 318):
height = height / width * 318
width = 318
else:
if (height > 318):
width = width / height * 318
height = 318
width = int(np.floor(width / 6) * 6)
height = int(np.floor(height / 6) * 6)
return width, height
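# Worked example (only relevant if retainAspectRatio were enabled): a
# 1920x1080 source scales to 318x178.875, and flooring both sides to
# multiples of 6 gives the 318x174 size mentioned above.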
def end_session(self, detection_session: OpenCVDetectionSession):
if detection_session and detection_session.cap:
detection_session.cap.release()
detection_session.cap = None
return super().end_session(detection_session)
def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size)-> ObjectsDetected:
buf = gst_sample.get_buffer()
caps = gst_sample.get_caps()
# can't trust the width value, compute the stride
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
result, info = buf.map(Gst.MapFlags.READ)
if not result:
return
try:
mat = np.ndarray(
(height,
width,
self.pixelFormatChannelCount),
buffer=info.data,
dtype= np.uint8)
return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)
finally:
buf.unmap(info)
def create_detection_session(self):
return OpenCVDetectionSession()
def detection_event_notified(self, settings: Any):
area, threshold, interval = self.parse_settings(settings)
# it is safe to block here because gstreamer creates a queue thread
sleep(interval / 1000)
return super().detection_event_notified(settings) | 'placeholder': defaultThreshold,
'type': 'number',
},
{ |
processor.rs | use parsiphae::ppa::symbol_collector::ClassCollector;
use parsiphae::types::Expression;
use parsiphae::{error_handler, errors::*, inner_errors::ParserError, ppa, src_parser, types};
use std::io::Read;
use std::path::{Path, PathBuf};
struct | ;
impl ppa::visitor::Visitor for TestVisitor {
fn visit_expression(&mut self, exp: &Expression, scope: Option<&types::Identifier>) {
let res = exp.evaluate_int();
if let Ok(val) = res {
println!(
"I found an expression that evaluated to {}: {:#?}",
val, exp
)
}
}
fn visit_var_decl(&mut self, decl: &types::VarDeclaration, scope: Option<&types::Identifier>) {
println!(
"A variable was declared: {} in scope {}",
&decl.name,
scope.unwrap_or(&types::Identifier::new(b""))
);
}
}
#[derive(Debug)]
pub struct ParsingResult {
file: PathBuf,
result: Result<types::AST>,
}
impl ParsingResult {
pub fn new<P: AsRef<Path>>(path: P, result: Result<types::AST>) -> Self {
ParsingResult {
file: path.as_ref().to_owned(),
result,
}
}
pub fn print(&self) {
match self.result {
Ok(_) => {}
Err(ref e) => match e {
Error::ParsingError { err, line } => {
let msg = err.description();
println!("Error in file {:?} in line {}: {}", self.file, line, msg);
}
_ => unreachable!(),
},
}
}
pub fn is_ok(&self) -> bool {
self.result.is_ok()
}
}
fn process_file<P: AsRef<Path>>(path: P) -> Result<ParsingResult> {
let mut file = ::std::fs::File::open(&path).unwrap();
let mut content = Vec::new();
file.read_to_end(&mut content)?;
use parsiphae::parsers::*;
let result = start(types::Input(&content))
.map_err(|err| error_handler::map_err(&content, err))
.map(|tuple| tuple.1);
Ok(ParsingResult::new(path, result))
}
pub fn process_single_file<P: AsRef<Path>>(path: P) -> Result<()> {
let res = process_file(path)?;
res.print();
Ok(())
}
pub fn process_src<P: AsRef<Path>>(path: P) -> Result<()> {
let d_paths = src_parser::parse_src(&path)?;
let results: Vec<ParsingResult> = d_paths.iter().map(process_file).collect::<Result<_>>()?;
let mut visitor = ClassCollector::new();
{
let okay_results = results
.iter()
.filter_map(|res| res.result.as_ref().ok());
for ast in okay_results {
::parsiphae::ppa::visitor::visit_ast(&ast, &mut visitor);
}
println!("{:#?}", visitor);
}
println!("Parsed {} files", results.len());
if results.iter().all(ParsingResult::is_ok) {
println!("No syntax errors detected!");
} else {
for result in results {
result.print();
}
}
Ok(())
}
| TestVisitor |
lib.rs | #![forbid(unsafe_code)]
#![deny(
missing_copy_implementations,
missing_crate_level_docs,
missing_debug_implementations,
missing_docs,
nonstandard_style,
unused_qualifications
)]
/*!
# Example
```
use trillium::{async_trait, Conn};
use trillium_controllers::{Controller, ControllerHandler};
struct UserController;
#[async_trait]
impl Controller for UserController {
type Error = &'static str;
async fn get(&self, conn: &mut Conn) -> Result<(), Self::Error> {
conn.set_status(200);
conn.set_body("ok");
conn.set_halted(true);
Ok(())
}
async fn delete(&self, conn: &mut Conn) -> Result<(), Self::Error> {
Err("uh oh")
}
}
let handler = ControllerHandler::new(UserController);
use trillium_testing::prelude::*;
assert_ok!(get("/").on(&handler), "ok");
assert_not_handled!(post("/").on(&handler));
assert_response!(delete("/").on(&handler), 500);
```
*/
use trillium::{async_trait, conn_try, http_types::Method, Conn, Handler};
#[async_trait]
/// Implementors define how an http resource responds to each request
/// method; every handler has a no-op default implementation. (Doc
/// comments are required here because the crate denies `missing_docs`.)
pub trait Controller: Send + Sync + 'static {
/// The error type returned by this controller's handlers.
type Error: std::fmt::Display + Send + Sync + 'static;
/// Responds to GET requests.
async fn get(&self, _conn: &mut Conn) -> Result<(), Self::Error> {
Ok(())
}
/// Responds to POST requests.
async fn post(&self, _conn: &mut Conn) -> Result<(), Self::Error> {
Ok(())
}
/// Responds to PUT requests.
async fn | (&self, _conn: &mut Conn) -> Result<(), Self::Error> {
Ok(())
}
/// Responds to DELETE requests.
async fn delete(&self, _conn: &mut Conn) -> Result<(), Self::Error> {
Ok(())
}
/// Responds to PATCH requests.
async fn patch(&self, _conn: &mut Conn) -> Result<(), Self::Error> {
Ok(())
}
}
/// Wraps a [`Controller`] and dispatches each request to the handler
/// matching its http method.
#[derive(Debug)]
pub struct ControllerHandler<C>(C);
impl<C> ControllerHandler<C> {
/// Constructs a new `ControllerHandler` from the given controller.
pub fn new(controller: C) -> Self {
Self(controller)
}
}
#[async_trait]
impl<C: Controller> Handler for ControllerHandler<C> {
async fn run(&self, mut conn: Conn) -> Conn {
let result = match *conn.method() {
Method::Get => self.0.get(&mut conn).await,
Method::Post => self.0.post(&mut conn).await,
Method::Put => self.0.put(&mut conn).await,
Method::Delete => self.0.delete(&mut conn).await,
Method::Patch => self.0.patch(&mut conn).await,
_ => Ok(()),
};
conn_try!(conn, result);
conn
}
}
| put |
restart.go | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/joomcode/errorx"
"github.com/pingcap-incubator/tiup-cluster/pkg/log"
"github.com/pingcap-incubator/tiup-cluster/pkg/logger"
"github.com/pingcap-incubator/tiup-cluster/pkg/meta"
operator "github.com/pingcap-incubator/tiup-cluster/pkg/operation"
"github.com/pingcap-incubator/tiup-cluster/pkg/task"
tiuputils "github.com/pingcap-incubator/tiup/pkg/utils"
"github.com/pingcap/errors"
"github.com/spf13/cobra"
)
func newRestartCmd() *cobra.Command | {
var options operator.Options
cmd := &cobra.Command{
Use: "restart <cluster-name>",
Short: "Restart a TiDB cluster",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return cmd.Help()
}
clusterName := args[0]
if tiuputils.IsNotExist(meta.ClusterPath(clusterName, meta.MetaFileName)) {
return errors.Errorf("cannot restart non-exists cluster %s", clusterName)
}
logger.EnableAuditLog()
metadata, err := meta.ClusterMetadata(clusterName)
if err != nil {
return err
}
t := task.NewBuilder().
SSHKeySet(
meta.ClusterPath(clusterName, "ssh", "id_rsa"),
meta.ClusterPath(clusterName, "ssh", "id_rsa.pub")).
ClusterSSH(metadata.Topology, metadata.User, sshTimeout).
ClusterOperate(metadata.Topology, operator.RestartOperation, options).
Build()
if err := t.Execute(task.NewContext()); err != nil {
if errorx.Cast(err) != nil {
// FIXME: Map possible task errors and give suggestions.
return err
}
return errors.Trace(err)
}
log.Infof("Restarted cluster `%s` successfully", clusterName)
return nil
},
}
cmd.Flags().StringSliceVarP(&options.Roles, "role", "R", nil, "Only restart specified roles")
cmd.Flags().StringSliceVarP(&options.Nodes, "node", "N", nil, "Only restart specified nodes")
return cmd
} |
|
test_validator_cli.py | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import unittest
from txnmain.validator_cli import get_configuration
class TestValidatorCLI(unittest.TestCase):
def test_currency_home(self):
os.environ.clear()
os.environ["CURRENCYHOME"] = "/test_path"
cfg = get_configuration(args=[], config_files_required=False)
self.assertIn("CurrencyHome", cfg)
self.assertEquals(cfg["CurrencyHome"], "/test_path")
self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc")
self.assertEquals(cfg["LogDirectory"], "/test_path/logs")
self.assertEquals(cfg["DataDirectory"], "/test_path/data")
def test_default_config_posix(self):
os.environ.clear()
cfg = get_configuration(args=[],
os_name='posix',
config_files_required=False)
self.assertNotIn("CurrencyHome", cfg)
self.assertEquals(cfg["ConfigDirectory"], "/etc/sawtooth-validator")
self.assertEquals(cfg["LogDirectory"], "/var/log/sawtooth-validator")
self.assertEquals(cfg["DataDirectory"], "/var/lib/sawtooth-validator")
def test_default_config_nt(self):
os.environ.clear()
cfg = get_configuration(args=[],
os_name='nt',
config_files_required=False)
self.assertNotIn("CurrencyHome", cfg)
self.assertEquals(
cfg["ConfigDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\conf")
self.assertEquals(
cfg["LogDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\logs")
self.assertEquals(
cfg["DataDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\data")
def test_logconfig_arg(self):
os.environ.clear()
cfg = get_configuration(args=["--log-config=Logging.js"],
config_files_required=False)
self.assertIn("LogConfigFile", cfg)
self.assertEquals(cfg["LogConfigFile"], "Logging.js")
def | (self):
os.environ.clear()
cfg = get_configuration(args=["--conf-dir=/test_path/etc"],
config_files_required=False)
self.assertIn("ConfigDirectory", cfg)
self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc")
def test_options_mapping_data_dir(self):
os.environ.clear()
cfg = get_configuration(args=["--data-dir=/test_path/data"],
config_files_required=False)
self.assertIn("DataDirectory", cfg)
self.assertEquals(cfg["DataDirectory"], "/test_path/data")
def test_options_mapping_type(self):
os.environ.clear()
cfg = get_configuration(args=["--type=test"],
config_files_required=False)
self.assertIn("LedgerType", cfg)
self.assertEquals(cfg["LedgerType"], "test")
def test_options_mapping_key_file(self):
os.environ.clear()
cfg = get_configuration(args=["--keyfile=/test_path/keys/key.wif"],
config_files_required=False)
self.assertIn("KeyFile", cfg)
self.assertEquals(cfg["KeyFile"], "/test_path/keys/key.wif")
def test_options_mapping_node(self):
os.environ.clear()
cfg = get_configuration(args=["--node=test000"],
config_files_required=False)
self.assertIn("NodeName", cfg)
self.assertEquals(cfg["NodeName"], "test000")
def test_options_mapping_listen(self):
os.environ.clear()
cfg = get_configuration(args=['--listen="localhost:5500/UDP gossip"'],
config_files_required=False)
self.assertIn("Listen", cfg)
self.assertEquals(cfg["Listen"], ['"localhost:5500/UDP gossip"'])
def test_options_mapping_restore(self):
os.environ.clear()
cfg = get_configuration(args=["--restore"],
config_files_required=False)
self.assertEquals(cfg["Restore"], True)
def test_options_mapping_peers(self):
os.environ.clear()
cfg = get_configuration(args=["--peers=testpeer1"],
config_files_required=False)
self.assertIn("Peers", cfg)
self.assertIn("testpeer1", cfg["Peers"])
def test_options_mapping_url(self):
os.environ.clear()
cfg = get_configuration(args=["--url",
"http://testhost:8888,"
"http://testhost:8889",
"--url",
"http://testhost:8890"],
config_files_required=False)
self.assertIn("LedgerURL", cfg)
self.assertIn("http://testhost:8888", cfg["LedgerURL"])
self.assertIn("http://testhost:8889", cfg["LedgerURL"])
self.assertIn("http://testhost:8890", cfg["LedgerURL"])
if __name__ == '__main__':
unittest.main()
| test_options_mapping_conf_dir |
suite_test.go | // Copyright 2019 HAProxy Technologies LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build e2e_parallel
package globalconfig
import (
"testing"
"github.com/stretchr/testify/suite"
)
type GlobalConfigSuite struct {
suite.Suite
maxconn string
}
func TestGlobalConfigSuite(t *testing.T) | {
suite.Run(t, new(GlobalConfigSuite))
} |
|
c_tile_TranslateY.js | export const c_tile_TranslateY = {
"name": "--pf-c-tile--TranslateY",
"value": "calc(-1 * 2 * 3px)", | export default c_tile_TranslateY; | "var": "var(--pf-c-tile--TranslateY)"
}; |
cart.tsx | import React, {
createContext,
useState,
useCallback,
useContext,
useEffect,
} from 'react';
import AsyncStorage from '@react-native-community/async-storage';
interface Product {
id: string;
title: string;
image_url: string;
price: number;
quantity: number;
}
interface CartContext {
products: Product[];
addToCart(item: Omit<Product, 'quantity'>): void;
increment(id: string): void;
decrement(id: string): void;
}
const CartContext = createContext<CartContext | null>(null);
const CartProvider: React.FC = ({ children }) => {
const [products, setProducts] = useState<Product[]>([]);
useEffect(() => {
async function loadProducts(): Promise<void> {
const storageProducts = await AsyncStorage.getItem(
'@GoMarketplace:products',
);
if (storageProducts) {
setProducts([...JSON.parse(storageProducts)]);
}
}
loadProducts();
}, []);
const addToCart = useCallback(
async product => {
const productExist = products.find(prod => prod.id === product.id);
// compute the next state first so the exact same array is persisted,
// instead of saving the stale `products` snapshot from this closure
const newProducts = productExist
? products.map(prod =>
prod.id === product.id
? { ...product, quantity: prod.quantity + 1 }
: prod,
)
: [...products, { ...product, quantity: 1 }];
setProducts(newProducts);
await AsyncStorage.setItem(
'@GoMarketplace:products',
JSON.stringify(newProducts),
);
},
[products],
);
const increment = useCallback(
async id => {
const newProd = products.map(prod =>
prod.id === id ? { ...prod, quantity: prod.quantity + 1 } : prod,
);
setProducts(newProd);
await AsyncStorage.setItem(
'@GoMarketplace:products',
JSON.stringify(newProd),
);
},
[products],
);
const decrement = useCallback(
async id => {
const newProd = products.map(prod =>
prod.id === id ? { ...prod, quantity: prod.quantity - 1 } : prod,
);
setProducts(newProd);
await AsyncStorage.setItem(
'@GoMarketplace:products',
JSON.stringify(newProd),
);
},
[products],
);
const value = React.useMemo(
() => ({ addToCart, increment, decrement, products }),
[products, addToCart, increment, decrement],
);
return <CartContext.Provider value={value}>{children}</CartContext.Provider>;
};
function | (): CartContext {
const context = useContext(CartContext);
if (!context) {
throw new Error(`useCart must be used within a CartProvider`);
}
return context;
}
export { CartProvider, useCart };
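// Usage sketch (hypothetical consumer component; it must render inside
// <CartProvider>, otherwise useCart throws):
//
//   const { products, addToCart } = useCart();
//   // render `products` and call addToCart({ id, title, image_url, price })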
| useCart |
hdfs.go | package reader
import (
"context"
"path/filepath"
"time"
"github.com/colinmarc/hdfs"
"github.com/benthosdev/benthos/v4/internal/component"
"github.com/benthosdev/benthos/v4/internal/component/metrics"
"github.com/benthosdev/benthos/v4/internal/log"
"github.com/benthosdev/benthos/v4/internal/message"
)
//------------------------------------------------------------------------------
// HDFSConfig contains configuration fields for the HDFS input type.
type HDFSConfig struct {
Hosts []string `json:"hosts" yaml:"hosts"`
User string `json:"user" yaml:"user"`
Directory string `json:"directory" yaml:"directory"`
}
// NewHDFSConfig creates a new Config with default values.
func NewHDFSConfig() HDFSConfig {
return HDFSConfig{
Hosts: []string{},
User: "",
Directory: "",
}
}
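// A matching input config might look like this in YAML (values are
// illustrative; the field names come from the struct tags above):
//
//   hdfs:
//     hosts: [ "localhost:9000" ]
//     user: "benthos"
//     directory: "/inputs"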
//------------------------------------------------------------------------------
// HDFS is a benthos reader.Type implementation that reads messages from a
// HDFS directory.
type HDFS struct {
conf HDFSConfig
targets []string
client *hdfs.Client
log log.Modular
stats metrics.Type
}
// NewHDFS creates a new HDFS reader.Type.
func | (
conf HDFSConfig,
log log.Modular,
stats metrics.Type,
) *HDFS {
return &HDFS{
conf: conf,
log: log,
stats: stats,
}
}
//------------------------------------------------------------------------------
// ConnectWithContext attempts to establish a connection to the target HDFS
// host.
func (h *HDFS) ConnectWithContext(ctx context.Context) error {
if h.client != nil {
return nil
}
client, err := hdfs.NewClient(hdfs.ClientOptions{
Addresses: h.conf.Hosts,
User: h.conf.User,
})
if err != nil {
return err
}
h.client = client
targets, err := client.ReadDir(h.conf.Directory)
if err != nil {
return err
}
for _, info := range targets {
if !info.IsDir() {
h.targets = append(h.targets, info.Name())
}
}
h.log.Infof("Receiving files from HDFS directory: %v\n", h.conf.Directory)
return nil
}
//------------------------------------------------------------------------------
// ReadWithContext reads a new HDFS message.
func (h *HDFS) ReadWithContext(ctx context.Context) (*message.Batch, AsyncAckFn, error) {
if len(h.targets) == 0 {
return nil, nil, component.ErrTypeClosed
}
fileName := h.targets[0]
h.targets = h.targets[1:]
filePath := filepath.Join(h.conf.Directory, fileName)
msgBytes, readerr := h.client.ReadFile(filePath)
if readerr != nil {
return nil, nil, readerr
}
msg := message.QuickBatch([][]byte{msgBytes})
msg.Get(0).MetaSet("hdfs_name", fileName)
msg.Get(0).MetaSet("hdfs_path", filePath)
return msg, noopAsyncAckFn, nil
}
// CloseAsync shuts down the HDFS input and stops processing requests.
func (h *HDFS) CloseAsync() {
}
// WaitForClose blocks until the HDFS input has closed down.
func (h *HDFS) WaitForClose(timeout time.Duration) error {
return nil
}
//------------------------------------------------------------------------------
| NewHDFS |
test_home.py | import pytest
import allure
from config.credentials import Credentials
from framework.pages.HomePage import HomePage
from infra.screenshot_generator import get_screenshot
from infra.shared_steps import SharedSteps
from infra.string_util import identifier_generator
@allure.title('Test navigation into "New Project" page')
@allure.severity(allure.severity_level.CRITICAL)
@pytest.mark.sanity
@pytest.mark.home
@pytest.mark.project
def test_click_create_new_project(setup):
with allure.step('Setup driver'):
driver = setup
driver.get(Credentials.BASE_URL)
with allure.step('Login to OpenProject'):
SharedSteps.login_steps(driver)
with allure.step('Create a HomePage instance'):
home_page = HomePage(driver)
with allure.step('On "Home" page, click "+ Project" green button'):
home_page.click_new_project_button()
with allure.step('Verify navigation into "New Project" page'):
assert driver.title == Credentials.NEW_PROJECT_PAGE_TITLE, get_screenshot(driver, "home", "page_title", Credentials.NEW_PROJECT_PAGE_TITLE)
@allure.title('Test navigation into a selected project page')
@allure.severity(allure.severity_level.CRITICAL)
@pytest.mark.sanity
@pytest.mark.home
@pytest.mark.project
def test_select_project(setup):
with allure.step('Setup driver'):
driver = setup
driver.get(Credentials.BASE_URL)
with allure.step('Login to OpenProject'):
SharedSteps.login_steps(driver)
| home_page = HomePage(driver)
with allure.step('Click "Select a project" menu button, and select a project from the drop-down'):
home_page.select_project(Credentials.HOME_PAGE_SELECTED_PROJECT)
with allure.step('Verify the value of the "identifier" field'):
# Note: OpenProject's identifier field doesn't match project requirements for special characters
expected_identifier = identifier_generator(Credentials.HOME_PAGE_SELECTED_PROJECT)
assert expected_identifier in driver.current_url, get_screenshot(driver, "home", "identifier", expected_identifier)
# Another option
assert f'title="{Credentials.HOME_PAGE_SELECTED_PROJECT}"' in driver.page_source, get_screenshot(driver, "home", "page_source") | with allure.step('Create a HomePage instance'): |
certificate.go | package certificate
import (
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"golang.org/x/crypto/pkcs12"
)
// BlockType PEM formatted block (certificate, private key etc)
type BlockType string
func (bt BlockType) String() string {
return string(bt)
}
// Type of BlockType
const (
PrivateKey BlockType = "PRIVATE KEY"
PublicKey BlockType = "PUBLIC KEY"
RSAPrivateKey BlockType = "RSA PRIVATE KEY"
ECPrivateKey BlockType = "EC PRIVATE KEY"
Certificate BlockType = "CERTIFICATE"
)
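// For example, a block of type Certificate is delimited by
// "-----BEGIN CERTIFICATE-----" / "-----END CERTIFICATE-----" in a PEM file.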
// ReadP12File reading a .p12 file
func ReadP12File(filename string, password string) (tls.Certificate, error) {
file, err := ioutil.ReadFile(filename)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Error while loading %s: %v", filename, err)
}
// Decode the certificate
privateKey, cert, err := pkcs12.Decode(file, password)
if err != nil {
return tls.Certificate{}, err
}
// Verify the certificate. A nil error means verification succeeded;
// failures such as expiry are currently tolerated (see TODOs below).
_, err = cert.Verify(x509.VerifyOptions{})
switch e := err.(type) {
case x509.CertificateInvalidError:
switch e.Reason {
case x509.Expired:
// TODO Better support for error
default:
}
case x509.UnknownAuthorityError:
// TODO Better support for error
default:
}
// check if private key is correct
priv, b := privateKey.(*rsa.PrivateKey)
if !b {
return tls.Certificate{}, fmt.Errorf("Error with private key")
}
certificate := tls.Certificate{
Certificate: [][]byte{cert.Raw},
PrivateKey: priv,
Leaf: cert,
}
| // ReadPemFile parse .pem file returns tls.Certificate, error
func ReadPemFile(filename string, password string) (tls.Certificate, error) {
var certification tls.Certificate
var block *pem.Block
bytes, err := ioutil.ReadFile(filename)
if err != nil {
return tls.Certificate{}, err
}
if len(bytes) > 0 {
for {
block, bytes = pem.Decode(bytes)
if block == nil {
break
}
switch BlockType(block.Type) {
case PrivateKey:
// PrivateKey
case PublicKey:
// PublicKey
case Certificate:
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return tls.Certificate{}, err
}
certification.Leaf = cert
certification.Certificate = append(certification.Certificate, block.Bytes)
case RSAPrivateKey:
if x509.IsEncryptedPEMBlock(block) {
bytes, err := x509.DecryptPEMBlock(block, []byte(password))
if err != nil {
return tls.Certificate{}, errors.New("Failed to decrypt private key")
}
key, err := x509.ParsePKCS1PrivateKey(bytes)
if err != nil {
return tls.Certificate{}, errors.New("Failed to parse PKCS1 private key")
}
certification.PrivateKey = key
} else {
key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return tls.Certificate{}, errors.New("Failed to parse PKCS1 private key")
}
certification.PrivateKey = key
}
case ECPrivateKey:
//ECPrivateKey
default:
return tls.Certificate{}, fmt.Errorf("Decode Pem file: encountered unknown block type %s", block.Type)
}
}
}
return certification, nil
} | //return cert, priv, nil
return certificate, nil
}
|
map.js | // import * as mars3d from "mars3d"
var map // mars3d.Map 3D map object
// Overrides for the map attribute parameters in config.json (the sample framework merges these automatically)
var mapOptions = {
control: {
homeButton: true, // button to return to the default view
navigationHelpButton: false, // whether to show the navigation help control
fullscreenButton: false, // fullscreen button in the bottom-right corner
geocoder: false,
sceneModePicker: false,
vrButton: false
}
}
/**
* Initializes the map business logic; lifecycle hook function (required).
 * The framework calls this automatically once the map has finished initializing.
 * @param {mars3d.Map} mapInstance map object
 * @returns {void} none
*/
function onMounted(mapInstance) {
map = mapInstance // keep a reference to the map
const toolButton = new mars3d.control.ToolButton({
title: "示例按钮bookmark",
icon: "img/icon/bookmark-one.svg",
insertIndex: 1, // 插入的位置顺序, 1是home按钮后面
click: () => {
globalMsg("单击了 示例按钮bookmark,回调中想干啥就干啥~")
}
})
map.addControl(toolButton)
const toolButton2 = new mars3d.control.ToolButton({
title: "示例按钮good",
icon: "img/icon/good.svg",
insertIndex: 0, // 插入的位置顺序
click: () => {
globalMsg("单击了 示例按钮good,回调中想干啥就干啥~")
}
})
map.addControl(toolButton2)
const toolButton3 = new mars3d.control.ToolButton({
title: "示例按钮chinese",
icon: "img/icon/chinese.svg",
click: () => {
globalMsg("单击了 示例按钮chinese,回调中想干啥就干啥~")
}
})
map.addControl(toolButton3)
}
/**
* Lifecycle hook that releases the current map business logic
 * @returns {void} none
*/
function onUnmounted() {
map = null
}
| ||
p-d59ebda8.js | import"./p-58641edb.js";import"./p-01cf4201.js";import{c as t}from"./p-8a636cd9.js";import{g as n}from"./p-fef7f745.js";const o=t=>document.querySelector(t+".ion-cloned-element"),a=t=>t.shadowRoot||t,e=t=>{const n="ION-TABS"===t.tagName?t:t.querySelector("ion-tabs"),o="ion-header:not(.header-collapse-condense-inactive) ion-title.title-large";if(null!=n){const t=n.querySelector("ion-tab:not(.tab-hidden), .ion-page:not(.ion-page-hidden)");return null!=t?t.querySelector(o):null}return t.querySelector(o)},s=(t,n)=>{const o="ION-TABS"===t.tagName?t:t.querySelector("ion-tabs");let a=[];if(null!=o){const t=o.querySelector("ion-tab:not(.tab-hidden), .ion-page:not(.ion-page-hidden)");null!=t&&(a=t.querySelectorAll("ion-buttons"))}else a=t.querySelectorAll("ion-buttons");for(const e of a){const t=e.closest("ion-header"),o=t&&!t.classList.contains("header-collapse-condense-inactive"),a=e.querySelector("ion-back-button"),s=e.classList.contains("buttons-collapse"),r="start"===e.slot||""===e.slot;if(null!==a&&r&&(s&&o&&n||!s))return a}return null},r=(n,e,s,r,i,l)=>{const c=e?`calc(100% - ${l.right+4}px)`:l.left-4+"px",p=e?"7px":"-7px",f=e?"-4px":"4px",d=e?"-4px":"4px",$=e?"right":"left",b=e?"left":"right",m=s?[{offset:0,opacity:1,transform:`translate3d(${f}, ${l.top-46}px, 0) scale(1)`},{offset:.6,opacity:0},{offset:1,opacity:0,transform:`translate3d(${p}, ${i.top-40}px, 0) scale(2.1)`}]:[{offset:0,opacity:0,transform:`translate3d(${p}, ${i.top-40}px, 0) scale(2.1)`},{offset:1,opacity:1,transform:`translate3d(${f}, ${l.top-46}px, 0) scale(1)`}],u=s?[{offset:0,opacity:1,transform:`translate3d(${d}, ${l.top-46}px, 0) scale(1)`},{offset:.2,opacity:0,transform:`translate3d(${d}, ${l.top-41}px, 0) scale(0.6)`},{offset:1,opacity:0,transform:`translate3d(${d}, ${l.top-41}px, 0) scale(0.6)`}]:[{offset:0,opacity:0,transform:`translate3d(${d}, ${l.top-41}px, 0) scale(0.6)`},{offset:1,opacity:1,transform:`translate3d(${d}, ${l.top-46}px, 0) scale(1)`}],y=t(),X=t(),x=o("ion-back-button"),h=a(x).querySelector(".button-text"),g=a(x).querySelector("ion-icon");x.text=r.text,x.mode=r.mode,x.icon=r.icon,x.color=r.color,x.disabled=r.disabled,x.style.setProperty("display","block"),x.style.setProperty("position","fixed"),X.addElement(g),y.addElement(h),y.beforeStyles({"transform-origin":$+" center"}).beforeAddWrite(()=>{r.style.setProperty("display","none"),x.style.setProperty($,c)}).afterAddWrite(()=>{r.style.setProperty("display",""),x.style.setProperty("display","none"),x.style.removeProperty($)}).keyframes(m),X.beforeStyles({"transform-origin":b+" center"}).keyframes(u),n.addAnimation([y,X])},i=(n,a,e,s,r,i)=>{const l=a?`calc(100% - ${r.right}px)`:r.left+"px",c=a?"-18px":"18px",p=a?"right":"left",f=e?[{offset:0,opacity:0,transform:`translate3d(${c}, ${i.top-4}px, 0) scale(0.49)`},{offset:.1,opacity:0},{offset:1,opacity:1,transform:`translate3d(0, ${r.top-2}px, 0) scale(1)`}]:[{offset:0,opacity:.99,transform:`translate3d(0, ${r.top-2}px, 0) scale(1)`},{offset:.6,opacity:0},{offset:1,opacity:0,transform:`translate3d(${c}, ${i.top-4}px, 0) scale(0.5)`}],d=o("ion-title"),$=t();d.innerText=s.innerText,d.size=s.size,d.color=s.color,$.addElement(d),$.beforeStyles({"transform-origin":p+" center",height:"46px",display:"",position:"relative",[p]:l}).beforeAddWrite(()=>{s.style.setProperty("display","none")}).afterAddWrite(()=>{s.style.setProperty("display",""),d.style.setProperty("display","none")}).keyframes(f),n.addAnimation($)},l=(o,l)=>{try{const 
c="cubic-bezier(0.32,0.72,0,1)",p="opacity",f="transform",d="0%",$=.8,b="rtl"===o.ownerDocument.dir,m=b?"-99.5%":"99.5%",u=b?"33%":"-33%",y=l.enteringEl,X=l.leavingEl,x="back"===l.direction,h=y.querySelector(":scope > ion-content"),g=y.querySelectorAll(":scope > ion-header > *:not(ion-toolbar), :scope > ion-footer > *"),v=y.querySelectorAll(":scope > ion-header > ion-toolbar"),k=t(),w=t();if(k.addElement(y).duration(l.duration||540).easing(l.easing||c).fill("both").beforeRemoveClass("ion-page-invisible"),X&&o){const n=t();n.addElement(o),k.addAnimation(n)}if(h||0!==v.length||0!==g.length?(w.addElement(h),w.addElement(g)):w.addElement(y.querySelector(":scope > .ion-page, :scope > ion-nav, :scope > ion-tabs")),k.addAnimation(w),x?w.beforeClearStyles([p]).fromTo("transform",`translateX(${u})`,`translateX(${d})`).fromTo(p,$,1):w.beforeClearStyles([p]).fromTo("transform",`translateX(${m})`,`translateX(${d})`),h){const n=a(h).querySelector(".transition-effect");if(n){const o=n.querySelector(".transition-cover"),a=n.querySelector(".transition-shadow"),e=t(),s=t(),r=t();e.addElement(n).beforeStyles({opacity:"1",display:"block"}).afterStyles({opacity:"",display:""}),s.addElement(o).beforeClearStyles([p]).fromTo(p,0,.1),r.addElement(a).beforeClearStyles([p]).fromTo(p,.03,.7),e.addAnimation([s,r]),w.addAnimation([e])}}const T=y.querySelector("ion-header.header-collapse-condense"),{forward:j,backward:A}=((t,n,o,a,l)=>{const c=s(a,o),p=e(l),f=e(a),d=s(l,o),$=null!==c&&null!==p&&!o,b=null!==f&&null!==d&&o;if($){const a=p.getBoundingClientRect(),e=c.getBoundingClientRect();i(t,n,o,p,a,e),r(t,n,o,c,a,e)}else if(b){const a=f.getBoundingClientRect(),e=d.getBoundingClientRect();i(t,n,o,f,a,e),r(t,n,o,d,a,e)}return{forward:$,backward:b}})(k,b,x,y,X);if(v.forEach(n=>{const o=t();o.addElement(n),k.addAnimation(o);const e=t();e.addElement(n.querySelector("ion-title"));const s=t(),r=Array.from(n.querySelectorAll("ion-buttons,[menuToggle]")),i=n.closest("ion-header"),l=i&&i.classList.contains("header-collapse-condense-inactive");let c;c=r.filter(x?t=>{const n=t.classList.contains("buttons-collapse");return n&&!l||!n}:t=>!t.classList.contains("buttons-collapse")),s.addElement(c);const f=t();f.addElement(n.querySelectorAll(":scope > *:not(ion-title):not(ion-buttons):not([menuToggle])"));const $=t();$.addElement(a(n).querySelector(".toolbar-background"));const y=t(),X=n.querySelector("ion-back-button");if(X&&y.addElement(X),o.addAnimation([e,s,f,$,y]),s.fromTo(p,.01,1),f.fromTo(p,.01,1),x)l||e.fromTo("transform",`translateX(${u})`,`translateX(${d})`).fromTo(p,.01,1),f.fromTo("transform",`translateX(${u})`,`translateX(${d})`),y.fromTo(p,.01,1);else if(T||e.fromTo("transform",`translateX(${m})`,`translateX(${d})`).fromTo(p,.01,1),f.fromTo("transform",`translateX(${m})`,`translateX(${d})`),$.beforeClearStyles([p,"transform"]),(null==i?void 0:i.translucent)?$.fromTo("transform",b?"translateX(-100%)":"translateX(100%)","translateX(0px)"):$.fromTo(p,.01,"var(--opacity)"),j||y.fromTo(p,.01,1),X&&!j){const n=t();n.addElement(a(X).querySelector(".button-text")).fromTo("transform",b?"translateX(-100px)":"translateX(100px)","translateX(0px)"),o.addAnimation(n)}}),X){const o=t(),e=X.querySelector(":scope > ion-content"),s=X.querySelectorAll(":scope > ion-header > ion-toolbar"),r=X.querySelectorAll(":scope > ion-header > *:not(ion-toolbar), :scope > ion-footer > *");if(e||0!==s.length||0!==r.length?(o.addElement(e),o.addElement(r)):o.addElement(X.querySelector(":scope > .ion-page, :scope > ion-nav, :scope > 
ion-tabs")),k.addAnimation(o),x){o.beforeClearStyles([p]).fromTo("transform",`translateX(${d})`,b?"translateX(-100%)":"translateX(100%)");const t=n(X);k.afterAddWrite(()=>{"normal"===k.getDirection()&&t.style.setProperty("display","none")})}else o.fromTo("transform",`translateX(${d})`,`translateX(${u})`).fromTo(p,1,$);if(e){const n=a(e).querySelector(".transition-effect");if(n){const a=n.querySelector(".transition-cover"),e=n.querySelector(".transition-shadow"),s=t(),r=t(),i=t();s.addElement(n).beforeStyles({opacity:"1",display:"block"}).afterStyles({opacity:"",display:""}),r.addElement(a).beforeClearStyles([p]).fromTo(p,.1,0),i.addElement(e).beforeClearStyles([p]).fromTo(p,.7,.03),s.addAnimation([r,i]),o.addAnimation([s])}}s.forEach(n=>{const o=t();o.addElement(n);const e=t();e.addElement(n.querySelector("ion-title"));const s=t(),r=n.querySelectorAll("ion-buttons,[menuToggle]"),i=n.closest("ion-header"),l=i&&i.classList.contains("header-collapse-condense-inactive"),c=Array.from(r).filter(t=>{const n=t.classList.contains("buttons-collapse");return n&&!l||!n});s.addElement(c);const $=t(),m=n.querySelectorAll(":scope > *:not(ion-title):not(ion-buttons):not([menuToggle])");m.length>0&&$.addElement(m);const y=t();y.addElement(a(n).querySelector(".toolbar-background"));const X=t(),h=n.querySelector("ion-back-button");if(h&&X.addElement(h),o.addAnimation([e,s,$,X,y]),k.addAnimation(o),X.fromTo(p,.99,0),s.fromTo(p,.99,0),$.fromTo(p,.99,0),x){if(l||e.fromTo("transform",`translateX(${d})`,b?"translateX(-100%)":"translateX(100%)").fromTo(p,.99,0),$.fromTo("transform",`translateX(${d})`,b?"translateX(-100%)":"translateX(100%)"),y.beforeClearStyles([p,"transform"]),(null==i?void 0:i.translucent)?y.fromTo("transform","translateX(0px)",b?"translateX(-100%)":"translateX(100%)"):y.fromTo(p,"var(--opacity)",0),h&&!A){const n=t();n.addElement(a(h).querySelector(".button-text")).fromTo("transform",`translateX(${d})`,`translateX(${(b?-124:124)+"px"})`),o.addAnimation(n)}}else l||e.fromTo("transform",`translateX(${d})`,`translateX(${u})`).fromTo(p,.99,0).afterClearStyles([f,p]),$.fromTo("transform",`translateX(${d})`,`translateX(${u})`).afterClearStyles([f,p]),X.afterClearStyles([p]),e.afterClearStyles([p]),s.afterClearStyles([p])})}return k}catch(c){throw c}};export{l as iosTransitionAnimation,a as shadow} |
||
base.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package base implements functionality to build the kind base image
package base
import (
"os"
"path/filepath" | log "github.com/sirupsen/logrus"
"sigs.k8s.io/kind/pkg/build/base/sources"
"sigs.k8s.io/kind/pkg/exec"
"sigs.k8s.io/kind/pkg/fs"
)
// DefaultImage is the default name:tag of the built base image
const DefaultImage = "kindest/base:latest"
// BuildContext is used to build the kind node base image, and contains
// build configuration
type BuildContext struct {
// option fields
sourceDir string
image string
// non option fields
goCmd string // TODO(bentheelder): should be an option possibly
arch string // TODO(bentheelder): should be an option
}
// Option is BuildContext configuration option supplied to NewBuildContext
type Option func(*BuildContext)
// WithSourceDir configures a NewBuildContext to use the source dir `sourceDir`
func WithSourceDir(sourceDir string) Option {
return func(b *BuildContext) {
b.sourceDir = sourceDir
}
}
// WithImage configures a NewBuildContext to tag the built image with `name`
func WithImage(image string) Option {
return func(b *BuildContext) {
b.image = image
}
}
// NewBuildContext creates a new BuildContext with
// default configuration
func NewBuildContext(options ...Option) *BuildContext {
ctx := &BuildContext{
image: DefaultImage,
goCmd: "go",
arch: "amd64",
}
for _, option := range options {
option(ctx)
}
return ctx
}
// Build builds the base image. If sourceDir is unset on the
// BuildContext, the baked-in image sources are used.
func (c *BuildContext) Build() (err error) {
// create tempdir to build in
tmpDir, err := fs.TempDir("", "kind-base-image")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
// populate with image sources
// if SourceDir is unset, use the baked in sources
buildDir := tmpDir
if c.sourceDir == "" {
// populate with image sources
err = sources.RestoreAssets(buildDir, "images/base")
if err != nil {
return err
}
buildDir = filepath.Join(buildDir, "images", "base")
} else {
err = fs.Copy(c.sourceDir, buildDir)
if err != nil {
log.Errorf("failed to copy sources to build dir %v", err)
return err
}
}
log.Infof("Building base image in: %s", buildDir)
// build the entrypoint binary first
if err := c.buildEntrypoint(buildDir); err != nil {
return err
}
// then the actual docker image
return c.buildImage(buildDir)
}
// builds the entrypoint binary
func (c *BuildContext) buildEntrypoint(dir string) error {
// NOTE: this binary only uses the go1 stdlib, and is a single file
entrypointSrc := filepath.Join(dir, "entrypoint", "main.go")
entrypointDest := filepath.Join(dir, "entrypoint", "entrypoint")
cmd := exec.Command(c.goCmd, "build", "-o", entrypointDest, entrypointSrc)
// TODO(bentheelder): we may need to map between docker image arch and GOARCH
cmd.SetEnv("GOOS=linux", "GOARCH="+c.arch)
// actually build
log.Info("Building entrypoint binary ...")
exec.InheritOutput(cmd)
if err := cmd.Run(); err != nil {
log.Errorf("Entrypoint build Failed! %v", err)
return err
}
log.Info("Entrypoint build completed.")
return nil
}
func (c *BuildContext) buildImage(dir string) error {
// build the image, tagged as c.image, using our tempdir as the context
cmd := exec.Command("docker", "build", "-t", c.image, dir)
log.Info("Starting Docker build ...")
exec.InheritOutput(cmd)
err := cmd.Run()
if err != nil {
log.Errorf("Docker build Failed! %v", err)
return err
}
log.Info("Docker build completed.")
return nil
} | |
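// A minimal usage sketch, assuming this package is imported as base; the
// image tag "kindest/base:dev" is illustrative only:
//
// ctx := base.NewBuildContext(base.WithImage("kindest/base:dev"))
// if err := ctx.Build(); err != nil {
//     log.Errorf("base image build failed: %v", err)
// }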
app.component.spec.ts | import { AppComponent } from './app.component';
describe('AppComponent', () => {
beforeEach(async(() => {
TestBed.configureTestingModule({
imports: [
RouterTestingModule
],
declarations: [
AppComponent
],
}).compileComponents();
}));
it('should create the app', () => {
const fixture = TestBed.createComponent(AppComponent);
const app = fixture.debugElement.componentInstance;
expect(app).toBeTruthy();
});
it(`should have as title 'AngularTemplates'`, () => {
const fixture = TestBed.createComponent(AppComponent);
const app = fixture.debugElement.componentInstance;
expect(app.title).toEqual('AngularTemplates');
});
it('should render title in a h1 tag', () => {
const fixture = TestBed.createComponent(AppComponent);
fixture.detectChanges();
const compiled = fixture.debugElement.nativeElement;
expect(compiled.querySelector('h1').textContent).toContain('Welcome to AngularTemplates!');
});
}); | import { TestBed, async } from '@angular/core/testing';
import { RouterTestingModule } from '@angular/router/testing'; |
|
minimum.js | /**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import { minimum } from '../../ops/minimum';
import { getGlobalTensorClass } from '../../tensor';
getGlobalTensorClass().prototype.minimum = function (b) {
this.throwIfDisposed();
return minimum(this, b); | };
//# sourceMappingURL=minimum.js.map |
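// A minimal usage sketch, assuming two tfjs tensors a and b are in scope;
// the chained method registered above mirrors the functional tf.minimum(a, b):
//
// const c = a.minimum(b); // element-wise minimum, with broadcasting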
|
pressable.ts | namespace Ui {
export class | extends Core.Object {
private element: Ui.Element;
private press: (watcher: PressWatcher) => void;
private down: (watcher: PressWatcher) => void;
private up: (watcher: PressWatcher) => void;
private activate: (watcher: PressWatcher) => void;
private delayedpress: (watcher: PressWatcher) => void;
private _isDown: boolean = false;
private lastTime: number = undefined;
private delayedTimer: Core.DelayedTask;
x?: number;
y?: number;
altKey?: boolean;
shiftKey?: boolean;
ctrlKey?: boolean;
lock: boolean = false;
constructor(init: {
element: Ui.Element,
onpressed?: (watcher: PressWatcher) => void,
ondowned?: (watcher: PressWatcher) => void,
onupped?: (watcher: PressWatcher) => void,
onactivated?: (watcher: PressWatcher) => void,
ondelayedpress?: (watcher: PressWatcher) => void
}) {
super();
this.element = init.element;
if (init.onpressed)
this.press = init.onpressed;
if (init.ondowned)
this.down = init.ondowned;
if (init.onupped)
this.up = init.onupped;
if (init.onactivated)
this.activate = init.onactivated;
if (init.ondelayedpress)
this.delayedpress = init.ondelayedpress;
// handle pointers
this.element.ptrdowned.connect((e) => this.onPointerDown(e));
// handle keyboard
this.element.drawing.addEventListener('keydown', (e) => this.onKeyDown(e));
this.element.drawing.addEventListener('keyup', (e) => this.onKeyUp(e));
}
get isDown(): boolean {
return this._isDown;
}
protected onPointerDown(event: PointerEvent) {
if (this.lock || this.element.isDisabled || this._isDown)
return;
if (event.pointer.type == 'mouse' && event.pointer.button != 0)
return;
let watcher = event.pointer.watch(this);
watcher.moved.connect(() => {
if (watcher.pointer.getIsMove())
watcher.cancel();
});
watcher.upped.connect(() => {
this.onUp();
let x = event.pointer.getX();
let y = event.pointer.getY();
let altKey = event.pointer.getAltKey();
let shiftKey = event.pointer.getShiftKey();
let ctrlKey = event.pointer.getCtrlKey();
this.onPress(x, y, altKey, shiftKey, ctrlKey);
watcher.capture();
watcher.cancel();
});
watcher.cancelled.connect(() => this.onUp());
this.onDown();
}
protected onKeyDown(event: KeyboardEvent) {
let key = event.which;
// handle Enter and Space key
if (!this.lock && !this.element.isDisabled && (key == 13 || key == 32)) {
event.preventDefault();
event.stopPropagation();
this.onDown();
}
}
protected onKeyUp(event: KeyboardEvent) {
let key = event.which;
// handle Enter and Space key
if (!this.lock && !this.element.isDisabled && this._isDown && (key == 13 || key == 32)) {
event.preventDefault();
event.stopPropagation();
this.onUp();
this.onPress(undefined, undefined, event.altKey, event.shiftKey, event.ctrlKey);
}
}
protected onDown() {
this._isDown = true;
if (this.down)
this.down(this);
}
protected onUp() {
this._isDown = false;
if (this.up)
this.up(this);
}
protected onPress(x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean) {
this.x = x; this.y = y;
this.altKey = altKey; this.shiftKey = shiftKey; this.ctrlKey = ctrlKey;
if (this.press)
this.press(this);
// test for activate signal
let currentTime = (new Date().getTime()) / 1000;
if ((this.lastTime !== undefined) && (currentTime - this.lastTime < 0.30)) {
this.onActivate(x, y);
if (this.delayedTimer != undefined) {
this.delayedTimer.abort();
this.delayedTimer = undefined;
}
}
else {
this.delayedTimer = new Core.DelayedTask(0.30, () => {
this.onDelayedPress(x, y, altKey, shiftKey, ctrlKey);
});
}
this.lastTime = currentTime;
}
protected onActivate(x?: number, y?: number) {
if (this.activate)
this.activate(this);
}
protected onDelayedPress(x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean) {
this.x = x; this.y = y;
this.altKey = altKey; this.shiftKey = shiftKey; this.ctrlKey = ctrlKey;
if (this.delayedTimer) {
if (!this.delayedTimer.isDone)
this.delayedTimer.abort();
this.delayedTimer = undefined;
}
if (this.delayedpress)
this.delayedpress(this);
}
}
export interface PressableInit extends OverableInit {
lock?: boolean;
onpressed?: (event: { target: Pressable, x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean }) => void;
ondowned?: (event: { target: Pressable }) => void;
onupped?: (event: { target: Pressable }) => void;
onactivated?: (event: { target: Pressable, x?: number, y?: number }) => void;
ondelayedpress?: (event: { target: Pressable, x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean }) => void;
}
export class Pressable extends Overable implements PressableInit {
private pressWatcher: PressWatcher;
readonly downed = new Core.Events<{ target: Pressable }>();
set ondowned(value: (event: { target: Pressable}) => void) { this.downed.connect(value); }
readonly upped = new Core.Events<{ target: Pressable }>();
set onupped(value: (event: { target: Pressable}) => void) { this.upped.connect(value); }
readonly pressed = new Core.Events<{ target: Pressable, x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean }>();
set onpressed(value: (event: { target: Pressable, x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean}) => void) { this.pressed.connect(value); }
readonly activated = new Core.Events<{ target: Pressable, x?: number, y?: number }>();
set onactivated(value: (event: { target: Pressable, x?: number, y?: number }) => void) { this.activated.connect(value); }
readonly delayedpress = new Core.Events<{ target: Pressable, x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean }>();
set ondelayedpress(value: (event:{ target: Pressable, x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean }) => void) { this.delayedpress.connect(value); }
constructor(init?: PressableInit) {
super(init);
this.drawing.style.cursor = 'pointer';
this.focusable = true;
this.role = 'button';
this.pressWatcher = new PressWatcher({
element: this,
onpressed: (watcher) => this.onPress(watcher.x, watcher.y, watcher.altKey, watcher.shiftKey, watcher.ctrlKey),
ondowned: (watcher) => this.onDown(),
onupped: (watcher) => this.onUp(),
onactivated: (watcher) => this.onActivate(watcher.x, watcher.y),
ondelayedpress: (watcher) => this.onDelayedPress(watcher.x, watcher.y, watcher.altKey, watcher.shiftKey, watcher.ctrlKey)
});
if (init) {
if (init.lock !== undefined)
this.lock = init.lock;
if (init.onpressed !== undefined)
this.pressed.connect(init.onpressed);
if (init.ondowned !== undefined)
this.downed.connect(init.ondowned);
if (init.onupped !== undefined)
this.upped.connect(init.onupped);
if (init.onactivated !== undefined)
this.activated.connect(init.onactivated);
if (init.ondelayedpress !== undefined)
this.delayedpress.connect(init.ondelayedpress);
}
}
get isDown(): boolean {
return this.pressWatcher.isDown;
}
set lock(lock: boolean) {
this.pressWatcher.lock = lock;
if (lock)
this.drawing.style.cursor = '';
else
this.drawing.style.cursor = 'pointer';
}
get lock(): boolean {
return this.pressWatcher.lock;
}
protected onDown() {
this.downed.fire({ target: this });
}
protected onUp() {
this.upped.fire({ target: this });
}
press() {
if (!this.isDisabled && !this.lock)
this.onPress();
}
protected onPress(x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean) {
this.pressed.fire({ target: this, x: x, y: y, altKey: altKey, shiftKey: shiftKey, ctrlKey: ctrlKey });
}
protected onActivate(x?: number, y?: number) {
this.activated.fire({ target: this, x: x, y: y });
}
protected onDelayedPress(x?: number, y?: number, altKey?: boolean, shiftKey?: boolean, ctrlKey?: boolean) {
this.delayedpress.fire({ target: this, x: x, y: y, altKey: altKey, shiftKey: shiftKey, ctrlKey: ctrlKey });
}
protected onDisable() {
super.onDisable();
this.drawing.style.cursor = '';
}
protected onEnable() {
super.onEnable();
if (this.lock)
this.drawing.style.cursor = '';
else
this.drawing.style.cursor = 'pointer';
}
}
}
| PressWatcher |
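// A minimal usage sketch, assuming the Ui.Pressable class above; the
// handlers below are illustrative only:
//
// const button = new Ui.Pressable({
//     onpressed: e => console.log('pressed at', e.x, e.y),
//     onactivated: e => console.log('second press within 0.30 s'),
//     ondelayedpress: e => console.log('single press confirmed after the delay')
// });
// button.lock = true; // stop handling input and clear the pointer cursor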
std_logic.py |
class std_logic():
"""
class to represent a digital bit, supporting the same nine values as IEEE 1164.
====== ===============
Value Interpretation
------ ---------------
U Uninitialized
X Unknown
0 Strong 0
1 Strong 1
Z High Impedance
W Weak unknown logic
L Weak logic 0
H Weak logic 1
- Don't care
====== ===============
Refer to https://en.wikipedia.org/wiki/IEEE_1164 for more details
"""
def __init__(self,initialvalue='U'):
"""
:param initialvalue: value to be loaded into the bit
:type initialvalue: int, bool, str
"""
self._value = 'U'
self.set(value=initialvalue)
def __str__(self):
return self._value
def __repr__(self):
base_repr = super().__repr__()
return base_repr[:-2] + ':%s>'%self._value
def __eq__(self, other):
if issubclass(other.__class__,std_logic):
return self._value == other._value
else:
raise NotImplementedError
def __and__(self,other):
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
( 'U', 'U', '0', 'U', 'U', 'U', '0', 'U', 'U' ), -- | U |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | X |
( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | 0 |
( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | 1 |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | Z |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | W |
( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | L |
( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | H |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ) -- | - |
"""
if self == std_logic('U'):
if other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
else:
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
else:
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
return_value = std_logic(0)
elif self == std_logic('1') or self == std_logic('H'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __xor__(self, other):
"""
performs a bitwise xor operation
:param other: std_logic value to xor with this one
:return: self ^ other
"""
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
('U', 'U', 'U', 'U', 'U', 'U', 'U', 'U', 'U'), -- | U |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | X |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 |
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | 1 |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | Z |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | W |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L |
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | H |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X') -- | - |
);
"""
if self == std_logic('U'):
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
else:
return_value = std_logic('X')
elif self == std_logic('1') or self == std_logic('H'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(1)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(0)
else:
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __or__(self,other):
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
('U', 'U', 'U', '1', 'U', 'U', 'U', '1', 'U'), -- | U |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | X |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 |
('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | 1 |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | Z |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | W |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L |
('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | H |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X') -- | - |
)
"""
if self == std_logic('U'):
if other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
elif self == std_logic('1') or self == std_logic('H'):
return_value = std_logic(1)
elif self == std_logic('0') or self == std_logic('L'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def | (self):
"""
truth table from std_logic_1164-body.vhdl
-------------------------------------------------
| U X 0 1 Z W L H - |
-------------------------------------------------
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X')
"""
if self == std_logic('U'):
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
return_value = std_logic(1)
elif self == std_logic('1') or self == std_logic('H'):
return_value = std_logic(0)
return return_value
def set(self,value):
"""
In-place value set.
:param value: value to be loaded into the bit
:type value: int, bool, str
"""
if isinstance(value,str):
if len(value) != 1:
raise ValueError('length is not 1')
if ((value == 'U') or
(value == 'X') or
(value == '0') or
(value == '1') or
(value == 'Z') or
(value == 'W') or
(value == 'L') or
(value == 'H') or
(value == '-')):
self._value = value
else:
raise ValueError('Unsupported value, only U,X,0,1,Z,W,L,H or - is permitted')
elif isinstance(value,bool):
if value is False:
self._value = '0'
elif value is True:
self._value = '1'
else:
raise ValueError('Illegal boolean value')
elif isinstance(value,int):
if (value == 0) or (value == 1):
self._value = str(value)
assert (self._value == '1') or (self._value == '0')
else:
raise ValueError('Unsupported integer value, only 0 or 1 is permitted')
else:
raise ValueError('Unsupported type')
| __invert__ |
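# A minimal usage sketch, assuming the std_logic class above and that the
# unnamed method at the fill-in point is __invert__; values are illustrative:
#
# a = std_logic('1')
# b = std_logic('L')       # weak 0 resolves like a strong 0 in these operators
# print(a & b)             # -> 0
# print(a | b)             # -> 1
# print(a ^ b)             # -> 1
# print(~std_logic('W'))   # unknown inputs propagate -> X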
logger.py | import logging
from .observer import Observer
import config
class Logger(Observer):
| def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid, perc,
weighted_buyprice, weighted_sellprice):
logging.info("profit: %f %s with volume: %f %s - buy at %i (%s) sell at %i (%s) ~%.2f%%" %
(profit, config.s_coin, volume, config.p_coin, buyprice * 100000000, kask, sellprice * 100000000, kbid, perc)) |
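# A minimal usage sketch, assuming an arbitrage loop that has found a
# spread; every number below is illustrative only:
#
# Logger().opportunity(profit=0.5, volume=1.0, buyprice=0.00011, kask='exchA',
#                      sellprice=0.00012, kbid='exchB', perc=2.1,
#                      weighted_buyprice=0.00011, weighted_sellprice=0.00012)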
|
index.tsx | import React from 'react';
import { GetStaticProps } from 'next';
import { Grid } from '@material-ui/core';
import { ThemeProvider } from '@material-ui/core/styles';
import { Graduate } from '../components/Graduate';
import { Introduction } from '../components/Introduction';
import { Jobs } from '../components/Jobs';
import { About } from '../components/About';
import { Projects } from '../components/Projects';
import { Technologies } from '../components/Technologies';
import { Publications } from '../components/Publications';
import MaterialTheme from '../styles/MaterialTheme';
import IRepository from '../DTOs/IRepository';
import api from '../services/api';
interface IHomeProps {
repos: IRepository[];
}
export default function Home({ repos }: IHomeProps) {
return (
<ThemeProvider theme={MaterialTheme}>
<Grid
container
direction="column"
justify="center"
alignItems="flex-start"
>
<Introduction />
<About />
<Projects repos={repos} />
<Jobs />
<Technologies />
<Graduate />
<Publications />
</Grid>
</ThemeProvider>
);
}
export const getStaticProps: GetStaticProps = async () => {
const response = await api.get<IRepository[]>(
'/users/guilhermebolfe11/repos?sort=pushed_at',
);
return {
props: {
repos: response.data,
}, | revalidate: 60 * 60 * 24, // 24 hours
};
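// A usage note on the revalidate option above: with incremental static
// regeneration, Next.js serves the cached page and re-runs getStaticProps
// in the background at most once per 86,400 s (24 h) window when requests
// arrive, refreshing the GitHub repository list.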
}; | |
execution.go | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package postgres
import (
"context"
"database/sql"
"github.com/uber/cadence/common/persistence/sql/sqlplugin"
)
const (
executionsColumns = `shard_id, domain_id, workflow_id, run_id, next_event_id, last_write_version, data, data_encoding`
createExecutionQuery = `INSERT INTO executions(` + executionsColumns + `)
VALUES(:shard_id, :domain_id, :workflow_id, :run_id, :next_event_id, :last_write_version, :data, :data_encoding)`
updateExecutionQuery = `UPDATE executions SET
next_event_id = :next_event_id, last_write_version = :last_write_version, data = :data, data_encoding = :data_encoding
WHERE shard_id = :shard_id AND domain_id = :domain_id AND workflow_id = :workflow_id AND run_id = :run_id`
getExecutionQuery = `SELECT ` + executionsColumns + ` FROM executions
WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3 AND run_id = $4`
deleteExecutionQuery = `DELETE FROM executions
WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3 AND run_id = $4`
lockExecutionQueryBase = `SELECT next_event_id FROM executions
WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3 AND run_id = $4`
writeLockExecutionQuery = lockExecutionQueryBase + ` FOR UPDATE`
readLockExecutionQuery = lockExecutionQueryBase + ` FOR SHARE`
createCurrentExecutionQuery = `INSERT INTO current_executions
(shard_id, domain_id, workflow_id, run_id, create_request_id, state, close_status, start_version, last_write_version) VALUES
(:shard_id, :domain_id, :workflow_id, :run_id, :create_request_id, :state, :close_status, :start_version, :last_write_version)`
deleteCurrentExecutionQuery = "DELETE FROM current_executions WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3 AND run_id = $4"
getCurrentExecutionQuery = `SELECT
shard_id, domain_id, workflow_id, run_id, create_request_id, state, close_status, start_version, last_write_version
FROM current_executions WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3`
lockCurrentExecutionJoinExecutionsQuery = `SELECT
ce.shard_id, ce.domain_id, ce.workflow_id, ce.run_id, ce.create_request_id, ce.state, ce.close_status, ce.start_version, e.last_write_version
FROM current_executions ce
INNER JOIN executions e ON e.shard_id = ce.shard_id AND e.domain_id = ce.domain_id AND e.workflow_id = ce.workflow_id AND e.run_id = ce.run_id
WHERE ce.shard_id = $1 AND ce.domain_id = $2 AND ce.workflow_id = $3 FOR UPDATE`
lockCurrentExecutionQuery = getCurrentExecutionQuery + ` FOR UPDATE`
updateCurrentExecutionsQuery = `UPDATE current_executions SET
run_id = :run_id,
create_request_id = :create_request_id,
state = :state,
close_status = :close_status,
start_version = :start_version,
last_write_version = :last_write_version
WHERE
shard_id = :shard_id AND
domain_id = :domain_id AND
workflow_id = :workflow_id
`
getTransferTasksQuery = `SELECT task_id, data, data_encoding
FROM transfer_tasks WHERE shard_id = $1 AND task_id > $2 AND task_id <= $3 ORDER BY shard_id, task_id`
createTransferTasksQuery = `INSERT INTO transfer_tasks(shard_id, task_id, data, data_encoding)
VALUES(:shard_id, :task_id, :data, :data_encoding)`
deleteTransferTaskQuery = `DELETE FROM transfer_tasks WHERE shard_id = $1 AND task_id = $2`
rangeDeleteTransferTaskQuery = `DELETE FROM transfer_tasks WHERE shard_id = $1 AND task_id > $2 AND task_id <= $3`
createTimerTasksQuery = `INSERT INTO timer_tasks (shard_id, visibility_timestamp, task_id, data, data_encoding)
VALUES (:shard_id, :visibility_timestamp, :task_id, :data, :data_encoding)`
getTimerTasksQuery = `SELECT visibility_timestamp, task_id, data, data_encoding FROM timer_tasks
WHERE shard_id = $1
AND ((visibility_timestamp >= $2 AND task_id >= $3) OR visibility_timestamp > $4)
AND visibility_timestamp < $5
ORDER BY visibility_timestamp,task_id LIMIT $6`
deleteTimerTaskQuery = `DELETE FROM timer_tasks WHERE shard_id = $1 AND visibility_timestamp = $2 AND task_id = $3`
rangeDeleteTimerTaskQuery = `DELETE FROM timer_tasks WHERE shard_id = $1 AND visibility_timestamp >= $2 AND visibility_timestamp < $3`
createReplicationTasksQuery = `INSERT INTO replication_tasks (shard_id, task_id, data, data_encoding)
VALUES(:shard_id, :task_id, :data, :data_encoding)`
getReplicationTasksQuery = `SELECT task_id, data, data_encoding FROM replication_tasks WHERE
shard_id = $1 AND
task_id > $2 AND
task_id <= $3
ORDER BY task_id LIMIT $4`
deleteReplicationTaskQuery = `DELETE FROM replication_tasks WHERE shard_id = $1 AND task_id = $2`
rangeDeleteReplicationTaskQuery = `DELETE FROM replication_tasks WHERE shard_id = $1 AND task_id <= $2`
getReplicationTasksDLQQuery = `SELECT task_id, data, data_encoding FROM replication_tasks_dlq WHERE
source_cluster_name = $1 AND
shard_id = $2 AND
task_id > $3 AND
task_id <= $4
ORDER BY task_id LIMIT $5`
getReplicationTaskDLQQuery = `SELECT count(1) as count FROM replication_tasks_dlq WHERE
source_cluster_name = $1 AND
shard_id = $2`
bufferedEventsColumns = `shard_id, domain_id, workflow_id, run_id, data, data_encoding`
createBufferedEventsQuery = `INSERT INTO buffered_events(` + bufferedEventsColumns + `)
VALUES (:shard_id, :domain_id, :workflow_id, :run_id, :data, :data_encoding)`
deleteBufferedEventsQuery = `DELETE FROM buffered_events WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3 AND run_id = $4`
getBufferedEventsQuery = `SELECT data, data_encoding FROM buffered_events WHERE shard_id = $1 AND domain_id = $2 AND workflow_id = $3 AND run_id = $4`
insertReplicationTaskDLQQuery = `
INSERT INTO replication_tasks_dlq
(source_cluster_name,
shard_id,
task_id,
data,
data_encoding)
VALUES (:source_cluster_name,
:shard_id,
:task_id,
:data,
:data_encoding)
`
deleteReplicationTaskFromDLQQuery = `
DELETE FROM replication_tasks_dlq
WHERE source_cluster_name = $1
AND shard_id = $2
AND task_id = $3`
rangeDeleteReplicationTaskFromDLQQuery = `
DELETE FROM replication_tasks_dlq
WHERE source_cluster_name = $1
AND shard_id = $2
AND task_id > $3
AND task_id <= $4`
)
// InsertIntoExecutions inserts a row into executions table
func (pdb *db) InsertIntoExecutions(ctx context.Context, row *sqlplugin.ExecutionsRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, createExecutionQuery, row)
}
// UpdateExecutions updates a single row in executions table
func (pdb *db) UpdateExecutions(ctx context.Context, row *sqlplugin.ExecutionsRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, updateExecutionQuery, row)
}
// SelectFromExecutions reads a single row from executions table
func (pdb *db) SelectFromExecutions(ctx context.Context, filter *sqlplugin.ExecutionsFilter) (*sqlplugin.ExecutionsRow, error) {
var row sqlplugin.ExecutionsRow
err := pdb.conn.GetContext(ctx, &row, getExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
if err != nil {
return nil, err
}
return &row, err
}
// DeleteFromExecutions deletes a single row from executions table
func (pdb *db) DeleteFromExecutions(ctx context.Context, filter *sqlplugin.ExecutionsFilter) (sql.Result, error) {
return pdb.conn.ExecContext(ctx, deleteExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
}
// ReadLockExecutions acquires a read lock on a single row in executions table
func (pdb *db) ReadLockExecutions(ctx context.Context, filter *sqlplugin.ExecutionsFilter) (int, error) {
var nextEventID int
err := pdb.conn.GetContext(ctx, &nextEventID, readLockExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
return nextEventID, err
}
// WriteLockExecutions acquires a write lock on a single row in executions table
func (pdb *db) WriteLockExecutions(ctx context.Context, filter *sqlplugin.ExecutionsFilter) (int, error) {
var nextEventID int
err := pdb.conn.GetContext(ctx, &nextEventID, writeLockExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
return nextEventID, err
}
// InsertIntoCurrentExecutions inserts a single row into current_executions table
func (pdb *db) InsertIntoCurrentExecutions(ctx context.Context, row *sqlplugin.CurrentExecutionsRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, createCurrentExecutionQuery, row)
}
// UpdateCurrentExecutions updates a single row in current_executions table
func (pdb *db) UpdateCurrentExecutions(ctx context.Context, row *sqlplugin.CurrentExecutionsRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, updateCurrentExecutionsQuery, row)
}
// SelectFromCurrentExecutions reads a single row from current_executions table
func (pdb *db) SelectFromCurrentExecutions(ctx context.Context, filter *sqlplugin.CurrentExecutionsFilter) (*sqlplugin.CurrentExecutionsRow, error) {
var row sqlplugin.CurrentExecutionsRow
err := pdb.conn.GetContext(ctx, &row, getCurrentExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID)
return &row, err
}
// DeleteFromCurrentExecutions deletes a single row in current_executions table
func (pdb *db) DeleteFromCurrentExecutions(ctx context.Context, filter *sqlplugin.CurrentExecutionsFilter) (sql.Result, error) {
return pdb.conn.ExecContext(ctx, deleteCurrentExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
}
// LockCurrentExecutions acquires a write lock on a single row in current_executions table
func (pdb *db) LockCurrentExecutions(ctx context.Context, filter *sqlplugin.CurrentExecutionsFilter) (*sqlplugin.CurrentExecutionsRow, error) {
var row sqlplugin.CurrentExecutionsRow
err := pdb.conn.GetContext(ctx, &row, lockCurrentExecutionQuery, filter.ShardID, filter.DomainID, filter.WorkflowID)
return &row, err
}
// LockCurrentExecutionsJoinExecutions joins a row in current_executions with executions table and acquires a
// write lock on the result
func (pdb *db) LockCurrentExecutionsJoinExecutions(ctx context.Context, filter *sqlplugin.CurrentExecutionsFilter) ([]sqlplugin.CurrentExecutionsRow, error) {
var rows []sqlplugin.CurrentExecutionsRow
err := pdb.conn.SelectContext(ctx, &rows, lockCurrentExecutionJoinExecutionsQuery, filter.ShardID, filter.DomainID, filter.WorkflowID)
return rows, err
}
// InsertIntoTransferTasks inserts one or more rows into transfer_tasks table
func (pdb *db) InsertIntoTransferTasks(ctx context.Context, rows []sqlplugin.TransferTasksRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, createTransferTasksQuery, rows)
}
// SelectFromTransferTasks reads one or more rows from transfer_tasks table
func (pdb *db) SelectFromTransferTasks(ctx context.Context, filter *sqlplugin.TransferTasksFilter) ([]sqlplugin.TransferTasksRow, error) {
var rows []sqlplugin.TransferTasksRow
err := pdb.conn.SelectContext(ctx, &rows, getTransferTasksQuery, filter.ShardID, *filter.MinTaskID, *filter.MaxTaskID)
if err != nil {
return nil, err
}
return rows, err
}
// DeleteFromTransferTasks deletes one or more rows from transfer_tasks table
func (pdb *db) DeleteFromTransferTasks(ctx context.Context, filter *sqlplugin.TransferTasksFilter) (sql.Result, error) {
if filter.MinTaskID != nil {
return pdb.conn.ExecContext(ctx, rangeDeleteTransferTaskQuery, filter.ShardID, *filter.MinTaskID, *filter.MaxTaskID) | return pdb.conn.ExecContext(ctx, deleteTransferTaskQuery, filter.ShardID, *filter.TaskID)
}
// InsertIntoTimerTasks inserts one or more rows into timer_tasks table
func (pdb *db) InsertIntoTimerTasks(ctx context.Context, rows []sqlplugin.TimerTasksRow) (sql.Result, error) {
for i := range rows {
rows[i].VisibilityTimestamp = pdb.converter.ToPostgresDateTime(rows[i].VisibilityTimestamp)
}
return pdb.conn.NamedExecContext(ctx, createTimerTasksQuery, rows)
}
// SelectFromTimerTasks reads one or more rows from timer_tasks table
func (pdb *db) SelectFromTimerTasks(ctx context.Context, filter *sqlplugin.TimerTasksFilter) ([]sqlplugin.TimerTasksRow, error) {
var rows []sqlplugin.TimerTasksRow
*filter.MinVisibilityTimestamp = pdb.converter.ToPostgresDateTime(*filter.MinVisibilityTimestamp)
*filter.MaxVisibilityTimestamp = pdb.converter.ToPostgresDateTime(*filter.MaxVisibilityTimestamp)
err := pdb.conn.SelectContext(ctx, &rows, getTimerTasksQuery, filter.ShardID, *filter.MinVisibilityTimestamp,
filter.TaskID, *filter.MinVisibilityTimestamp, *filter.MaxVisibilityTimestamp, *filter.PageSize)
if err != nil {
return nil, err
}
for i := range rows {
rows[i].VisibilityTimestamp = pdb.converter.FromPostgresDateTime(rows[i].VisibilityTimestamp)
}
return rows, err
}
// DeleteFromTimerTasks deletes one or more rows from timer_tasks table
func (pdb *db) DeleteFromTimerTasks(ctx context.Context, filter *sqlplugin.TimerTasksFilter) (sql.Result, error) {
if filter.MinVisibilityTimestamp != nil {
*filter.MinVisibilityTimestamp = pdb.converter.ToPostgresDateTime(*filter.MinVisibilityTimestamp)
*filter.MaxVisibilityTimestamp = pdb.converter.ToPostgresDateTime(*filter.MaxVisibilityTimestamp)
return pdb.conn.ExecContext(ctx, rangeDeleteTimerTaskQuery, filter.ShardID, *filter.MinVisibilityTimestamp, *filter.MaxVisibilityTimestamp)
}
*filter.VisibilityTimestamp = pdb.converter.ToPostgresDateTime(*filter.VisibilityTimestamp)
return pdb.conn.ExecContext(ctx, deleteTimerTaskQuery, filter.ShardID, *filter.VisibilityTimestamp, filter.TaskID)
}
// InsertIntoBufferedEvents inserts one or more rows into buffered_events table
func (pdb *db) InsertIntoBufferedEvents(ctx context.Context, rows []sqlplugin.BufferedEventsRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, createBufferedEventsQuery, rows)
}
// SelectFromBufferedEvents reads one or more rows from buffered_events table
func (pdb *db) SelectFromBufferedEvents(ctx context.Context, filter *sqlplugin.BufferedEventsFilter) ([]sqlplugin.BufferedEventsRow, error) {
var rows []sqlplugin.BufferedEventsRow
err := pdb.conn.SelectContext(ctx, &rows, getBufferedEventsQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
for i := 0; i < len(rows); i++ {
rows[i].DomainID = filter.DomainID
rows[i].WorkflowID = filter.WorkflowID
rows[i].RunID = filter.RunID
rows[i].ShardID = filter.ShardID
}
return rows, err
}
// DeleteFromBufferedEvents deletes one or more rows from buffered_events table
func (pdb *db) DeleteFromBufferedEvents(ctx context.Context, filter *sqlplugin.BufferedEventsFilter) (sql.Result, error) {
return pdb.conn.ExecContext(ctx, deleteBufferedEventsQuery, filter.ShardID, filter.DomainID, filter.WorkflowID, filter.RunID)
}
// InsertIntoReplicationTasks inserts one or more rows into replication_tasks table
func (pdb *db) InsertIntoReplicationTasks(ctx context.Context, rows []sqlplugin.ReplicationTasksRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, createReplicationTasksQuery, rows)
}
// SelectFromReplicationTasks reads one or more rows from replication_tasks table
func (pdb *db) SelectFromReplicationTasks(ctx context.Context, filter *sqlplugin.ReplicationTasksFilter) ([]sqlplugin.ReplicationTasksRow, error) {
var rows []sqlplugin.ReplicationTasksRow
err := pdb.conn.SelectContext(ctx, &rows, getReplicationTasksQuery, filter.ShardID, filter.MinTaskID, filter.MaxTaskID, filter.PageSize)
return rows, err
}
// DeleteFromReplicationTasks deletes one row from replication_tasks table
func (pdb *db) DeleteFromReplicationTasks(ctx context.Context, filter *sqlplugin.ReplicationTasksFilter) (sql.Result, error) {
return pdb.conn.ExecContext(ctx, deleteReplicationTaskQuery, filter.ShardID, filter.TaskID)
}
// RangeDeleteFromReplicationTasks deletes multiple rows from replication_tasks table
func (pdb *db) RangeDeleteFromReplicationTasks(ctx context.Context, filter *sqlplugin.ReplicationTasksFilter) (sql.Result, error) {
return pdb.conn.ExecContext(ctx, rangeDeleteReplicationTaskQuery, filter.ShardID, filter.InclusiveEndTaskID)
}
// InsertIntoReplicationTasksDLQ inserts one or more rows into replication_tasks_dlq table
func (pdb *db) InsertIntoReplicationTasksDLQ(ctx context.Context, row *sqlplugin.ReplicationTaskDLQRow) (sql.Result, error) {
return pdb.conn.NamedExecContext(ctx, insertReplicationTaskDLQQuery, row)
}
// SelectFromReplicationTasksDLQ reads one or more rows from replication_tasks_dlq table
func (pdb *db) SelectFromReplicationTasksDLQ(ctx context.Context, filter *sqlplugin.ReplicationTasksDLQFilter) ([]sqlplugin.ReplicationTasksRow, error) {
var rows []sqlplugin.ReplicationTasksRow
err := pdb.conn.SelectContext(
ctx,
&rows, getReplicationTasksDLQQuery,
filter.SourceClusterName,
filter.ShardID,
filter.MinTaskID,
filter.MaxTaskID,
filter.PageSize)
return rows, err
}
// SelectFromReplicationDLQ reads the row count from replication_tasks_dlq table
func (pdb *db) SelectFromReplicationDLQ(ctx context.Context, filter *sqlplugin.ReplicationTaskDLQFilter) (int64, error) {
var size []int64
if err := pdb.conn.SelectContext(
ctx,
&size, getReplicationTaskDLQQuery,
filter.SourceClusterName,
filter.ShardID,
); err != nil {
return 0, err
}
return size[0], nil
}
// DeleteMessageFromReplicationTasksDLQ deletes one row from replication_tasks_dlq table
func (pdb *db) DeleteMessageFromReplicationTasksDLQ(
ctx context.Context,
filter *sqlplugin.ReplicationTasksDLQFilter,
) (sql.Result, error) {
return pdb.conn.ExecContext(
ctx,
deleteReplicationTaskFromDLQQuery,
filter.SourceClusterName,
filter.ShardID,
filter.TaskID,
)
}
// RangeDeleteMessageFromReplicationTasksDLQ deletes one or more rows from replication_tasks_dlq table
func (pdb *db) RangeDeleteMessageFromReplicationTasksDLQ(
ctx context.Context,
filter *sqlplugin.ReplicationTasksDLQFilter,
) (sql.Result, error) {
return pdb.conn.ExecContext(
ctx,
rangeDeleteReplicationTaskFromDLQQuery,
filter.SourceClusterName,
filter.ShardID,
filter.TaskID,
filter.InclusiveEndTaskID,
)
} | } |
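// A minimal usage sketch, assuming a wired-up *db value pdb; the filter
// values below are illustrative only:
//
// row, err := pdb.SelectFromExecutions(ctx, &sqlplugin.ExecutionsFilter{
//     ShardID:    1,
//     DomainID:   domainID,
//     WorkflowID: "example-workflow",
//     RunID:      runID,
// })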
__init__.py | # -*- coding: utf-8 -*-
"""
pybit
------------------------
pybit is a lightweight and high-performance API connector for the
RESTful and WebSocket APIs of the Bybit exchange.
Documentation can be found at
https://github.com/verata-veritatis/pybit
:copyright: (c) 2020-2021 verata-veritatis
:license: MIT License
"""
import time
import hmac
import json
import logging
import threading
import requests
import websocket
from datetime import datetime as dt
from concurrent.futures import ThreadPoolExecutor
from .exceptions import FailedRequestError, InvalidRequestError
# Requests will use simplejson if available.
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
# Versioning.
VERSION = '1.1.18'
class HTTP:
"""
Connector for Bybit's HTTP API.
:param endpoint: The endpoint URL of the HTTP API, e.g.
'https://api-testnet.bybit.com'.
:type endpoint: str
:param api_key: Your API key. Required for authenticated endpoints. Defaults
to None.
:type api_key: str
:param api_secret: Your API secret key. Required for authenticated
endpoints. Defaults to None.
:type api_secret: str
:param logging_level: The logging level of the built-in logger. Defaults to
logging.INFO. Options are CRITICAL (50), ERROR (40), WARNING (30),
INFO (20), DEBUG (10), or NOTSET (0).
:type logging_level: Union[int, logging.level]
:param log_requests: Whether or not pybit should log each HTTP request.
:type log_requests: bool
:param request_timeout: The timeout of each API request in seconds. Defaults
to 10 seconds.
:type request_timeout: int
:param recv_window: How long an HTTP request is valid in ms. Default is
5000.
:type recv_window: int
:param force_retry: Whether or not pybit should retry a timed-out request.
:type force_retry: bool
:param retry_codes: A set of non-fatal status codes to retry on.
:type retry_codes: set
:param ignore_codes: A set of non-fatal status codes to ignore.
:type ignore_codes: set |
:param max_retries: The number of times to re-attempt a request.
:type max_retries: int
:param retry_delay: Seconds between retries for returned error or timed-out
requests. Default is 3 seconds.
:type retry_delay: int
:param referral_id: An optional referer ID can be added to each request for
identification.
:type referral_id: str
:returns: pybit.HTTP session.
"""
def __init__(self, endpoint=None, api_key=None, api_secret=None,
logging_level=logging.INFO, log_requests=False,
request_timeout=10, recv_window=5000, force_retry=False,
retry_codes=None, ignore_codes=None, max_retries=3,
retry_delay=3, referral_id=None):
"""Initializes the HTTP class."""
# Set the endpoint.
if endpoint is None:
self.endpoint = 'https://api.bybit.com'
else:
self.endpoint = endpoint
# Setup logger.
self.logger = logging.getLogger(__name__)
if len(logging.root.handlers) == 0:
# No handler is set on the root logger -> add a handler to this logger only, so we don't disturb any logging configured outside pybit.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
)
handler.setLevel(logging_level)
self.logger.addHandler(handler)
self.logger.debug('Initializing HTTP session.')
self.log_requests = log_requests
# Set API keys.
self.api_key = api_key
self.api_secret = api_secret
# Set timeout.
self.timeout = request_timeout
self.recv_window = recv_window
self.force_retry = force_retry
self.max_retries = max_retries
self.retry_delay = retry_delay
# Set whitelist of non-fatal Bybit status codes to retry on.
if retry_codes is None:
self.retry_codes = {10002, 10006, 30034, 30035, 130035, 130150}
else:
self.retry_codes = retry_codes
# Set whitelist of non-fatal Bybit status codes to ignore.
if ignore_codes is None:
self.ignore_codes = set()
else:
self.ignore_codes = ignore_codes
# Initialize requests session.
self.client = requests.Session()
self.client.headers.update(
{
'User-Agent': 'pybit-' + VERSION,
'Content-Type': 'application/json',
'Accept': 'application/json',
}
)
# Add referral ID to header.
if referral_id:
self.client.headers.update({'Referer': referral_id})
def _exit(self):
"""Closes the request session."""
self.client.close()
self.logger.debug('HTTP session closed.')
def orderbook(self, **kwargs):
"""
Get the orderbook.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-orderbook.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/orderBook/L2',
query=kwargs
)
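# A minimal usage sketch, assuming a public (unauthenticated) session; the
# symbol is illustrative only:
#
# session = HTTP(endpoint='https://api-testnet.bybit.com')
# book = session.orderbook(symbol='BTCUSD')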
def query_kline(self, **kwargs):
"""
Get kline.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-querykline.
:returns: Request results as dictionary.
"""
# Replace query param 'from_time' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_time' in kwargs:
kwargs['from'] = kwargs.pop('from_time')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/kline'
else:
suffix = '/v2/public/kline/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
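# A minimal usage sketch of the 'from_time' workaround above; the symbol,
# interval and timestamp are illustrative only:
#
# klines = session.query_kline(symbol='BTCUSD', interval='15',
#                              from_time=1600000000)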
def latest_information_for_symbol(self, **kwargs):
"""
Get the latest information for symbol.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-latestsymbolinfo.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/tickers',
query=kwargs
)
def public_trading_records(self, **kwargs):
"""
Get recent trades. You can find a complete history of trades on Bybit
at https://public.bybit.com/.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-publictradingrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/recent-trading-records'
else:
suffix = '/v2/public/trading-records'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def query_symbol(self):
"""
Get symbol info.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/symbols'
)
def liquidated_orders(self, **kwargs):
"""
Retrieve the liquidated orders. The query range is the last seven days
of data.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-query_liqrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/liq-records',
query=kwargs
)
def query_mark_price_kline(self, **kwargs):
"""
Query mark price kline (like query_kline but for mark price).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-markpricekline.
:returns: Request results as dictionary.
"""
# Replace query param 'from_time' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_time' in kwargs:
kwargs['from'] = kwargs.pop('from_time')
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/mark-price-kline'
else:
suffix = '/v2/public/mark-price-kline'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def open_interest(self, **kwargs):
"""
Gets the total amount of unsettled contracts. In other words, the total
number of contracts held in open positions.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketopeninterest.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/open-interest',
query=kwargs
)
def latest_big_deal(self, **kwargs):
"""
Obtain filled orders worth more than 500,000 USD within the last 24h.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketbigdeal.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/big-deal',
query=kwargs
)
def long_short_ratio(self, **kwargs):
"""
Gets the Bybit long-short ratio.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-marketaccountratio.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/account-ratio',
query=kwargs
)
def place_active_order(self, **kwargs):
"""
Places an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/create'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/create'
else:
suffix = '/v2/private/order/create'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
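# A minimal usage sketch, assuming an authenticated session; every order
# parameter below is illustrative only:
#
# session.place_active_order(symbol='BTCUSD', side='Buy',
#                            order_type='Limit', qty=1, price=8000,
#                            time_in_force='GoodTillCancel')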
def place_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Places multiple active orders in bulk using multithreading. For more
information on place_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.place_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def get_active_order(self, **kwargs):
"""
Gets an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-getactive.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/list'
else:
suffix = '/v2/private/order/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_active_order(self, **kwargs):
"""
Cancels an active order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelactive.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/cancel'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/cancel'
else:
suffix = '/v2/private/order/cancel'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Cancels multiple active orders in bulk using multithreading. For more
information on cancel_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-activeorders.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.cancel_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def cancel_all_active_orders(self, **kwargs):
"""
Cancel all active orders that are unfilled or partially filled. Fully
filled orders cannot be cancelled.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelallactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/cancel-all'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/cancelAll'
else:
suffix = '/v2/private/order/cancelAll'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_active_order(self, **kwargs):
"""
Replace order can modify/amend your active orders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-replaceactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/replace'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order/replace'
else:
suffix = '/v2/private/order/replace'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_active_order_bulk(self, orders: list, max_in_parallel=10):
"""
Replaces multiple active orders in bulk using multithreading. For more
information on replace_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-replaceactive.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.replace_active_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def query_active_order(self, **kwargs):
"""
Query real-time active order information.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-queryactive.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/order/search'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/order'
else:
suffix = '/v2/private/order'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_conditional_order(self, **kwargs):
"""
Places a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/create'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/create'
else:
suffix = '/v2/private/stop-order/create'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def place_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Places multiple conditional orders in bulk using multithreading. For
more information on place_conditional_order, see
https://bybit-exchange.github.io/docs/inverse/#t-placecond.
:param orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.place_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def get_conditional_order(self, **kwargs):
"""
Gets a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-getcond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/list'
else:
suffix = '/v2/private/stop-order/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_conditional_order(self, **kwargs):
"""
Cancels a conditional order. For more information, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/cancel'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/cancel'
else:
suffix = '/v2/private/stop-order/cancel'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cancel_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Cancels multiple conditional orders in bulk using multithreading. For
more information on cancel_conditional_order, see
https://bybit-exchange.github.io/docs/inverse/#t-cancelcond.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.cancel_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def cancel_all_conditional_orders(self, **kwargs):
"""
Cancel all conditional orders that are unfilled or partially filled.
Fully filled orders cannot be cancelled.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-cancelallcond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/cancel-all'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/cancelAll'
else:
suffix = '/v2/private/stop-order/cancelAll'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_conditional_order(self, **kwargs):
"""
Replace conditional order can modify/amend your conditional orders.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-replacecond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/replace'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order/replace'
else:
suffix = '/v2/private/stop-order/replace'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def replace_conditional_order_bulk(self, orders: list, max_in_parallel=10):
"""
Replaces multiple conditional orders in bulk using multithreading. For
more information on replace_active_order, see
https://bybit-exchange.github.io/docs/inverse/#t-replacecond.
:param list orders: A list of orders and their parameters.
:param max_in_parallel: The number of requests to be sent in parallel.
Note that you are limited to 50 requests per second.
:returns: Future request result dictionaries as a list.
"""
with ThreadPoolExecutor(max_workers=max_in_parallel) as executor:
executions = [
executor.submit(
self.replace_conditional_order,
**order
) for order in orders
]
executor.shutdown()
return [execution.result() for execution in executions]
def query_conditional_order(self, **kwargs):
"""
Query real-time conditional order information.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-querycond.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/stop-order/search'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/stop-order'
else:
suffix = '/v2/private/stop-order'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def my_position(self, **kwargs):
"""
Get my position list.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-myposition.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/list'
else:
suffix = '/v2/private/position/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def set_auto_add_margin(self, **kwargs):
"""
For linear markets only. Set auto add margin, or Auto-Margin
Replenishment.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-setautoaddmargin.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/private/linear/position/set-auto-add-margin',
query=kwargs,
auth=True
)
def set_leverage(self, **kwargs):
"""
Change user leverage.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-setleverage.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/set-leverage'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/leverage/save'
else:
suffix = '/v2/private/position/leverage/save'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def cross_isolated_margin_switch(self, **kwargs):
"""
Switch between Cross and Isolated margin. A leverage value must be set
when switching from Cross to Isolated.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-marginswitch.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/switch-isolated'
else:
suffix = '/futures/private/position/switch-isolated'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def position_mode_switch(self, **kwargs):
"""
For futures markets only. Switch the position mode between One-Way
(MergedSingle) and Hedge (BothSide) mode.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse_futures/#t-marginswitch.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/futures/private/position/switch-mode',
query=kwargs,
auth=True
)
def change_margin(self, **kwargs):
"""
Update margin.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-changemargin.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/change-position-margin'
else:
suffix = '/v2/private/position/change-position-margin'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def set_trading_stop(self, **kwargs):
"""
Set take profit, stop loss, and trailing stop for your open position.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-tradingstop.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/position/trading-stop'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/position/trading-stop'
else:
suffix = '/v2/private/position/trading-stop'
return self._submit_request(
method='POST',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def add_reduce_margin(self, **kwargs):
"""
For linear markets only. Add margin.
:param kwargs: See
https://bybit-exchange.github.io/docs/linear/#t-addmargin.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/private/linear/position/add-margin',
query=kwargs,
auth=True
)
def user_leverage(self, **kwargs):
"""
ABANDONED! Please use my_position instead. Fetches user leverage by
fetching user position.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-getleverage.
:returns: Request results as dictionary.
"""
self.logger.warning('This endpoint is deprecated and will be removed. Use my_position()')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/position/list',
query=kwargs,
auth=True
)
def change_user_leverage(self, **kwargs):
"""
Change user leverage.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-changeleverage.
:returns: Request results as dictionary.
"""
self.logger.warning('This endpoint is deprecated and will be removed. Use set_leverage()')
return self._submit_request(
method='POST',
path=self.endpoint + '/user/leverage/save',
query=kwargs,
auth=True
)
def user_trade_records(self, **kwargs):
"""
Get user's trading records. The results are ordered in ascending order
(the first item is the oldest).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-usertraderecords.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/trade/execution/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/execution/list'
else:
suffix = '/v2/private/execution/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def closed_profit_and_loss(self, **kwargs):
"""
Get user's closed profit and loss records. The results are ordered in
descending order (the first item is the latest).
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-closedprofitandloss.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/trade/closed-pnl/list'
elif kwargs.get('symbol', '')[-2:].isdigit():
suffix = '/futures/private/trade/closed-pnl/list'
else:
suffix = '/v2/private/trade/closed-pnl/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def get_risk_limit(self, is_linear=False):
"""
Get risk limit.
:param is_linear: True for linear, False for inverse. Defaults to
False.
:returns: Request results as dictionary.
"""
if is_linear:
suffix = '/public/linear/risk-limit'
else:
suffix = '/open-api/wallet/risk-limit/list'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
auth=True
)
def set_risk_limit(self, **kwargs):
"""
Set risk limit.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-setrisklimit.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='POST',
path=self.endpoint + '/open-api/wallet/risk-limit',
query=kwargs,
auth=True
)
def get_the_last_funding_rate(self, **kwargs):
"""
The funding rate is generated every 8 hours at 00:00 UTC, 08:00 UTC and
16:00 UTC. For example, if a request is sent at 12:00 UTC, the funding
rate generated earlier that day at 08:00 UTC will be sent.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-fundingrate.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/public/linear/funding/prev-funding-rate'
else:
suffix = '/v2/public/funding/prev-funding-rate'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs
)
def my_last_funding_fee(self, **kwargs):
"""
Funding settlement occurs every 8 hours at 00:00 UTC, 08:00 UTC and
16:00 UTC. The current interval's fund fee settlement is based on the
previous interval's fund rate. For example, at 16:00, the settlement is
based on the fund rate generated at 8:00. The fund rate generated at
16:00 will be used at 0:00 the next day.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-mylastfundingfee.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/funding/prev-funding'
else:
suffix = '/v2/private/funding/prev-funding'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def predicted_funding_rate(self, **kwargs):
"""
Get predicted funding rate and my funding fee.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-predictedfunding.
:returns: Request results as dictionary.
"""
if kwargs.get('symbol', '').endswith('USDT'):
suffix = '/private/linear/funding/predicted-funding'
else:
suffix = '/v2/private/funding/predicted-funding'
return self._submit_request(
method='GET',
path=self.endpoint + suffix,
query=kwargs,
auth=True
)
def api_key_info(self):
"""
Get user's API key info.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/account/api-key',
auth=True
)
def lcp_info(self, **kwargs):
"""
Get user's LCP (data refreshes once an hour). Only supports inverse
perpetual at present. See
https://bybit-exchange.github.io/docs/inverse/#t-liquidity to learn
more.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-lcp.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/account/lcp',
query=kwargs,
auth=True
)
def get_wallet_balance(self, **kwargs):
"""
Get wallet balance info.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-balance.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/balance',
query=kwargs,
auth=True
)
def wallet_fund_records(self, **kwargs):
"""
Get wallet fund records. This endpoint also shows exchanges from the
Asset Exchange, where the types for the exchange are
ExchangeOrderWithdraw and ExchangeOrderDeposit.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-walletrecords.
:returns: Request results as dictionary.
"""
# Replace query param 'from_id' since 'from' keyword is reserved.
# Temporary workaround until Bybit updates official request params
if 'from_id' in kwargs:
kwargs['from'] = kwargs.pop('from_id')
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/fund/records',
query=kwargs,
auth=True
)
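    # e.g. session.wallet_fund_records(currency='BTC', from_id=1234)
    # (hypothetical values; 'from_id' is renamed to 'from' before submission)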
def withdraw_records(self, **kwargs):
"""
Get withdrawal records.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-withdrawrecords.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/wallet/withdraw/list',
query=kwargs,
auth=True
)
def asset_exchange_records(self, **kwargs):
"""
Get asset exchange records.
:param kwargs: See
https://bybit-exchange.github.io/docs/inverse/#t-assetexchangerecords.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/private/exchange-order/list',
query=kwargs,
auth=True
)
def server_time(self):
"""
Get Bybit server time.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/time'
)
def announcement(self):
"""
Get Bybit OpenAPI announcements from the last 30 days, in reverse order.
:returns: Request results as dictionary.
"""
return self._submit_request(
method='GET',
path=self.endpoint + '/v2/public/announcement'
)
'''
Additional Methods
These methods use two or more requests to perform a specific
function and are exclusive to pybit.
'''
def close_position(self, symbol):
"""
Closes your open position. Makes two requests (position, order).
Parameters
------------------------
symbol : str
Required parameter. The symbol of the market as a string,
e.g. 'BTCUSD'.
"""
# First we fetch the user's position.
try:
r = self.my_position(symbol=symbol)['result']
# If there is no returned position, we want to handle that.
except KeyError:
return self.logger.error('No position detected.')
# Next we generate a list of market orders
orders = [
{
'symbol': symbol,
'order_type': 'Market',
'side': 'Buy' if p['side'] == 'Sell' else 'Sell',
'qty': p['size'],
'time_in_force': 'ImmediateOrCancel',
'reduce_only': True,
'close_on_trigger': True
} for p in (r if isinstance(r, list) else [r]) if p['size'] > 0
]
if len(orders) == 0:
return self.logger.error('No position detected.')
# Submit a market order against each open position for the same qty.
return self.place_active_order_bulk(orders)
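    # Example (hypothetical symbol; assumes this session is already
    # authenticated): session.close_position(symbol='BTCUSD')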
'''
Internal methods; signature and request submission.
For more information about the request signature, see
https://bybit-exchange.github.io/docs/inverse/#t-authentication.
'''
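    # Signature sketch (hypothetical values): the alphabetically sorted
    # querystring, e.g.
    #   'api_key=KEY&recv_window=5000&symbol=BTCUSD&timestamp=1600000000000'
    # is HMAC-SHA256 signed with the API secret and sent as the 'sign' param.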
def _auth(self, method, params, recv_window):
"""
Generates authentication signature per Bybit API specifications.
Notes
-------------------
Since the POST method requires a JSONified dict, we need to ensure
the signature uses lowercase booleans instead of Python's
capitalized booleans. This is done in the bug fix below.
"""
api_key = self.api_key
api_secret = self.api_secret
if api_key is None or api_secret is None:
raise PermissionError('Authenticated endpoints require keys.')
# Append required parameters.
params['api_key'] = api_key
params['recv_window'] = recv_window
params['timestamp'] = int(time.time() * 10 ** 3)
# Sort dictionary alphabetically to create querystring.
_val = '&'.join(
[str(k) + '=' + str(v) for k, v in sorted(params.items()) if
(k != 'sign') and (v is not None)]
)
# Bug fix. Replaces all capitalized booleans with lowercase.
if method == 'POST':
_val = _val.replace('True', 'true').replace('False', 'false')
# Return signature.
return str(hmac.new(
bytes(api_secret, 'utf-8'),
bytes(_val, 'utf-8'), digestmod='sha256'
).hexdigest())
def _submit_request(self, method=None, path=None, query=None, auth=False):
"""
Submits the request to the API.
Notes
-------------------
We use the params argument for the GET method, and data argument for
the POST method. Dicts passed to the data argument must be
JSONified prior to submitting request.
"""
# Store original recv_window.
recv_window = self.recv_window
# Bug fix: change floating whole numbers to integers to prevent
# auth signature errors.
        # Default to an empty dict so authentication can safely add params.
        if query is None:
            query = {}
        for i in query.keys():
            if isinstance(query[i], float) and query[i] == int(query[i]):
                query[i] = int(query[i])
# Send request and return headers with body. Retry if failed.
retries_attempted = self.max_retries
req_params = None
while True:
retries_attempted -= 1
if retries_attempted < 0:
raise FailedRequestError(
request=f'{method} {path}: {req_params}',
message='Bad Request. Retries exceeded maximum.',
status_code=400,
time=dt.utcnow().strftime("%H:%M:%S")
)
retries_remaining = f'{retries_attempted} retries remain.'
# Authenticate if we are using a private endpoint.
if auth:
# Prepare signature.
signature = self._auth(
method=method,
params=query,
recv_window=recv_window,
)
# Sort the dictionary alphabetically.
query = dict(sorted(query.items(), key=lambda x: x))
# Append the signature to the dictionary.
query['sign'] = signature
# Define parameters and log the request.
if query is not None:
req_params = {k: v for k, v in query.items() if
v is not None}
else:
req_params = {}
# Log the request.
if self.log_requests:
self.logger.debug(f'Request -> {method} {path}: {req_params}')
# Prepare request; use 'params' for GET and 'data' for POST.
if method == 'GET':
r = self.client.prepare_request(
requests.Request(method, path, params=req_params)
)
else:
r = self.client.prepare_request(
requests.Request(method, path, data=json.dumps(req_params))
)
# Attempt the request.
try:
s = self.client.send(r, timeout=self.timeout)
# If requests fires an error, retry.
except (
requests.exceptions.ReadTimeout,
requests.exceptions.SSLError,
requests.exceptions.ConnectionError
) as e:
if self.force_retry:
self.logger.error(f'{e}. {retries_remaining}')
time.sleep(self.retry_delay)
continue
else:
raise e
# Convert response to dictionary, or raise if requests error.
try:
s_json = s.json()
# If we have trouble converting, handle the error and retry.
except JSONDecodeError as e:
if self.force_retry:
self.logger.error(f'{e}. {retries_remaining}')
time.sleep(self.retry_delay)
continue
else:
raise FailedRequestError(
request=f'{method} {path}: {req_params}',
message='Conflict. Could not decode JSON.',
status_code=409,
time=dt.utcnow().strftime("%H:%M:%S")
)
# If Bybit returns an error, raise.
if s_json['ret_code']:
# Generate error message.
error_msg = (
f'{s_json["ret_msg"]} (ErrCode: {s_json["ret_code"]})'
)
# Set default retry delay.
err_delay = self.retry_delay
# Retry non-fatal whitelisted error requests.
if s_json['ret_code'] in self.retry_codes:
# 10002, recv_window error; add 2.5 seconds and retry.
if s_json['ret_code'] == 10002:
error_msg += '. Added 2.5 seconds to recv_window'
recv_window += 2500
# 10006, ratelimit error; wait until rate_limit_reset_ms
# and retry.
elif s_json['ret_code'] == 10006:
self.logger.error(
f'{error_msg}. Ratelimited on current request. '
f'Sleeping, then trying again. Request: {path}'
)
# Calculate how long we need to wait.
limit_reset = s_json['rate_limit_reset_ms'] / 1000
reset_str = time.strftime(
'%X', time.localtime(limit_reset)
)
err_delay = int(limit_reset) - int(time.time())
error_msg = (
f'Ratelimit will reset at {reset_str}. '
f'Sleeping for {err_delay} seconds'
)
# Log the error.
self.logger.error(f'{error_msg}. {retries_remaining}')
time.sleep(err_delay)
continue
elif s_json['ret_code'] in self.ignore_codes:
pass
else:
raise InvalidRequestError(
request=f'{method} {path}: {req_params}',
message=s_json["ret_msg"],
status_code=s_json["ret_code"],
time=dt.utcnow().strftime("%H:%M:%S")
)
else:
return s_json
class WebSocket:
"""
Connector for Bybit's WebSocket API.
"""
def __init__(self, endpoint, api_key=None, api_secret=None,
subscriptions=None, logging_level=logging.INFO,
max_data_length=200, ping_interval=30, ping_timeout=10,
restart_on_error=True, purge_on_fetch=True,
trim_data=True):
"""
Initializes the websocket session.
:param endpoint: Required parameter. The endpoint of the remote
websocket.
:param api_key: Your API key. Required for authenticated endpoints.
Defaults to None.
:param api_secret: Your API secret key. Required for authenticated
endpoints. Defaults to None.
:param subscriptions: A list of desired topics to subscribe to. See API
documentation for more information. Defaults to an empty list, which
will raise an error.
:param logging_level: The logging level of the built-in logger. Defaults
to logging.INFO. Options are CRITICAL (50), ERROR (40),
WARNING (30), INFO (20), DEBUG (10), or NOTSET (0).
:param max_data_length: The maximum number of rows for the stored
dataset. A smaller number will prevent performance or memory issues.
:param ping_interval: The number of seconds between each automated ping.
:param ping_timeout: The number of seconds to wait for 'pong' before an
Exception is raised.
:param restart_on_error: Whether or not the connection should restart on
error.
:param purge_on_fetch: Whether or not stored data should be purged each
fetch. For example, if the user subscribes to the 'trade' topic, and
fetches, should the data show all trade history up to the maximum
length or only get the data since the last fetch?
:param trim_data: Decide whether the returning data should be
trimmed to only provide the data value.
:returns: WebSocket session.
"""
if not subscriptions:
raise Exception('Subscription list cannot be empty!')
# Require symbol on 'trade' topic.
if 'trade' in subscriptions:
raise Exception('\'trade\' requires a ticker, e.g. '
'\'trade.BTCUSD\'.')
# Require currency on 'insurance' topic.
if 'insurance' in subscriptions:
raise Exception('\'insurance\' requires a currency, e.g. '
'\'insurance.BTC\'.')
# Require timeframe and ticker on 'klineV2' topic.
if 'klineV2' in subscriptions:
raise Exception('\'klineV2\' requires a timeframe and ticker, e.g.'
' \'klineV2.5.BTCUSD\'.')
# set websocket name for logging purposes
self.wsName = 'Authenticated' if api_key else 'Non-Authenticated'
# Setup logger.
self.logger = logging.getLogger(__name__)
if len(logging.root.handlers) == 0:
# No handler is set on the root logger, so add one to this logger only,
# leaving any external logging configuration untouched.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
)
handler.setLevel(logging_level)
self.logger.addHandler(handler)
self.logger.debug(f'Initializing {self.wsName} WebSocket.')
# Ensure authentication for private topics.
if any(i in subscriptions for i in [
'position',
'execution',
'order',
'stop_order',
'wallet'
]) and api_key is None:
raise PermissionError('You must be authorized to use '
'private topics!')
# Set endpoint.
self.endpoint = endpoint
# Set API keys.
self.api_key = api_key
self.api_secret = api_secret
# Set topic subscriptions for WebSocket.
self.subscriptions = subscriptions
self.max_length = max_data_length
# Set ping settings.
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
# Other optional data handling settings.
self.handle_error = restart_on_error
self.purge = purge_on_fetch
self.trim = trim_data
# Set initial state, initialize dictionary and connect.
self._reset()
self._connect(self.endpoint)
def fetch(self, topic):
"""
Fetches data from the subscribed topic.
:param topic: Required parameter. The subscribed topic to poll.
:returns: Filtered data as dict.
"""
# If topic isn't a string.
if not isinstance(topic, str):
self.logger.error('Topic argument must be a string.')
return
# If the topic given isn't in the initial subscribed list.
if topic not in self.subscriptions:
self.logger.error(f'You aren\'t subscribed to the {topic} topic.')
return
# Pop all trade or execution data on each poll.
# Don't pop order or stop_order data, as we would lose valuable state.
if topic.startswith((
'trade',
'execution'
)) and not topic.startswith('orderBook'):
data = self.data[topic].copy()
if self.purge:
self.data[topic] = []
return data
else:
try:
return self.data[topic]
except KeyError:
return []
def ping(self):
"""
Pings the remote server to test the connection. The status of the
connection can be monitored using ws.ping().
"""
self.ws.send(json.dumps({'op': 'ping'}))
def exit(self):
"""
Closes the websocket connection.
"""
self.ws.close()
while self.ws.sock:
continue
self.exited = True
def _auth(self):
"""
Authorize websocket connection.
"""
# Generate expires.
expires = int((time.time() + 1) * 1000)
# Generate signature.
_val = f'GET/realtime{expires}'
signature = str(hmac.new(
bytes(self.api_secret, 'utf-8'),
bytes(_val, 'utf-8'), digestmod='sha256'
).hexdigest())
# Authenticate with API.
self.ws.send(
json.dumps({
'op': 'auth',
'args': [self.api_key, expires, signature]
})
)
def _connect(self, url):
"""
Open websocket in a thread.
"""
self.ws = websocket.WebSocketApp(
url=url,
on_message=lambda ws, msg: self._on_message(msg),
on_close=lambda ws, *args: self._on_close(),
on_open=lambda ws: self._on_open(),
on_error=lambda ws, err: self._on_error(err)
)
# Setup the thread running WebSocketApp.
self.wst = threading.Thread(target=lambda: self.ws.run_forever(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout
))
# Configure as daemon; start.
self.wst.daemon = True
self.wst.start()
# Attempt to connect for X seconds.
retries = 10
while retries > 0 and (not self.ws.sock or not self.ws.sock.connected):
retries -= 1
time.sleep(1)
# If connection was not successful, raise error.
if retries <= 0:
self.exit()
raise websocket.WebSocketTimeoutException('Connection failed.')
# If given an api_key, authenticate.
if self.api_key and self.api_secret:
self._auth()
# Check if subscriptions is a list.
if isinstance(self.subscriptions, str):
self.subscriptions = [self.subscriptions]
# Subscribe to the requested topics.
self.ws.send(
json.dumps({
'op': 'subscribe',
'args': self.subscriptions
})
)
# Initialize the topics.
for topic in self.subscriptions:
if topic not in self.data:
self.data[topic] = {}
@staticmethod
def _find_index(source, target, key):
"""
Find the index in source list of the targeted ID.
"""
return next(i for i, j in enumerate(source) if j[key] == target[key])
def _on_message(self, message):
"""
Parse incoming messages. Similar structure to the
official WS connector.
"""
# Load dict of message.
msg_json = json.loads(message)
# If 'success' exists
if 'success' in msg_json:
if msg_json['success']:
# If 'request' exists.
if 'request' in msg_json:
# If we get successful auth, notify user.
if msg_json['request']['op'] == 'auth':
self.logger.debug('Authorization successful.')
self.auth = True
# If we get successful subscription, notify user.
if msg_json['request']['op'] == 'subscribe':
sub = msg_json['request']['args']
self.logger.debug(f'Subscription to {sub} successful.')
else:
response = msg_json['ret_msg']
if 'unknown topic' in response:
self.logger.error('Couldn\'t subscribe to topic.'
f' Error: {response}.')
# If we get unsuccessful auth, notify user.
elif msg_json['request']['op'] == 'auth':
self.logger.debug('Authorization failed. Please check your '
'API keys and restart.')
elif 'topic' in msg_json:
topic = msg_json['topic']
# If incoming 'orderbookL2' data.
if 'orderBook' in topic:
# Make updates according to delta response.
if 'delta' in msg_json['type']:
# Delete.
for entry in msg_json['data']['delete']:
index = self._find_index(self.data[topic], entry, 'id')
self.data[topic].pop(index)
# Update.
for entry in msg_json['data']['update']:
index = self._find_index(self.data[topic], entry, 'id')
self.data[topic][index] = entry
# Insert.
for entry in msg_json['data']['insert']:
self.data[topic].append(entry)
# Record the initial snapshot.
elif 'snapshot' in msg_json['type']:
self.data[topic] = msg_json['data']
# For incoming 'order' and 'stop_order' data.
elif any(i in topic for i in ['order', 'stop_order']):
# record incoming data
for i in msg_json['data']:
try:
# update existing entries
# temporary workaround for field anomaly in stop_order data
ord_id = topic + '_id' if i['symbol'].endswith('USDT') else 'order_id'
index = self._find_index(self.data[topic], i, ord_id)
self.data[topic][index] = i
except StopIteration:
# Keep appending or create new list if not already created.
try:
self.data[topic].append(i)
except AttributeError:
self.data[topic] = msg_json['data']
# For incoming 'trade' and 'execution' data.
elif any(i in topic for i in ['trade', 'execution']):
# Keep appending or create new list if not already created.
try:
for i in msg_json['data']:
self.data[topic].append(i)
except AttributeError:
self.data[topic] = msg_json['data']
# If list is too long, pop the first entry.
if len(self.data[topic]) > self.max_length:
self.data[topic].pop(0)
# If incoming 'insurance', 'klineV2', or 'wallet' data.
elif any(i in topic for i in ['insurance', 'klineV2', 'wallet',
'candle']):
# Record incoming data.
self.data[topic] = msg_json['data'][0] if self.trim else msg_json
# If incoming 'instrument_info' data.
elif 'instrument_info' in topic:
# Make updates according to delta response.
if 'delta' in msg_json['type']:
for i in msg_json['data']['update'][0]:
self.data[topic][i] = msg_json['data']['update'][0][i]
# Record the initial snapshot.
elif 'snapshot' in msg_json['type']:
self.data[topic] = msg_json['data'] if self.trim else msg_json
# If incoming 'position' data.
elif 'position' in topic:
# Record incoming position data.
for p in msg_json['data']:
# linear (USDT) positions have Buy|Sell side and
# updates contain all USDT positions.
# For linear tickers...
if p['symbol'].endswith('USDT'):
try:
self.data[topic][p['symbol']][p['side']] = p
# if side key hasn't been created yet...
except KeyError:
self.data[topic][p['symbol']] = {p['side']: p}
# For non-linear tickers...
else:
self.data[topic][p['symbol']] = p
def _on_error(self, error):
"""
Exit on errors and raise exception, or attempt reconnect.
"""
if not self.exited:
self.logger.error(f'WebSocket {self.wsName} encountered error: {error}.')
self.exit()
# Reconnect.
if self.handle_error:
self._reset()
self._connect(self.endpoint)
def _on_open(self):
"""
Log WS open.
"""
self.logger.debug(f'WebSocket {self.wsName} opened.')
def _on_close(self):
"""
Log WS close.
"""
self.logger.debug(f'WebSocket {self.wsName} closed.')
def _reset(self):
"""
Set state booleans and initialize dictionary.
"""
self.exited = False
self.auth = False
self.data = {} | |
distributed-cache.js | 'use strict'
import { relationType } from './make-relations'
import { importRemoteCache } from '.'
import domainEvents from '../domain/domain-events'
import asyncPipe from './util/async-pipe'
import { workerData } from 'worker_threads'
import { UseCaseService } from './use-cases'
const {
internalCacheRequest,
internalCacheResponse,
externalCacheRequest,
externalCacheResponse,
externalCrudEvent
} = domainEvents
/**
* @typedef {import("./model").Model} Model
*/
/**
* Implements distributed object cache. Find any model
* referenced by a relation that is not registered in
* the model factory and listen for remote CRUD events
* from it. On receipt of the event, import its remote
* modules if we don't already have them, then rehydrate
 * and save the model instance to the cache. Subscribe to
 * external on-demand requests and broadcast internal ones,
 * i.e. cache misses.
*
* @param {{
* broker:import("./event-broker").EventBroker,
* datasources:import("./datasource-factory").DataSourceFactory,
* models:import("./model-factory").ModelFactory,
* subscribe:function(string,function()),
* publish:function(string,object),
* }} param0
*/
export default function | ({
models,
broker,
datasources,
publish,
subscribe
}) {
/**
* @typedef {{
* eventName:string,
* modelName:string,
* modelId:string,
* model:Model|Model[],
* args:[],
* relation:{
* type:string,
* modelName:string,
* foreignKey:string}
 * }} Event the unit of data for transmission of cached data
*/
/** @typedef {import(".").ModelSpecification} ModelSpecification*/
/**
* parse {@link Event}
* @param {Event} payload
* @returns {Event}
*/
function parse (payload) {
if (!payload) {
throw new Error({ func: parse.name, error: 'no payload included' })
}
try {
const requiredFields = ['eventName', 'model']
const actuals = Object.keys(payload)
const missing = requiredFields.filter(k => !actuals.includes(k))
if (missing.length > 0) {
console.error(parse.name, 'missing fields:', missing)
throw new Error('missing required fields', { missing })
}
return {
...payload,
modelName: (payload.modelName || payload.model.modelName).toUpperCase(),
modelId: payload.modelId || payload.model.id,
args: payload.args || []
}
} catch (e) {
console.error('could not parse message', e, { payload })
}
}
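  // Minimal valid payload for parse (hypothetical names and IDs):
  //   parse({ eventName: 'createOrder', model: { id: '123', modelName: 'order' } })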
/**
* Unmarshal deserialized object.
* @param {Array<Model>} model
* @param {import("./datasource").default} datasource
* @param {string} modelName
* @returns {Model}
*/
function hydrate (o) {
const { model, datasource, modelName } = o
return {
...o,
model: model.map(m => models.loadModel(broker, datasource, m, modelName))
}
}
/**
* Save model to cache.
* @param {Model[]} model
* @param {import("./datasource").default} datasource
* @param {function(m)=>m.id} return id to save
*/
async function save (o) {
const { model, modelName, datasource } = o
console.debug({
fn: save.name,
modelName,
ds: datasource.name
})
if (modelName !== datasource.name.toUpperCase()) {
console.error('wrong dataset, aborting')
return o
}
model.forEach(async m => await datasource.save(models.getModelId(m), m))
return o
}
/**
* Fetch {@link ModelSpecification} modules for `modelName` from repo.
* @param {string} modelName
*/
async function streamCode (o) {
const { modelName } = o
console.debug('check if we have the code for this object...')
if (!models.getModelSpec(modelName.toUpperCase())) {
console.debug("...we don't, stream it.")
// Stream the code for the model
await importRemoteCache(modelName.toUpperCase())
return o
}
console.debug('...we do.')
return o
}
/**
* @param {*} eventName
* @param {*} modelName
* @param {Event} event
* @returns
*/
async function handleDelete (eventName, modelName, event) {
if (
eventName === models.getEventName(models.EventTypes.DELETE, modelName)
) {
console.debug('deleting from cache', modelName, event.modelId)
await datasources.getDataSource(modelName).delete(event.modelId)
return true
}
return false
}
/**
* Pipes functions that instantiate the remote object(s) and upsert the cache
*/
const handleUpsert = asyncPipe(streamCode, hydrate, save)
/**
*
* @param {function(string):string} parser
* @param {function(object)} route what to do after updating
* @returns {function(message):Promise<void>}
*/
function updateCache (route) {
return async function (message) {
try {
const event = parse(message)
const eventName = event.eventName
const models = [event.model].flat()
const [model] = models
      console.debug('handle cache event', model, eventName)
      if (!model) {
        // no model found; forward the event unchanged
        console.error('no model found', eventName)
        if (route) await route(event)
        return
      }
      const modelNameUpper = model.modelName.toUpperCase()
      if (!modelNameUpper) throw new Error('no model', event)
if (await handleDelete(eventName, modelNameUpper, event)) return
const enrichedEvent = await handleUpsert({
modelName: modelNameUpper,
datasource: datasources.getDataSource(modelNameUpper),
model: models,
event
})
if (route) route(enrichedEvent)
} catch (error) {
console.error({ fn: updateCache.name, error })
}
}
}
/**
*
* @param {Event} event
* @returns {Promise<Event>}
* @throws
*/
async function createModels (event) {
const modelName = event.relation.modelName.toUpperCase()
const service = UseCaseService(modelName)
const models = await Promise.all(
event.args.map(async arg => {
return service.addModel(arg)
})
)
return { ...event, model: models }
}
/**
* Creates new, related models if relation function is called
* with arguments, e.g.
* ```js
* const customer = await order.customer(customerDetails);
* const customers = await order.customer([cust1, cust2]);
* ```
*
* @param {Event} event
* @returns {Promise<Event>}
* Updated source model (model that defines the relation)
* @throws
*/
async function saveModels (event) {
try {
const models = event.model
const datasource = datasources.getDataSource(
event.relation.modelName.toUpperCase()
)
models.forEach(model => datasource.save(model.getId(), model))
return event
} catch (error) {
console.error(saveModels.name, error)
return event
}
}
const newModels = asyncPipe(createModels, saveModels)
/**
* Returns function to search the cache.
* @param {function(string):string} parser
* @param {function(object)} route
* @returns {function(message):Promise<void>}
* function that searches the cache
*/
function searchCache (route) {
return async function (message) {
try {
const event = parse(message)
const { relation, model } = event
// args mean create an object
if (event.args?.length > 0) {
console.debug({
fn: searchCache.name,
models: event.model
})
return await route(await newModels(event))
}
// find the requested object or objects
const relatedModels = await relationType[relation.type](
model,
datasources.getDataSource(relation.modelName.toUpperCase()),
relation
)
console.debug({
fn: searchCache.name,
msg: 'related model(s)',
related: relatedModels
})
return await route({ ...event, model: relatedModels })
} catch (error) {
console.error(searchCache.name, error)
}
}
}
/**
* Listen for response to search request and notify requester.
* @param {*} responseName
* @param {*} internalName
*/
const receiveSearchResponse = (responseName, internalName) =>
subscribe(
responseName,
updateCache(async event => broker.notify(internalName, event))
)
/**
* Listen for search request from remote system, search and send response.
*
* @param {string} request name of event received from remote instance
* @param {string} response name of event sent in response to request
*/
const answerSearchRequest = (request, response) =>
subscribe(
request,
searchCache(event =>
publish({
...event,
eventName: response,
eventTarget: event.eventSource,
eventSource: event.eventTarget
})
)
)
/**
* Listen for internal events requesting cache search and send to remote systems.
* @param {string} internalEvent name of internal event
* @param {string} externalEvent name of external event
*/
const forwardSearchRequest = (internalEvent, externalEvent) =>
broker.on(internalEvent, event =>
publish({ ...event, eventName: externalEvent })
)
/**
* Listen for events from remote systems and update local cache.
* @param {string} eventName
*/
const receiveCrudBroadcast = eventName =>
subscribe(externalCrudEvent(eventName), updateCache())
/**
*
* @param {string} eventName
*/
const broadcastCrudEvent = eventName =>
broker.on(eventName, async event =>
publish({ ...event, eventName: externalCrudEvent(eventName) })
)
/**
 * Subscribe to external CRUD events for related models.
* Also listen for request and response events for locally
* and remotely cached data.
*/
function start () {
const modelSpecs = models.getModelSpecs()
const localModels = [workerData.modelName.toUpperCase()]
const remoteModels = [
...new Set( // deduplicate
modelSpecs
.filter(m => m.relations) // only models with relations
.map(m =>
Object.keys(m.relations)
.filter(
// filter out existing local models
k =>
!localModels.includes(m.relations[k].modelName.toUpperCase())
)
.map(k => m.relations[k].modelName.toUpperCase())
)
.reduce((a, b) => a.concat(b), [])
)
]
console.info('local models', localModels, 'remote models', remoteModels)
// Forward requests to, handle responses from, remote models
remoteModels.forEach(function (modelName) {
// listen for internal requests and forward externally
forwardSearchRequest(
internalCacheRequest(modelName),
externalCacheRequest(modelName)
)
// listen for external responses to forwarded requests
receiveSearchResponse(
externalCacheResponse(modelName),
internalCacheResponse(modelName)
)
// listen for CRUD events from related, external models
;[
models.getEventName(models.EventTypes.UPDATE, modelName),
models.getEventName(models.EventTypes.CREATE, modelName),
models.getEventName(models.EventTypes.DELETE, modelName)
].forEach(receiveCrudBroadcast)
})
// Respond to search requests and broadcast CRUD events
localModels.forEach(function (modelName) {
// Listen for external requests and respond with search results
answerSearchRequest(
externalCacheRequest(modelName),
externalCacheResponse(modelName)
)
// Listen for local CRUD events and forward externally
;[
models.getEventName(models.EventTypes.UPDATE, modelName),
models.getEventName(models.EventTypes.CREATE, modelName),
models.getEventName(models.EventTypes.DELETE, modelName)
].forEach(broadcastCrudEvent)
})
}
console.info('distributed object cache running')
return Object.freeze({
start
})
}
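// Example wiring (hypothetical adapters; models, broker, datasources and the
// pub/sub functions come from the host application):
//   const cache = DistributedCache({ models, broker, datasources, publish, subscribe })
//   cache.start()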
| DistributedCache |
cast-rfc0401.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn illegal_cast<U:?Sized,V:?Sized>(u: *const U) -> *const V
{
u as *const V
}
fn | <U:?Sized>(u: *const U) -> *const str
{
u as *const str
}
trait Foo { fn foo(&self) {} }
impl<T> Foo for T {}
trait Bar { fn foo(&self) {} }
impl<T> Bar for T {}
enum E {
A, B
}
fn main()
{
let f: f32 = 1.2;
let v = 0 as *const u8;
let fat_v : *const [u8] = unsafe { &*(0 as *const [u8; 1])};
let fat_sv : *const [i8] = unsafe { &*(0 as *const [i8; 1])};
let foo: &Foo = &f;
let _ = v as &u8;
let _ = v as E;
let _ = v as fn();
let _ = v as (u32,);
let _ = Some(&v) as *const u8;
let _ = v as f32;
let _ = main as f64;
let _ = &v as usize;
let _ = f as *const u8;
let _ = 3_i32 as bool;
let _ = E::A as bool;
let _ = 0x61u32 as char;
let _ = false as f32;
let _ = E::A as f32;
let _ = 'a' as f32;
let _ = false as *const u8;
let _ = E::A as *const u8;
let _ = 'a' as *const u8;
let _ = 42usize as *const [u8];
let _ = v as *const [u8];
let _ = fat_v as *const Foo;
let _ = foo as *const str;
let _ = foo as *mut str;
let _ = main as *mut str;
let _ = &f as *mut f32;
let _ = &f as *const f64;
let _ = fat_sv as usize;
let a : *const str = "hello";
let _ = a as *const Foo;
// check no error cascade
let _ = main.f as *const u32;
let cf: *const Foo = &0;
let _ = cf as *const [u16];
let _ = cf as *const Bar;
vec![0.0].iter().map(|s| s as f32).collect::<Vec<f32>>();
}
| illegal_cast_2 |
fixtures.js | /** Dependencies */
import './fake-media-provider/define.js';
import '../container/define.js';
import '../controller/define.js';
import { fixture } from '@open-wc/testing';
import { html } from 'lit';
import {
MEDIA_CONTAINER_ELEMENT_TAG_NAME,
MediaContainerElement
} from '../container/index.js';
import { MediaControllerElement } from '../controller/index.js';
import {
FAKE_MEDIA_PROVIDER_ELEMENT_TAG_NAME,
FakeMediaProviderElement
} from './fake-media-provider/index.js';
/**
* @typedef {{
* controller: MediaControllerElement;
* container: MediaContainerElement;
* provider: FakeMediaProviderElement;
* }} MediaFixture
*/
/**
* @param {import('lit').TemplateResult} [uiSlot]
* @param {import('lit').TemplateResult} [mediaSlot]
* @returns {Promise<MediaFixture>}
*/
export async function | (uiSlot = html``, mediaSlot = html``) {
/** @type {MediaControllerElement} */
const controller = await fixture(
html`
<vds-media-controller>
<vds-media-container>
<vds-fake-media-provider>${mediaSlot}</vds-fake-media-provider>
${uiSlot}
</vds-media-container>
</vds-media-controller>
`
);
const container = /** @type {MediaContainerElement} */ (
controller.querySelector(MEDIA_CONTAINER_ELEMENT_TAG_NAME)
);
const provider = /** @type {FakeMediaProviderElement} */ (
controller.querySelector(FAKE_MEDIA_PROVIDER_ELEMENT_TAG_NAME)
);
return {
controller,
container,
provider
};
}
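// Example usage in a test (assumes an @open-wc/testing environment):
//   const { controller, container, provider } = await buildMediaFixture();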
| buildMediaFixture |
bastionhosts.go | package network
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// BastionHostsClient is the network Client
type BastionHostsClient struct {
BaseClient
}
// NewBastionHostsClient creates an instance of the BastionHostsClient client.
func NewBastionHostsClient(subscriptionID string) BastionHostsClient {
return NewBastionHostsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
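// Example usage (hypothetical IDs; the client's Authorizer must be configured
// before calling any operation):
//
//	client := NewBastionHostsClient("<subscription id>")
//	host, err := client.Get(context.Background(), "<resource group>", "<bastion host>")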
// NewBastionHostsClientWithBaseURI creates an instance of the BastionHostsClient client using a custom endpoint. Use
// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewBastionHostsClientWithBaseURI(baseURI string, subscriptionID string) BastionHostsClient |
// CreateOrUpdate creates or updates the specified Bastion Host.
// Parameters:
// resourceGroupName - the name of the resource group.
// bastionHostName - the name of the Bastion Host.
// parameters - parameters supplied to the create or update Bastion Host operation.
func (client BastionHostsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, bastionHostName string, parameters BastionHost) (result BastionHostsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, bastionHostName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "CreateOrUpdate", nil, "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client BastionHostsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, bastionHostName string, parameters BastionHost) (*http.Request, error) {
pathParameters := map[string]interface{}{
"bastionHostName": autorest.Encode("path", bastionHostName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
parameters.Etag = nil
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client BastionHostsClient) CreateOrUpdateSender(req *http.Request) (future BastionHostsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client BastionHostsClient) CreateOrUpdateResponder(resp *http.Response) (result BastionHost, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the specified Bastion Host.
// Parameters:
// resourceGroupName - the name of the resource group.
// bastionHostName - the name of the Bastion Host.
func (client BastionHostsClient) Delete(ctx context.Context, resourceGroupName string, bastionHostName string) (result BastionHostsDeleteFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.Delete")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, bastionHostName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "Delete", nil, "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client BastionHostsClient) DeletePreparer(ctx context.Context, resourceGroupName string, bastionHostName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"bastionHostName": autorest.Encode("path", bastionHostName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client BastionHostsClient) DeleteSender(req *http.Request) (future BastionHostsDeleteFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client BastionHostsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the specified Bastion Host.
// Parameters:
// resourceGroupName - the name of the resource group.
// bastionHostName - the name of the Bastion Host.
func (client BastionHostsClient) Get(ctx context.Context, resourceGroupName string, bastionHostName string) (result BastionHost, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, bastionHostName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client BastionHostsClient) GetPreparer(ctx context.Context, resourceGroupName string, bastionHostName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"bastionHostName": autorest.Encode("path", bastionHostName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client BastionHostsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client BastionHostsClient) GetResponder(resp *http.Response) (result BastionHost, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List lists all Bastion Hosts in a subscription.
func (client BastionHostsClient) List(ctx context.Context) (result BastionHostListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.List")
defer func() {
sc := -1
if result.bhlr.Response.Response != nil {
sc = result.bhlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.bhlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "List", resp, "Failure sending request")
return
}
result.bhlr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "List", resp, "Failure responding to request")
return
}
if result.bhlr.hasNextLink() && result.bhlr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListPreparer prepares the List request.
func (client BastionHostsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/bastionHosts", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client BastionHostsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client BastionHostsClient) ListResponder(resp *http.Response) (result BastionHostListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client BastionHostsClient) listNextResults(ctx context.Context, lastResults BastionHostListResult) (result BastionHostListResult, err error) {
req, err := lastResults.bastionHostListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.BastionHostsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.BastionHostsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client BastionHostsClient) ListComplete(ctx context.Context) (result BastionHostListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx)
return
}
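// exampleListAllBastionHosts is a usage sketch, not generated code: it drains
// the iterator returned by ListComplete, which crosses page boundaries via the
// standard autorest NotDone/NextWithContext calls. The visit callback is a
// placeholder for whatever per-host processing the caller needs.
func exampleListAllBastionHosts(ctx context.Context, client BastionHostsClient, visit func(BastionHost)) error {
	it, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for it.NotDone() {
		visit(it.Value())
		// NextWithContext advances to the next element, fetching the next
		// page from the service when the current one is exhausted.
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}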
// ListByResourceGroup lists all Bastion Hosts in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client BastionHostsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result BastionHostListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.bhlr.Response.Response != nil {
sc = result.bhlr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.bhlr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result.bhlr, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "ListByResourceGroup", resp, "Failure responding to request")
return
}
if result.bhlr.hasNextLink() && result.bhlr.IsEmpty() {
err = result.NextWithContext(ctx)
return
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client BastionHostsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client BastionHostsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client BastionHostsClient) ListByResourceGroupResponder(resp *http.Response) (result BastionHostListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client BastionHostsClient) listByResourceGroupNextResults(ctx context.Context, lastResults BastionHostListResult) (result BastionHostListResult, err error) {
req, err := lastResults.bastionHostListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.BastionHostsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.BastionHostsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
}
return
}
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client BastionHostsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result BastionHostListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
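// Editorial note, not generated: ListByResourceGroupComplete returns the same
// BastionHostListResultIterator as ListComplete, so the pagination sketch shown
// after ListComplete applies unchanged, just scoped to a single resource group.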
// UpdateTags updates bastion host tags.
// Parameters:
// resourceGroupName - the resource group name of the BastionHost.
// bastionHostName - the name of the bastionHost.
// bastionHostParameters - parameters supplied to update a bastion host tags.
func (client BastionHostsClient) UpdateTags(ctx context.Context, resourceGroupName string, bastionHostName string, bastionHostParameters TagsObject) (result BastionHostsUpdateTagsFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/BastionHostsClient.UpdateTags")
defer func() {
sc := -1
if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
sc = result.FutureAPI.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdateTagsPreparer(ctx, resourceGroupName, bastionHostName, bastionHostParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "UpdateTags", nil, "Failure preparing request")
return
}
result, err = client.UpdateTagsSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.BastionHostsClient", "UpdateTags", nil, "Failure sending request")
return
}
return
}
// UpdateTagsPreparer prepares the UpdateTags request.
func (client BastionHostsClient) UpdateTagsPreparer(ctx context.Context, resourceGroupName string, bastionHostName string, bastionHostParameters TagsObject) (*http.Request, error) {
pathParameters := map[string]interface{}{
"bastionHostName": autorest.Encode("path", bastionHostName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}", pathParameters),
autorest.WithJSON(bastionHostParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateTagsSender sends the UpdateTags request. The method will close the
// http.Response Body if it receives an error.
func (client BastionHostsClient) UpdateTagsSender(req *http.Request) (future BastionHostsUpdateTagsFuture, err error) {
var resp *http.Response
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
var azf azure.Future
azf, err = azure.NewFutureFromResponse(resp)
future.FutureAPI = &azf
future.Result = future.result
return
}
// UpdateTagsResponder handles the response to the UpdateTags request. The method always
// closes the http.Response Body.
func (client BastionHostsClient) UpdateTagsResponder(resp *http.Response) (result BastionHost, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
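// exampleUpdateBastionTags is a usage sketch, not generated code: UpdateTags is
// a long-running operation, so the returned future must be waited on before the
// final BastionHost can be read. The resource names and tag values here are
// placeholders.
func exampleUpdateBastionTags(ctx context.Context, client BastionHostsClient) (BastionHost, error) {
	env := "dev"
	future, err := client.UpdateTags(ctx, "example-rg", "example-bastion", TagsObject{
		Tags: map[string]*string{"environment": &env},
	})
	if err != nil {
		return BastionHost{}, err
	}
	// Poll the operation until the service reports completion.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return BastionHost{}, err
	}
	// Result decodes the terminal response via UpdateTagsResponder; it is the
	// func field assigned in UpdateTagsSender above.
	return future.Result(client)
}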
| {
return BastionHostsClient{NewWithBaseURI(baseURI, subscriptionID)}
} |
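// exampleNewClient is a usage sketch, not generated code: it constructs a
// client against an explicit endpoint. The base URI and subscription ID are
// placeholders; callers targeting the public cloud typically use the default
// constructor instead and would still need to set an Authorizer before use.
func exampleNewClient() BastionHostsClient {
	return NewBastionHostsClientWithBaseURI("https://management.azure.com", "00000000-0000-0000-0000-000000000000")
}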