file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
i18n_test.go | package plugin
import (
"testing"
"github.com/mattermost/mattermost-server/v5/plugin/plugintest"
"github.com/nicksnyder/go-i18n/v2/i18n"
"github.com/stretchr/testify/assert"
"github.com/matterpoll/matterpoll/server/store/mockstore"
"github.com/matterpoll/matterpoll/server/utils/testutils"
)
func TestLocalizeDefaultMessage(t *testing.T) {
t.Run("fine", func(t *testing.T) {
api := &plugintest.API{}
p := setupTestPlugin(t, api, &mockstore.Store{})
l := p.getServerLocalizer()
m := &i18n.Message{
Other: "test message",
}
assert.Equal(t, m.Other, p.LocalizeDefaultMessage(l, m))
})
t.Run("empty message", func(t *testing.T) {
api := &plugintest.API{}
defer api.AssertExpectations(t)
p := setupTestPlugin(t, api, &mockstore.Store{})
l := p.getServerLocalizer()
m := &i18n.Message{}
assert.Equal(t, "", p.LocalizeDefaultMessage(l, m))
})
}
func TestLocalizeWithConfig(t *testing.T) {
t.Run("fine", func(t *testing.T) {
api := &plugintest.API{}
p := setupTestPlugin(t, api, &mockstore.Store{})
l := p.getServerLocalizer()
lc := &i18n.LocalizeConfig{
DefaultMessage: &i18n.Message{
Other: "test messsage",
},
}
assert.Equal(t, lc.DefaultMessage.Other, p.LocalizeWithConfig(l, lc))
})
t.Run("empty config", func(t *testing.T) {
api := &plugintest.API{}
api.On("LogWarn", testutils.GetMockArgumentsWithType("string", 3)...).Return() | lc := &i18n.LocalizeConfig{}
assert.Equal(t, "", p.LocalizeWithConfig(l, lc))
})
t.Run("ids missmatch", func(t *testing.T) {
api := &plugintest.API{}
api.On("LogWarn", testutils.GetMockArgumentsWithType("string", 3)...).Return()
defer api.AssertExpectations(t)
p := setupTestPlugin(t, api, &mockstore.Store{})
l := p.getServerLocalizer()
lc := &i18n.LocalizeConfig{
DefaultMessage: &i18n.Message{
ID: "some ID",
},
MessageID: "some other ID",
}
assert.Equal(t, "", p.LocalizeWithConfig(l, lc))
})
} | defer api.AssertExpectations(t)
p := setupTestPlugin(t, api, &mockstore.Store{})
l := p.getServerLocalizer() |
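The column header above describes a fill-in-the-middle layout: each row keeps the text before and after a masked span in the prefix and suffix cells and the masked span itself in the middle cell, as in the i18n_test.go row just shown. A minimal Go sketch of that relationship, assuming the three cells simply partition each file's text:

```go
package main

import "fmt"

// reconstruct shows how a row of this dataset maps back to its source file:
// concatenating prefix, middle and suffix in that order restores the file.
func reconstruct(prefix, middle, suffix string) string {
	return prefix + middle + suffix
}

func main() {
	// Illustrative values echoing the i18n_test.go row above (abbreviated).
	prefix := "api.On(\"LogWarn\", ...).Return()\n"
	middle := "defer api.AssertExpectations(t)\n"
	suffix := "lc := &i18n.LocalizeConfig{}\n"
	fmt.Print(reconstruct(prefix, middle, suffix))
}
```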
__init__.py | import nonebot
from nonebot.adapters.cqhttp.message import Message
from nonebot.matcher import Matcher
from nonebot.typing import T_State
from nonebot.plugin import on_command
from nonebot.adapters.cqhttp import Bot, MessageEvent
import userlib
import os
from userlib.wiki import EntryInfoBase, WikiBase
from userlib.wiki.baidu import BaiduWiki
# from userlib.wiki.moegirl import MoegirlWiki
from userlib.plugin import PluginInfo
export = nonebot.export()
export.info = PluginInfo(
"Wiki",
"百科",
"支持 \"百度百科\"\n"
"发送 \"相应百科名称 + 词条名\" 即可使用"
)
async def _send_as_image(matcher: type[Matcher], message: str, times: float):
tmp = userlib.gen_tmp("wiki-", ".png")
try:
userlib.draw_img_from_text(
(16 * 80 + 2, int(34 * max(message.count("\n") * times, 24)) + 1),
message, (tmp, "png"), userlib.sarasa_f32
)
await matcher.send(Message("[CQ:image,file=file://%s]" % tmp))
except Exception as ex:
await matcher.send("处理数据时,发生了意料之外的错误/(ㄒoㄒ)/~~")
await matcher.send(getattr(ex, "message", str(ex)))
raise
finally:
os.remove(tmp)
async def _first_handle(
wiki: WikiBase,
matcher: type[Matcher],
event: MessageEvent,
state: T_State
):
name = event.get_message().extract_plain_text()
try:
infos = await wiki.get_entries(name)
except Exception as ex:
await matcher.send(
"请求时发生错误,错误信息:\n" +
getattr(ex, "message", str(ex))
)
raise
if not infos:
await matcher.finish("未找到名字为 %s 的词条" % name)
elif len(infos) == 1:
state["index"] = 1
else:
await matcher.send(
"%s 为多义词,请选择所需查询词条的编号,如需取消操作发送 \"取消\" 即可:\n(请等待图片处理……)" % name
)
await _send_ | index = int(index_raw)
except ValueError:
await matcher.reject("输入的值与期望类型 int 不符合\n输入“取消”以取消操作")
raise
if index < 1 or index > len(state["infos"]):
await matcher.reject("输入的值超出所预期的范围\n输入“取消”以取消操作")
info: EntryInfoBase = state["infos"][index - 1]
await matcher.send("链接:" + info.link)
try:
entry = await info.get_content()
except Exception as ex:
await matcher.send("请求时发生错误,错误信息:\n" + getattr(ex, "message", str(ex)))
raise
await _send_as_image(
matcher,
"%s\n简介:\n%s\n\n基本信息:\n%s" %
(
"%s(%s)" % (entry.name, info.field) if info.field else entry.name,
entry.summary,
"\n".join([
" " * 4 + "%s: %s" % (key, value)
for key, value in entry.basic_infos.items()
])
),
2
)
baidu = on_command("百度百科")
baidu_wiki = BaiduWiki()
@baidu.handle()
async def baidu_handle(bot: Bot, event: MessageEvent, state: T_State):
await _first_handle(baidu_wiki, baidu, event, state)
@baidu.got("index")
async def baidu_arg_handle(bot: Bot, event: MessageEvent, state: T_State):
await _arg_handle(baidu, state)
# moegirl = on_command("萌娘百科")
# moegirl_wiki = MoegirlWiki()
# @moegirl.handle()
# async def moegirl_handle(bot: Bot, event: MessageEvent, state: T_State):
# await _first_handle(moegirl_wiki, moegirl, event, state)
# @moegirl.got("index")
# async def moegirl_arg_handle(bot: Bot, event: MessageEvent, state: T_State):
# await _arg_handle(moegirl, state)
| as_image(
matcher,
"\n".join([
"%s. %s" % (index, info.field)
for index, info in enumerate(infos, 1)
]),
1.2
)
state["infos"] = infos
async def _arg_handle(matcher: type[Matcher], state: T_State):
index_raw = state["index"]
if (index_raw == "取消"):
await matcher.finish("已取消操作")
try:
|
dialog-manager-mixin.ts | import { OPPDomEvent } from "../common/dom/fire_event";
import { OppBaseEl } from "./opp-base-mixin";
import { makeDialogManager, showDialog } from "../dialogs/make-dialog-manager";
import { Constructor } from "../types";
interface RegisterDialogParams { | }
declare global {
// for fire event
interface OPPDomEvents {
"register-dialog": RegisterDialogParams;
}
// for add event listener
interface HTMLElementEventMap {
"register-dialog": OPPDomEvent<RegisterDialogParams>;
}
}
export const dialogManagerMixin = <T extends Constructor<OppBaseEl>>(
superClass: T
) =>
class extends superClass {
protected firstUpdated(changedProps) {
super.firstUpdated(changedProps);
// deprecated
this.addEventListener("register-dialog", (e) =>
this.registerDialog(e.detail)
);
makeDialogManager(this, this.shadowRoot!);
}
private registerDialog({
dialogShowEvent,
dialogTag,
dialogImport,
}: RegisterDialogParams) {
this.addEventListener(dialogShowEvent, (showEv) => {
showDialog(
this,
this.shadowRoot!,
dialogImport,
dialogTag,
(showEv as OPPDomEvent<unknown>).detail
);
});
}
}; | dialogShowEvent: keyof OPPDomEvents;
dialogTag: keyof HTMLElementTagNameMap;
dialogImport: () => Promise<unknown>; |
template.go | package template
import (
"bytes"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/kujtimiihoxha/gk/utils"
"path/filepath"
"reflect"
"strings"
"text/template"
)
var engine Engine
type Engine interface {
init()
Execute(name string, model interface{}) (string, error)
ExecuteString(data string, model interface{}) (string, error)
}
type DefaultEngine struct {
t *template.Template
}
func funcMap() template.FuncMap {
return template.FuncMap{
"last": func(x int, a interface{}) bool {
return x == reflect.ValueOf(a).Len()-1
},
"toSnakeCase": func(s string) string {
return utils.ToLowerSnakeCase(s)
},
"toUpperFirstCamelCase": func(s string) string {
return utils.ToUpperFirstCamelCase(s)
},
"toUpperFirst": func(s string) string {
return utils.ToUpperFirst(s)
},
"fileSeparator": func() string {
if filepath.Separator == '\\' {
return "\\\\"
}
return string(filepath.Separator)
},
"toCamelCase": func(s string) string {
return utils.ToCamelCase(s)
},
}
}
func | () Engine {
if engine == nil {
engine = &DefaultEngine{}
engine.init()
}
return engine
}
func (e *DefaultEngine) init() {
e.t = template.New("default")
e.t.Funcs(funcMap())
for n, v := range _bintree.Children["tmpl"].Children["partials"].Children {
a, _ := v.Func()
_, err := e.t.Parse(
fmt.Sprintf(
"{{define \"%s\"}} %s {{end}}",
strings.Replace(n, ".tmpl", "", 1),
string(a.bytes),
),
)
if err != nil {
logrus.Panic(err)
}
}
}
func (e *DefaultEngine) Execute(name string, model interface{}) (string, error) {
d, err := Asset(fmt.Sprintf("tmpl/%s.tmpl", name))
if err != nil {
logrus.Panic(err)
}
tmp, err := e.t.Parse(string(d))
if err != nil {
logrus.Panic(err)
}
ret := bytes.NewBufferString("")
err = tmp.Execute(ret, model)
return ret.String(), err
}
func (e *DefaultEngine) ExecuteString(data string, model interface{}) (string, error) {
tmp, err := e.t.Parse(data)
if err != nil {
logrus.Panic(err)
}
ret := bytes.NewBufferString("")
err = tmp.Execute(ret, model)
return ret.String(), err
}
| NewEngine |
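For context, a hedged usage sketch of the Engine defined in the template.go row above; NewEngine, ExecuteString and the toUpperFirstCamelCase helper come from that row, while the template string and model map are invented for illustration, and the generated template assets parsed by init() are assumed to be present:

```go
package template

import "fmt"

// exampleRender is a sketch, not part of the original file: it renders a
// one-off template string through the shared engine so the funcMap helpers
// registered during init() are available inside the template.
func exampleRender() {
	e := NewEngine()
	out, err := e.ExecuteString(
		"type {{toUpperFirstCamelCase .Name}}Service struct{}",
		map[string]string{"Name": "user_profile"},
	)
	if err != nil {
		fmt.Println("render failed:", err)
		return
	}
	fmt.Println(out) // expected to print something like: type UserProfileService struct{}
}
```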
watcher.go | /*
Copyright 2018 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"github.com/pkg/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
)
// PodWatcher returns a watcher that will report on all Pod Events (additions, modifications, etc.)
func PodWatcher(namespace string) (watch.Interface, error) {
kubeclient, err := Client()
if err != nil {
return nil, errors.Wrap(err, "getting k8s client")
}
client := kubeclient.CoreV1()
var forever int64 = 3600 * 24 * 365 * 100
return client.Pods(namespace).Watch(meta_v1.ListOptions{
IncludeUninitialized: true,
TimeoutSeconds: &forever,
})
}
// AggregatePodWatcher returns a watcher for multiple namespaces.
func AggregatePodWatcher(namespaces []string, aggregate chan watch.Event) (func(), error) {
watchers := make([]watch.Interface, 0, len(namespaces))
stopWatchers := func() {
for _, w := range watchers {
w.Stop()
}
}
for _, ns := range namespaces {
watcher, err := PodWatcher(ns)
if err != nil {
return stopWatchers, errors.Wrap(err, "initializing pod watcher for "+ns) | aggregate <- msg
}
}(watcher)
}
return stopWatchers, nil
} | }
watchers = append(watchers, watcher)
go func(w watch.Interface) {
for msg := range w.ResultChan() { |
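A hedged sketch of how AggregatePodWatcher from the watcher.go row above might be consumed; the function and its signature come from the row, while the namespace list, timeout and logging are illustrative:

```go
package kubernetes

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
)

// watchPodsForAWhile fans pod events from several namespaces into one channel
// via AggregatePodWatcher, prints them for a fixed period, then stops every
// underlying watcher through the returned stop function.
func watchPodsForAWhile(namespaces []string, d time.Duration) error {
	events := make(chan watch.Event)
	stop, err := AggregatePodWatcher(namespaces, events)
	// stop is always non-nil, so it is safe to defer even when err != nil.
	defer stop()
	if err != nil {
		return err
	}
	deadline := time.After(d)
	for {
		select {
		case ev := <-events:
			fmt.Printf("pod event: %s\n", ev.Type)
		case <-deadline:
			return nil
		}
	}
}
```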
loginticket.d.ts | export declare class | {
private envelope?;
private payload?;
/**
* Create a simple class to extract user ID from an ID Token
*
* @param {string} env Envelope of the jwt
* @param {TokenPayload} pay Payload of the jwt
* @constructor
*/
constructor(env?: string, pay?: TokenPayload);
getEnvelope(): string | undefined;
getPayload(): TokenPayload | undefined;
/**
* Create a simple class to extract user ID from an ID Token
*
* @return The user ID
*/
getUserId(): string | null;
/**
* Returns attributes from the login ticket. This can contain
* various information about the user session.
*
* @return The envelope and payload
*/
getAttributes(): {
envelope: string | undefined;
payload: TokenPayload | undefined;
};
}
export interface TokenPayload {
/**
* The Issuer Identifier for the Issuer of the response. Always
* https://accounts.google.com or accounts.google.com for Google ID tokens.
*/
iss: string;
/**
* Access token hash. Provides validation that the access token is tied to the
* identity token. If the ID token is issued with an access token in the
* server flow, this is always included. This can be used as an alternate
* mechanism to protect against cross-site request forgery attacks, but if you
* follow Step 1 and Step 3 it is not necessary to verify the access token.
*/
at_hash?: string;
/**
* True if the user's e-mail address has been verified; otherwise false.
*/
email_verified?: boolean;
/**
* An identifier for the user, unique among all Google accounts and never
* reused. A Google account can have multiple emails at different points in
* time, but the sub value is never changed. Use sub within your application
* as the unique-identifier key for the user.
*/
sub: string;
/**
* The client_id of the authorized presenter. This claim is only needed when
* the party requesting the ID token is not the same as the audience of the ID
* token. This may be the case at Google for hybrid apps where a web
* application and Android app have a different client_id but share the same
* project.
*/
azp?: string;
/**
* The user's email address. This may not be unique and is not suitable for
* use as a primary key. Provided only if your scope included the string
* "email".
*/
email?: string;
/**
* The URL of the user's profile page. Might be provided when:
* - The request scope included the string "profile"
* - The ID token is returned from a token refresh
* - When profile claims are present, you can use them to update your app's
* user records. Note that this claim is never guaranteed to be present.
*/
profile?: string;
/**
* The URL of the user's profile picture. Might be provided when:
* - The request scope included the string "profile"
* - The ID token is returned from a token refresh
* - When picture claims are present, you can use them to update your app's
* user records. Note that this claim is never guaranteed to be present.
*/
picture?: string;
/**
* The user's full name, in a displayable form. Might be provided when:
* - The request scope included the string "profile"
* - The ID token is returned from a token refresh
* - When name claims are present, you can use them to update your app's user
* records. Note that this claim is never guaranteed to be present.
*/
name?: string;
/**
* The user's given name, in a displayable form. Might be provided when:
* - The request scope included the string "profile"
* - The ID token is returned from a token refresh
* - When name claims are present, you can use them to update your app's user
* records. Note that this claim is never guaranteed to be present.
*/
given_name?: string;
/**
* The user's family name, in a displayable form. Might be provided when:
* - The request scope included the string "profile"
* - The ID token is returned from a token refresh
* - When name claims are present, you can use them to update your app's user
* records. Note that this claim is never guaranteed to be present.
*/
family_name?: string;
/**
* Identifies the audience that this ID token is intended for. It must be one
* of the OAuth 2.0 client IDs of your application.
*/
aud: string;
/**
* The time the ID token was issued, represented in Unix time (integer
* seconds).
*/
iat: number;
/**
* The time the ID token expires, represented in Unix time (integer seconds).
*/
exp: number;
/**
* The value of the nonce supplied by your app in the authentication request.
* You should enforce protection against replay attacks by ensuring it is
* presented only once.
*/
nonce?: string;
/**
* The hosted G Suite domain of the user. Provided only if the user belongs to
* a hosted domain.
*/
hd?: string;
}
| LoginTicket |
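The TokenPayload comments above document the ID-token claims in prose. The sketch below, written in Go purely for illustration (the struct and validate helper are hypothetical, not part of the library), mirrors the required claims and the two checks the comments emphasize: matching the audience and rejecting expired tokens.

```go
package main

import (
	"fmt"
	"time"
)

// tokenPayload is a hypothetical mirror of the required claims documented in
// loginticket.d.ts; the field comments paraphrase the descriptions above.
type tokenPayload struct {
	Iss string // issuer: accounts.google.com or https://accounts.google.com
	Sub string // stable, never-reused identifier for the Google account
	Aud string // OAuth 2.0 client ID the token was issued for
	Iat int64  // issued-at time, Unix seconds
	Exp int64  // expiry time, Unix seconds
}

// validate performs the two checks the field comments call out: the audience
// must be one of your client IDs and the token must not be expired.
func (p tokenPayload) validate(clientID string, now time.Time) error {
	if p.Aud != clientID {
		return fmt.Errorf("unexpected audience %q", p.Aud)
	}
	if now.Unix() >= p.Exp {
		return fmt.Errorf("token expired at %d", p.Exp)
	}
	return nil
}

func main() {
	p := tokenPayload{Iss: "https://accounts.google.com", Sub: "1234567890", Aud: "my-client-id", Iat: 1700000000, Exp: 1700003600}
	fmt.Println(p.validate("my-client-id", time.Unix(1700000500, 0))) // <nil>
}
```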
block.rs | //! Implementations for `BlockContext` methods.
use super::{
index::BoundsCheckResult, make_local, selection::Selection, Block, BlockContext, Dimension,
Error, Instruction, LocalType, LookupType, LoopContext, ResultMember, Writer, WriterFlags,
};
use crate::{arena::Handle, proc::TypeResolution};
use spirv::Word;
fn get_dimension(type_inner: &crate::TypeInner) -> Dimension {
match *type_inner {
crate::TypeInner::Scalar { .. } => Dimension::Scalar,
crate::TypeInner::Vector { .. } => Dimension::Vector,
crate::TypeInner::Matrix { .. } => Dimension::Matrix,
_ => unreachable!(),
}
}
/// The results of emitting code for a left-hand-side expression.
///
/// On success, `write_expression_pointer` returns one of these.
enum ExpressionPointer {
/// The pointer to the expression's value is available, as the value of the
/// expression with the given id.
Ready { pointer_id: Word },
/// The access expression must be conditional on the value of `condition`, a boolean
/// expression that is true if all indices are in bounds. If `condition` is true, then
/// `access` is an `OpAccessChain` instruction that will compute a pointer to the
/// expression's value. If `condition` is false, then executing `access` would be
/// undefined behavior.
Conditional {
condition: Word,
access: Instruction,
},
}
impl Writer {
// Flip Y coordinate to adjust for coordinate space difference
// between SPIR-V and our IR.
// The `position_id` argument is a pointer to a `vecN<f32>`,
// whose `y` component we will negate.
fn write_epilogue_position_y_flip(
&mut self,
position_id: Word,
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
let float_ptr_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: Some(spirv::StorageClass::Output),
}));
let index_y_id = self.get_index_constant(1);
let access_id = self.id_gen.next();
body.push(Instruction::access_chain(
float_ptr_type_id,
access_id,
position_id,
&[index_y_id],
));
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: None,
}));
let load_id = self.id_gen.next();
body.push(Instruction::load(float_type_id, load_id, access_id, None));
let neg_id = self.id_gen.next();
body.push(Instruction::unary(
spirv::Op::FNegate,
float_type_id,
neg_id,
load_id,
));
body.push(Instruction::store(access_id, neg_id, None));
Ok(())
}
// Clamp fragment depth between 0 and 1.
fn write_epilogue_frag_depth_clamp(
&mut self,
frag_depth_id: Word,
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: None,
}));
let value0_id = self.get_constant_scalar(crate::ScalarValue::Float(0.0), 4);
let value1_id = self.get_constant_scalar(crate::ScalarValue::Float(1.0), 4);
let original_id = self.id_gen.next();
body.push(Instruction::load(
float_type_id,
original_id,
frag_depth_id,
None,
));
let clamp_id = self.id_gen.next();
body.push(Instruction::ext_inst(
self.gl450_ext_inst_id,
spirv::GLOp::FClamp,
float_type_id,
clamp_id,
&[original_id, value0_id, value1_id],
));
body.push(Instruction::store(frag_depth_id, clamp_id, None));
Ok(())
}
fn write_entry_point_return(
&mut self,
value_id: Word,
ir_result: &crate::FunctionResult,
result_members: &[ResultMember],
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
for (index, res_member) in result_members.iter().enumerate() {
let member_value_id = match ir_result.binding {
Some(_) => value_id,
None => {
let member_value_id = self.id_gen.next();
body.push(Instruction::composite_extract(
res_member.type_id,
member_value_id,
value_id,
&[index as u32],
));
member_value_id
}
};
body.push(Instruction::store(res_member.id, member_value_id, None));
match res_member.built_in {
Some(crate::BuiltIn::Position)
if self.flags.contains(WriterFlags::ADJUST_COORDINATE_SPACE) =>
{
self.write_epilogue_position_y_flip(res_member.id, body)?;
}
Some(crate::BuiltIn::FragDepth)
if self.flags.contains(WriterFlags::CLAMP_FRAG_DEPTH) =>
{
self.write_epilogue_frag_depth_clamp(res_member.id, body)?;
}
_ => {}
}
}
Ok(())
}
}
impl<'w> BlockContext<'w> {
/// Decide whether to put off emitting instructions for `expr_handle`.
///
/// We would like to gather together chains of `Access` and `AccessIndex`
/// Naga expressions into a single `OpAccessChain` SPIR-V instruction. To do
/// this, we don't generate instructions for these exprs when we first
/// encounter them. Their ids in `self.writer.cached.ids` are left as zero. Then,
/// once we encounter a `Load` or `Store` expression that actually needs the
/// chain's value, we call `write_expression_pointer` to handle the whole
/// thing in one fell swoop.
fn is_intermediate(&self, expr_handle: Handle<crate::Expression>) -> bool {
match self.ir_function.expressions[expr_handle] {
crate::Expression::GlobalVariable(_) | crate::Expression::LocalVariable(_) => true,
crate::Expression::FunctionArgument(index) => {
let arg = &self.ir_function.arguments[index as usize];
self.ir_module.types[arg.ty].inner.pointer_class().is_some()
}
// The chain rule: if this `Access...`'s `base` operand was
// previously omitted, then omit this one, too.
_ => self.cached.ids[expr_handle.index()] == 0,
}
}
/// Cache an expression for a value.
pub(super) fn cache_expression_value(
&mut self,
expr_handle: Handle<crate::Expression>,
block: &mut Block,
) -> Result<(), Error> {
let result_type_id = self.get_expression_type_id(&self.fun_info[expr_handle].ty);
let id = match self.ir_function.expressions[expr_handle] {
crate::Expression::Access { base, index: _ } if self.is_intermediate(base) => {
// See `is_intermediate`; we'll handle this later in
// `write_expression_pointer`.
0
}
crate::Expression::Access { base, index } => {
let base_ty = self.fun_info[base].ty.inner_with(&self.ir_module.types);
match *base_ty {
crate::TypeInner::Vector { .. } => (),
ref other => {
log::error!(
"Unable to access base {:?} of type {:?}",
self.ir_function.expressions[base],
other
);
return Err(Error::Validation(
"only vectors may be dynamically indexed by value",
));
}
};
self.write_vector_access(expr_handle, base, index, block)?
}
crate::Expression::AccessIndex { base, index: _ } if self.is_intermediate(base) => {
// See `is_intermediate`; we'll handle this later in
// `write_expression_pointer`.
0
}
crate::Expression::AccessIndex { base, index } => {
match *self.fun_info[base].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Vector { .. }
| crate::TypeInner::Matrix { .. }
| crate::TypeInner::Array { .. }
| crate::TypeInner::Struct { .. } => {
// We never need bounds checks here: dynamically sized arrays can
// only appear behind pointers, and are thus handled by the
// `is_intermediate` case above. Everything else's size is
// statically known and checked in validation.
let id = self.gen_id();
let base_id = self.cached[base];
block.body.push(Instruction::composite_extract(
result_type_id,
id,
base_id,
&[index],
));
id
}
ref other => {
log::error!("Unable to access index of {:?}", other);
return Err(Error::FeatureNotImplemented("access index for type"));
}
}
}
crate::Expression::GlobalVariable(handle) => {
self.writer.global_variables[handle.index()].id
}
crate::Expression::Constant(handle) => self.writer.constant_ids[handle.index()],
crate::Expression::Splat { size, value } => {
let value_id = self.cached[value];
let components = [value_id; 4];
let id = self.gen_id();
block.body.push(Instruction::composite_construct(
result_type_id,
id,
&components[..size as usize],
));
id
}
crate::Expression::Swizzle {
size,
vector,
pattern,
} => {
let vector_id = self.cached[vector];
self.temp_list.clear();
for &sc in pattern[..size as usize].iter() {
self.temp_list.push(sc as Word);
}
let id = self.gen_id();
block.body.push(Instruction::vector_shuffle(
result_type_id,
id,
vector_id,
vector_id,
&self.temp_list,
));
id
}
crate::Expression::Compose {
ty: _,
ref components,
} => {
self.temp_list.clear();
for &component in components {
self.temp_list.push(self.cached[component]);
}
let id = self.gen_id();
block.body.push(Instruction::composite_construct(
result_type_id,
id,
&self.temp_list,
));
id
}
crate::Expression::Unary { op, expr } => {
let id = self.gen_id();
let expr_id = self.cached[expr];
let expr_ty_inner = self.fun_info[expr].ty.inner_with(&self.ir_module.types);
let spirv_op = match op {
crate::UnaryOperator::Negate => match expr_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Float) => spirv::Op::FNegate,
Some(crate::ScalarKind::Sint) => spirv::Op::SNegate,
Some(crate::ScalarKind::Bool) => spirv::Op::LogicalNot,
Some(crate::ScalarKind::Uint) | None => {
log::error!("Unable to negate {:?}", expr_ty_inner);
return Err(Error::FeatureNotImplemented("negation"));
}
},
crate::UnaryOperator::Not => match expr_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Bool) => spirv::Op::LogicalNot,
_ => spirv::Op::Not,
},
};
block
.body
.push(Instruction::unary(spirv_op, result_type_id, id, expr_id));
id
}
crate::Expression::Binary { op, left, right } => {
let id = self.gen_id();
let left_id = self.cached[left];
let right_id = self.cached[right];
let left_ty_inner = self.fun_info[left].ty.inner_with(&self.ir_module.types);
let right_ty_inner = self.fun_info[right].ty.inner_with(&self.ir_module.types);
let left_dimension = get_dimension(left_ty_inner);
let right_dimension = get_dimension(right_ty_inner);
let mut preserve_order = true;
let spirv_op = match op {
crate::BinaryOperator::Add => match *left_ty_inner {
crate::TypeInner::Scalar { kind, .. }
| crate::TypeInner::Vector { kind, .. } => match kind {
crate::ScalarKind::Float => spirv::Op::FAdd,
_ => spirv::Op::IAdd,
},
_ => unimplemented!(),
},
crate::BinaryOperator::Subtract => match *left_ty_inner {
crate::TypeInner::Scalar { kind, .. }
| crate::TypeInner::Vector { kind, .. } => match kind {
crate::ScalarKind::Float => spirv::Op::FSub,
_ => spirv::Op::ISub,
},
_ => unimplemented!(),
},
crate::BinaryOperator::Multiply => match (left_dimension, right_dimension) {
(Dimension::Scalar, Dimension::Vector { .. }) => {
preserve_order = false;
spirv::Op::VectorTimesScalar
}
(Dimension::Vector, Dimension::Scalar { .. }) => {
spirv::Op::VectorTimesScalar
}
(Dimension::Vector, Dimension::Matrix) => spirv::Op::VectorTimesMatrix,
(Dimension::Matrix, Dimension::Scalar { .. }) => {
spirv::Op::MatrixTimesScalar
}
(Dimension::Scalar, Dimension::Matrix { .. }) => {
preserve_order = false;
spirv::Op::MatrixTimesScalar
}
(Dimension::Matrix, Dimension::Vector) => spirv::Op::MatrixTimesVector,
(Dimension::Matrix, Dimension::Matrix) => spirv::Op::MatrixTimesMatrix,
(Dimension::Vector, Dimension::Vector)
| (Dimension::Scalar, Dimension::Scalar)
if left_ty_inner.scalar_kind() == Some(crate::ScalarKind::Float) =>
{
spirv::Op::FMul
}
(Dimension::Vector, Dimension::Vector)
| (Dimension::Scalar, Dimension::Scalar) => spirv::Op::IMul,
},
crate::BinaryOperator::Divide => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SDiv,
Some(crate::ScalarKind::Uint) => spirv::Op::UDiv,
Some(crate::ScalarKind::Float) => spirv::Op::FDiv,
_ => unimplemented!(),
},
crate::BinaryOperator::Modulo => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SMod,
Some(crate::ScalarKind::Uint) => spirv::Op::UMod,
Some(crate::ScalarKind::Float) => spirv::Op::FMod,
_ => unimplemented!(),
},
crate::BinaryOperator::Equal => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) | Some(crate::ScalarKind::Uint) => {
spirv::Op::IEqual
}
Some(crate::ScalarKind::Float) => spirv::Op::FOrdEqual,
Some(crate::ScalarKind::Bool) => spirv::Op::LogicalEqual,
_ => unimplemented!(),
},
crate::BinaryOperator::NotEqual => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) | Some(crate::ScalarKind::Uint) => {
spirv::Op::INotEqual
}
Some(crate::ScalarKind::Float) => spirv::Op::FOrdNotEqual,
Some(crate::ScalarKind::Bool) => spirv::Op::LogicalNotEqual,
_ => unimplemented!(),
},
crate::BinaryOperator::Less => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SLessThan,
Some(crate::ScalarKind::Uint) => spirv::Op::ULessThan,
Some(crate::ScalarKind::Float) => spirv::Op::FOrdLessThan,
_ => unimplemented!(),
},
crate::BinaryOperator::LessEqual => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SLessThanEqual,
Some(crate::ScalarKind::Uint) => spirv::Op::ULessThanEqual,
Some(crate::ScalarKind::Float) => spirv::Op::FOrdLessThanEqual,
_ => unimplemented!(),
},
crate::BinaryOperator::Greater => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SGreaterThan,
Some(crate::ScalarKind::Uint) => spirv::Op::UGreaterThan,
Some(crate::ScalarKind::Float) => spirv::Op::FOrdGreaterThan,
_ => unimplemented!(),
},
crate::BinaryOperator::GreaterEqual => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SGreaterThanEqual,
Some(crate::ScalarKind::Uint) => spirv::Op::UGreaterThanEqual,
Some(crate::ScalarKind::Float) => spirv::Op::FOrdGreaterThanEqual,
_ => unimplemented!(),
},
crate::BinaryOperator::And => spirv::Op::BitwiseAnd,
crate::BinaryOperator::ExclusiveOr => spirv::Op::BitwiseXor,
crate::BinaryOperator::InclusiveOr => spirv::Op::BitwiseOr,
crate::BinaryOperator::LogicalAnd => spirv::Op::LogicalAnd,
crate::BinaryOperator::LogicalOr => spirv::Op::LogicalOr,
crate::BinaryOperator::ShiftLeft => spirv::Op::ShiftLeftLogical,
crate::BinaryOperator::ShiftRight => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::ShiftRightArithmetic,
Some(crate::ScalarKind::Uint) => spirv::Op::ShiftRightLogical,
_ => unimplemented!(),
},
};
block.body.push(Instruction::binary(
spirv_op,
result_type_id,
id,
if preserve_order { left_id } else { right_id },
if preserve_order { right_id } else { left_id },
));
id
}
crate::Expression::Math {
fun,
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
enum MathOp {
Ext(spirv::GLOp),
Custom(Instruction),
}
let arg0_id = self.cached[arg];
let arg_ty = self.fun_info[arg].ty.inner_with(&self.ir_module.types);
let arg_scalar_kind = arg_ty.scalar_kind();
let arg1_id = match arg1 {
Some(handle) => self.cached[handle],
None => 0,
};
let arg2_id = match arg2 {
Some(handle) => self.cached[handle],
None => 0,
};
let arg3_id = match arg3 {
Some(handle) => self.cached[handle],
None => 0,
};
let id = self.gen_id();
let math_op = match fun {
// comparison
Mf::Abs => {
match arg_scalar_kind {
Some(crate::ScalarKind::Float) => MathOp::Ext(spirv::GLOp::FAbs),
Some(crate::ScalarKind::Sint) => MathOp::Ext(spirv::GLOp::SAbs),
Some(crate::ScalarKind::Uint) => {
MathOp::Custom(Instruction::unary(
spirv::Op::CopyObject, // do nothing
result_type_id,
id,
arg0_id,
))
}
other => unimplemented!("Unexpected abs({:?})", other),
}
}
Mf::Min => MathOp::Ext(match arg_scalar_kind {
Some(crate::ScalarKind::Float) => spirv::GLOp::FMin,
Some(crate::ScalarKind::Sint) => spirv::GLOp::SMin,
Some(crate::ScalarKind::Uint) => spirv::GLOp::UMin,
other => unimplemented!("Unexpected min({:?})", other),
}),
Mf::Max => MathOp::Ext(match arg_scalar_kind {
Some(crate::ScalarKind::Float) => spirv::GLOp::FMax,
Some(crate::ScalarKind::Sint) => spirv::GLOp::SMax,
Some(crate::ScalarKind::Uint) => spirv::GLOp::UMax,
other => unimplemented!("Unexpected max({:?})", other),
}),
Mf::Clamp => MathOp::Ext(match arg_scalar_kind {
Some(crate::ScalarKind::Float) => spirv::GLOp::FClamp,
Some(crate::ScalarKind::Sint) => spirv::GLOp::SClamp,
Some(crate::ScalarKind::Uint) => spirv::GLOp::UClamp,
other => unimplemented!("Unexpected max({:?})", other),
}),
// trigonometry
Mf::Sin => MathOp::Ext(spirv::GLOp::Sin),
Mf::Sinh => MathOp::Ext(spirv::GLOp::Sinh),
Mf::Asin => MathOp::Ext(spirv::GLOp::Asin),
Mf::Cos => MathOp::Ext(spirv::GLOp::Cos),
Mf::Cosh => MathOp::Ext(spirv::GLOp::Cosh),
Mf::Acos => MathOp::Ext(spirv::GLOp::Acos),
Mf::Tan => MathOp::Ext(spirv::GLOp::Tan),
Mf::Tanh => MathOp::Ext(spirv::GLOp::Tanh),
Mf::Atan => MathOp::Ext(spirv::GLOp::Atan),
Mf::Atan2 => MathOp::Ext(spirv::GLOp::Atan2),
Mf::Asinh => MathOp::Ext(spirv::GLOp::Asinh),
Mf::Acosh => MathOp::Ext(spirv::GLOp::Acosh),
Mf::Atanh => MathOp::Ext(spirv::GLOp::Atanh),
// decomposition
Mf::Ceil => MathOp::Ext(spirv::GLOp::Ceil),
Mf::Round => MathOp::Ext(spirv::GLOp::RoundEven),
Mf::Floor => MathOp::Ext(spirv::GLOp::Floor),
Mf::Fract => MathOp::Ext(spirv::GLOp::Fract),
Mf::Trunc => MathOp::Ext(spirv::GLOp::Trunc),
Mf::Modf => MathOp::Ext(spirv::GLOp::Modf),
Mf::Frexp => MathOp::Ext(spirv::GLOp::Frexp),
Mf::Ldexp => MathOp::Ext(spirv::GLOp::Ldexp),
// geometry
Mf::Dot => MathOp::Custom(Instruction::binary(
spirv::Op::Dot,
result_type_id,
id,
arg0_id,
arg1_id,
)),
Mf::Outer => MathOp::Custom(Instruction::binary(
spirv::Op::OuterProduct,
result_type_id,
id,
arg0_id,
arg1_id,
)),
Mf::Cross => MathOp::Ext(spirv::GLOp::Cross),
Mf::Distance => MathOp::Ext(spirv::GLOp::Distance),
Mf::Length => MathOp::Ext(spirv::GLOp::Length),
Mf::Normalize => MathOp::Ext(spirv::GLOp::Normalize),
Mf::FaceForward => MathOp::Ext(spirv::GLOp::FaceForward),
Mf::Reflect => MathOp::Ext(spirv::GLOp::Reflect),
Mf::Refract => MathOp::Ext(spirv::GLOp::Refract),
// exponent
Mf::Exp => MathOp::Ext(spirv::GLOp::Exp),
Mf::Exp2 => MathOp::Ext(spirv::GLOp::Exp2),
Mf::Log => MathOp::Ext(spirv::GLOp::Log),
Mf::Log2 => MathOp::Ext(spirv::GLOp::Log2),
Mf::Pow => MathOp::Ext(spirv::GLOp::Pow),
// computational
Mf::Sign => MathOp::Ext(match arg_scalar_kind {
Some(crate::ScalarKind::Float) => spirv::GLOp::FSign,
Some(crate::ScalarKind::Sint) => spirv::GLOp::SSign,
other => unimplemented!("Unexpected sign({:?})", other),
}),
Mf::Fma => MathOp::Ext(spirv::GLOp::Fma),
Mf::Mix => {
let selector = arg2.unwrap();
let selector_ty =
self.fun_info[selector].ty.inner_with(&self.ir_module.types);
match (arg_ty, selector_ty) {
// if the selector is a scalar, we need to splat it
(
&crate::TypeInner::Vector { size, .. },
&crate::TypeInner::Scalar { kind, width },
) => {
let selector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind,
width,
pointer_class: None,
}));
self.temp_list.clear();
self.temp_list.resize(size as usize, arg2_id);
let selector_id = self.gen_id();
block.body.push(Instruction::composite_construct(
selector_type_id,
selector_id,
&self.temp_list,
));
MathOp::Custom(Instruction::ext_inst(
self.writer.gl450_ext_inst_id,
spirv::GLOp::FMix,
result_type_id,
id,
&[arg0_id, arg1_id, selector_id],
))
}
_ => MathOp::Ext(spirv::GLOp::FMix),
}
}
Mf::Step => MathOp::Ext(spirv::GLOp::Step),
Mf::SmoothStep => MathOp::Ext(spirv::GLOp::SmoothStep),
Mf::Sqrt => MathOp::Ext(spirv::GLOp::Sqrt),
Mf::InverseSqrt => MathOp::Ext(spirv::GLOp::InverseSqrt),
Mf::Inverse => MathOp::Ext(spirv::GLOp::MatrixInverse),
Mf::Transpose => MathOp::Custom(Instruction::unary(
spirv::Op::Transpose,
result_type_id,
id,
arg0_id,
)),
Mf::Determinant => MathOp::Ext(spirv::GLOp::Determinant),
Mf::ReverseBits | Mf::CountOneBits => {
log::error!("unimplemented math function {:?}", fun);
return Err(Error::FeatureNotImplemented("math function"));
}
Mf::ExtractBits => {
let op = match arg_scalar_kind {
Some(crate::ScalarKind::Uint) => spirv::Op::BitFieldUExtract,
Some(crate::ScalarKind::Sint) => spirv::Op::BitFieldSExtract,
other => unimplemented!("Unexpected sign({:?})", other),
};
MathOp::Custom(Instruction::ternary(
op,
result_type_id,
id,
arg0_id,
arg1_id,
arg2_id,
))
}
Mf::InsertBits => MathOp::Custom(Instruction::quaternary(
spirv::Op::BitFieldInsert,
result_type_id,
id,
arg0_id,
arg1_id,
arg2_id,
arg3_id,
)),
Mf::Pack4x8unorm => MathOp::Ext(spirv::GLOp::PackUnorm4x8),
Mf::Pack4x8snorm => MathOp::Ext(spirv::GLOp::PackSnorm4x8),
Mf::Pack2x16float => MathOp::Ext(spirv::GLOp::PackHalf2x16),
Mf::Pack2x16unorm => MathOp::Ext(spirv::GLOp::PackUnorm2x16),
Mf::Pack2x16snorm => MathOp::Ext(spirv::GLOp::PackSnorm2x16),
Mf::Unpack4x8unorm => MathOp::Ext(spirv::GLOp::UnpackUnorm4x8),
Mf::Unpack4x8snorm => MathOp::Ext(spirv::GLOp::UnpackSnorm4x8),
Mf::Unpack2x16float => MathOp::Ext(spirv::GLOp::UnpackHalf2x16),
Mf::Unpack2x16unorm => MathOp::Ext(spirv::GLOp::UnpackUnorm2x16),
Mf::Unpack2x16snorm => MathOp::Ext(spirv::GLOp::UnpackSnorm2x16),
};
block.body.push(match math_op {
MathOp::Ext(op) => Instruction::ext_inst(
self.writer.gl450_ext_inst_id,
op,
result_type_id,
id,
&[arg0_id, arg1_id, arg2_id, arg3_id][..fun.argument_count()],
),
MathOp::Custom(inst) => inst,
});
id
}
crate::Expression::LocalVariable(variable) => self.function.variables[&variable].id,
crate::Expression::Load { pointer } => {
match self.write_expression_pointer(pointer, block)? {
ExpressionPointer::Ready { pointer_id } => {
let id = self.gen_id();
let atomic_class =
match *self.fun_info[pointer].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Pointer { base, class } => {
match self.ir_module.types[base].inner {
crate::TypeInner::Atomic { .. } => Some(class),
_ => None,
}
}
_ => None,
};
let instruction = if let Some(class) = atomic_class {
let (semantics, scope) = class.to_spirv_semantics_and_scope();
let scope_constant_id = self.get_scope_constant(scope as u32);
let semantics_id = self.get_index_constant(semantics.bits());
Instruction::atomic_load(
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
)
} else {
Instruction::load(result_type_id, id, pointer_id, None)
};
block.body.push(instruction);
id
}
ExpressionPointer::Conditional { condition, access } => {
//TODO: support atomics?
self.write_conditional_indexed_load(
result_type_id,
condition,
block,
move |id_gen, block| {
// The in-bounds path. Perform the access and the load.
let pointer_id = access.result_id.unwrap();
let value_id = id_gen.next();
block.body.push(access);
block.body.push(Instruction::load(
result_type_id,
value_id,
pointer_id,
None,
));
value_id
},
)
}
}
}
crate::Expression::FunctionArgument(index) => self.function.parameter_id(index),
crate::Expression::CallResult(_) | crate::Expression::AtomicResult { .. } => {
self.cached[expr_handle]
}
crate::Expression::As {
expr,
kind,
convert,
} => {
use crate::ScalarKind as Sk;
let expr_id = self.cached[expr];
let (src_kind, src_size, src_width) =
match *self.fun_info[expr].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Scalar { kind, width } => (kind, None, width),
crate::TypeInner::Vector { kind, width, size } => (kind, Some(size), width),
ref other => {
log::error!("As source {:?}", other);
return Err(Error::Validation("Unexpected Expression::As source"));
}
};
enum | {
Unary(spirv::Op),
Binary(spirv::Op, Word),
Ternary(spirv::Op, Word, Word),
}
let cast = match (src_kind, kind, convert) {
(_, _, None) | (Sk::Bool, Sk::Bool, Some(_)) => Cast::Unary(spirv::Op::Bitcast),
// casting to a bool - generate `OpXxxNotEqual`
(_, Sk::Bool, Some(_)) => {
let (op, value) = match src_kind {
Sk::Sint => (spirv::Op::INotEqual, crate::ScalarValue::Sint(0)),
Sk::Uint => (spirv::Op::INotEqual, crate::ScalarValue::Uint(0)),
Sk::Float => {
(spirv::Op::FUnordNotEqual, crate::ScalarValue::Float(0.0))
}
Sk::Bool => unreachable!(),
};
let zero_scalar_id = self.writer.get_constant_scalar(value, src_width);
let zero_id = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind: src_kind,
width: src_width,
pointer_class: None,
}));
let components = [zero_scalar_id; 4];
let zero_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
zero_id,
&components[..size as usize],
));
zero_id
}
None => zero_scalar_id,
};
Cast::Binary(op, zero_id)
}
// casting from a bool - generate `OpSelect`
(Sk::Bool, _, Some(dst_width)) => {
let (val0, val1) = match kind {
Sk::Sint => (crate::ScalarValue::Sint(0), crate::ScalarValue::Sint(1)),
Sk::Uint => (crate::ScalarValue::Uint(0), crate::ScalarValue::Uint(1)),
Sk::Float => (
crate::ScalarValue::Float(0.0),
crate::ScalarValue::Float(1.0),
),
Sk::Bool => unreachable!(),
};
let scalar0_id = self.writer.get_constant_scalar(val0, dst_width);
let scalar1_id = self.writer.get_constant_scalar(val1, dst_width);
let (accept_id, reject_id) = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind,
width: dst_width,
pointer_class: None,
}));
let components0 = [scalar0_id; 4];
let components1 = [scalar1_id; 4];
let vec0_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec0_id,
&components0[..size as usize],
));
let vec1_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec1_id,
&components1[..size as usize],
));
(vec1_id, vec0_id)
}
None => (scalar1_id, scalar0_id),
};
Cast::Ternary(spirv::Op::Select, accept_id, reject_id)
}
(Sk::Float, Sk::Uint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToU),
(Sk::Float, Sk::Sint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToS),
(Sk::Float, Sk::Float, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::FConvert)
}
(Sk::Sint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertSToF),
(Sk::Sint, Sk::Sint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::SConvert)
}
(Sk::Uint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertUToF),
(Sk::Uint, Sk::Uint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::UConvert)
}
// We assume it's either an identity cast, or int-uint.
_ => Cast::Unary(spirv::Op::Bitcast),
};
let id = self.gen_id();
let instruction = match cast {
Cast::Unary(op) => Instruction::unary(op, result_type_id, id, expr_id),
Cast::Binary(op, operand) => {
Instruction::binary(op, result_type_id, id, expr_id, operand)
}
Cast::Ternary(op, op1, op2) => {
Instruction::ternary(op, result_type_id, id, expr_id, op1, op2)
}
};
block.body.push(instruction);
id
}
crate::Expression::ImageLoad {
image,
coordinate,
array_index,
index,
} => {
self.write_image_load(result_type_id, image, coordinate, array_index, index, block)?
}
crate::Expression::ImageSample {
image,
sampler,
coordinate,
array_index,
offset,
level,
depth_ref,
} => self.write_image_sample(
result_type_id,
image,
sampler,
coordinate,
array_index,
offset,
level,
depth_ref,
block,
)?,
crate::Expression::Select {
condition,
accept,
reject,
} => {
let id = self.gen_id();
let mut condition_id = self.cached[condition];
let accept_id = self.cached[accept];
let reject_id = self.cached[reject];
let condition_ty = self.fun_info[condition]
.ty
.inner_with(&self.ir_module.types);
let object_ty = self.fun_info[accept].ty.inner_with(&self.ir_module.types);
if let (
&crate::TypeInner::Scalar {
kind: crate::ScalarKind::Bool,
width,
},
&crate::TypeInner::Vector { size, .. },
) = (condition_ty, object_ty)
{
self.temp_list.clear();
self.temp_list.resize(size as usize, condition_id);
let bool_vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind: crate::ScalarKind::Bool,
width,
pointer_class: None,
}));
let id = self.gen_id();
block.body.push(Instruction::composite_construct(
bool_vector_type_id,
id,
&self.temp_list,
));
condition_id = id
}
let instruction =
Instruction::select(result_type_id, id, condition_id, accept_id, reject_id);
block.body.push(instruction);
id
}
crate::Expression::Derivative { axis, expr } => {
use crate::DerivativeAxis as Da;
let id = self.gen_id();
let expr_id = self.cached[expr];
let op = match axis {
Da::X => spirv::Op::DPdx,
Da::Y => spirv::Op::DPdy,
Da::Width => spirv::Op::Fwidth,
};
block
.body
.push(Instruction::derivative(op, result_type_id, id, expr_id));
id
}
crate::Expression::ImageQuery { image, query } => {
self.write_image_query(result_type_id, image, query, block)?
}
crate::Expression::Relational { fun, argument } => {
use crate::RelationalFunction as Rf;
let arg_id = self.cached[argument];
let op = match fun {
Rf::All => spirv::Op::All,
Rf::Any => spirv::Op::Any,
Rf::IsNan => spirv::Op::IsNan,
Rf::IsInf => spirv::Op::IsInf,
//TODO: these require Kernel capability
Rf::IsFinite | Rf::IsNormal => {
return Err(Error::FeatureNotImplemented("is finite/normal"))
}
};
let id = self.gen_id();
block
.body
.push(Instruction::relational(op, result_type_id, id, arg_id));
id
}
crate::Expression::ArrayLength(expr) => self.write_runtime_array_length(expr, block)?,
};
self.cached[expr_handle] = id;
Ok(())
}
/// Build an `OpAccessChain` instruction.
///
/// Emit any needed bounds-checking expressions to `block`.
///
/// On success, the return value is an [`ExpressionPointer`] value; see the
/// documentation for that type.
fn write_expression_pointer(
&mut self,
mut expr_handle: Handle<crate::Expression>,
block: &mut Block,
) -> Result<ExpressionPointer, Error> {
let result_lookup_ty = match self.fun_info[expr_handle].ty {
TypeResolution::Handle(ty_handle) => LookupType::Handle(ty_handle),
TypeResolution::Value(ref inner) => LookupType::Local(make_local(inner).unwrap()),
};
let result_type_id = self.get_type_id(result_lookup_ty);
// The id of the boolean `and` of all dynamic bounds checks up to this point. If
// `None`, then we haven't done any dynamic bounds checks yet.
//
// When we have a chain of bounds checks, we combine them with `OpLogicalAnd`, not
// a short-circuit branch. This means we might do comparisons we don't need to,
// but we expect these checks to almost always succeed, and keeping branches to a
// minimum is essential.
let mut accumulated_checks = None;
self.temp_list.clear();
let root_id = loop {
expr_handle = match self.ir_function.expressions[expr_handle] {
crate::Expression::Access { base, index } => {
let index_id = match self.write_bounds_check(base, index, block)? {
BoundsCheckResult::KnownInBounds(known_index) => {
// Even if the index is known, `OpAccessIndex`
// requires expression operands, not literals.
let scalar = crate::ScalarValue::Uint(known_index as u64);
self.writer.get_constant_scalar(scalar, 4)
}
BoundsCheckResult::Computed(computed_index_id) => computed_index_id,
BoundsCheckResult::Conditional(comparison_id) => {
match accumulated_checks {
Some(prior_checks) => {
let combined = self.gen_id();
block.body.push(Instruction::binary(
spirv::Op::LogicalAnd,
self.writer.get_bool_type_id(),
combined,
prior_checks,
comparison_id,
));
accumulated_checks = Some(combined);
}
None => {
// Start a fresh chain of checks.
accumulated_checks = Some(comparison_id);
}
}
// Either way, the index to use is unchanged.
self.cached[index]
}
};
self.temp_list.push(index_id);
base
}
crate::Expression::AccessIndex { base, index } => {
let const_id = self.get_index_constant(index);
self.temp_list.push(const_id);
base
}
crate::Expression::GlobalVariable(handle) => {
let gv = &self.writer.global_variables[handle.index()];
break gv.id;
}
crate::Expression::LocalVariable(variable) => {
let local_var = &self.function.variables[&variable];
break local_var.id;
}
crate::Expression::FunctionArgument(index) => {
break self.function.parameter_id(index);
}
ref other => unimplemented!("Unexpected pointer expression {:?}", other),
}
};
let pointer = if self.temp_list.is_empty() {
ExpressionPointer::Ready {
pointer_id: root_id,
}
} else {
self.temp_list.reverse();
let pointer_id = self.gen_id();
let access =
Instruction::access_chain(result_type_id, pointer_id, root_id, &self.temp_list);
// If we generated some bounds checks, we need to leave it to our
// caller to generate the branch, the access, the load or store, and
// the zero value (for loads). Otherwise, we can emit the access
// ourselves, and just hand them the id of the pointer.
match accumulated_checks {
Some(condition) => ExpressionPointer::Conditional { condition, access },
None => {
block.body.push(access);
ExpressionPointer::Ready { pointer_id }
}
}
};
Ok(pointer)
}
pub(super) fn write_block(
&mut self,
label_id: Word,
statements: &[crate::Statement],
exit_id: Option<Word>,
loop_context: LoopContext,
) -> Result<(), Error> {
let mut block = Block::new(label_id);
for statement in statements {
match *statement {
crate::Statement::Emit(ref range) => {
for handle in range.clone() {
self.cache_expression_value(handle, &mut block)?;
}
}
crate::Statement::Block(ref block_statements) => {
let scope_id = self.gen_id();
self.function.consume(block, Instruction::branch(scope_id));
let merge_id = self.gen_id();
self.write_block(scope_id, block_statements, Some(merge_id), loop_context)?;
block = Block::new(merge_id);
}
crate::Statement::If {
condition,
ref accept,
ref reject,
} => {
let condition_id = self.cached[condition];
let merge_id = self.gen_id();
block.body.push(Instruction::selection_merge(
merge_id,
spirv::SelectionControl::NONE,
));
let accept_id = if accept.is_empty() {
None
} else {
Some(self.gen_id())
};
let reject_id = if reject.is_empty() {
None
} else {
Some(self.gen_id())
};
self.function.consume(
block,
Instruction::branch_conditional(
condition_id,
accept_id.unwrap_or(merge_id),
reject_id.unwrap_or(merge_id),
),
);
if let Some(block_id) = accept_id {
self.write_block(block_id, accept, Some(merge_id), loop_context)?;
}
if let Some(block_id) = reject_id {
self.write_block(block_id, reject, Some(merge_id), loop_context)?;
}
block = Block::new(merge_id);
}
crate::Statement::Switch {
selector,
ref cases,
} => {
let selector_id = self.cached[selector];
let merge_id = self.gen_id();
block.body.push(Instruction::selection_merge(
merge_id,
spirv::SelectionControl::NONE,
));
let default_id = self.gen_id();
let mut reached_default = false;
let mut raw_cases = Vec::with_capacity(cases.len());
let mut case_ids = Vec::with_capacity(cases.len());
for case in cases.iter() {
match case.value {
crate::SwitchValue::Integer(value) => {
let label_id = self.gen_id();
// No cases should be added after the default case is encountered
// since the default case catches all
if !reached_default {
raw_cases.push(super::instructions::Case {
value: value as Word,
label_id,
});
}
case_ids.push(label_id);
}
crate::SwitchValue::Default => {
case_ids.push(default_id);
reached_default = true;
}
}
}
self.function.consume(
block,
Instruction::switch(selector_id, default_id, &raw_cases),
);
let inner_context = LoopContext {
break_id: Some(merge_id),
..loop_context
};
for (i, (case, label_id)) in cases.iter().zip(case_ids.iter()).enumerate() {
let case_finish_id = if case.fall_through {
case_ids[i + 1]
} else {
merge_id
};
self.write_block(
*label_id,
&case.body,
Some(case_finish_id),
inner_context,
)?;
}
// If no default was encountered, write an empty block to satisfy the need for
// a block at the default label
if !reached_default {
self.write_block(default_id, &[], Some(merge_id), inner_context)?;
}
block = Block::new(merge_id);
}
crate::Statement::Loop {
ref body,
ref continuing,
} => {
let preamble_id = self.gen_id();
self.function
.consume(block, Instruction::branch(preamble_id));
let merge_id = self.gen_id();
let body_id = self.gen_id();
let continuing_id = self.gen_id();
// SPIR-V requires the `OpLoopMerge` to name the continuing block,
// so we have to start a new block with it.
block = Block::new(preamble_id);
block.body.push(Instruction::loop_merge(
merge_id,
continuing_id,
spirv::SelectionControl::NONE,
));
self.function.consume(block, Instruction::branch(body_id));
self.write_block(
body_id,
body,
Some(continuing_id),
LoopContext {
continuing_id: Some(continuing_id),
break_id: Some(merge_id),
},
)?;
self.write_block(
continuing_id,
continuing,
Some(preamble_id),
LoopContext {
continuing_id: None,
break_id: Some(merge_id),
},
)?;
block = Block::new(merge_id);
}
crate::Statement::Break => {
self.function
.consume(block, Instruction::branch(loop_context.break_id.unwrap()));
return Ok(());
}
crate::Statement::Continue => {
self.function.consume(
block,
Instruction::branch(loop_context.continuing_id.unwrap()),
);
return Ok(());
}
crate::Statement::Return { value: Some(value) } => {
let value_id = self.cached[value];
let instruction = match self.function.entry_point_context {
// If this is an entry point, and we need to return anything,
// let's instead store the output variables and return `void`.
Some(ref context) => {
self.writer.write_entry_point_return(
value_id,
self.ir_function.result.as_ref().unwrap(),
&context.results,
&mut block.body,
)?;
Instruction::return_void()
}
None => Instruction::return_value(value_id),
};
self.function.consume(block, instruction);
return Ok(());
}
crate::Statement::Return { value: None } => {
self.function.consume(block, Instruction::return_void());
return Ok(());
}
crate::Statement::Kill => {
self.function.consume(block, Instruction::kill());
return Ok(());
}
crate::Statement::Barrier(flags) => {
let memory_scope = if flags.contains(crate::Barrier::STORAGE) {
spirv::Scope::Device
} else {
spirv::Scope::Workgroup
};
let mut semantics = spirv::MemorySemantics::ACQUIRE_RELEASE;
semantics.set(
spirv::MemorySemantics::UNIFORM_MEMORY,
flags.contains(crate::Barrier::STORAGE),
);
semantics.set(
spirv::MemorySemantics::WORKGROUP_MEMORY,
flags.contains(crate::Barrier::WORK_GROUP),
);
let exec_scope_id = self.get_index_constant(spirv::Scope::Workgroup as u32);
let mem_scope_id = self.get_index_constant(memory_scope as u32);
let semantics_id = self.get_index_constant(semantics.bits());
block.body.push(Instruction::control_barrier(
exec_scope_id,
mem_scope_id,
semantics_id,
));
}
crate::Statement::Store { pointer, value } => {
let value_id = self.cached[value];
match self.write_expression_pointer(pointer, &mut block)? {
ExpressionPointer::Ready { pointer_id } => {
let atomic_class = match *self.fun_info[pointer]
.ty
.inner_with(&self.ir_module.types)
{
crate::TypeInner::Pointer { base, class } => {
match self.ir_module.types[base].inner {
crate::TypeInner::Atomic { .. } => Some(class),
_ => None,
}
}
_ => None,
};
let instruction = if let Some(class) = atomic_class {
let (semantics, scope) = class.to_spirv_semantics_and_scope();
let scope_constant_id = self.get_scope_constant(scope as u32);
let semantics_id = self.get_index_constant(semantics.bits());
Instruction::atomic_store(
pointer_id,
scope_constant_id,
semantics_id,
value_id,
)
} else {
Instruction::store(pointer_id, value_id, None)
};
block.body.push(instruction);
}
ExpressionPointer::Conditional { condition, access } => {
let mut selection = Selection::start(&mut block, ());
selection.if_true(self, condition, ());
// The in-bounds path. Perform the access and the store.
let pointer_id = access.result_id.unwrap();
selection.block().body.push(access);
selection
.block()
.body
.push(Instruction::store(pointer_id, value_id, None));
// Finish the in-bounds block and start the merge block. This
// is the block we'll leave current on return.
selection.finish(self, ());
}
};
}
crate::Statement::ImageStore {
image,
coordinate,
array_index,
value,
} => self.write_image_store(image, coordinate, array_index, value, &mut block)?,
crate::Statement::Call {
function: local_function,
ref arguments,
result,
} => {
let id = self.gen_id();
self.temp_list.clear();
for &argument in arguments {
self.temp_list.push(self.cached[argument]);
}
let type_id = match result {
Some(expr) => {
self.cached[expr] = id;
self.get_expression_type_id(&self.fun_info[expr].ty)
}
None => self.writer.void_type,
};
block.body.push(Instruction::function_call(
type_id,
id,
self.writer.lookup_function[&local_function],
&self.temp_list,
));
}
crate::Statement::Atomic {
pointer,
ref fun,
value,
result,
} => {
let id = self.gen_id();
let result_type_id = self.get_expression_type_id(&self.fun_info[result].ty);
self.cached[result] = id;
let pointer_id = match self.write_expression_pointer(pointer, &mut block)? {
ExpressionPointer::Ready { pointer_id } => pointer_id,
ExpressionPointer::Conditional { .. } => {
return Err(Error::FeatureNotImplemented(
"Atomics out-of-bounds handling",
));
}
};
let class = match *self.fun_info[pointer].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Pointer { base: _, class } => class,
_ => unimplemented!(),
};
let (semantics, scope) = class.to_spirv_semantics_and_scope();
let scope_constant_id = self.get_scope_constant(scope as u32);
let semantics_id = self.get_index_constant(semantics.bits());
let value_id = self.cached[value];
let value_inner = self.fun_info[value].ty.inner_with(&self.ir_module.types);
let instruction = match *fun {
crate::AtomicFunction::Add => Instruction::atomic_binary(
spirv::Op::AtomicIAdd,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
),
crate::AtomicFunction::Subtract => Instruction::atomic_binary(
spirv::Op::AtomicISub,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
),
crate::AtomicFunction::And => Instruction::atomic_binary(
spirv::Op::AtomicAnd,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
),
crate::AtomicFunction::InclusiveOr => Instruction::atomic_binary(
spirv::Op::AtomicOr,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
),
crate::AtomicFunction::ExclusiveOr => Instruction::atomic_binary(
spirv::Op::AtomicXor,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
),
crate::AtomicFunction::Min => {
let spirv_op = match *value_inner {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Sint,
width: _,
} => spirv::Op::AtomicSMin,
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
width: _,
} => spirv::Op::AtomicUMin,
_ => unimplemented!(),
};
Instruction::atomic_binary(
spirv_op,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
)
}
crate::AtomicFunction::Max => {
let spirv_op = match *value_inner {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Sint,
width: _,
} => spirv::Op::AtomicSMax,
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
width: _,
} => spirv::Op::AtomicUMax,
_ => unimplemented!(),
};
Instruction::atomic_binary(
spirv_op,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
)
}
crate::AtomicFunction::Exchange { compare: None } => {
Instruction::atomic_binary(
spirv::Op::AtomicExchange,
result_type_id,
id,
pointer_id,
scope_constant_id,
semantics_id,
value_id,
)
}
crate::AtomicFunction::Exchange { compare: Some(_) } => {
return Err(Error::FeatureNotImplemented("atomic CompareExchange"));
}
};
block.body.push(instruction);
}
}
}
let termination = match exit_id {
Some(id) => Instruction::branch(id),
// This can happen if the last branch had all the paths
// leading out of the graph (i.e. returning).
// Or it may be the end of the self.function.
None => match self.ir_function.result {
Some(ref result) if self.function.entry_point_context.is_none() => {
let type_id = self.get_type_id(LookupType::Handle(result.ty));
let null_id = self.writer.write_constant_null(type_id);
Instruction::return_value(null_id)
}
_ => Instruction::return_void(),
},
};
self.function.consume(block, termination);
Ok(())
}
}
| Cast |
diff.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/apply/parse"
"k8s.io/kubernetes/pkg/kubectl/apply/strategy"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/utils/exec"
)
var (
diffLong = templates.LongDesc(i18n.T(`
Diff configurations specified by filename or stdin between their local,
last-applied, live and/or "merged" versions.
LOCAL and LIVE versions are diffed by default. Other available keywords
are MERGED and LAST.
Output is always YAML.
KUBERNETES_EXTERNAL_DIFF environment variable can be used to select your own
diff command. By default, the "diff" command available in your path will be
run with "-u" (unicode) and "-N" (treat new files as empty) options.`))
diffExample = templates.Examples(i18n.T(`
# Diff resources included in pod.json. By default, it will diff LOCAL and LIVE versions
kubectl alpha diff -f pod.json
# When one version is specified, diff that version against LIVE
cat service.yaml | kubectl alpha diff -f - MERGED
# Or specify both versions
kubectl alpha diff -f pod.json -f service.yaml LAST LOCAL`))
)
type DiffOptions struct {
FilenameOptions resource.FilenameOptions
}
func isValidArgument(arg string) error {
switch arg {
case "LOCAL", "LIVE", "LAST", "MERGED":
return nil
default:
return fmt.Errorf(`Invalid parameter %q, must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, arg)
}
}
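// parseDiffArguments resolves the two versions to compare. With no positional
// arguments it defaults to (LOCAL, LIVE); with one argument that argument
// becomes `from` and `to` stays LIVE; with two arguments they are used as
// (from, to). For example, `kubectl alpha diff -f pod.json MERGED` compares
// MERGED against LIVE.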
func parseDiffArguments(args []string) (string, string, error) {
if len(args) > 2 {
return "", "", fmt.Errorf("Invalid number of arguments: expected at most 2.")
}
// Default values
from := "LOCAL"
to := "LIVE"
if len(args) > 0 {
from = args[0]
}
if len(args) > 1 {
to = args[1]
}
if err := isValidArgument(to); err != nil {
return "", "", err
}
if err := isValidArgument(from); err != nil {
return "", "", err
}
return from, to, nil
}
func NewCmdDiff(f cmdutil.Factory, stdout, stderr io.Writer) *cobra.Command {
var options DiffOptions
diff := DiffProgram{
Exec: exec.New(),
Stdout: stdout,
Stderr: stderr,
}
cmd := &cobra.Command{
Use: "diff -f FILENAME",
Short: i18n.T("Diff different versions of configurations"),
Long: diffLong,
Example: diffExample,
Run: func(cmd *cobra.Command, args []string) {
from, to, err := parseDiffArguments(args)
cmdutil.CheckErr(err)
cmdutil.CheckErr(RunDiff(f, &diff, &options, from, to))
},
}
usage := "contains the configuration to diff"
cmdutil.AddFilenameOptionFlags(cmd, &options.FilenameOptions, usage)
cmd.MarkFlagRequired("filename")
return cmd
}
// DiffProgram finds and runs the diff program. The value of the
// KUBERNETES_EXTERNAL_DIFF environment variable will be used as the diff
// program. By default, `diff(1)` will be used.
type DiffProgram struct {
Exec exec.Interface
Stdout io.Writer
Stderr io.Writer
}
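// As an illustrative example, setting KUBERNETES_EXTERNAL_DIFF="colordiff"
// makes getCommand run `colordiff <from-dir> <to-dir>` instead of the default
// `diff -u -N <from-dir> <to-dir>`.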
func (d *DiffProgram) getCommand(args ...string) exec.Cmd {
diff := ""
if envDiff := os.Getenv("KUBERNETES_EXTERNAL_DIFF"); envDiff != "" {
diff = envDiff
} else {
diff = "diff"
args = append([]string{"-u", "-N"}, args...)
}
cmd := d.Exec.Command(diff, args...)
cmd.SetStdout(d.Stdout)
cmd.SetStderr(d.Stderr)
return cmd
}
// Run runs the detected diff program. `from` and `to` are the directories to diff.
func (d *DiffProgram) Run(from, to string) error {
d.getCommand(from, to).Run() // Ignore diff return code
return nil
}
// Printer is used to print an object.
type Printer struct{}
// Print the object inside the writer w.
func (p *Printer) Print(obj map[string]interface{}, w io.Writer) error {
if obj == nil {
return nil
}
data, err := yaml.Marshal(obj)
if err != nil {
return err
}
_, err = w.Write(data)
return err
}
// DiffVersion gets the proper version of objects, and aggregates them into a directory.
type DiffVersion struct {
Dir *Directory
Name string
}
// NewDiffVersion creates a new DiffVersion with the named version.
func NewDiffVersion(name string) (*DiffVersion, error) {
dir, err := CreateDirectory(name)
if err != nil {
return nil, err
}
return &DiffVersion{
Dir: dir,
Name: name,
}, nil
}
func (v *DiffVersion) getObject(obj Object) (map[string]interface{}, error) {
switch v.Name {
case "LIVE":
return obj.Live()
case "MERGED":
return obj.Merged()
case "LOCAL":
return obj.Local()
case "LAST":
return obj.Last()
}
return nil, fmt.Errorf("Unknown version: %v", v.Name)
}
// Print prints the object using the printer into a new file in the directory.
func (v *DiffVersion) Print(obj Object, printer Printer) error {
vobj, err := v.getObject(obj)
if err != nil {
return err
}
f, err := v.Dir.NewFile(obj.Name())
if err != nil {
return err
}
defer f.Close()
return printer.Print(vobj, f)
}
// Directory creates a new temp directory, and makes it easy to create new files in it.
type Directory struct {
Name string
}
// CreateDirectory creates the actual disk directory, and returns a
// new representation of it.
func CreateDirectory(prefix string) (*Directory, error) {
name, err := ioutil.TempDir("", prefix+"-")
if err != nil {
return nil, err
}
return &Directory{
Name: name,
}, nil
}
// NewFile creates a new file in the directory.
func (d *Directory) NewFile(name string) (*os.File, error) {
return os.OpenFile(filepath.Join(d.Name, name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
}
// Delete removes the directory recursively.
func (d *Directory) Delete() error {
return os.RemoveAll(d.Name)
}
// Object is an interface that lets you retrieve multiple versions of
// it.
type Object interface {
Local() (map[string]interface{}, error)
Live() (map[string]interface{}, error)
Last() (map[string]interface{}, error)
Merged() (map[string]interface{}, error)
Name() string
}
// InfoObject is an implementation of the Object interface. It gets all
// the information from the Info object.
type InfoObject struct {
Info *resource.Info
Encoder runtime.Encoder
Parser *parse.Factory
}
var _ Object = &InfoObject{}
func (obj InfoObject) toMap(data []byte) (map[string]interface{}, error) {
m := map[string]interface{}{}
if len(data) == 0 {
return m, nil
}
err := json.Unmarshal(data, &m)
return m, err
}
func (obj InfoObject) Local() (map[string]interface{}, error) {
data, err := runtime.Encode(obj.Encoder, obj.Info.Object)
if err != nil {
return nil, err
}
return obj.toMap(data)
}
func (obj InfoObject) Live() (map[string]interface{}, error) {
if obj.Info.Object == nil {
return nil, nil // Object doesn't exist on cluster.
}
data, err := runtime.Encode(obj.Encoder, obj.Info.Object)
if err != nil {
return nil, err
}
return obj.toMap(data)
}
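// Merged does a three-way merge of the last-applied, local and live versions
// using the openapi-aware parser; when there is no live or last-applied
// version it falls back to returning the local version.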
func (obj InfoObject) Merged() (map[string]interface{}, error) {
local, err := obj.Local()
if err != nil {
return nil, err
}
live, err := obj.Live()
if err != nil {
return nil, err
}
last, err := obj.Last()
if err != nil {
return nil, err
}
if live == nil || last == nil {
return local, nil // We probably don't have a live version, merged is local.
}
elmt, err := obj.Parser.CreateElement(last, local, live)
if err != nil {
return nil, err
}
result, err := elmt.Merge(strategy.Create(strategy.Options{}))
return result.MergedResult.(map[string]interface{}), err
}
func (obj InfoObject) Last() (map[string]interface{}, error) {
if obj.Info.Object == nil {
return nil, nil // No object is live, return empty
}
accessor, err := meta.Accessor(obj.Info.Object)
if err != nil {
return nil, err
}
annots := accessor.GetAnnotations()
if annots == nil {
return nil, nil // Not an error, just empty.
}
return obj.toMap([]byte(annots[api.LastAppliedConfigAnnotation]))
}
func (obj InfoObject) Name() string {
return obj.Info.Name
}
// Differ creates two DiffVersion and diffs them.
type Differ struct {
From *DiffVersion
To *DiffVersion
}
func NewDiffer(from, to string) (*Differ, error) {
differ := Differ{}
var err error
differ.From, err = NewDiffVersion(from)
if err != nil {
return nil, err
}
differ.To, err = NewDiffVersion(to)
if err != nil {
differ.From.Dir.Delete()
return nil, err
}
return &differ, nil
}
// Diff diffs two versions of a specific object, and prints both versions to their directories.
func (d *Differ) Diff(obj Object, printer Printer) error {
if err := d.From.Print(obj, printer); err != nil {
return err
}
if err := d.To.Print(obj, printer); err != nil {
return err
}
return nil
}
// Run runs the diff program against both directories.
func (d *Differ) Run(diff *DiffProgram) error {
return diff.Run(d.From.Dir.Name, d.To.Dir.Name)
}
// TearDown removes both temporary directories recursively.
func (d *Differ) TearDown() {
d.From.Dir.Delete() // Ignore error
d.To.Dir.Delete() // Ignore error
}
// RunDiff uses the factory to parse file arguments, find the versions to
// diff, build an Info object for each file, and run them through the
// differ.
func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, to string) error {
openapi, err := f.OpenAPISchema()
if err != nil {
return err
}
parser := &parse.Factory{Resources: openapi}
differ, err := NewDiffer(from, to)
if err != nil {
return err
}
defer differ.TearDown()
printer := Printer{}
cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
if err != nil |
r := f.NewBuilder().
Unstructured().
NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(enforceNamespace, &options.FilenameOptions).
Flatten().
Do()
if err := r.Err(); err != nil {
return err
}
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
if err := info.Get(); err != nil {
if !errors.IsNotFound(err) {
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%v\nfrom server for:", info), info.Source, err)
}
info.Object = nil
}
obj := InfoObject{
Info: info,
Parser: parser,
Encoder: f.JSONEncoder(),
}
return differ.Diff(obj, printer)
})
if err != nil {
return err
}
differ.Run(diff)
return nil
}
| {
return err
} |
config.go | package telewatch
import (
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"path"
)
var (
ConfigFileNotExist = fmt.Errorf("token.yml file not found")
)
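// A minimal example of the expected token file (illustrative values only):
//
//   token: "123456789:AAExampleTelegramBotToken"
//   chatId: 123456789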
type TokenConfig struct {
Token string `yaml:"token"`
ChatId int64 `yaml:"chatId"`
}
func (config TokenConfig) Save() error {
configFilePath, err := getConfigFilePath()
if err != nil {
return err
}
configData, err := yaml.Marshal(config)
if err != nil {
return err
}
err = ioutil.WriteFile(configFilePath, configData, 0644)
return err
}
func | () (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
configPath := path.Join(home, ".config", "telewatch")
configFilePath := path.Join(configPath, "token.yaml")
if _, err := os.Stat(configFilePath); os.IsNotExist(err) {
configFilePath = path.Join(configPath, "token.yml")
if _, err := os.Stat(configFilePath); os.IsNotExist(err) {
return "", ConfigFileNotExist
}
}
return configFilePath, nil
}
func LoadConfigurationFromHomeDirectory() (TokenConfig, error) {
ret := TokenConfig{}
configFilePath, err := getConfigFilePath()
if err != nil {
return ret, err
}
configFile, err := ioutil.ReadFile(configFilePath)
if err != nil {
return ret, err
}
err = yaml.Unmarshal(configFile, &ret)
if err != nil {
return ret, err
}
return ret, nil
}
| getConfigFilePath |
config.js | const localDataName = 'mateInvoice';
const orderStatusOptions = ['Pending', 'Shipped', 'Delivered'];
const fakedata = [
{
key: 1,
id: '1518713981654',
number: '#1231',
orderStatus: 'Shipped',
orderDate: 1518849188360,
currency: '$',
billTo: 'REDQ Inc.',
billToAddress:
'[email protected]\n\n405 Mulberry Rd, Mc Grady, \nNC, 28649 \n\nFax: +0(863) 228-7064 \nPhone: +(740) 927-9284',
billFrom: 'Pineapple Inc.',
billFromAddress:
'[email protected]\n\n86781 547th Ave, Osmond, \nNE, 68765 \n\nPhone: +(402) 748-3970',
invoiceList: [
{
key: 1,
itemName: 'A box of happiness',
costs: 200,
qty: 14,
price: 2800,
},
{ key: 2, itemName: 'Unicorn Tears', costs: 500, qty: 14, price: 7000 },
{ key: 3, itemName: 'Rainbow Machine', costs: 700, qty: 5, price: 3500 }, | ],
subTotal: 13300,
vatRate: 10,
vatPrice: 1330,
totalCost: 14630,
},
{
key: 2,
id: '1518713981655',
number: '#1232',
orderStatus: 'Pending',
orderDate: 1518849188360,
currency: '$',
billTo: 'REDQ Inc.',
billToAddress:
'[email protected]\n\n405 Mulberry Rd, Mc Grady, \nNC, 28649 \n\nFax: +0(863) 228-7064 \nPhone: +(740) 927-9284',
billFrom: 'Pineapple Inc.',
billFromAddress:
'[email protected]\n\n86781 547th Ave, Osmond, \nNE, 68765 \n\nPhone: +(402) 748-3970',
invoiceList: [
{
key: 1,
itemName: 'A box of happiness',
costs: 200,
qty: 14,
price: 2800,
},
{ key: 2, itemName: 'Unicorn Tears', costs: 500, qty: 14, price: 7000 },
{ key: 3, itemName: 'Rainbow Machine', costs: 700, qty: 5, price: 3500 },
],
subTotal: 13300,
vatRate: 10,
vatPrice: 1330,
totalCost: 14630,
},
{
key: 3,
id: '1518713981656',
number: '#1233',
orderStatus: 'Delivered',
orderDate: 1518849188360,
currency: '$',
billTo: 'REDQ Inc.',
billToAddress:
'[email protected]\n\n405 Mulberry Rd, Mc Grady, \nNC, 28649 \n\nFax: +0(863) 228-7064 \nPhone: +(740) 927-9284',
billFrom: 'Pineapple Inc.',
billFromAddress:
'[email protected]\n\n86781 547th Ave, Osmond, \nNE, 68765 \n\nPhone: +(402) 748-3970',
invoiceList: [
{
key: 1,
itemName: 'A box of happiness',
costs: 200,
qty: 14,
price: 2800,
},
{ key: 2, itemName: 'Unicorn Tears', costs: 500, qty: 14, price: 7000 },
{ key: 3, itemName: 'Rainbow Machine', costs: 700, qty: 5, price: 3500 },
],
subTotal: 13300,
vatRate: 10,
vatPrice: 1330,
totalCost: 14630,
},
];
const newInvoice = {
orderStatus: 'Pending',
orderDate: new Date().getTime(),
currency: '$',
billTo: '',
billToAddress: '',
billFrom: '',
billFromAddress: '',
invoiceList: [
{
key: 1,
itemName: '',
costs: 0,
qty: 0,
price: 0,
},
],
subTotal: 0,
vatRate: 10,
vatPrice: 0,
totalCost: 0,
};
const createDemoData = () => {
return fakedata;
};
export {
fakedata,
createDemoData,
localDataName,
newInvoice,
orderStatusOptions,
}; | |
adapter.rs | use ethabi::{Bytes, Error as ABIError, Function, ParamType, Token};
use failure::SyncFailure;
use futures::Future;
use futures03::future::TryFutureExt;
use mockall::predicate::*;
use mockall::*;
use petgraph::graphmap::GraphMap;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::marker::Unpin;
use tiny_keccak::keccak256;
use web3::types::*;
use super::types::*;
use crate::components::metrics::{CounterVec, GaugeVec, HistogramVec};
use crate::prelude::*;
pub type EventSignature = H256;
/// A collection of attributes that (kind of) uniquely identify an Ethereum blockchain.
pub struct EthereumNetworkIdentifier {
pub net_version: String,
pub genesis_block_hash: H256,
}
/// A request for the state of a contract at a specific block hash and address.
pub struct EthereumContractStateRequest {
pub address: Address,
pub block_hash: H256,
}
/// An error that can occur when trying to obtain the state of a contract.
pub enum EthereumContractStateError {
Failed,
}
/// Representation of an Ethereum contract state.
pub struct EthereumContractState {
pub address: Address,
pub block_hash: H256,
pub data: Bytes,
}
#[derive(Clone, Debug)]
pub struct EthereumContractCall {
pub address: Address,
pub block_ptr: EthereumBlockPointer,
pub function: Function,
pub args: Vec<Token>,
}
#[derive(Fail, Debug)]
pub enum EthereumContractCallError {
#[fail(display = "ABI error: {}", _0)]
ABIError(SyncFailure<ABIError>),
/// `Token` is not of expected `ParamType`
#[fail(display = "type mismatch, token {:?} is not of kind {:?}", _0, _1)]
TypeError(Token, ParamType),
#[fail(display = "error encoding input call data: {}", _0)]
EncodingError(ethabi::Error),
#[fail(display = "call error: {}", _0)]
Web3Error(web3::Error),
#[fail(display = "call reverted: {}", _0)]
Revert(String),
#[fail(display = "ethereum node took too long to perform call")]
Timeout,
}
impl From<ABIError> for EthereumContractCallError {
fn from(e: ABIError) -> Self {
EthereumContractCallError::ABIError(SyncFailure::new(e))
}
}
#[derive(Fail, Debug)]
pub enum EthereumAdapterError {
/// The Ethereum node does not know about this block for some reason, probably because it
/// disappeared in a chain reorg.
#[fail(
display = "Block data unavailable, block was likely uncled (block hash = {:?})",
_0
)]
BlockUnavailable(H256),
/// An unexpected error occurred.
#[fail(display = "Ethereum adapter error: {}", _0)]
Unknown(Error),
}
impl From<Error> for EthereumAdapterError {
fn from(e: Error) -> Self {
EthereumAdapterError::Unknown(e)
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
enum LogFilterNode {
Contract(Address),
Event(EventSignature),
}
/// Corresponds to an `eth_getLogs` call.
#[derive(Clone)]
pub struct EthGetLogsFilter {
pub contracts: Vec<Address>,
pub event_signatures: Vec<EventSignature>,
}
impl fmt::Display for EthGetLogsFilter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.contracts.len() == 1 {
write!(
f,
"contract {:?}, {} events",
self.contracts[0],
self.event_signatures.len()
)
} else if self.event_signatures.len() == 1 {
write!(
f,
"event {:?}, {} contracts",
self.event_signatures[0],
self.contracts.len()
)
} else {
write!(f, "unreachable")
}
}
}
#[derive(Clone, Debug, Default)]
pub struct EthereumLogFilter {
/// Log filters can be represented as a bipartite graph between contracts and events. An edge
/// exists between a contract and an event if a data source for the contract has a trigger for
/// the event.
contracts_and_events_graph: GraphMap<LogFilterNode, (), petgraph::Undirected>,
// Event sigs with no associated address, matching on all addresses.
wildcard_events: HashSet<EventSignature>,
}
impl EthereumLogFilter {
/// Check if log bloom filter indicates a possible match for this log filter.
/// Returns `true` to indicate that a matching `Log` _might_ be contained.
/// Returns `false` to indicate that a matching `Log` _is not_ contained.
pub fn check_bloom(&self, _bloom: H2048) -> bool {
// TODO issue #352: implement bloom filter check
true // not even wrong
}
/// Check if this filter matches the specified `Log`.
pub fn matches(&self, log: &Log) -> bool {
// First topic should be event sig
match log.topics.first() {
None => false,
Some(sig) => {
// The `Log` matches the filter either if the filter contains
// a (contract address, event signature) pair that matches the
// `Log`, or if the filter contains wildcard event that matches.
let contract = LogFilterNode::Contract(log.address.clone());
let event = LogFilterNode::Event(*sig);
self.contracts_and_events_graph
.all_edges()
.any(|(s, t, ())| {
(s == contract && t == event) || (t == contract && s == event)
})
|| self.wildcard_events.contains(sig)
}
}
}
pub fn from_data_sources<'a>(iter: impl IntoIterator<Item = &'a DataSource>) -> Self {
let mut this = EthereumLogFilter::default();
for ds in iter {
for event_sig in ds.mapping.event_handlers.iter().map(|e| e.topic0()) {
match ds.source.address {
Some(contract) => {
this.contracts_and_events_graph.add_edge(
LogFilterNode::Contract(contract),
LogFilterNode::Event(event_sig),
(),
);
}
None => {
this.wildcard_events.insert(event_sig);
}
}
}
}
this
}
/// Extends this log filter with another one.
pub fn extend(&mut self, other: EthereumLogFilter) {
// Destructure to make sure we're checking all fields.
let EthereumLogFilter {
contracts_and_events_graph,
wildcard_events,
} = other;
for (s, t, ()) in contracts_and_events_graph.all_edges() {
self.contracts_and_events_graph.add_edge(s, t, ());
}
self.wildcard_events.extend(wildcard_events);
}
/// An empty filter is one that never matches.
pub fn is_empty(&self) -> bool {
// Destructure to make sure we're checking all fields.
let EthereumLogFilter {
contracts_and_events_graph,
wildcard_events,
} = self;
contracts_and_events_graph.edge_count() == 0 && wildcard_events.is_empty()
}
/// Filters for `eth_getLogs` calls. The filters will not return false positives. This attempts
/// to balance between having granular filters but too many calls and having few calls but too
/// broad filters causing the Ethereum endpoint to timeout.
pub fn eth_get_logs_filters(self) -> impl Iterator<Item = EthGetLogsFilter> {
let mut filters = Vec::new();
// First add the wildcard event filters.
for wildcard_event in self.wildcard_events {
filters.push(EthGetLogsFilter {
contracts: vec![],
event_signatures: vec![wildcard_event],
})
}
// The current algorithm is to repeatedly find the maximum cardinality vertex and turn all
// of its edges into a filter. This is nice because it is neutral between filtering by
// contract or by events, if there are many events that appear on only one data source
// we'll filter by many events on a single contract, but if there is an event that appears
// on a lot of data sources we'll filter by many contracts with a single event.
//
// From a theoretical standpoint we're finding a vertex cover, and this is not the optimal
// algorithm to find a minimum vertex cover, but should be fine as an approximation.
//
// One optimization we're not doing is to merge nodes that have the same neighbors into a
// single node. For example if a subgraph has two data sources, each with the same two
// events, we could cover that with a single filter and no false positives. However that
// might cause the filter to become too broad, so at the moment it seems excessive.
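// As a small illustration (not from the original source): with data sources
// {A: {E1, E2}} and {B: {E1}}, the graph edges are A-E1, A-E2 and B-E1. The
// highest-degree vertex is picked first, e.g. contract A, producing a filter
// for A with events [E1, E2]; the remaining edge B-E1 then becomes its own
// filter.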
let mut g = self.contracts_and_events_graph;
while g.edge_count() > 0 {
// If there are edges, there are vertexes.
let max_vertex = g.nodes().max_by_key(|&n| g.neighbors(n).count()).unwrap();
let mut filter = match max_vertex {
LogFilterNode::Contract(address) => EthGetLogsFilter {
contracts: vec![address],
event_signatures: vec![],
},
LogFilterNode::Event(event_sig) => EthGetLogsFilter {
contracts: vec![],
event_signatures: vec![event_sig],
},
};
for neighbor in g.neighbors(max_vertex) {
match neighbor {
LogFilterNode::Contract(address) => filter.contracts.push(address),
LogFilterNode::Event(event_sig) => filter.event_signatures.push(event_sig),
}
}
// Sanity checks:
// - The filter is not a wildcard because all nodes have neighbors.
// - The graph is bipartite.
assert!(filter.contracts.len() > 0 && filter.event_signatures.len() > 0);
assert!(filter.contracts.len() == 1 || filter.event_signatures.len() == 1);
filters.push(filter);
g.remove_node(max_vertex);
}
filters.into_iter()
}
}
#[derive(Clone, Debug)]
pub struct EthereumCallFilter {
// Each call filter has a map of filters keyed by address, each containing a tuple with
// start_block and the set of function signatures
pub contract_addresses_function_signatures: HashMap<Address, (u64, HashSet<[u8; 4]>)>,
}
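// Function signatures are stored as 4-byte selectors: the first four bytes of
// the keccak256 hash of the function signature string, which `matches`
// compares against the first four bytes of a call's input data.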
impl EthereumCallFilter {
pub fn matches(&self, call: &EthereumCall) -> bool {
// Ensure the call is to a contract the filter expressed an interest in
if !self
.contract_addresses_function_signatures
.contains_key(&call.to)
{
return false;
}
// If the call is to a contract with no specified functions, keep the call
if self
.contract_addresses_function_signatures
.get(&call.to)
.unwrap()
.1
.is_empty()
{
// Allow matching on calls to a contract generally.
// Note: matching a contract generically like this means it is not possible
// to also restrict matching to specific calls on that same contract.
return true;
}
// Ensure the call is to run a function the filter expressed an interest in
self.contract_addresses_function_signatures
.get(&call.to)
.unwrap()
.1
.contains(&call.input.0[..4])
}
pub fn from_data_sources<'a>(iter: impl IntoIterator<Item = &'a DataSource>) -> Self {
iter.into_iter() | .mapping
.call_handlers
.iter()
.map(move |call_handler| {
let sig = keccak256(call_handler.function.as_bytes());
(start_block, contract_addr, [sig[0], sig[1], sig[2], sig[3]])
})
})
.flatten()
.collect()
}
/// Extends this call filter with another one.
pub fn extend(&mut self, other: EthereumCallFilter) {
// Extend existing address / function signature key pairs
// Add new address / function signature key pairs from the provided EthereumCallFilter
for (address, (proposed_start_block, new_sigs)) in
other.contract_addresses_function_signatures.into_iter()
{
match self
.contract_addresses_function_signatures
.get_mut(&address)
{
Some((existing_start_block, existing_sigs)) => {
*existing_start_block =
cmp::min(proposed_start_block, existing_start_block.clone());
existing_sigs.extend(new_sigs);
}
None => {
self.contract_addresses_function_signatures
.insert(address, (proposed_start_block, new_sigs));
}
}
}
}
/// An empty filter is one that never matches.
pub fn is_empty(&self) -> bool {
// Destructure to make sure we're checking all fields.
let EthereumCallFilter {
contract_addresses_function_signatures,
} = self;
contract_addresses_function_signatures.is_empty()
}
pub fn start_blocks(&self) -> Vec<u64> {
self.contract_addresses_function_signatures
.values()
.filter(|(start_block, _fn_sigs)| start_block > &0)
.map(|(start_block, _fn_sigs)| *start_block)
.collect()
}
}
impl FromIterator<(u64, Address, [u8; 4])> for EthereumCallFilter {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (u64, Address, [u8; 4])>,
{
let mut lookup: HashMap<Address, (u64, HashSet<[u8; 4]>)> = HashMap::new();
iter.into_iter()
.for_each(|(start_block, address, function_signature)| {
if !lookup.contains_key(&address) {
lookup.insert(address, (start_block, HashSet::default()));
}
lookup.get_mut(&address).map(|set| {
if set.0 > start_block {
set.0 = start_block
}
set.1.insert(function_signature);
set
});
});
EthereumCallFilter {
contract_addresses_function_signatures: lookup,
}
}
}
impl From<EthereumBlockFilter> for EthereumCallFilter {
fn from(ethereum_block_filter: EthereumBlockFilter) -> Self {
Self {
contract_addresses_function_signatures: ethereum_block_filter
.contract_addresses
.into_iter()
.map(|(start_block_opt, address)| (address, (start_block_opt, HashSet::default())))
.collect::<HashMap<Address, (u64, HashSet<[u8; 4]>)>>(),
}
}
}
#[derive(Clone, Debug, Default)]
pub struct EthereumBlockFilter {
pub contract_addresses: HashSet<(u64, Address)>,
pub trigger_every_block: bool,
}
impl EthereumBlockFilter {
pub fn from_data_sources<'a>(iter: impl IntoIterator<Item = &'a DataSource>) -> Self {
iter.into_iter()
.filter(|data_source| data_source.source.address.is_some())
.fold(Self::default(), |mut filter_opt, data_source| {
let has_block_handler_with_call_filter = data_source
.mapping
.block_handlers
.clone()
.into_iter()
.any(|block_handler| match block_handler.filter {
Some(ref filter) if *filter == BlockHandlerFilter::Call => return true,
_ => return false,
});
let has_block_handler_without_filter = data_source
.mapping
.block_handlers
.clone()
.into_iter()
.any(|block_handler| block_handler.filter.is_none());
filter_opt.extend(Self {
trigger_every_block: has_block_handler_without_filter,
contract_addresses: if has_block_handler_with_call_filter {
vec![(
data_source.source.start_block,
data_source.source.address.unwrap().to_owned(),
)]
.into_iter()
.collect()
} else {
HashSet::default()
},
});
filter_opt
})
}
pub fn extend(&mut self, other: EthereumBlockFilter) {
self.trigger_every_block = self.trigger_every_block || other.trigger_every_block;
self.contract_addresses = self.contract_addresses.iter().cloned().fold(
HashSet::new(),
|mut addresses, (start_block, address)| {
match other
.contract_addresses
.iter()
.cloned()
.find(|(_, other_address)| &address == other_address)
{
Some((other_start_block, address)) => {
addresses.insert((cmp::min(other_start_block, start_block), address));
}
None => {
addresses.insert((start_block, address));
}
}
addresses
},
);
}
pub fn start_blocks(&self) -> Vec<u64> {
self.contract_addresses
.iter()
.cloned()
.filter(|(start_block, _address)| start_block > &0)
.map(|(start_block, _address)| start_block)
.collect()
}
}
#[derive(Clone)]
pub struct ProviderEthRpcMetrics {
request_duration: Box<HistogramVec>,
errors: Box<CounterVec>,
}
impl ProviderEthRpcMetrics {
pub fn new(registry: Arc<impl MetricsRegistry>) -> Self {
let request_duration = registry
.new_histogram_vec(
"eth_rpc_request_duration",
"Measures eth rpc request duration",
vec![String::from("method")],
vec![0.05, 0.2, 0.5, 1.0, 3.0, 5.0],
)
.unwrap();
let errors = registry
.new_counter_vec(
"eth_rpc_errors",
"Counts eth rpc request errors",
vec![String::from("method")],
)
.unwrap();
Self {
request_duration,
errors,
}
}
pub fn observe_request(&self, duration: f64, method: &str) {
self.request_duration
.with_label_values(vec![method].as_slice())
.observe(duration);
}
pub fn add_error(&self, method: &str) {
self.errors.with_label_values(vec![method].as_slice()).inc();
}
}
#[derive(Clone)]
pub struct SubgraphEthRpcMetrics {
request_duration: Box<GaugeVec>,
errors: Box<CounterVec>,
}
impl SubgraphEthRpcMetrics {
pub fn new(registry: Arc<impl MetricsRegistry>, subgraph_hash: &str) -> Self {
let request_duration = registry
.new_deployment_gauge_vec(
"deployment_eth_rpc_request_duration",
"Measures eth rpc request duration for a subgraph deployment",
&subgraph_hash,
vec![String::from("method")],
)
.unwrap();
let errors = registry
.new_deployment_counter_vec(
"deployment_eth_rpc_errors",
"Counts eth rpc request errors for a subgraph deployment",
&subgraph_hash,
vec![String::from("method")],
)
.unwrap();
Self {
request_duration,
errors,
}
}
pub fn observe_request(&self, duration: f64, method: &str) {
self.request_duration
.with_label_values(vec![method].as_slice())
.set(duration);
}
pub fn add_error(&self, method: &str) {
self.errors.with_label_values(vec![method].as_slice()).inc();
}
}
#[derive(Clone)]
pub struct BlockStreamMetrics {
pub ethrpc_metrics: Arc<SubgraphEthRpcMetrics>,
pub blocks_behind: Box<Gauge>,
pub reverted_blocks: Box<Gauge>,
pub stopwatch: StopwatchMetrics,
}
impl BlockStreamMetrics {
pub fn new(
registry: Arc<impl MetricsRegistry>,
ethrpc_metrics: Arc<SubgraphEthRpcMetrics>,
deployment_id: &SubgraphDeploymentId,
stopwatch: StopwatchMetrics,
) -> Self {
let blocks_behind = registry
.new_deployment_gauge(
"deployment_blocks_behind",
"Track the number of blocks a subgraph deployment is behind the HEAD block",
deployment_id.as_str(),
)
.expect("failed to create `deployment_blocks_behind` gauge");
let reverted_blocks = registry
.new_deployment_gauge(
"deployment_reverted_blocks",
"Track the last reverted block for a subgraph deployment",
deployment_id.as_str(),
)
.expect("Failed to create `deployment_reverted_blocks` gauge");
Self {
ethrpc_metrics,
blocks_behind,
reverted_blocks,
stopwatch,
}
}
}
/// Common trait for components that watch and manage access to Ethereum.
///
/// Implementations may be implemented against an in-process Ethereum node
/// or a remote node over RPC.
#[automock]
pub trait EthereumAdapter: Send + Sync + 'static {
fn url_hostname(&self) -> &str;
/// Ask the Ethereum node for some identifying information about the Ethereum network it is
/// connected to.
fn net_identifiers(
&self,
logger: &Logger,
) -> Box<dyn Future<Item = EthereumNetworkIdentifier, Error = Error> + Send>;
/// Get the latest block, including full transactions.
fn latest_block(
&self,
logger: &Logger,
) -> Box<dyn Future<Item = LightEthereumBlock, Error = EthereumAdapterError> + Send + Unpin>;
/// Get the latest block, with only the header and transaction hashes.
fn latest_block_header(
&self,
logger: &Logger,
) -> Box<dyn Future<Item = web3::types::Block<H256>, Error = EthereumAdapterError> + Send>;
fn load_block(
&self,
logger: &Logger,
block_hash: H256,
) -> Box<dyn Future<Item = LightEthereumBlock, Error = Error> + Send>;
/// Load Ethereum blocks in bulk, returning results as they come back as a Stream.
/// May use the `chain_store` as a cache.
fn load_blocks(
&self,
logger: Logger,
chain_store: Arc<dyn ChainStore>,
block_hashes: HashSet<H256>,
) -> Box<dyn Stream<Item = LightEthereumBlock, Error = Error> + Send>;
/// Reorg safety: `to` must be a final block.
fn block_range_to_ptrs(
&self,
logger: Logger,
from: u64,
to: u64,
) -> Box<dyn Future<Item = Vec<EthereumBlockPointer>, Error = Error> + Send>;
/// Find a block by its hash.
fn block_by_hash(
&self,
logger: &Logger,
block_hash: H256,
) -> Box<dyn Future<Item = Option<LightEthereumBlock>, Error = Error> + Send>;
fn block_by_number(
&self,
logger: &Logger,
block_number: u64,
) -> Box<dyn Future<Item = Option<LightEthereumBlock>, Error = Error> + Send>;
/// Load full information for the specified `block` (in particular, transaction receipts).
fn load_full_block(
&self,
logger: &Logger,
block: LightEthereumBlock,
) -> Box<dyn Future<Item = EthereumBlock, Error = EthereumAdapterError> + Send>;
/// Load block pointer for the specified `block number`.
fn block_pointer_from_number(
&self,
logger: &Logger,
chain_store: Arc<dyn ChainStore>,
block_number: u64,
) -> Box<dyn Future<Item = EthereumBlockPointer, Error = EthereumAdapterError> + Send>;
/// Find a block by its number. The `block_is_final` flag indicates whether
/// it is ok to remove blocks in the block cache with that number but with
/// a different hash which were left over from reorgs we saw before we
/// settled on a final block. Since our overall logic depends on being
/// able to access uncled blocks back to the main chain when we revert
/// blocks, we need to make sure we keep those in the block cache
///
/// Careful: don't use this function without considering race conditions.
/// Chain reorgs could happen at any time, and could affect the answer received.
/// Generally, it is only safe to use this function with blocks that have received enough
/// confirmations to guarantee no further reorgs, **and** where the Ethereum node is aware of
/// those confirmations.
/// If the Ethereum node is far behind in processing blocks, even old blocks can be subject to
/// reorgs.
fn block_hash_by_block_number(
&self,
logger: &Logger,
chain_store: Arc<dyn ChainStore>,
block_number: u64,
block_is_final: bool,
) -> Box<dyn Future<Item = Option<H256>, Error = Error> + Send>;
/// Obtain all uncle blocks for a given block hash.
fn uncles(
&self,
logger: &Logger,
block: &LightEthereumBlock,
) -> Box<dyn Future<Item = Vec<Option<Block<H256>>>, Error = Error> + Send>;
/// Check if `block_ptr` refers to a block that is on the main chain, according to the Ethereum
/// node.
///
/// Careful: don't use this function without considering race conditions.
/// Chain reorgs could happen at any time, and could affect the answer received.
/// Generally, it is only safe to use this function with blocks that have received enough
/// confirmations to guarantee no further reorgs, **and** where the Ethereum node is aware of
/// those confirmations.
/// If the Ethereum node is far behind in processing blocks, even old blocks can be subject to
/// reorgs.
fn is_on_main_chain(
&self,
logger: &Logger,
metrics: Arc<SubgraphEthRpcMetrics>,
chain_store: Arc<dyn ChainStore>,
block_ptr: EthereumBlockPointer,
) -> Box<dyn Future<Item = bool, Error = Error> + Send>;
fn calls_in_block(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
block_number: u64,
block_hash: H256,
) -> Box<dyn Future<Item = Vec<EthereumCall>, Error = Error> + Send>;
fn logs_in_block_range(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: u64,
to: u64,
log_filter: EthereumLogFilter,
) -> DynTryFuture<'static, Vec<Log>, Error>;
fn calls_in_block_range(
&self,
logger: &Logger,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: u64,
to: u64,
call_filter: EthereumCallFilter,
) -> Box<dyn Stream<Item = EthereumCall, Error = Error> + Send>;
/// Call the function of a smart contract.
fn contract_call(
&self,
logger: &Logger,
call: EthereumContractCall,
cache: Arc<dyn EthereumCallCache>,
) -> Box<dyn Future<Item = Vec<Token>, Error = EthereumContractCallError> + Send>;
}
fn parse_log_triggers(
log_filter: EthereumLogFilter,
block: &EthereumBlock,
) -> Vec<EthereumTrigger> {
block
.transaction_receipts
.iter()
.flat_map(move |receipt| {
let log_filter = log_filter.clone();
receipt
.logs
.iter()
.filter(move |log| log_filter.matches(log))
.map(move |log| EthereumTrigger::Log(log.clone()))
})
.collect()
}
fn parse_call_triggers(
call_filter: EthereumCallFilter,
block: &EthereumBlockWithCalls,
) -> Vec<EthereumTrigger> {
block.calls.as_ref().map_or(vec![], |calls| {
calls
.iter()
.filter(move |call| call_filter.matches(call))
.map(move |call| EthereumTrigger::Call(call.clone()))
.collect()
})
}
fn parse_block_triggers(
block_filter: EthereumBlockFilter,
block: &EthereumBlockWithCalls,
) -> Vec<EthereumTrigger> {
let block_ptr = EthereumBlockPointer::from(&block.ethereum_block);
let trigger_every_block = block_filter.trigger_every_block;
let call_filter = EthereumCallFilter::from(block_filter);
let mut triggers = block.calls.as_ref().map_or(vec![], |calls| {
calls
.iter()
.filter(move |call| call_filter.matches(call))
.map(move |call| {
EthereumTrigger::Block(block_ptr, EthereumBlockTriggerType::WithCallTo(call.to))
})
.collect::<Vec<EthereumTrigger>>()
});
if trigger_every_block {
triggers.push(EthereumTrigger::Block(
block_ptr,
EthereumBlockTriggerType::Every,
));
}
triggers
}
pub async fn triggers_in_block(
adapter: Arc<dyn EthereumAdapter>,
logger: Logger,
chain_store: Arc<dyn ChainStore>,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
log_filter: EthereumLogFilter,
call_filter: EthereumCallFilter,
block_filter: EthereumBlockFilter,
ethereum_block: BlockFinality,
) -> Result<EthereumBlockWithTriggers, Error> {
match ðereum_block {
BlockFinality::Final(block) => {
let mut blocks = blocks_with_triggers(
adapter,
logger,
chain_store,
subgraph_metrics,
block.number(),
block.number(),
log_filter,
call_filter,
block_filter,
)
.compat()
.await?;
assert!(blocks.len() <= 1);
Ok(blocks
.pop()
.unwrap_or_else(|| EthereumBlockWithTriggers::new(vec![], ethereum_block)))
}
BlockFinality::NonFinal(full_block) => {
let mut triggers = Vec::new();
triggers.append(&mut parse_log_triggers(
log_filter,
&full_block.ethereum_block,
));
triggers.append(&mut parse_call_triggers(call_filter, &full_block));
triggers.append(&mut parse_block_triggers(block_filter, &full_block));
Ok(EthereumBlockWithTriggers::new(triggers, ethereum_block))
}
}
}
/// Returns blocks with triggers, corresponding to the specified range and filters.
/// If a block contains no triggers, there may be no corresponding item in the stream.
/// However the `to` block will always be present, even if triggers are empty.
///
/// Careful: don't use this function without considering race conditions.
/// Chain reorgs could happen at any time, and could affect the answer received.
/// Generally, it is only safe to use this function with blocks that have received enough
/// confirmations to guarantee no further reorgs, **and** where the Ethereum node is aware of
/// those confirmations.
/// If the Ethereum node is far behind in processing blocks, even old blocks can be subject to
/// reorgs.
/// It is recommended that `to` be far behind the block number of latest block the Ethereum
/// node is aware of.
pub fn blocks_with_triggers(
adapter: Arc<dyn EthereumAdapter>,
logger: Logger,
chain_store: Arc<dyn ChainStore>,
subgraph_metrics: Arc<SubgraphEthRpcMetrics>,
from: u64,
to: u64,
log_filter: EthereumLogFilter,
call_filter: EthereumCallFilter,
block_filter: EthereumBlockFilter,
) -> Box<dyn Future<Item = Vec<EthereumBlockWithTriggers>, Error = Error> + Send> {
// Each trigger filter needs to be queried for the same block range
// and the blocks yielded need to be deduped. If any error occurs
// while searching for a trigger type, the entire operation fails.
let eth = adapter.clone();
let mut trigger_futs: futures::stream::FuturesUnordered<
Box<dyn Future<Item = Vec<EthereumTrigger>, Error = Error> + Send>,
> = futures::stream::FuturesUnordered::new();
// Scan the block range from triggers to find relevant blocks
if !log_filter.is_empty() {
trigger_futs.push(Box::new(
eth.logs_in_block_range(&logger, subgraph_metrics.clone(), from, to, log_filter)
.map_ok(|logs: Vec<Log>| logs.into_iter().map(EthereumTrigger::Log).collect())
.compat(),
))
}
if !call_filter.is_empty() {
trigger_futs.push(Box::new(
eth.calls_in_block_range(&logger, subgraph_metrics.clone(), from, to, call_filter)
.map(EthereumTrigger::Call)
.collect(),
));
}
if block_filter.trigger_every_block {
trigger_futs.push(Box::new(
adapter
.block_range_to_ptrs(logger.clone(), from, to)
.map(move |ptrs| {
ptrs.into_iter()
.map(|ptr| EthereumTrigger::Block(ptr, EthereumBlockTriggerType::Every))
.collect()
}),
))
} else if !block_filter.contract_addresses.is_empty() {
// To determine which blocks include a call to addresses
// in the block filter, transform the `block_filter` into
// a `call_filter` and run `blocks_with_calls`
let call_filter = EthereumCallFilter::from(block_filter);
trigger_futs.push(Box::new(
eth.calls_in_block_range(&logger, subgraph_metrics.clone(), from, to, call_filter)
.map(|call| {
EthereumTrigger::Block(
EthereumBlockPointer::from(&call),
EthereumBlockTriggerType::WithCallTo(call.to),
)
})
.collect(),
));
}
let logger1 = logger.cheap_clone();
let logger2 = logger.cheap_clone();
let eth_clone = eth.cheap_clone();
Box::new(
trigger_futs
.concat2()
.join(
adapter
.clone()
.block_hash_by_block_number(&logger, chain_store.clone(), to, true)
.then(move |to_hash| match to_hash {
Ok(n) => n.ok_or_else(|| {
warn!(logger2,
"Ethereum endpoint is behind";
"url" => eth_clone.url_hostname()
);
format_err!("Block {} not found in the chain", to)
}),
Err(e) => Err(e),
}),
)
.map(move |(triggers, to_hash)| {
let mut block_hashes: HashSet<H256> =
triggers.iter().map(EthereumTrigger::block_hash).collect();
let mut triggers_by_block: HashMap<u64, Vec<EthereumTrigger>> =
triggers.into_iter().fold(HashMap::new(), |mut map, t| {
map.entry(t.block_number()).or_default().push(t);
map
});
debug!(logger, "Found {} relevant block(s)", block_hashes.len());
// Make sure `to` is included, even if empty.
block_hashes.insert(to_hash);
triggers_by_block.entry(to).or_insert(Vec::new());
(block_hashes, triggers_by_block)
})
.and_then(move |(block_hashes, mut triggers_by_block)| {
adapter
.load_blocks(logger1, chain_store, block_hashes)
.and_then(
move |block| match triggers_by_block.remove(&block.number()) {
Some(triggers) => Ok(EthereumBlockWithTriggers::new(
triggers,
BlockFinality::Final(block),
)),
None => Err(format_err!(
"block {:?} not found in `triggers_by_block`",
block
)),
},
)
.collect()
.map(|mut blocks| {
blocks.sort_by_key(|block| block.ethereum_block.number());
blocks
})
}),
)
}
#[cfg(test)]
mod tests {
use super::EthereumCallFilter;
use web3::types::Address;
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
#[test]
fn extending_ethereum_call_filter() {
let mut base = EthereumCallFilter {
contract_addresses_function_signatures: HashMap::from_iter(vec![
(
Address::from_low_u64_be(0),
(0, HashSet::from_iter(vec![[0u8; 4]])),
),
(
Address::from_low_u64_be(1),
(1, HashSet::from_iter(vec![[1u8; 4]])),
),
]),
};
let extension = EthereumCallFilter {
contract_addresses_function_signatures: HashMap::from_iter(vec![
(
Address::from_low_u64_be(0),
(2, HashSet::from_iter(vec![[2u8; 4]])),
),
(
Address::from_low_u64_be(3),
(3, HashSet::from_iter(vec![[3u8; 4]])),
),
]),
};
base.extend(extension);
assert_eq!(
base.contract_addresses_function_signatures
.get(&Address::from_low_u64_be(0)),
Some(&(0, HashSet::from_iter(vec![[0u8; 4], [2u8; 4]])))
);
assert_eq!(
base.contract_addresses_function_signatures
.get(&Address::from_low_u64_be(3)),
Some(&(3, HashSet::from_iter(vec![[3u8; 4]])))
);
assert_eq!(
base.contract_addresses_function_signatures
.get(&Address::from_low_u64_be(1)),
Some(&(1, HashSet::from_iter(vec![[1u8; 4]])))
);
}
} | .filter_map(|data_source| data_source.source.address.map(|addr| (addr, data_source)))
.map(|(contract_addr, data_source)| {
let start_block = data_source.source.start_block;
data_source |
test-run-card.directive.js | (function() {
'use strict';
angular.module('app.testRunCard').directive('testRunCard', function() {
return {
templateUrl: 'app/components/blocks/test-run-card/test-run-card.html',
controller: function TestRunCardController(windowWidthService,
testsRunsService, $rootScope, UtilService,
$state, $timeout, $mdDialog, $mdToast,
SlackService, TestRunService, UserService,
$interval, DownloadService) {
const local = {
currentUser: UserService.getCurrentUser(),
testRunInDebugMode: null,
stopConnectingDebug: null,
debugHost: null,
debugPort: null,
jenkins: $rootScope.jenkins,
};
const vm = {
testRun: null,
singleMode: false,
singleWholeInfo: false,
showNotifyInSlackOption: false,
showBuildNowOption: false,
showDeleteTestRunOption: false,
isMobile: windowWidthService.isMobile,
isSlackAvailable: false,
slackChannels: null,
currentOffset: $rootScope.currentOffset,
tools: $rootScope.tools,
addToSelectedtestRuns: addToSelectedtestRuns,
showDetails: showDetails,
openMenu: openMenu,
openTestRun: openTestRun,
copyLink: copyLink,
markAsReviewed: markAsReviewed,
showCommentsDialog: showCommentsDialog,
sendAsEmail: sendAsEmail,
createSpreadsheet: createSpreadsheet,
exportTestRun: exportTestRun,
notifyInSlack: notifyInSlack,
buildNow: buildNow,
abort: abort,
rerun: rerun,
startDebug: startDebug,
onTestRunDelete: onTestRunDelete,
checkFilePresence: checkFilePresence,
downloadApplication: downloadApplication,
goToTestRun: goToTestRun,
onBackClick: onBackClick,
};
vm.$onInit = init;
return vm;
function init() {
initSlackChannels();
initSlackAvailability();
}
function initSlackChannels() {
vm.slackChannels = testsRunsService.getSlackChannels();
if (!vm.slackChannels) {
testsRunsService.fetchSlackChannels().then(function(slackChannels) {
vm.slackChannels = slackChannels;
});
}
}
function initSlackAvailability() {
if (testsRunsService.isSlackAvailabilityFetched()) {
vm.isSlackAvailable = testsRunsService.getSlackAvailability();
} else {
testsRunsService.fetchSlackAvailability().then(function(isSlackAvailable) {
vm.isSlackAvailable = isSlackAvailable;
});
}
}
function addToSelectedtestRuns() {
vm.onSelect && vm.onSelect(vm.testRun);
}
function showDetails(value) {
vm.singleWholeInfo = value;
}
function initMenuRights() {
vm.showNotifyInSlackOption = (vm.isSlackAvailable && vm.slackChannels.indexOf(vm.testRun.job.name) !== -1) && vm.testRun.reviewed;
vm.showBuildNowOption = local.jenkins.enabled;
vm.showDeleteTestRunOption = true;
}
function openMenu($event, $msMenuCtrl) {
initMenuRights();
UtilService.setOffset($event);
$timeout(function() {
vm.currentOffset = $rootScope.currentOffset;
$msMenuCtrl.open($event);
});
}
function openTestRun() {
const url = $state.href('tests/run', {testRunId: vm.testRun.id});
window.open(url,'_blank');
}
function goToTestRun() {
$state.go('tests/run', {testRunId: vm.testRun.id, testRun: vm.testRun});
}
function onBackClick() {
$state.go('tests/runs', {activeTestRunId: vm.testRun.id});
}
function copyLink() {
const url = $state.href('tests/run', {testRunId: vm.testRun.id}, {absolute : true});
url.copyToClipboard();
}
function markAsReviewed() {
showCommentsDialog();
}
function sendAsEmail(event) {
showEmailDialog([vm.testRun], event);
}
function createSpreadsheet(event) {
showCreateSpreadsheetDialog([vm.testRun], event);
};
function showCommentsDialog(event) {
$mdDialog.show({
controller: 'CommentsController',
templateUrl: 'app/components/modals/comments/comments.html',
parent: angular.element(document.body),
targetEvent: event,
clickOutsideToClose:true,
fullscreen: true,
locals: {
testRun: vm.testRun,
isSlackAvailable: vm.isSlackAvailable,
slackChannels: vm.slackChannels
}
}).then(function(answer) {
vm.testRun.reviewed = answer.reviewed;
vm.testRun.comments = answer.comments;
});
}
function | (testRuns, event) {
$mdDialog.show({
controller: 'EmailController',
templateUrl: 'app/components/modals/email/email.html',
parent: angular.element(document.body),
targetEvent: event,
clickOutsideToClose:true,
fullscreen: true,
locals: {
testRuns: testRuns
}
});
}
function showCreateSpreadsheetDialog(testRuns, event) {
$mdDialog.show({
controller: 'SpreadsheetController',
templateUrl: 'app/components/modals/spreadsheet/spreadsheet.html',
parent: angular.element(document.body),
targetEvent: event,
clickOutsideToClose:true,
fullscreen: true,
locals: {
testRuns: testRuns
}
})
.then(undefined, function(links) {
if (links && links.length) {
showToastWithLinks(links);
}
});
}
function showToastWithLinks(links) {
$mdToast.show({
hideDelay: 0,
position: 'bottom right',
locals: {
links: links
},
controller: function ToastWithLinksController($mdToast, links) {
return {
links: links,
closeToast: closeToast,
};
function closeToast() {
$mdToast.hide();
}
},
controllerAs: '$ctrl',
template: '<md-toast>\n' +
' <a target="_blank" ng-repeat="link in links" ng-href="{{ link }}" class="md-toast-text" flex>Google spreadsheet</a>\n' +
' <md-button id="close" ng-click="$ctrl.closeToast();">\n' +
' Close\n' +
' </md-button>\n' +
'</md-toast>'
});
}
function exportTestRun() {
TestRunService.exportTestRunResultsHTML(vm.testRun.id).then(function(rs) {
if (rs.success) {
downloadFromByteArray(vm.testRun.testSuite.name.split(' ').join('_') + '.html', rs, 'html');
} else {
alertify.error(rs.message);
}
});
}
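// Builds a Blob from the response bytes, points a temporary hidden link at an
// object URL for it and programmatically clicks it so the browser saves the
// file; the link element is cleaned up shortly afterwards.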
function downloadFromByteArray(filename, array, contentType) {
const blob = new Blob([array.data], {type: contentType ? contentType : array.headers('Content-Type')});
const link = document.createElement('a');
link.style = 'display: none';
document.body.appendChild(link);
link.href = window.URL.createObjectURL(blob);
link.download = filename;
link.click();
// remove link after 10 sec
$timeout(() => {
link && document.body.removeChild(link);
}, 10000);
}
function notifyInSlack() {
SlackService.triggerReviewNotif(vm.testRun.id);
}
function buildNow(event) {
showBuildNowDialog(event);
}
function rerun(event) {
showRerunDialog(event);
}
function showRerunDialog(event) {
$mdDialog.show({
controller: 'TestRunRerunController',
templateUrl: 'app/components/modals/rerun/rerun.html',
parent: angular.element(document.body),
targetEvent: event,
clickOutsideToClose: true,
fullscreen: true,
locals: {
testRun: vm.testRun,
jenkins: local.jenkins
}
});
}
function startDebug() {
if (confirm('Start debugging?')) {
local.testRunInDebugMode = angular.copy(vm.testRun);
debugTestRun(local.testRunInDebugMode);
}
}
function debugTestRun(testRunForDebug) {
TestRunService.debugTestRun(testRunForDebug.id).then(function (rs) {
if (rs.success) {
showDebugToast();
let debugLog = '';
let disconnectDebugTimeout;
const parseLogsInterval = $interval(function() {
TestRunService.getConsoleOutput(testRunForDebug.id, testRunForDebug.ciRunId, 200, 50).then(function (rs) {
if (rs.success) {
const map = rs.data;
Object.keys(map).forEach(function(key) {
const value = map[key];
if (value.includes('Listening for transport dt_socket at address:')) {
if (debugLog === '') {
getDebugData(value);
}
$timeout.cancel(connectDebugTimeout);
disconnectDebugTimeout = $timeout(function() {
stopDebugMode();
$mdToast.hide();
}, 60 * 10 * 1000);
if (debugLog === '') {
debugLog = value;
}
if (debugLog !== value) {
$timeout.cancel(disconnectDebugTimeout);
$interval.cancel(parseLogsInterval);
$mdToast.hide();
alertify.success('Tests started in debug');
}
}
});
} else {
stopDebugMode();
alertify.error(rs.message);
}
});
}, 10000);
const connectDebugTimeout = $timeout(function() {
alertify.error('Problems with starting debug mode occurred, disabling');
stopDebugMode();
}, 60 * 10 * 1000);
local.stopConnectingDebug = function() {
$interval.cancel(parseLogsInterval);
$timeout.cancel(disconnectDebugTimeout);
$timeout.cancel(connectDebugTimeout);
};
} else {
alertify.error(rs.message);
}
});
}
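// getDebugData() below assumes the console output contains a line shaped like
// "Enabling remote debug on <host>:<port>" (illustrative format taken from the
// parsing logic) and extracts the host and port from it.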
function getDebugData(log){
if (log) {
const portLine = log.split('Enabling remote debug on ');
const debugValues = portLine[1].split(':');
local.debugHost = debugValues[0];
local.debugPort = debugValues[1].split('\n')[0];
}
}
function showDebugToast() {
$mdToast.show({
hideDelay: 1200000,
position: 'bottom right',
locals: {
debugPort: local.debugPort,
debugHost: local.debugHost,
stopDebugMode: stopDebugMode
},
controller : 'DebugModeController',
controllerAs: '$ctrl',
bindToController: true,
templateUrl : 'app/components/toasts/debug-mode/debug-mode.html'
});
}
function stopDebugMode() {
local.stopConnectingDebug && local.stopConnectingDebug();
if (local.testRunInDebugMode) {
abortDebug(local.testRunInDebugMode);
local.testRunInDebugMode = null;
local.debugHost = null;
local.debugPort = null;
alertify.warning('Debug mode is disabled');
}
}
function abortDebug(debuggedTestRun) {
if (local.jenkins.enabled) {
TestRunService.abortDebug(debuggedTestRun.id, debuggedTestRun.ciRunId).then(function (rs) {
if (rs.success) {
const abortCause = {};
abortCause.comment = 'Debug mode was disconnected';
TestRunService.abortTestRun(debuggedTestRun.id, debuggedTestRun.ciRunId, abortCause).then(function(rs) {
if (rs.success) {
debuggedTestRun.status = 'ABORTED';
alertify.success('Testrun ' + debuggedTestRun.testSuite.name + ' is aborted');
} else {
alertify.error(rs.message);
}
});
} else {
alertify.error(rs.message);
}
});
} else {
alertify.error('Unable connect to jenkins');
}
}
function showBuildNowDialog(event) {
$mdDialog.show({
controller: 'BuildNowController',
templateUrl: 'app/components/modals/build-now/build-now.html',
parent: angular.element(document.body),
targetEvent: event,
clickOutsideToClose:true,
fullscreen: true,
locals: {
testRun: vm.testRun
}
});
}
function abort() {
if (local.jenkins.enabled) {
TestRunService.abortCIJob(vm.testRun.id, vm.testRun.ciRunId).then(function (rs) {
if (rs.success) {
const abortCause = {};
abortCause.comment = 'Aborted by ' + local.currentUser.username;
TestRunService.abortTestRun(vm.testRun.id, vm.testRun.ciRunId, abortCause).then(function(rs) {
if (rs.success){
vm.testRun.status = 'ABORTED';
alertify.success('Testrun ' + vm.testRun.testSuite.name + ' is aborted');
} else {
alertify.error(rs.message);
}
});
} else {
alertify.error(rs.message);
}
});
} else {
alertify.error('Unable connect to jenkins');
}
}
function onTestRunDelete() {
if (vm.singleMode) {
deleteTestRun();
} else {
vm.onDelete && vm.onDelete(vm.testRun);
}
}
function deleteTestRun() {
const confirmation = confirm('Do you really want to delete "' + vm.testRun.testSuite.name + '" test run?');
if (confirmation) {
const id = vm.testRun.id;
TestRunService.deleteTestRun(id).then(function(rs) {
const messageData = rs.success ? {success: rs.success, id: id, message: 'Test run{0} {1} removed'} : {id: id, message: 'Unable to delete test run{0} {1}'};
UtilService.showDeleteMessage(messageData, [id], [], []);
if (rs.success) {
$timeout(function() {
testsRunsService.clearDataCache();
$state.go('tests/runs');
}, 1000);
}
});
}
}
function checkFilePresence() {
if (!vm.testRun.appVersionValid) {
vm.testRun.appVersionLoading = true;
DownloadService.check(vm.testRun.appVersion).then(function (rs) {
if (rs.success) {
vm.testRun.appVersionValid = rs.data;
} else {
//alertify.error(rs.message);
}
delete vm.testRun.appVersionLoading;
return rs.data;
});
}
}
function downloadApplication() {
const appVersion = vm.testRun.appVersion;
DownloadService.download(appVersion).then(function (rs) {
if (rs.success) {
downloadFromByteArray(appVersion, rs.res);
} else {
alertify.error(rs.message);
}
});
}
},
scope: {
singleMode: '=',
testRun: '=',
onSelect: '&',
onDelete: '&'
},
controllerAs: '$ctrl',
restrict: 'E',
replace: true,
bindToController: true
};
});
})();
| showEmailDialog |
_Person.py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from learning_topic/Person.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Person(genpy.Message):
_md5sum = "8cf74e85a44e7a35ab62353a46e326a3"
_type = "learning_topic/Person"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string name
uint8 sex
uint8 age
uint8 unknown =0
uint8 male=1
uint8 female=2
uint16 age1
float64 height"""
# Pseudo-constants
unknown = 0
male = 1
female = 2
__slots__ = ['name','sex','age','age1','height']
_slot_types = ['string','uint8','uint8','uint16','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name,sex,age,age1,height
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Person, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
if self.sex is None:
self.sex = 0
if self.age is None:
self.age = 0
if self.age1 is None:
self.age1 = 0
if self.height is None:
self.height = 0.
else:
self.name = ''
self.sex = 0
self.age = 0
self.age1 = 0
self.height = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type | (length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
_x = self
start = end
end += 12
(_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
_x = self
start = end
end += 12
(_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2BHd = None
def _get_struct_2BHd():
global _struct_2BHd
if _struct_2BHd is None:
_struct_2BHd = struct.Struct("<2BHd")
return _struct_2BHd | try:
end = 0
start = end
end += 4 |
0010_auto_20180112_1721.py | # Generated by Django 1.11.7 on 2018-01-12 17:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("letters", "0009_auto_20170826_0742")]
operations = [ | "permissions": (
("can_filter_eml", "Can filter eml"),
("recognize_letter", "Can recognize letter"),
),
"verbose_name": "Letter",
"verbose_name_plural": "Letters",
},
)
] | migrations.AlterModelOptions(
name="letter",
options={
"ordering": ["created"], |
app.module.ts | import { NgModule } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { HttpModule } from '@angular/http';
import { RouterModule } from '@angular/router';
import { AppComponent } from './app.component';
import { WelcomeComponent } from './home/welcome.component';
import { ProductModule } from './products/product.module';
@NgModule({
imports: [
BrowserModule,
HttpModule,
ProductModule,
RouterModule.forRoot([
{path: 'welcome', component: WelcomeComponent},
{path: '', redirectTo: 'welcome', pathMatch: 'full'},
{path:'**', redirectTo: 'welcome', pathMatch: 'full'}
])
],
declarations: [
AppComponent,
WelcomeComponent
],
bootstrap: [ AppComponent ]
})
export class | { }
| AppModule |
test_federation.py | import pytest
from pytest_mock import MockerFixture
from pystratis.api.federation.responsemodels import *
from pystratis.api.federation import Federation
from pystratis.core.networks import StraxMain, CirrusMain
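# The *_swagger_json fixtures supply each node's API spec; every /federation route
# found there must be present in Federation.endpoints.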
def test_all_strax_endpoints_implemented(strax_swagger_json):
paths = [key.lower() for key in strax_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_cirrus_endpoints_implemented(cirrus_swagger_json):
paths = [key.lower() for key in cirrus_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_interfluxstrax_endpoints_implemented(interfluxstrax_swagger_json):
paths = [key.lower() for key in interfluxstrax_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_interfluxcirrus_endpoints_implemented(interfluxcirrus_swagger_json):
paths = [key.lower() for key in interfluxcirrus_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_reconstruct(mocker: MockerFixture, network):
data = "Reconstruction flag set, please restart the node."
mocker.patch.object(Federation, 'put', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.reconstruct()
assert response == data
# noinspection PyUnresolvedReferences
federation.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_members_current(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
data = {
"pollStartBlockHeight": None,
"pollNumberOfVotesAcquired": None,
"pollFinishedBlockHeight": None,
"pollWillFinishInBlocks": None,
"pollExecutedBlockHeight": None,
"memberWillStartMiningAtBlockHeight": None,
"memberWillStartEarningRewardsEstimateHeight": None,
"pollType": None,
"rewardEstimatePerBlock": 0.05,
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:32.9200000"
}
mocker.patch.object(Federation, 'get', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.members_current()
assert response == FederationMemberDetailedModel(**data)
# noinspection PyUnresolvedReferences
federation.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_member(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
data = [
{
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:32.9200000" | "collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:33.9200000"
},
{
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:34.9200000"
}
]
mocker.patch.object(Federation, 'get', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.members()
assert response == [FederationMemberModel(**x) for x in data]
# noinspection PyUnresolvedReferences
federation.get.assert_called_once() | },
{
"pubkey": generate_compressed_pubkey, |
patch.go | package operations
import (
"context"
"io/ioutil"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
const (
patchDescriptionFlagName = "description"
patchFinalizeFlagName = "finalize"
patchVerboseFlagName = "verbose"
patchAliasFlagName = "alias"
patchBrowseFlagName = "browse"
)
func getPatchFlags(flags ...cli.Flag) []cli.Flag {
return mergeFlagSlices(addProjectFlag(flags...), addVariantsFlag(), addTasksFlag(), addLargeFlag(), addYesFlag(
cli.StringFlag{
Name: joinFlagNames(patchDescriptionFlagName, "d"),
Usage: "description for the patch",
},
cli.StringFlag{
Name: joinFlagNames(patchAliasFlagName, "a"),
Usage: "patch alias (set by project admin)",
},
cli.BoolFlag{
Name: joinFlagNames(patchFinalizeFlagName, "f"),
Usage: "schedule tasks immediately",
},
cli.BoolFlag{
Name: joinFlagNames(patchBrowseFlagName),
Usage: "open patch url in browser",
},
cli.BoolFlag{
Name: patchVerboseFlagName,
Usage: "show patch summary",
}))
}
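// Patch returns the "patch" subcommand, which builds a patch from local git changes
// and submits it to the Evergreen service.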
func Patch() cli.Command {
return cli.Command{
Name: "patch",
Before: setPlainLogger,
Aliases: []string{"create-patch", "submit-patch"},
Usage: "submit a new patch to evergreen",
Flags: getPatchFlags(),
Action: func(c *cli.Context) error {
confPath := c.Parent().String(confFlagName)
args := c.Args()
params := &patchParams{
Project: c.String(projectFlagName),
Variants: c.StringSlice(variantsFlagName),
Tasks: c.StringSlice(tasksFlagName),
SkipConfirm: c.Bool(yesFlagName),
Description: c.String(patchDescriptionFlagName),
Finalize: c.Bool(patchFinalizeFlagName),
Browse: c.Bool(patchBrowseFlagName),
ShowSummary: c.Bool(patchVerboseFlagName),
Large: c.Bool(largeFlagName),
Alias: c.String(patchAliasFlagName),
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf, err := NewClientSettings(confPath)
if err != nil {
return errors.Wrap(err, "problem loading configuration")
}
comm := conf.GetRestCommunicator(ctx)
ac, _, err := conf.getLegacyClients()
if err != nil {
return errors.Wrap(err, "problem accessing evergreen service")
}
ref, err := params.validatePatchCommand(ctx, conf, ac, comm)
if err != nil {
return err
}
diffData, err := loadGitData(ref.Branch, args...)
if err != nil {
return err
}
return params.createPatch(ac, conf, diffData)
},
}
}
func | () cli.Command {
const (
baseFlagName = "base"
diffPathFlagName = "diff-file"
)
return cli.Command{
Name: "patch-file",
Usage: "submit patch using a diff file",
Flags: getPatchFlags(
cli.StringFlag{
Name: joinFlagNames("base", "b"),
Usage: "githash of base",
},
cli.StringFlag{
Name: diffPathFlagName,
Usage: "path to a file for diff of the patch",
},
),
Before: mergeBeforeFuncs(requireFileExists(diffPathFlagName)),
Action: func(c *cli.Context) error {
confPath := c.Parent().String(confFlagName)
params := &patchParams{
Project: c.String(projectFlagName),
Variants: c.StringSlice(variantsFlagName),
Tasks: c.StringSlice(tasksFlagName),
SkipConfirm: c.Bool(yesFlagName),
Description: c.String(patchDescriptionFlagName),
Finalize: c.Bool(patchFinalizeFlagName),
ShowSummary: c.Bool(patchVerboseFlagName),
Large: c.Bool(largeFlagName),
}
diffPath := c.String(diffPathFlagName)
base := c.String(baseFlagName)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
conf, err := NewClientSettings(confPath)
if err != nil {
return errors.Wrap(err, "problem loading configuration")
}
comm := conf.GetRestCommunicator(ctx)
ac, _, err := conf.getLegacyClients()
if err != nil {
return errors.Wrap(err, "problem accessing evergreen service")
}
if _, err = params.validatePatchCommand(ctx, conf, ac, comm); err != nil {
return err
}
fullPatch, err := ioutil.ReadFile(diffPath)
if err != nil {
return errors.Wrap(err, "problem reading diff file")
}
diffData := &localDiff{string(fullPatch), "", "", base}
return params.createPatch(ac, conf, diffData)
},
}
}
| PatchFile |
package.py | # -*- coding: utf-8 -*-
"""datapackage model"""
import os.path as osp
from typing import List, Tuple, Dict, Union, Callable
import attr
import json
from itertools import product
from collections import OrderedDict
from tqdm import tqdm
import pandas as pd
from .ddf import DDF, Concept, EntityDomain, Entity, DaskDataPoint, Synonym
from .utils import absolute_path
import logging
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True, repr=False)
class TableSchema:
"""Table Schema Object Class"""
fields: List[dict]
primaryKey: Union[List[str], str]
@classmethod
def from_dict(cls, d: dict):
fields = d['fields']
primaryKey = d['primaryKey']
return cls(fields, primaryKey)
@property
def field_names(self):
return [f['name'] for f in self.fields]
@property
def common_fields(self):
field_names = self.field_names
pkey = self.primaryKey
if isinstance(pkey, str):
common_fields = list(filter(lambda x: x != pkey, field_names))
else:
common_fields = list(filter(lambda x: x not in pkey, field_names))
return common_fields
def __repr__(self):
return "TableSchema(primaryKey: {}, fields: {})".format(self.primaryKey, self.common_fields)
@attr.s(auto_attribs=True)
class Resource:
name: str
path: str
schema: TableSchema
@classmethod
def from_dict(cls, d: dict):
path = d['path']
name = d['name']
schema = TableSchema.from_dict(d['schema'])
return cls(name, path, schema)
def to_dict(self):
res = vars(self).copy()
if 'schema' in res:
res['schema'] = vars(res['schema']).copy()
return res
@attr.s(auto_attribs=True)
class DDFSchema:
primaryKey: List[str]
value: str
resources: List[str] # a list of resource names
@classmethod
def from_dict(cls, d: dict):
primaryKey = d['primaryKey']
value = d['value']
resources = d['resources']
return cls(primaryKey=primaryKey, value=value, resources=resources)
@attr.s(auto_attribs=True, repr=False)
class DataPackage:
base_path: str
resources: List[Resource]
props: dict = attr.ib(factory=dict)
def __attrs_post_init__(self):
self.base_path = absolute_path(self.base_path)
def __repr__(self):
|
@classmethod
def from_dict(cls, d_: dict, base_path='./'):
d = d_.copy()
resources = list(map(Resource.from_dict, d.pop('resources')))
return cls(base_path=base_path, resources=resources, props=d)
@classmethod
def from_json(cls, json_path):
json_path = absolute_path(json_path)
base_path = osp.dirname(json_path)
d = json.load(open(json_path))
return cls.from_dict(d, base_path)
@classmethod
def from_path(cls, path):
path = absolute_path(path)
json_path = osp.join(path, 'datapackage.json')
return cls.from_json(json_path)
def to_dict(self):
"""dump the datapackage to disk"""
raise NotImplementedError
@attr.s(repr=False)
class DDFcsv(DataPackage):
"""DDFCSV datapackage."""
ddfSchema: Dict[str, List[DDFSchema]] = attr.ib(factory=dict)
ddf: DDF = attr.ib(init=False)
concepts_resources: List[Resource] = attr.ib(init=False)
entities_resources: List[Resource] = attr.ib(init=False)
datapoints_resources: List[Resource] = attr.ib(init=False)
synonyms_resources: List[Resource] = attr.ib(init=False)
# config for read_csv
_default_reader_options = {'keep_default_na': False, 'na_values': ['']}
_default_dask_reader_options = {'keep_default_na': False,
'na_values': [''],
'sample_rows': 1000000}
def __attrs_post_init__(self):
super(DDFcsv, self).__attrs_post_init__()
conc = list()
ent = list()
dp = list()
syn = list()
for r in self.resources:
pkey = r.schema.primaryKey
if isinstance(pkey, str):
if pkey == 'concept':
conc.append(r)
else:
ent.append(r)
else: # TODO: datapoints key might be one column, not list of columns?
if 'synonym' in pkey:
syn.append(r)
else:
dp.append(r)
self.concepts_resources = conc
self.entities_resources = ent
self.datapoints_resources = dp
self.synonyms_resources = syn
self.ddf = self.load_ddf()
@classmethod
def from_dict(cls, d_: dict, base_path='./'):
d = d_.copy()
resources = list(map(Resource.from_dict, d.pop('resources')))
if 'ddfSchema' in d.keys():
ddf_schema_ = d.pop('ddfSchema')
ddf_schema = dict()
for k, v in ddf_schema_.items():
ddf_schema[k] = [DDFSchema.from_dict(d) for d in v]
else:
ddf_schema = {}
return cls(base_path=base_path, resources=resources, ddfSchema=ddf_schema, props=d)
def to_dict(self):
res = OrderedDict(self.props.copy())
res['resources'] = [r.to_dict() for r in self.resources]
if self.ddfSchema:
res['ddfSchema'] = dict()
for k, v in self.ddfSchema.items():
res['ddfSchema'][k] = [vars(sch).copy() for sch in v]
return res
def _gen_concepts(self):
concepts_paths = [osp.join(self.base_path, r.path) for r in self.concepts_resources]
for p in concepts_paths:
df = pd.read_csv(p, index_col='concept', dtype=str, **self._default_reader_options)
for concept, row in df.iterrows():
concept_type = row['concept_type']
props = row.drop('concept_type').to_dict()
yield (concept, Concept(id=concept, concept_type=concept_type, props=props))
def _gen_entities(self, concepts: Dict[str, Concept]):
for r in self.entities_resources:
pkey = r.schema.primaryKey
if concepts[pkey].concept_type == 'entity_domain':
domain = concepts[pkey].id
else:
domain = concepts[pkey].props['domain']
df = pd.read_csv(osp.join(self.base_path, r.path), dtype=str, # TODO: is it okay to use str for all?
**self._default_reader_options)
df = df.set_index(pkey)
is_cols = list(filter(lambda x: x.startswith('is--'), df.columns.values))
for ent, row in df.iterrows():
sets = list()
for c in is_cols:
if row[c] == 'TRUE' and c[4:] != domain:
sets.append(c[4:]) # strip the 'is--' part, only keep set name
yield (domain, Entity(id=ent, domain=domain, sets=sets, props=row.drop(is_cols).to_dict()))
def _gen_datapoints(self):
for r in self.datapoints_resources:
fields = r.schema.common_fields
pkey = r.schema.primaryKey
for f in fields:
yield (f, pkey, osp.join(self.base_path, r.path))
def _gen_synonyms(self):
for r in self.synonyms_resources:
# there should be only two columns
pkey = r.schema.primaryKey
if pkey[0] == 'synonym':
concept = pkey[1]
else:
concept = pkey[0]
df = pd.read_csv(osp.join(self.base_path, r.path), **self._default_reader_options)
sym = Synonym(concept_id=concept, synonyms=df.set_index('synonym')[concept].to_dict())
yield (concept, sym)
@staticmethod
def entity_domain_to_categorical(domain: EntityDomain):
entities = [e.id for e in domain.entities]
return pd.api.types.CategoricalDtype(entities)
@staticmethod
def entity_set_to_categorical(domain: EntityDomain, s: str):
entity_set = domain.get_entity_set(s)
entities = [e.id for e in entity_set]
return pd.api.types.CategoricalDtype(entities)
def load_ddf(self):
"""-> DDF"""
# load concepts
concepts = dict(self._gen_concepts())
# load entities
entities = list(self._gen_entities(concepts))
domains = dict()
domains_tmp = dict()
for domain, entity in entities:
if domain not in domains_tmp.keys():
domains_tmp[domain] = list()
domains_tmp[domain].append(entity)
for domain, entities_ in domains_tmp.items():
# TODO: maybe get properties from concepts table
# Allow duplicated entity because they may be defined in multiple resources
# i.e. multiple entity sets in separated files.
domains[domain] = EntityDomain.from_entity_list(domain_id=domain, entities=entities_, allow_duplicated=True)
# load datapoints. Here we will use Dask for all
# 1. create categories for entity domains
dtypes = dict()
# parse_dates = list()
concept_types = dict()
for domain_name, domain in domains.items():
dtypes[domain_name] = self.entity_domain_to_categorical(domain)
for eset in domain.entity_sets:
dtypes[eset] = self.entity_set_to_categorical(domain, eset)
# 2. get all concept types, update dtypes for time concepts
for c_id, c in concepts.items():
concept_types[c_id] = c.concept_type
if c.concept_type == 'time':
dtypes[c_id] = 'str'
# 3. group files for same indicator together
indicators = dict()
for field, pkey, path in self._gen_datapoints():
# import ipdb; ipdb.set_trace()
indicator = field
pkey = tuple(sorted(pkey))
if indicator not in indicators:
indicators.setdefault(indicator, dict([(pkey, [path])]))
else:
if pkey not in indicators[indicator]:
indicators[indicator][pkey] = [path]
else:
indicators[indicator][pkey].append(path)
datapoints = dict()
for i, v in indicators.items():
datapoints[i] = dict()
# dtypes_ = dtypes.copy()
# dtypes_[i] = 'float' # TODO: supporting string/float datatypes, not just float
read_csv_options = self._default_dask_reader_options.copy()
read_csv_options.update(dict(dtype=dtypes))
for k, paths in v.items():
dp = DaskDataPoint(id=i, dimensions=k, path=paths, concept_types=concept_types,
read_csv_options=read_csv_options)
datapoints[i][k] = dp
# load synonyms
synonyms = dict(self._gen_synonyms())
# return complete DDF object
return DDF(concepts=concepts, entities=domains, datapoints=datapoints, synonyms=synonyms, props=self.props)
def generate_ddf_schema(self, progress_bar=False):
"""generate ddf schema from all resources.
Parameters
----------
progress_bar : bool
whether progress bar should be shown when generating ddfSchema.
"""
hash_table = {}
ddf_schema = {'concepts': [], 'entities': [], 'datapoints': [], 'synonyms': []}
entity_value_cache = dict()
dtypes = dict()
# check if we need progress bar
if progress_bar:
if logger.getEffectiveLevel() == 10: # debug: force not showing progress bar
logger.warning("progress bar will be disabled in debugging mode.")
progress_bar = False
# generate set-membership details for every single entity in dataset
# also create dtypes for later use
for domain_id, domain in self.ddf.entities.items():
dtypes[domain_id] = self.entity_domain_to_categorical(domain)
for s in self.ddf.entities[domain_id].entity_sets:
dtypes[s] = self.entity_set_to_categorical(domain, s)
entity_value_cache[domain_id] = dict()
for ent in domain.entities:
sets = set()
sets.add(domain_id)
for s in ent.sets:
sets.add(s)
entity_value_cache[domain_id][ent.id] = tuple(sets)
def _which_sets(entity_, domain_):
try:
return entity_value_cache[domain_][entity_]
except KeyError:
logger.debug('entity {} is not in {} domain!'.format(entity_, domain_))
raise
def _gen_key_value_object(resource: Resource):
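            # Yields one {primaryKey, value, resource} record for every key/value
            # combination found in a single resource; these records are folded into
            # the ddf schema hash table below.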
logger.debug('working on: {}'.format(resource.path))
if isinstance(resource.schema.primaryKey, str):
pkeys = [resource.schema.primaryKey]
else:
pkeys = resource.schema.primaryKey
entity_cols = [x for x in pkeys
if x in self.ddf.concepts
and self.ddf.concepts[x].concept_type in ['entity_domain', 'entity_set']]
value_cols = resource.schema.common_fields
data = pd.read_csv(osp.join(self.base_path, resource.path), dtype=dtypes,
**self._default_reader_options)
# check if entity columns data match entity defined in entity files
for c in entity_cols:
if data[c].hasnans:
data_ = pd.read_csv(osp.join(self.base_path, resource.path), dtype={c: str}, **self._default_reader_options)
ents = dtypes[c].categories.values
ents_ = data_[c].unique()
diff = set(ents_) - set(ents)
logger.critical("in file {}:".format(resource.path))
logger.critical("{} column contains entity which does not belong to {} domain/set: {}".format(c, c, list(diff)))
raise ValueError("entity mismatch")
# for resources that have entity_columns: only consider all permutations on entity columns
if len(entity_cols) > 0:
data = data[entity_cols].drop_duplicates()
pkeys_prop = dict()
for c in pkeys:
                if c == 'concept':
pkeys_prop[c] = {'type': 'concept'}
elif c not in self.ddf.concepts:
pkeys_prop[c] = {'type': 'non_concept'}
else:
concept = self.ddf.concepts[c]
if concept.concept_type == 'entity_set':
pkeys_prop[c] = {'type': 'entity_set',
'domain': concept.props['domain']}
elif concept.concept_type == 'entity_domain':
pkeys_prop[c] = {'type': 'entity_domain'}
else:
pkeys_prop[c] = {'type': 'others'}
all_permutations = set()
for _, r in data.iterrows():
perm = list()
for c in pkeys:
if pkeys_prop[c]['type'] == 'entity_set':
domain = pkeys_prop[c]['domain']
perm.append(_which_sets(r[c], domain))
elif pkeys_prop[c]['type'] == 'entity_domain':
perm.append(_which_sets(r[c], c))
else:
perm.append(tuple([c]))
all_permutations.add(tuple(perm))
            # if data is empty, just emit an object with primaryKey and null value
if len(all_permutations) == 0:
obj = {'primaryKey': pkeys, 'value': None, 'resource': resource.name}
logger.debug('yielding: {}'.format(str(obj)))
yield obj
for row in all_permutations:
for perm in product(*row):
if len(value_cols) > 0:
for c in value_cols:
obj = {'primaryKey': list(perm), 'value': c, 'resource': resource.name}
logger.debug('yielding: {}'.format(str(obj)))
yield obj
else:
obj = {'primaryKey': list(perm), 'value': None, 'resource': resource.name}
logger.debug('yielding: {}'.format(str(obj)))
yield obj
def _add_to_schema(resource_schema):
"""handle objects generated by ``_gen_key_value_object``"""
key = '-'.join(sorted(resource_schema['primaryKey']))
if not pd.isnull(resource_schema['value']):
hash_val = key + '--' + resource_schema['value']
else:
hash_val = key + '--' + 'nan'
if hash_val not in hash_table.keys():
hash_table[hash_val] = {
'primaryKey': sorted(resource_schema['primaryKey']),
'value': resource_schema['value'],
'resources': {resource_schema['resource']}
}
else:
hash_table[hash_val]['resources'].add(resource_schema['resource'])
# make progressbar and run the process to generate schema
if progress_bar:
pbar = tqdm(total=len(self.resources))
for g in map(_gen_key_value_object, self.resources):
if progress_bar:
pbar.update(1)
for kvo in g:
logging.debug("adding kvo {}".format(str(kvo)))
_add_to_schema(kvo)
if progress_bar:
pbar.close()
for sch in hash_table.values():
sch['resources'] = list(sch['resources']) # convert set to list
sch_object = DDFSchema.from_dict(sch)
if len(sch['primaryKey']) == 1:
if sch['primaryKey'][0] == 'concept':
ddf_schema['concepts'].append(sch_object)
else:
ddf_schema['entities'].append(sch_object)
else:
if 'synonym' in sch['primaryKey']:
ddf_schema['synonyms'].append(sch_object)
else:
ddf_schema['datapoints'].append(sch_object)
return ddf_schema
def get_ddf_schema(self, update=False):
if not update and self.ddfSchema is not None:
return self.ddfSchema
elif not update and self.ddfSchema is None:
raise ValueError('No ddfSchema, please use update=True to generate one')
else:
self.ddfSchema = self.generate_ddf_schema()
return self.ddfSchema
| return f"DataPackage({self.base_path})" |
dot_bdd.py | #Tyler Sorensen
#University of Utah
#March 1, 2012
#dot_bdd.py
#This simply prints a .dot file for visualizing the bdd
#Only public function
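#Illustrative usage (assumes a bdd dict exposing the "u", "t_table" and
#"var_order" keys used by the helpers below):
# print_bdd(bdd, "bdd.dot")
# then render it with: dot -Tps bdd.dot > bdd.ps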
def print_bdd(bdd, fileName):
"""
Generate a dot file with the bdd in it. Run the dot file through
dot and generate a ps file.
"""
#open the file
f1 = open(fileName, 'w')
#Give it a readable header
_prDotHeader(f1)
#Print the Nodes
_prNodes(f1, bdd)
#Print the ranks
_prRanks(f1, bdd)
#Determine and print the edges
_prEdges(f1, bdd, bdd["u"], [])
#Close the file
_prClosing(f1)
def _prClosing(f1):
"""
A nice readable closing
"""
f1.write("/* Unix command: dot -Tps bdd.dot > bdd.ps */\n")
f1.write(r"/* For further details, see the `dot' manual */")
f1.write("\n}")
| f1.write("digraph G {\n" )
f1.write("/* Defaults */\n" )
f1.write(" fontsize = 12;\n" )
f1.write(" graph [dpi = 600];\n" )
f1.write(" ratio = compress; \n")
f1.write("/* Bounding box */\n" )
f1.write(" size = \"4,4\";\n" )
def _prNodes(f1, bdd):
"""
prints the definition for the Nodes
"""
u = bdd["u"]
if u != 1:
s = "Node0 [label=0, color=Red, shape=box, peripheries=2]\n"
f1.write(s)
if u != 0:
s = "Node1 [label=1, color=Blue, shape=box, peripheries=2]\n"
f1.write(s)
for q in bdd["t_table"]:
if q != 0 and q!= 1:
s = "Node%i " % q
s = "%s[label=%s" % (s, _get_var_name(bdd,q))
s = "%s, shape=circle, peripheries=1]\n" % s
f1.write(s)
#Helper for _prNodes
def _get_var_name(bdd, u):
"""
Given a variable index u in the BDD, return the variable
Name
"""
var_index = bdd["t_table"][u][0]-1
return bdd["var_order"][var_index]
def _prEdges(f1, bdd, u, drawn_list):
"""
Recursive function to draw all the edges.
Red for low, Blue for High
"""
if u == 1:
return
if u == 0:
return
if u not in drawn_list:
s = "Node%i->Node%i [color=red, label = \"0\"]\n" % (u, bdd["t_table"][u][1])
f1.write(s)
s = "Node%i->Node%i [color=blue, label = \"1\"]\n" % (u, bdd["t_table"][u][2])
f1.write(s)
_prEdges(f1, bdd, bdd["t_table"][u][1], drawn_list)
_prEdges(f1, bdd, bdd["t_table"][u][2], drawn_list)
drawn_list.append(u)
def _prRanks(f1, bdd):
"""
Make all the nodes with the same variables the same rank
"""
ar = [0]*len(bdd["var_order"])
#Count how many times each variable appears
for q in bdd["t_table"]:
if q != 0 and q != 1:
ar[bdd["t_table"][q][0]-1] += 1
i = 0
while i < len(bdd["var_order"]):
if ar[i] > 1:
l = find(bdd, i)
s = "{rank=same;"
for q in l:
s = "%s Node%s" % (s, str(q))
s = "%s}\n" % s
f1.write(s)
i += 1
#Helper function for prRanks
def find(bdd, i):
"""
returns a list of all the u numbers of variable i
"""
l = []
for q in bdd["t_table"]:
if bdd["t_table"][q][0]-1 == i:
l.append(q)
return l | def _prDotHeader(f1):
"""
Header that sets up initial variables and settings
""" |
display.rs | use std::fmt;
#[derive(Debug)]
struct Complex {
real: f32,
imag: f32,
}
impl fmt::Display for Complex {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} + {}i", self.real, self.imag)
}
}
fn | () {
let c = Complex {
real: 3.3,
imag: 7.2,
};
println!("Display: {}", c);
println!("Debug: {:?}", c);
}
| main |
lib.rs | mod help {
pub fn greet(name: &str) -> String {
String::from("Hello, ") + name | }
#[cfg(test)]
mod tests {
use help;
#[test]
fn it_works() {
assert_eq!(help::greet("drats"), "Hello, drats");
}
} | } |
article.py | """
Article object definitions
"""
from collections import OrderedDict
from elifearticle import utils
class BaseObject:
"base object for shared functions"
def __str__(self):
"""
Return `str` representation of the simple object properties,
if there is a list or dict just return an empty representation
for easier viewing and test case scenario writing
"""
_dict = {}
for key in self.__dict__:
if isinstance(self.__dict__.get(key), list):
_dict[key] = []
elif isinstance(self.__dict__.get(key), dict):
_dict[key] = {}
else:
_dict[key] = str(self.__dict__.get(key))
return str(_dict)
class Article(BaseObject):
"""
    We include some boilerplate in the init, namely article_type
"""
contributors = []
def __init__(self, doi=None, title=None):
self.article_type = "research-article"
self.display_channel = None
self.doi = doi
self.id = None
self.contributors = []
self.editors = []
self.title = title
self.abstract = ""
self.abstract_json = None
self.abstract_xml = None
self.digest = None
self.research_organisms = []
self.manuscript = None
self.dates = {}
self.license = None
self.article_categories = []
self.conflict_default = None
self.ethics = []
self.author_keywords = []
self.funding_awards = []
self.ref_list = []
self.component_list = []
        # For the PubMed function: a hook to specify whether the article was ever through the PoA pipeline
self.was_ever_poa = None
self.is_poa = None
self.volume = None
self.elocation_id = None
self.pii = None
self.related_articles = []
self.version = None
self.datasets = []
self.data_availability = None
self.funding_awards = []
self.funding_note = None
self.journal_issn = None
self.journal_title = None
self.self_uri_list = []
self.version = None
self.publisher_name = None
self.issue = None
self.review_articles = []
self.clinical_trials = []
self.preprint = None
self.related_objects = []
def add_contributor(self, contributor):
self.contributors.append(contributor)
def add_research_organism(self, research_organism):
self.research_organisms.append(research_organism)
def add_date(self, date):
self.dates[date.date_type] = date
def get_date(self, date_type):
"get date by date type"
try:
return self.dates[date_type]
except (KeyError, TypeError):
return None
def get_display_channel(self):
"display-channel string partly relates to the article_type"
return self.display_channel
def add_article_category(self, article_category):
self.article_categories.append(article_category)
def has_contributor_conflict(self):
# Return True if any contributors have a conflict
for contributor in self.contributors:
if contributor.conflict:
return True
return False
def add_ethic(self, ethic):
self.ethics.append(ethic)
def add_author_keyword(self, author_keyword):
self.author_keywords.append(author_keyword)
def add_dataset(self, dataset):
self.datasets.append(dataset)
def get_datasets(self, dataset_type=None):
if dataset_type:
return [d for d in self.datasets if d.dataset_type == dataset_type]
return self.datasets
def add_funding_award(self, funding_award):
self.funding_awards.append(funding_award)
def add_self_uri(self, uri):
self.self_uri_list.append(uri)
def get_self_uri(self, content_type):
"return the first self uri with the content_type"
try:
return [
self_uri
for self_uri in self.self_uri_list
if self_uri.content_type == content_type
][0]
except IndexError:
return None
def pretty(self):
"sort values and format output for viewing and comparing in test scenarios"
pretty_obj = OrderedDict()
for key, value in sorted(self.__dict__.items()):
if value is None:
pretty_obj[key] = None
elif isinstance(value, str):
pretty_obj[key] = self.__dict__.get(key)
elif isinstance(value, list):
pretty_obj[key] = []
elif isinstance(value, dict):
pretty_obj[key] = {}
else:
pretty_obj[key] = str(value)
return pretty_obj
class ArticleDate(BaseObject):
"""
A struct_time date and a date_type
"""
date_type = None
date = None
pub_type = None
publication_format = None
day = None
month = None
year = None
def __init__(self, date_type, date):
self.date_type = date_type
# Date as a time.struct_time
self.date = date
class Contributor(BaseObject):
"""
Currently we are not sure that we can get an auth_id for
all contributors, so this attribute remains an optional attribute.
"""
corresp = False
equal_contrib = False
contrib_type = None
auth_id = None
orcid = None
surname = None
given_name = None
suffix = None
collab = None
conflict = []
group_author_key = None
def __init__(self, contrib_type, surname, given_name, collab=None):
self.contrib_type = contrib_type
self.surname = surname
self.given_name = given_name
self.affiliations = []
self.conflict = []
self.collab = collab
def set_affiliation(self, affiliation):
self.affiliations.append(affiliation)
def set_conflict(self, conflict):
self.conflict.append(conflict)
class Affiliation(BaseObject):
phone = None
fax = None
email = None
department = None
institution = None
city = None
country = None
text = None
def __init__(self):
pass
class Dataset(BaseObject):
"""
Article component representing a dataset
"""
def __init__(self):
self.dataset_type = None
self.authors = []
# source_id is the uri in PoA generation
        # todo: refactor PoA to use the uri attribute, then delete the source_id attribute here
self.source_id = None
self.year = None
self.title = None
self.license_info = None
self.accession_id = None
self.assigning_authority = None
self.doi = None
self.uri = None
self.comment = None
def add_author(self, author):
self.authors.append(author)
class FundingAward(BaseObject):
"""
An award group as part of a funding group
"""
def __init__(self):
self.award_group_id = None
self.award_ids = []
self.institution_name = None
self.institution_id = None
self.principal_award_recipients = []
def add_award_id(self, award_id):
self.award_ids.append(award_id)
def add_principal_award_recipient(self, contributor):
"Accepts an instance of Contributor"
self.principal_award_recipients.append(contributor) | try:
return self.institution_id.split("/")[-1]
except AttributeError:
return None
def get_funder_name(self):
"Alias for institution_name parsed from the XML"
return self.institution_name
class License(BaseObject):
"""
License with some preset values by license_id
"""
license_id = None
license_type = None
copyright = False
copyright_statement = None
href = None
name = None
paragraph1 = None
paragraph2 = None
def __init__(self, license_id=None):
self.license_id = license_id
class Citation(BaseObject):
"""
A ref or citation in the article to support crossref VOR deposits initially
"""
def __init__(self):
self.publication_type = None
self.id = None
self.authors = []
# For journals
self.article_title = None
self.source = None
self.volume = None
self.issue = None
self.fpage = None
self.lpage = None
self.elocation_id = None
self.doi = None
self.uri = None
self.pmid = None
self.isbn = None
self.year = None
self.year_iso_8601_date = None
self.year_numeric = None
self.date_in_citation = None
self.publisher_loc = None
self.publisher_name = None
self.edition = None
self.version = None
self.comment = None
self.data_title = None
self.conf_name = None
# For patents
self.patent = None
self.country = None
# For books
self.volume_title = None
self.chapter_title = None
# For data
self.accession = None
def add_author(self, author):
"Author is a dict of values"
self.authors.append(author)
def get_journal_title(self):
"Alias for source"
return self.source
class Component(BaseObject):
"""
An article component with a component DOI, primarily for crossref VOR deposits
"""
def __init__(self):
self.id = None
self.type = None
self.asset = None
self.title = None
self.subtitle = None
self.mime_type = None
self.doi = None
self.doi_resource = None
self.permissions = None
class RelatedArticle(BaseObject):
"""
Related article tag data as an object
"""
def __init__(self):
self.xlink_href = None
self.related_article_type = None
self.ext_link_type = None
class Uri(BaseObject):
"A URI, initially created for holding self-uri data"
def __init__(self):
self.xlink_href = None
self.content_type = None
class RelatedObject(BaseObject):
def __init__(self):
self.id = None
self.xlink_href = None
self.link_type = None
class ClinicalTrial(BaseObject):
def __init__(self):
self.id = None
self.content_type = None
self.document_id = None
self.document_id_type = None
self.source_id = None
self.source_id_type = None
self.source_type = None
self.text = None
self.xlink_href = None
self.registry_doi = None
def get_registry_doi(self, registry_name_to_doi_map=None):
"""return the DOI for the registry"""
if self.registry_doi:
return self.registry_doi
if (
self.source_id_type
and self.source_id
and self.source_id_type == "crossref-doi"
):
return self.source_id
if (
registry_name_to_doi_map
and self.source_id_type
and self.source_id
and self.source_id_type == "registry-name"
):
# look for the DOI value in the name to DOI map
if self.source_id in registry_name_to_doi_map:
return registry_name_to_doi_map[self.source_id]
return None
class Preprint(BaseObject):
def __init__(self, uri=None, doi=None):
self.uri = uri
self.doi = doi
class ContentBlock:
def __init__(self, block_type=None, content=None, attr=None):
self.block_type = block_type
self.content = content
self.content_blocks = []
self.attr = {}
if attr:
self.attr = attr
def attr_names(self):
"""list of tag attribute names"""
return utils.attr_names(self.attr)
def attr_string(self):
"""tag attributes formatted as a string"""
return utils.attr_string(self.attr) |
def get_funder_identifier(self):
"Funder identifier is the unique id found in the institution_id DOI" |
storage.py | import os
import errno
import shutil
import mimetypes
import logging
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
class StorageException(Exception):
pass
class PathNotFound(StorageException):
pass
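# Abstract interface: concrete backends (local filesystem, AWS S3) implement these methods.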
class Storage(object):
def join_path(self, *args):
raise NotImplementedError
def listdir(self, path):
raise NotImplementedError
def path_exists(self, path):
raise NotImplementedError
def put_contents(self, contents, dest, sync=False):
raise NotImplementedError
def put_file(self, src, dest, sync=False):
raise NotImplementedError
class LocalFileSystemStorage(Storage):
def __init__(self, base_path):
self.base_path = base_path
@classmethod
def from_config(cls, config):
storage_config = config.storage_config
return cls(storage_config['base_path'])
def join_path(self, *args):
return os.path.join(*args)
def listdir(self, path):
path = self.join_path(self.base_path, path)
try:
entries = os.listdir(path)
except OSError as e:
if e.errno == errno.ENOENT:
raise PathNotFound('Path {0} not found'.format(path))
raise e
return [
('{}/'.format(entry) if os.path.isdir(entry) else entry)
for entry in entries]
def path_exists(self, path):
path = self.join_path(self.base_path, path)
return os.path.exists(path)
def ensure_dir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def put_contents(self, contents, dest, sync=False):
dest_path = self.join_path(self.base_path, dest)
self.ensure_dir(os.path.dirname(dest_path))
with open(dest_path, 'w') as f:
f.write(contents)
# In LocalFileSystemStorage sync makes no sense
return dest_path
def put_file(self, src, dest, sync=False):
dest_path = self.join_path(self.base_path, dest)
self.ensure_dir(os.path.dirname(dest_path))
shutil.copy(src, dest_path)
return dest_path
def __repr__(self):
return (
'<LocalFileSystemStorage(base_path="{0}")>'
).format(self.base_path)
class AWSS3Storage(Storage):
def __init__(self, bucket, creds, acl, prefix=None,
endpoint=None, region=None):
access_key, secret_key, session_token = creds
session = boto3.Session(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token)
self.endpoint = endpoint
self.region = region
kwargs = dict()
if endpoint is not None:
kwargs['endpoint_url'] = endpoint
if region is not None:
kwargs['region_name'] = region
self.s3 = s3 = session.resource('s3', **kwargs)
self.bucket = s3.Bucket(bucket)
self.prefix = prefix
self.acl = acl
@classmethod
def from_config(cls, config):
storage_config = config.storage_config
env = config.env
bucket = storage_config['bucket']
prefix = storage_config.get('prefix')
acl = storage_config.get('acl', 'private')
endpoint = storage_config.get('endpoint', None)
region = storage_config.get('region', None)
creds = (env['PP_S3_ACCESS_KEY'], env['PP_S3_SECRET_KEY'], env.get('PP_S3_SESSION_TOKEN', None))
return cls(bucket, creds, acl, prefix=prefix, endpoint=endpoint,
region=region)
def join_path(self, *args):
return '/'.join(args)
def prefixed_path(self, path):
|
def listdir(self, path):
path = self.prefixed_path(path)
if path != '' and not path.endswith('/'):
s3_prefix = '{0}/'.format(path)
else:
s3_prefix = path
logger.debug('Listing objects prefixed with: {0}'.format(s3_prefix))
client = self.s3.meta.client
paginator = client.get_paginator('list_objects')
response = paginator.paginate(Bucket=self.bucket.name,
Prefix=s3_prefix,
Delimiter='/')
file_objs = [c for c in response.search('Contents') if c]
dir_objs = [cp for cp in response.search('CommonPrefixes') if cp]
# If no objs found, it means the path doesn't exist
if len(file_objs) == len(dir_objs) == 0:
raise PathNotFound('Path {0} not found'.format(s3_prefix))
files = (c['Key'][len(s3_prefix):] for c in file_objs)
files = [f for f in files if f != '']
dirs = [cp['Prefix'][len(s3_prefix):] for cp in dir_objs]
return files + dirs
def path_exists(self, path):
path = self.prefixed_path(path)
logger.debug('Checking if key exists: {0}'.format(path))
client = self.s3.meta.client
try:
client.head_object(Bucket=self.bucket.name, Key=path)
except ClientError as e:
logger.debug('Handled ClientError: {0}'.format(e))
return False
else:
return True
@staticmethod
def _guess_content_type(path, default='application/octet-stream'):
ctype = mimetypes.guess_type(path)[0] or default
logger.debug('Guessed ctype of "{0}": "{1}"'.format(path, ctype))
return ctype
def put_contents(self, contents, dest, sync=False):
dest_path = self.prefixed_path(dest)
client = self.s3.meta.client
logger.debug('Writing content to s3: {0}'.format(dest_path))
client.put_object(Bucket=self.bucket.name,
Key=dest_path,
Body=contents.encode('utf-8'),
ContentType=self._guess_content_type(dest),
ACL=self.acl)
if sync:
waiter = client.get_waiter('object_exists')
waiter.wait(Bucket=self.bucket.name, Key=dest_path)
def put_file(self, src, dest, sync=False):
dest_path = self.prefixed_path(dest)
client = self.s3.meta.client
logger.debug('Uploading file to s3: {0} -> {1}'.format(src, dest_path))
with open(src, 'rb') as f:
client.put_object(Bucket=self.bucket.name,
Key=dest_path,
Body=f,
ContentType=self._guess_content_type(dest),
ACL=self.acl)
if sync:
waiter = client.get_waiter('object_exists')
waiter.wait(Bucket=self.bucket.name, Key=dest_path)
def __repr__(self):
return (
'<AWSS3Storage(bucket="{0}", prefix="{1}")>'
).format(self.bucket.name, self.prefix)
def load_storage(config):
if config.storage == 'local-filesystem':
return LocalFileSystemStorage.from_config(config)
elif config.storage == 'aws-s3':
return AWSS3Storage.from_config(config)
else:
raise ValueError('Unsupported storage "{0}"'.format(config.storage))
| parts = []
if self.prefix:
parts.append(self.prefix)
if path != '.':
parts.append(path)
return self.join_path(*parts) |
plaza.js | import { Map, List, OrderedMap } from 'immutable';
import moment from 'moment';
import uuidv4 from 'uuid';
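// Reducer for the plaza feature. `state` is an Immutable Map that tracks the
// selected plaza ('plazaName'), a display 'title', and one sub-map per plaza
// holding its viewMode, current speechId, createSpeech draft and speech content.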
const plaza = (state = {}, action) => {
const plazaName = state.get('plazaName');
const speechId = state.getIn([plazaName, 'speechId']);
switch (action.type) {
case 'PLAZA_CHOOSE_PLACE': {
state = state.set('plazaName', action.plazaName);
const title = {
tiananmen: '天安门',
freedom: '时代广场',
france: '协和广场',
russia: '红场',
flyArea: '飞地微斯人',
};
state = state.set('title', title[action.plazaName]);
return state.setIn([action.plazaName, 'viewMode'], 'main');
}
case 'SPEECH_DISCUSS':
state = state.setIn([plazaName, 'viewMode'], 'discuss');
state = state.set('title', state.getIn([plazaName, 'content', action.speechId, 'title']));
return state.setIn([plazaName, 'speechId'], action.speechId);
case 'CREATE_SPEECH':
state = state.set('title', '创建演讲');
return state.setIn([plazaName, 'viewMode'], 'createSpeech');
case 'PLAZA_CHANGE_DISCUSS_VISIBILITY':
return state.setIn([plazaName, 'content', speechId, 'discuss', 'visibility'], action.visibility);
case 'PLAZA_DISCUSS_CHANGE_TEXT':
return state.setIn([plazaName, 'content', speechId, 'discuss', 'location', 'text'], action.text);
case 'PLAZA_DICUSS_COMFIRM': {
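      // Discussion messages are stored in an OrderedMap keyed by day (YYYY-MM-DD):
      // if the newest stored day is older than today a new day bucket is created,
      // otherwise the message is appended to the existing day's bucket.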
const location = state.getIn([plazaName, 'content', speechId, 'discuss', 'location']);
const to = location.get('discussId') === '' ? state.getIn([plazaName, 'content', speechId, 'userName']) : state.getIn([plazaName, 'content', speechId, 'discuss', 'content', location.get('time'), location.get('discussId'), 'userName']);
const lastTime = List(state.getIn([plazaName, 'content', speechId, 'discuss', 'content']).keySeq()).last();
const lastTimeArray = lastTime.split('-');
if (Number(moment().format('YYYYMMDD')) > (Number(lastTimeArray[0]) * 10000) + (Number(lastTimeArray[1]) * 100) + Number(lastTimeArray[2])) {
state = state.updateIn([plazaName, 'content', speechId, 'discuss', 'content'], content => content.concat(OrderedMap({
[moment().format('YYYY-MM-DD')]: OrderedMap({
[uuidv4()]: Map({
userName: action.userInfo.get('userName'),
gender: action.userInfo.get('gender'),
to,
text: location.get('text'),
time: moment().format('YYYY-MM-DD hh:mm'),
}),
}),
})));
} else {
state = state.updateIn([plazaName, 'content', speechId, 'discuss', 'content', lastTime], content => content.concat(OrderedMap({
[uuidv4()]: Map({
userName: action.userInfo.get('userName'),
gender: action.userInfo.get('gender'),
to,
text: location.get('text'),
time: moment().format('YYYY-MM-DD hh:mm'),
}),
})));
}
return state.setIn([plazaName, 'content', speechId, 'discuss', 'location'], Map({
text: '',
time: '',
discussId: '',
}));
}
case 'PLAZA_DISCUSS_PRESS_DIALOG':
if (state.getIn([plazaName, 'content', speechId, 'discuss', 'location', 'discussId']) === action.discussId) {
state = state.setIn([plazaName, 'content', speechId, 'discuss', 'location', 'discussId'], '');
} else {
state = state.setIn([plazaName, 'content', speechId, 'discuss', 'location', 'discussId'], action.discussId);
}
return state.setIn([plazaName, 'content', speechId, 'discuss', 'location', 'time'], action.time);
case 'PLAZA_CREATESPEECH_PROTOCOL':
state = state.set('title', '广场演讲发言公约');
return state.setIn([plazaName, 'createSpeech', 'viewMode'], 'protocol');
case 'PLAZA_CREATESPEECH_CHANGE_TEXT':
return state.setIn([plazaName, 'createSpeech', 'text'], action.value);
case 'PLAZA_CREATESPEECH_CHANGE_TITLE':
return state.setIn([plazaName, 'createSpeech', 'title'], action.value);
case 'PLAZA_CREATESPEECH_COMFIRM': {
const createSpeechId = uuidv4();
state = state.setIn([plazaName, 'content', createSpeechId], Map({
userName: '我就是扯淡',
gender: 'female',
title: state.getIn([plazaName, 'createSpeech', 'title']),
text: state.getIn([plazaName, 'createSpeech', 'text']),
share: Map({
number: 0,
}),
collect: Map({
number: 0,
}),
discuss: Map({
visibility: false,
location: ({
time: '',
discussId: '',
}),
content: OrderedMap({}),
}),
}));
state = state.setIn([plazaName, 'createSpeech', 'speechId'], createSpeechId);
return state.setIn([plazaName, 'viewMode'], 'main');
} |
case 'PLAZA_CREATESPEECH_CANCEL':
state = state.setIn([plazaName, 'viewMode'], 'main');
state = state.setIn([plazaName, 'createSpeech', 'title'], '');
return state.setIn([plazaName, 'createSpeech', 'text'], '');
case 'PLAZA_SPEECH_DELETE':
return state.deleteIn([plazaName, 'content', action.speechId]);
case 'PLAZA_SHARE_PRESS':
return state.setIn([plazaName, 'viewMode'], 'share');
case 'PLAZA_MOST': {
const content = state.getIn([plazaName, 'content']);
let mostSpeechId;
switch (action.viewMode) {
        case '回复最多': // most replies
mostSpeechId = List(content.keySeq()).maxBy(speechContentId => content.getIn([speechContentId, 'discuss', 'content']).reduce((sum, review) => (sum + review.size), 0));
break;
        case '转发最多': // most shares
mostSpeechId = List(content.keySeq()).maxBy(speechContentId => content.getIn([speechContentId, 'share', 'number']));
break;
        case '收藏最多': // most favorites
mostSpeechId = List(content.keySeq()).maxBy(speechContentId => content.getIn([speechContentId, 'collect', 'number']));
break;
default:
break;
}
state = state.setIn([plazaName, 'viewMode'], 'discuss');
return state.setIn([plazaName, 'speechId'], mostSpeechId);
}
default:
return state;
}
};
export default plaza; | |
OCR_CNN_Trainning.py | import numpy as np
import cv2
import os
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.layers import Dropout,Flatten
from keras.layers.convolutional import Conv2D,MaxPooling2D
import pickle
from keras.layers import Activation  # keep layer imports from a single Keras package to avoid mixing keras and tf.keras layers
# PARAMETERS
path = 'numbers/train2'
testRatio = 0.2
valRatio = 0.2
imageDimensions= (28,28,3)
batchSizeVal= 6
epochsVal = 10
stepsPerEpochVal = 2000
# IMPORTING DATA/IMAGES FROM FOLDERS
count = 0
images = [] # LIST CONTAINING ALL THE IMAGES
classNo = [] # LIST CONTAINING ALL THE CORRESPONDING CLASS ID OF IMAGES
myList = os.listdir(path)
print("Total Classes Detected:",len(myList))
noOfClasses = len(myList)
print("Importing Classes .......")
for x in range (0,noOfClasses):
myPicList = os.listdir(path+"/"+str(x))
for y in myPicList:
curImg = cv2.imread(path+"/"+str(x)+"/"+y)
curImg = cv2.resize(curImg,(28,28))
images.append(curImg)
classNo.append(x)
print(x,end= " ")
print(" ")
print("Total Images in Images List = ",len(images))
print("Total IDS in classNo List= ",len(classNo))
# CONVERT TO NUMPY ARRAY
images = np.array(images)
classNo = np.array(classNo)
print(images.shape)
print(classNo.shape)
# SPLITTING THE DATA
X_train,X_test,y_train,y_test = train_test_split(images,classNo,test_size=testRatio)
X_train,X_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=valRatio)
print(X_train.shape)
print(X_test.shape)
print(X_validation.shape)
# PLOT BAR CHART FOR DISTRIBUTION OF IMAGES
numOfSamples= []
for x in range(0,noOfClasses):
#print(len(np.where(y_train==x)[0]))
numOfSamples.append(len(np.where(y_train==x)[0]))
print(numOfSamples)
plt.figure(figsize=(10,5))
plt.bar(range(0,noOfClasses),numOfSamples)
plt.title("No of Images for each Class")
plt.xlabel("Class ID")
plt.ylabel("Number of Images")
plt.show()
# PREPROCESSING FUNCTION FOR IMAGES FOR TRAINING
def | (img):
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.equalizeHist(img)
img = img/255
return img
#img = preProcessing(X_train[30])
#img = cv2.resize(img,(300,300))
#cv2.imshow("PreProcesssed",img)
#cv2.waitKey(0)
X_train= np.array(list(map(preProcessing,X_train)))
X_test= np.array(list(map(preProcessing,X_test)))
X_validation= np.array(list(map(preProcessing,X_validation)))
# RESHAPE IMAGES
X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1)
X_test = X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2],1)
X_validation = X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1)
# IMAGE AUGMENTATION
dataGen = ImageDataGenerator(width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2,
shear_range=0.1,
rotation_range=10)
dataGen.fit(X_train)
# ONE HOT ENCODING OF MATRICES
y_train = to_categorical(y_train,noOfClasses)
y_test = to_categorical(y_test,noOfClasses)
y_validation = to_categorical(y_validation,noOfClasses)
# CREATING THE MODEL
def myModel():
model = Sequential()
model.add(Conv2D(64, kernel_size=(3,3), input_shape= (28, 28, 1)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation("relu"))
model.add(Dense(32))
model.add(Activation("relu"))
model.add(Dense(10))
model.add(Activation("softmax"))
model.compile(Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])
return model
model = myModel()
print(model.summary())
#### STARTING THE TRAINING PROCESS
history = model.fit_generator(dataGen.flow(X_train,y_train,
batch_size=batchSizeVal),
steps_per_epoch=stepsPerEpochVal,
epochs=epochsVal,
validation_data=(X_validation,y_validation),
shuffle=1)
# PLOT THE RESULTS
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training','validation'])
plt.title('Loss')
plt.xlabel('epoch')
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training','validation'])
plt.title('Accuracy')
plt.xlabel('epoch')
plt.show()
# EVALUATE USING TEST IMAGES
score = model.evaluate(X_test,y_test,verbose=0)
print('Test Score = ',score[0])
print('Test Accuracy =', score[1])
# SAVE THE TRAINED MODEL
model.save('model11.h5')
| preProcessing |
chpt4-list-of-depths.js | // Make a linked list for all of the ndeos at each level of a binary tree
// We can keep track of the level as one of the arguments and recursively add linked lists as we go
// See ref file for linked list class
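// (Assumes the LinkedList class in the ref file exposes an appendNode(node) method.)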
function createListForEachDepth(rootNode, arrayOfLists, level) {
// If no node is there, terminate
if (rootNode === null) {
return ;
}
// If we're at a new level, add a new linked list
if (arrayOfLists.length < level) {
arrayOfLists.push(new LinkedList());
} | createListForEachDepth(rootNode.left, arrayOfLists, level + 1);
createListForEachDepth(rootNode.right, arrayOfLists, level + 1);
}
// Start at level 1 so arrayOfLists[level - 1] maps to the root's list
var ourList = [];
createListForEachDepth(binaryTree, ourList, 1); | // Append the node to the linked list at that level
  arrayOfLists[level - 1].appendNode(rootNode);
  // Take a step down in the tree and add a level
python_dependencies.py | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pkg_resources import DistributionNotFound, VersionConflict, get_distribution
logger = logging.getLogger(__name__)
# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
# installed. It is passed to setup() as install_requires in setup.py.
#
# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
# of lists. The dict key is the optional dependency name and can be passed to
# pip when installing. The list is a series of requirement specifiers[1] to be
# installed when that optional dependency requirement is specified. It is passed
# to setup() as extras_require in setup.py
#
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
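# For example (illustrative package/extra names):
#     pip install matrix-synapse[postgres]
# installs REQUIREMENTS plus the "postgres" list from CONDITIONAL_REQUIREMENTS below.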
REQUIREMENTS = [
"jsonschema>=2.5.1",
"frozendict>=1",
"unpaddedbase64>=1.1.0",
"canonicaljson>=1.1.3",
"signedjson>=1.0.0",
"pynacl>=1.2.1",
"service_identity>=16.0.0",
# our logcontext handling relies on the ability to cancel inlineCallbacks
# (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
"Twisted>=18.7.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
"pyyaml>=3.11",
"pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7",
"daemonize>=2.3.1",
"bcrypt>=3.1.0",
"pillow>=3.1.2",
"sortedcontainers>=1.4.4",
"psutil>=2.0.0",
"pymacaroons>=0.13.0",
"msgpack>=0.5.0",
"phonenumbers>=8.2.0",
"six>=1.10",
# prometheus_client 0.4.0 changed the format of counter metrics
# (cf https://github.com/matrix-org/synapse/issues/4001)
"prometheus_client>=0.0.18,<0.4.0", | # Twisted 18.7.0 requires attrs>=17.4.0
"attrs>=17.4.0",
"netaddr>=0.7.18",
]
CONDITIONAL_REQUIREMENTS = {
"email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"],
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
"postgres": ["psycopg2>=2.6"],
# ConsentResource uses select_autoescape, which arrived in jinja 2.9
"resources.consent": ["Jinja2>=2.9"],
# ACME support is required to provision TLS certificates from authorities
# that use the protocol, such as Let's Encrypt.
"acme": ["txacme>=0.9.2"],
"saml2": ["pysaml2>=4.5.0"],
"url_preview": ["lxml>=3.5.0"],
"test": ["mock>=2.0", "parameterized"],
"sentry": ["sentry-sdk>=0.7.2"],
}
def list_requirements():
deps = set(REQUIREMENTS)
for opt in CONDITIONAL_REQUIREMENTS.values():
deps = set(opt) | deps
return list(deps)
class DependencyException(Exception):
@property
def message(self):
return "\n".join([
"Missing Requirements: %s" % (", ".join(self.dependencies),),
"To install run:",
" pip install --upgrade --force %s" % (" ".join(self.dependencies),),
"",
])
@property
def dependencies(self):
for i in self.args[0]:
yield '"' + i + '"'
def check_requirements(for_feature=None, _get_distribution=get_distribution):
deps_needed = []
errors = []
if for_feature:
reqs = CONDITIONAL_REQUIREMENTS[for_feature]
else:
reqs = REQUIREMENTS
for dependency in reqs:
try:
_get_distribution(dependency)
except VersionConflict as e:
deps_needed.append(dependency)
errors.append(
"Needed %s, got %s==%s"
% (dependency, e.dist.project_name, e.dist.version)
)
except DistributionNotFound:
deps_needed.append(dependency)
errors.append("Needed %s but it was not installed" % (dependency,))
if not for_feature:
# Check the optional dependencies are up to date. We allow them to not be
# installed.
OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), [])
for dependency in OPTS:
try:
_get_distribution(dependency)
except VersionConflict as e:
deps_needed.append(dependency)
errors.append(
"Needed optional %s, got %s==%s"
% (dependency, e.dist.project_name, e.dist.version)
)
except DistributionNotFound:
# If it's not found, we don't care
pass
if deps_needed:
for e in errors:
logging.error(e)
raise DependencyException(deps_needed)
if __name__ == "__main__":
import sys
sys.stdout.writelines(req + "\n" for req in list_requirements()) |
# we use attr.s(slots), which arrived in 16.0.0 |
raw_node.rs | // Copyright 2019 EinsteinDB Project Authors. Licensed under Apache-2.0.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The raw node of the violetabft module.
//!
//! This module contains the value types for the node and it's connection to other
//! nodes but not the violetabft consensus itself. Generally, you'll interact with the
//! RawNode first and use it to access the inner workings of the consensus protocol.
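//!
//! A minimal driver-loop sketch (assumption: some `Storage` impl and an `slog::Logger`
//! are already in hand; persistence, networking and error handling are elided):
//!
//! ```ignore
//! let mut node = RawNode::new(&config, storage, &logger)?;
//! loop {
//!     node.tick(); // advance the logical clock
//!     if node.has_ready() {
//!         let rd = node.ready();
//!         // 1. persist rd.entries() and rd.hs(), 2. send rd.messages,
//!         // 3. apply rd.committed_entries, then acknowledge:
//!         node.advance(rd);
//!     }
//! }
//! ```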
use std::mem;
use protobuf::Message as PbMessage;
use violetabft_proto::ConfChangeI;
use crate::config::Config;
use crate::evioletabftpb::{ConfState, Entry, EntryType, HardState, Message, MessageType, Snapshot};
use crate::errors::{Error, Result};
use crate::read_only::ReadState;
use crate::{VioletaBFT, SoftState, Status, Storage};
use slog::Logger;
/// Represents a Peer node in the cluster.
#[derive(Debug, Default)]
pub struct Peer {
/// The ID of the peer.
pub id: u64,
/// If there is context associated with the peer (like connection information), it can be
/// serialized and stored here.
pub context: Option<Vec<u8>>,
}
/// The status of the snapshot.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum SnapshotStatus {
/// Represents that the snapshot is finished being created.
Finish,
/// Indicates that the snapshot failed to build or is not ready.
Failure,
}
/// Checks if certain message type should be used internally.
pub fn is_local_msg(t: MessageType) -> bool {
match t {
MessageType::MsgHup
| MessageType::MsgBeat
| MessageType::MsgUnreachable
| MessageType::MsgSnapStatus
| MessageType::MsgCheckQuorum => true,
_ => false,
}
}
fn is_response_msg(t: MessageType) -> bool {
match t {
MessageType::MsgAppendResponse
| MessageType::MsgRequestVoteResponse
| MessageType::MsgHeartbeatResponse
| MessageType::MsgUnreachable
| MessageType::MsgRequestPreVoteResponse => true,
_ => false,
}
}
/// For a given snapshot, determine if it's empty or not.
#[deprecated(since = "0.6.0", note = "Please use `Snapshot::is_empty` instead")]
pub fn is_empty_snap(s: &Snapshot) -> bool {
s.is_empty()
}
/// Ready encapsulates the entries and messages that are ready to read,
/// be saved to stable storage, committed or sent to other peers.
/// All fields in Ready are read-only.
#[derive(Default, Debug, PartialEq)]
pub struct Ready {
ss: Option<SoftState>,
hs: Option<HardState>,
read_states: Vec<ReadState>,
entries: Vec<Entry>,
snapshot: Snapshot,
/// CommittedEntries specifies entries to be committed to a
/// store/state-machine. These have previously been committed to stable
/// store.
pub committed_entries: Option<Vec<Entry>>,
/// Messages specifies outbound messages to be sent AFTER Entries are
/// committed to stable storage.
/// If it contains a MsgSnap message, the application MUST report back to violetabft
/// when the snapshot has been received or has failed by calling ReportSnapshot.
pub messages: Vec<Message>,
must_sync: bool,
}
impl Ready {
fn new<T: Storage>(
violetabft: &mut VioletaBFT<T>,
prev_ss: &SoftState,
prev_hs: &HardState,
since_idx: Option<u64>,
) -> Ready {
let mut rd = Ready {
entries: violetabft.violetabft_log.unstable_entries().unwrap_or(&[]).to_vec(),
..Default::default()
};
if !violetabft.msgs.is_empty() {
mem::swap(&mut violetabft.msgs, &mut rd.messages);
}
rd.committed_entries = Some(
(match since_idx {
None => violetabft.violetabft_log.next_entries(),
Some(idx) => violetabft.violetabft_log.next_entries_since(idx),
})
.unwrap_or_else(Vec::new),
);
let ss = violetabft.soft_state();
if &ss != prev_ss {
rd.ss = Some(ss);
}
let hs = violetabft.hard_state();
if &hs != prev_hs {
if hs.vote != prev_hs.vote || hs.term != prev_hs.term || !rd.entries.is_empty() {
rd.must_sync = true;
}
rd.hs = Some(hs);
}
if violetabft.violetabft_log.unstable.snapshot.is_some() {
rd.snapshot = violetabft.violetabft_log.unstable.snapshot.clone().unwrap();
}
if !violetabft.read_states.is_empty() {
rd.read_states = violetabft.read_states.clone();
}
rd
}
/// The current volatile state of a Node.
/// SoftState will be nil if there is no update.
/// It is not required to consume or store SoftState.
#[inline]
pub fn ss(&self) -> Option<&SoftState> {
self.ss.as_ref()
}
/// The current state of a Node to be saved to stable storage BEFORE
/// Messages are sent.
/// HardState will be equal to empty state if there is no update.
#[inline]
pub fn hs(&self) -> Option<&HardState> {
self.hs.as_ref()
}
/// States can be used by the node to serve linearizable read requests locally
/// when its applied index is greater than the index in the ReadState.
/// Note that the read_state will be returned when violetabft receives a MsgReadIndex.
/// The returned state is only valid for the request that requested the read.
#[inline]
pub fn read_states(&self) -> &[ReadState] |
/// Entries specifies entries to be saved to stable storage BEFORE
/// Messages are sent.
#[inline]
pub fn entries(&self) -> &[Entry] {
&self.entries
}
/// Snapshot specifies the snapshot to be saved to stable storage.
#[inline]
pub fn snapshot(&self) -> &Snapshot {
&self.snapshot
}
/// MustSync indicates whether the HardState and Entries must be synchronously
/// written to disk or if an asynchronous write is permissible.
#[inline]
pub fn must_sync(&self) -> bool {
self.must_sync
}
}
/// RawNode is a thread-unsafe Node.
/// The methods of this struct correspond to the methods of Node and are described
/// more fully there.
pub struct RawNode<T: Storage> {
/// The internal violetabft state.
pub violetabft: VioletaBFT<T>,
prev_ss: SoftState,
prev_hs: HardState,
}
impl<T: Storage> RawNode<T> {
#[allow(clippy::new_ret_no_self)]
/// Create a new RawNode given some [`Config`](../struct.Config.html).
pub fn new(config: &Config, store: T, logger: &Logger) -> Result<Self> {
assert_ne!(config.id, 0, "config.id must not be zero");
let r = VioletaBFT::new(config, store, logger)?;
let mut rn = RawNode {
violetabft: r,
prev_hs: Default::default(),
prev_ss: Default::default(),
};
rn.prev_hs = rn.violetabft.hard_state();
rn.prev_ss = rn.violetabft.soft_state();
info!(
rn.violetabft.logger,
"RawNode created with id {id}.",
id = rn.violetabft.id
);
Ok(rn)
}
/// Create a new RawNode given some [`Config`](../struct.Config.html) and the default logger.
///
/// The default logger is an `slog` to `log` adapter.
#[cfg(feature = "default-logger")]
#[allow(clippy::new_ret_no_self)]
pub fn with_default_logger(c: &Config, store: T) -> Result<Self> {
Self::new(c, store, &crate::default_logger())
}
/// Sets priority of node.
#[inline]
pub fn set_priority(&mut self, priority: u64) {
self.violetabft.set_priority(priority);
}
fn commit_ready(&mut self, rd: Ready) {
if rd.ss.is_some() {
self.prev_ss = rd.ss.unwrap();
}
if let Some(e) = rd.hs {
if e != HardState::default() {
self.prev_hs = e;
}
}
if !rd.entries.is_empty() {
let e = rd.entries.last().unwrap();
self.violetabft.violetabft_log.stable_to(e.index, e.term);
}
if rd.snapshot != Snapshot::default() {
self.violetabft
.violetabft_log
.stable_snap_to(rd.snapshot.get_metadata().index);
}
if !rd.read_states.is_empty() {
self.violetabft.read_states.clear();
}
}
fn commit_apply(&mut self, applied: u64) {
self.violetabft.commit_apply(applied);
}
/// Tick advances the internal logical clock by a single tick.
///
/// Returns true to indicate that there will probably be some readiness which
/// needs to be handled.
pub fn tick(&mut self) -> bool {
self.violetabft.tick()
}
/// Campaign causes this RawNode to transition to candidate state.
pub fn campaign(&mut self) -> Result<()> {
let mut m = Message::default();
m.set_msg_type(MessageType::MsgHup);
self.violetabft.step(m)
}
/// Propose proposes data be appended to the violetabft log.
pub fn propose(&mut self, context: Vec<u8>, data: Vec<u8>) -> Result<()> {
let mut m = Message::default();
m.set_msg_type(MessageType::MsgPropose);
m.from = self.violetabft.id;
let mut e = Entry::default();
e.data = data;
e.context = context;
m.set_entries(vec![e].into());
self.violetabft.step(m)
}
/// Broadcast heartbeats to all the followers.
///
/// If it's not leader, nothing will happen.
pub fn ping(&mut self) {
self.violetabft.ping()
}
/// ProposeConfChange proposes a config change.
///
/// If the node enters joint state with `auto_leave` set to true, it's
/// caller's responsibility to propose an empty conf change again to force
/// leaving joint state.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::needless_pass_by_value))]
pub fn propose_conf_change(&mut self, context: Vec<u8>, cc: impl ConfChangeI) -> Result<()> {
let (data, ty) = if let Some(cc) = cc.as_v1() {
(cc.write_to_bytes()?, EntryType::EntryConfChange)
} else {
(cc.as_v2().write_to_bytes()?, EntryType::EntryConfChangeV2)
};
let mut m = Message::default();
m.set_msg_type(MessageType::MsgPropose);
let mut e = Entry::default();
e.set_entry_type(ty);
e.data = data;
e.context = context;
m.set_entries(vec![e].into());
self.violetabft.step(m)
}
/// Applies a config change to the local node. The app must call this when it
/// applies a configuration change, except when it decides to reject the
/// configuration change, in which case no call must take place.
pub fn apply_conf_change(&mut self, cc: &impl ConfChangeI) -> Result<ConfState> {
self.violetabft.apply_conf_change(&cc.as_v2())
}
/// Step advances the state machine using the given message.
pub fn step(&mut self, m: Message) -> Result<()> {
// ignore unexpected local messages receiving over network
if is_local_msg(m.get_msg_type()) {
return Err(Error::StepLocalMsg);
}
if self.violetabft.prs().get(m.from).is_some() || !is_response_msg(m.get_msg_type()) {
return self.violetabft.step(m);
}
Err(Error::StepPeerNotFound)
}
/// Given an index, creates a new Ready value from that index.
pub fn ready_since(&mut self, applied_idx: u64) -> Ready {
Ready::new(
&mut self.violetabft,
&self.prev_ss,
&self.prev_hs,
Some(applied_idx),
)
}
/// Ready returns the current point-in-time state of this RawNode.
pub fn ready(&mut self) -> Ready {
Ready::new(&mut self.violetabft, &self.prev_ss, &self.prev_hs, None)
}
/// Given an index, can determine if there is a ready state from that time.
pub fn has_ready_since(&self, applied_idx: Option<u64>) -> bool {
let violetabft = &self.violetabft;
if !violetabft.msgs.is_empty() || violetabft.violetabft_log.unstable_entries().is_some() {
return true;
}
if !violetabft.read_states.is_empty() {
return true;
}
if self.snap().map_or(false, |s| !s.is_empty()) {
return true;
}
let has_unapplied_entries = match applied_idx {
None => violetabft.violetabft_log.has_next_entries(),
Some(idx) => violetabft.violetabft_log.has_next_entries_since(idx),
};
if has_unapplied_entries {
return true;
}
if violetabft.soft_state() != self.prev_ss {
return true;
}
let hs = violetabft.hard_state();
if hs != HardState::default() && hs != self.prev_hs {
return true;
}
false
}
/// HasReady called when RawNode user need to check if any Ready pending.
/// Checking logic in this method should be consistent with Ready.containsUpdates().
#[inline]
pub fn has_ready(&self) -> bool {
self.has_ready_since(None)
}
/// Grabs the snapshot from the violetabft if available.
#[inline]
pub fn snap(&self) -> Option<&Snapshot> {
self.violetabft.snap()
}
/// Advance notifies the RawNode that the application has applied and saved progress in the
/// last Ready results.
pub fn advance(&mut self, rd: Ready) {
self.advance_append(rd);
let commit_idx = self.prev_hs.commit;
if commit_idx != 0 {
// In most cases, prevHardSt and rd.HardState will be the same
// because when there are new entries to apply we just sent a
// HardState with an updated Commit value. However, on initial
// startup the two are different because we don't send a HardState
// until something changes, but we do send any un-applied but
// committed entries (and previously-committed entries may be
// incorporated into the snapshot, even if rd.CommittedEntries is
// empty). Therefore we mark all committed entries as applied
// whether they were included in rd.HardState or not.
self.advance_apply(commit_idx);
}
}
/// Appends and commits the ready value.
#[inline]
pub fn advance_append(&mut self, rd: Ready) {
self.commit_ready(rd);
}
/// Advance apply to the passed index.
#[inline]
pub fn advance_apply(&mut self, applied: u64) {
self.commit_apply(applied);
}
/// Status returns the current status of the given group.
#[inline]
pub fn status(&self) -> Status {
Status::new(&self.violetabft)
}
/// ReportUnreachable reports the given node is not reachable for the last send.
pub fn report_unreachable(&mut self, id: u64) {
let mut m = Message::default();
m.set_msg_type(MessageType::MsgUnreachable);
m.from = id;
// we don't care if it is ok actually
let _ = self.violetabft.step(m);
}
/// ReportSnapshot reports the status of the sent snapshot.
pub fn report_snapshot(&mut self, id: u64, status: SnapshotStatus) {
let rej = status == SnapshotStatus::Failure;
let mut m = Message::default();
m.set_msg_type(MessageType::MsgSnapStatus);
m.from = id;
m.reject = rej;
// we don't care if it is ok actually
let _ = self.violetabft.step(m);
}
/// Request a snapshot from a leader.
/// The snapshot's index must be greater or equal to the request_index.
pub fn request_snapshot(&mut self, request_index: u64) -> Result<()> {
self.violetabft.request_snapshot(request_index)
}
/// TransferLeader tries to transfer leadership to the given transferee.
pub fn transfer_leader(&mut self, transferee: u64) {
let mut m = Message::default();
m.set_msg_type(MessageType::MsgTransferLeader);
m.from = transferee;
let _ = self.violetabft.step(m);
}
/// ReadIndex requests a read state. The read state will be set in ready.
/// Read State has a read index. Once the application advances further than the read
/// index, any linearizable read requests issued before the read request can be
/// processed safely. The read state will have the same rctx attached.
pub fn read_index(&mut self, rctx: Vec<u8>) {
let mut m = Message::default();
m.set_msg_type(MessageType::MsgReadIndex);
let mut e = Entry::default();
e.data = rctx;
m.set_entries(vec![e].into());
let _ = self.violetabft.step(m);
}
/// Returns the store as an immutable reference.
#[inline]
pub fn store(&self) -> &T {
self.violetabft.store()
}
/// Returns the store as a mutable reference.
#[inline]
pub fn mut_store(&mut self) -> &mut T {
self.violetabft.mut_store()
}
/// Set whether skip broadcast empty commit messages at runtime.
#[inline]
pub fn skip_bcast_commit(&mut self, skip: bool) {
self.violetabft.skip_bcast_commit(skip)
}
/// Set whether to batch append msg at runtime.
#[inline]
pub fn set_batch_append(&mut self, batch_append: bool) {
self.violetabft.set_batch_append(batch_append)
}
}
#[cfg(test)]
mod test {
use crate::evioletabftpb::MessageType;
use super::is_local_msg;
#[test]
fn test_is_local_msg() {
let tests = vec![
(MessageType::MsgHup, true),
(MessageType::MsgBeat, true),
(MessageType::MsgUnreachable, true),
(MessageType::MsgSnapStatus, true),
(MessageType::MsgCheckQuorum, true),
(MessageType::MsgPropose, false),
(MessageType::MsgAppend, false),
(MessageType::MsgAppendResponse, false),
(MessageType::MsgRequestVote, false),
(MessageType::MsgRequestVoteResponse, false),
(MessageType::MsgSnapshot, false),
(MessageType::MsgHeartbeat, false),
(MessageType::MsgHeartbeatResponse, false),
(MessageType::MsgTransferLeader, false),
(MessageType::MsgTimeoutNow, false),
(MessageType::MsgReadIndex, false),
(MessageType::MsgReadIndexResp, false),
(MessageType::MsgRequestPreVote, false),
(MessageType::MsgRequestPreVoteResponse, false),
];
for (msg_type, result) in tests {
assert_eq!(is_local_msg(msg_type), result);
}
}
}
| {
&self.read_states
} |
task-serve.ts | import * as d from '../../declarations';
import { normalizePath } from '@utils';
import path from 'path';
export async function | (process: NodeJS.Process, config: d.Config) {
config.suppressLogs = true;
config.flags.serve = true;
config.devServer.openBrowser = config.flags.open;
config.devServer.reloadStrategy = null;
config.devServer.initialLoadUrl = '/';
config.devServer.websocket = false;
config.maxConcurrentWorkers = 1;
config.devServer.root = process.cwd();
if (typeof config.flags.root === 'string') {
if (!path.isAbsolute(config.flags.root)) {
config.devServer.root = path.relative(process.cwd(), config.flags.root);
}
}
config.devServer.root = normalizePath(config.devServer.root);
const { startServer } = await import('@stencil/core/dev-server');
const devServer = await startServer(config.devServer, config.logger);
process.once('SIGINT', () => {
devServer && devServer.close();
});
}
| taskServe |
messages.py | ######################
HELP_USER = """**>>Send a file or video\n>>Select the desired option\n>>Then wait while it processes the file**"""
DOWNLOAD_MSG = "**Downloading** ⏬"
DOWNLOAD_FAIL_MSG = "**Failed to Download File**❎"
UPLOAD_MSG = "**Uploading** ⏫"
UPLOAD_FAIL_MSG = "**Failed to Upload File**❎"
UPLOAD_DONE_MSG = "**Uploaded Successfully** 💡" | class Translation(object):
START_TEXT = "**I'm a Rename and Convert Bot.\nJust send me any media to change its file name.\nUse the /help command for more details.**"
|
|
api_op_XmlEmptyBlobs.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package query
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
func (c *Client) XmlEmptyBlobs(ctx context.Context, params *XmlEmptyBlobsInput, optFns ...func(*Options)) (*XmlEmptyBlobsOutput, error) {
if params == nil {
params = &XmlEmptyBlobsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "XmlEmptyBlobs", params, optFns, addOperationXmlEmptyBlobsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*XmlEmptyBlobsOutput)
out.ResultMetadata = metadata
return out, nil
}
type XmlEmptyBlobsInput struct {
}
type XmlEmptyBlobsOutput struct {
Data []byte
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationXmlEmptyBlobsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsquery_serializeOpXmlEmptyBlobs{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsquery_deserializeOpXmlEmptyBlobs{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil |
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opXmlEmptyBlobs(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opXmlEmptyBlobs(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
OperationName: "XmlEmptyBlobs",
}
}
| {
return err
} |
pack_non_conda.py | # Aim: Mostly for phenix users and those who don't like using Miniconda
# 1. wget url_to_tar_file.tar
# 2. tar -xf url_to_tar_file.tar
# 3. source amber17/ambersh
# 4. That's it
""" Usage example: python pack_non_conda.py ambertools-17.0.1-py27_1.tar.bz2
Note: You can use a file pattern
This script will unpack that bz2 file, do some editing, then pack it into the ./non-conda-install folder.
This should be done after doing conda-build
"""
import os
import subprocess
from glob import glob
import argparse
# local file, in the same folder as this script
from edit_package import editing_conda_package
import update_shebang
def main():
parser = argparse.ArgumentParser()
parser.add_argument('tarfile', nargs='?', help='targer file')
parser.add_argument(
"--output-dir",
type=str,
default='./non-conda-install',
dest="output_dir",
help="output directory")
parser.add_argument(
"--date", action="store_true", help="Add date to output tarfile") |
def pack_non_conda_package(opt):
with editing_conda_package(
opt.tarfile,
output_dir=opt.output_dir,
add_date=opt.date,
dry_run=opt.dry_run):
update_shebang.update_python_env('./bin/')
# No need to copy here since we already did this in the conda-build step?
if __name__ == '__main__':
main() | parser.add_argument("-d", "--dry_run", action="store_true", help="dry run")
opt = parser.parse_args()
pack_non_conda_package(opt) |
element_attributes.rs | use html_parser::{Dom, Result};
use insta::assert_json_snapshot;
#[test]
fn it_can_parse_double_quote() -> Result<()> {
let html = "<div id=\"one\"></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_single_quote() -> Result<()> {
let html = "<div id='one'></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_no_quote() -> Result<()> {
let html = "<div id=one></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_attribute_key_mixed_case_symbols() -> Result<()> {
let html = "<div data-cat='morris'></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_multiple_attributes_single_quote() -> Result<()> {
let html = "<div cat='mjau' dog='woff' ape=oh></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_multiple_attributes_double_quote() -> Result<()> {
let html = "<div cat=\"mjau\" dog=\"woff\" ape=\"oh\"></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn | () -> Result<()> {
let html = "<div cat=mjau dog=woff ape=oh></div>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_attribute_multiple_values_single_quote() -> Result<()> {
let html = "<div cat='mjau mjau' />";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_attribute_multiple_values_double_quote() -> Result<()> {
let html = "<div cat=\"mjau mjau\" />";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_attribute_with_empty_value() -> Result<()> {
let html = "<img hidden/>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_id() -> Result<()> {
let html = "<img id=a/>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
#[test]
fn it_can_parse_classes() -> Result<()> {
let html = "<img class='a b c'/>";
let dom = Dom::parse(html)?;
assert_json_snapshot!(dom);
Ok(())
}
| it_can_parse_multiple_attributes_no_quote |
management_test.go | // Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package on_demand_service_broker_test
import (
"fmt"
"net/http"
"encoding/json"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/pivotal-cf/on-demand-service-broker/boshdirector"
"github.com/pivotal-cf/on-demand-service-broker/broker"
"github.com/pivotal-cf/on-demand-service-broker/cf"
brokerConfig "github.com/pivotal-cf/on-demand-service-broker/config"
"github.com/pivotal-cf/on-demand-service-broker/mgmtapi"
"github.com/pivotal-cf/on-demand-service-broker/service"
sdk "github.com/pivotal-cf/on-demand-services-sdk/serviceadapter"
"github.com/pkg/errors"
)
var _ = Describe("Management API", func() {
var (
conf brokerConfig.Config
)
BeforeEach(func() {
conf = brokerConfig.Config{
Broker: brokerConfig.Broker{
Port: serverPort, Username: brokerUsername, Password: brokerPassword,
},
ServiceCatalog: brokerConfig.ServiceOffering{
Name: serviceName,
Plans: brokerConfig.Plans{
{
Name: dedicatedPlanName,
ID: dedicatedPlanID,
Quotas: brokerConfig.Quotas{ServiceInstanceLimit: &dedicatedPlanQuota},
LifecycleErrands: &sdk.LifecycleErrands{
PostDeploy: []sdk.Errand{{
Name: "post-deploy-errand",
Instances: []string{},
}},
},
},
{
Name: highMemoryPlanName,
ID: highMemoryPlanID,
},
},
},
}
})
JustBeforeEach(func() {
StartServer(conf)
})
Describe("GET /mgmt/service_instances", func() {
const (
serviceInstancesPath = "service_instances"
)
It("returns some service instances results", func() {
fakeCfClient.GetInstancesOfServiceOfferingReturns([]service.Instance{
{
GUID: "service-instance-id",
PlanUniqueID: "plan-id",
},
{
GUID: "another-service-instance-id",
PlanUniqueID: "another-plan-id",
},
}, nil)
response, bodyContent := doGetRequest(serviceInstancesPath)
By("returning the correct status code")
Expect(response.StatusCode).To(Equal(http.StatusOK))
By("returning the service instances")
Expect(bodyContent).To(MatchJSON(
`[
{"service_instance_id": "service-instance-id", "plan_id":"plan-id"},
{"service_instance_id": "another-service-instance-id", "plan_id":"another-plan-id"}
]`,
))
})
It("returns 500 when getting instances fails", func() {
fakeCfClient.GetInstancesOfServiceOfferingReturns([]service.Instance{}, errors.New("something failed"))
response, _ := doGetRequest(serviceInstancesPath)
By("returning the correct status code")
Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
By("logging the failure")
Expect(loggerBuffer).To(gbytes.Say(`error occurred querying instances: something failed`))
})
})
Describe("GET /mgmt/orphan_deployments", func() {
const (
orphanDeploymentsPath = "orphan_deployments"
)
It("responds with the orphan deployments", func() {
fakeCfClient.GetInstancesOfServiceOfferingReturns([]service.Instance{
{
GUID: "not-orphan",
PlanUniqueID: "plan-id",
},
}, nil)
fakeBoshClient.GetDeploymentsReturns([]boshdirector.Deployment{
{Name: "service-instance_not-orphan"},
{Name: "service-instance_orphan"},
}, nil)
response, bodyContent := doGetRequest(orphanDeploymentsPath)
By("returning the correct status code")
Expect(response.StatusCode).To(Equal(http.StatusOK))
By("returning the service instances")
Expect(bodyContent).To(MatchJSON(
`[ {"deployment_name": "service-instance_orphan"}]`,
))
})
It("responds with 500 when CF API call fails", func() {
fakeCfClient.GetInstancesOfServiceOfferingReturns([]service.Instance{}, errors.New("something failed on cf"))
response, _ := doGetRequest(orphanDeploymentsPath)
By("returning the correct status code")
Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
By("logging the failure")
Expect(loggerBuffer).To(gbytes.Say(`error occurred querying orphan deployments: something failed on cf`))
})
It("responds with 500 when BOSH API call fails", func() {
fakeCfClient.GetInstancesOfServiceOfferingReturns([]service.Instance{
{
GUID: "not-orphan",
PlanUniqueID: "plan-id",
},
}, nil)
fakeBoshClient.GetDeploymentsReturns([]boshdirector.Deployment{}, errors.New("some bosh error"))
response, _ := doGetRequest(orphanDeploymentsPath)
By("returning the correct status code")
Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
By("logging the failure")
Expect(loggerBuffer).To(gbytes.Say(`error occurred querying orphan deployments: some bosh error`))
})
})
Describe("GET /mgmt/metrics", func() {
const (
metricsPath = "metrics"
)
BeforeEach(func() {
quota := 12
conf.ServiceCatalog.GlobalQuotas = brokerConfig.Quotas{ServiceInstanceLimit: &quota}
servicePlan := cf.ServicePlan{
ServicePlanEntity: cf.ServicePlanEntity{
UniqueID: dedicatedPlanID,
},
}
anotherServicePlan := cf.ServicePlan{
ServicePlanEntity: cf.ServicePlanEntity{
UniqueID: highMemoryPlanID,
},
}
fakeCfClient.CountInstancesOfServiceOfferingReturns(map[cf.ServicePlan]int{servicePlan: 1, anotherServicePlan: 4}, nil)
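// 1 + 4 = 5 instances across the two plans; against the global limit of 12 set
// above, the assertions below expect 12 - 5 = 7 remaining.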
})
It("responds with some metrics", func() {
metricsResp, bodyContent := doGetRequest(metricsPath)
Expect(metricsResp.StatusCode).To(Equal(http.StatusOK))
var brokerMetrics []mgmtapi.Metric
Expect(json.Unmarshal(bodyContent, &brokerMetrics)).To(Succeed())
Expect(brokerMetrics).To(ConsistOf(
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/dedicated-plan-name/total_instances",
Value: 1,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/dedicated-plan-name/quota_remaining",
Value: 0,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/high-memory-plan-name/total_instances",
Value: 4,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/total_instances",
Value: 5,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/quota_remaining",
Value: 7,
Unit: "count",
},
))
})
Context("when no global quota is configured", func() {
BeforeEach(func() {
conf.ServiceCatalog.GlobalQuotas = brokerConfig.Quotas{}
})
It("does not include global quota metric", func() {
metricsResp, bodyContent := doGetRequest(metricsPath)
Expect(metricsResp.StatusCode).To(Equal(http.StatusOK))
var brokerMetrics []mgmtapi.Metric
Expect(json.Unmarshal(bodyContent, &brokerMetrics)).To(Succeed())
Expect(brokerMetrics).To(ConsistOf(
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/dedicated-plan-name/total_instances",
Value: 1,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/dedicated-plan-name/quota_remaining",
Value: 0,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/high-memory-plan-name/total_instances",
Value: 4,
Unit: "count",
},
mgmtapi.Metric{
Key: "/on-demand-broker/service-name/total_instances",
Value: 5,
Unit: "count",
},
))
})
})
It("fails when the broker is not registered with CF", func() { | fakeCfClient.CountInstancesOfServiceOfferingReturns(map[cf.ServicePlan]int{}, nil)
response, _ := doGetRequest(metricsPath)
Expect(response.StatusCode).To(Equal(http.StatusServiceUnavailable))
By("logging the error with the same request ID")
Eventually(loggerBuffer).Should(gbytes.Say(fmt.Sprintf(`The %s service broker must be registered with Cloud Foundry before metrics can be collected`, serviceName)))
})
It("fails when the CF API fails", func() {
fakeCfClient.CountInstancesOfServiceOfferingReturns(map[cf.ServicePlan]int{}, errors.New("CF API error"))
response, _ := doGetRequest(metricsPath)
Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
By("logging the error with the same request ID")
Eventually(loggerBuffer).Should(gbytes.Say(fmt.Sprintf(`error getting instance count for service offering %s: CF API error`, serviceName)))
})
})
Describe("PATCH /mgmt/service_instances/:id?operation_type=", func() {
const (
instanceID = "some-instance-id"
)
Context("when performing an upgrade", func() {
const (
operationType = "upgrade"
)
It("responds with the upgrade operation data", func() {
taskID := 123
fakeTaskBoshClient.GetDeploymentReturns(nil, true, nil)
fakeTaskBoshClient.DeployReturns(taskID, nil)
response, bodyContent := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusAccepted))
By("upgrades the correct instance")
input, actualOthers := fakeCommandRunner.RunWithInputParamsArgsForCall(0)
actualInput, ok := input.(sdk.InputParams)
Expect(ok).To(BeTrue(), "command runner takes a sdk.inputparams obj")
Expect(actualOthers[1]).To(Equal("generate-manifest"))
Expect(actualInput.GenerateManifest.ServiceDeployment).To(ContainSubstring(`"deployment_name":"service-instance_some-instance-id"`))
_, contextID, _, _ := fakeTaskBoshClient.DeployArgsForCall(0)
Expect(contextID).NotTo(BeEmpty())
By("updating the bosh configs")
Expect(fakeTaskBoshClient.UpdateConfigCallCount()).To(Equal(1), "UpdateConfig should have been called")
By("returning the correct operation data")
var operationData broker.OperationData
Expect(json.Unmarshal(bodyContent, &operationData)).To(Succeed())
Expect(operationData).To(Equal(broker.OperationData{
OperationType: broker.OperationTypeUpgrade,
BoshTaskID: 123,
BoshContextID: operationData.BoshContextID,
Errands: []brokerConfig.Errand{{
Name: "post-deploy-errand",
Instances: []string{},
}},
}))
})
Context("when post-deploy errand instances are provided", func() {
BeforeEach(func() {
conf.ServiceCatalog.Plans[0].LifecycleErrands.PostDeploy[0].Instances = []string{"instance-group-name/0"}
})
It("responds with the upgrade operation data", func() {
taskID := 123
fakeTaskBoshClient.GetDeploymentReturns(nil, true, nil)
fakeTaskBoshClient.DeployReturns(taskID, nil)
response, bodyContent := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusAccepted))
By("upgrades the correct instance")
input, actualOthers := fakeCommandRunner.RunWithInputParamsArgsForCall(0)
actualInput, ok := input.(sdk.InputParams)
Expect(ok).To(BeTrue(), "command runner takes a sdk.inputparams obj")
Expect(actualOthers[1]).To(Equal("generate-manifest"))
Expect(actualInput.GenerateManifest.ServiceDeployment).To(ContainSubstring(`"deployment_name":"service-instance_some-instance-id"`))
_, contextID, _, _ := fakeTaskBoshClient.DeployArgsForCall(0)
Expect(contextID).NotTo(BeEmpty())
By("returning the correct operation data")
var operationData broker.OperationData
Expect(json.Unmarshal(bodyContent, &operationData)).To(Succeed())
Expect(operationData).To(Equal(broker.OperationData{
OperationType: broker.OperationTypeUpgrade,
BoshTaskID: 123,
BoshContextID: operationData.BoshContextID,
Errands: []brokerConfig.Errand{{
Name: "post-deploy-errand",
Instances: []string{"instance-group-name/0"},
}},
}))
})
})
It("responds with 422 when the request body is empty", func() {
response, _ := doProcessRequest(instanceID, "", operationType)
Expect(response.StatusCode).To(Equal(http.StatusUnprocessableEntity))
})
When("Bosh configs are disabled", func() {
BeforeEach(func() {
conf.Broker.DisableBoshConfigs = true
taskID := 123
fakeTaskBoshClient.GetDeploymentReturns(nil, true, nil)
fakeTaskBoshClient.DeployReturns(taskID, nil)
})
It("succeeds when generate manifest output doesn't include bosh configs", func() {
generateManifestOutput := sdk.MarshalledGenerateManifest{
Manifest: `name: service-instance_some-instance-id`,
ODBManagedSecrets: map[string]interface{}{
"": nil,
},
}
generateManifestOutputBytes, err := json.Marshal(generateManifestOutput)
Expect(err).NotTo(HaveOccurred())
zero := 0
fakeCommandRunner.RunWithInputParamsReturns(generateManifestOutputBytes, []byte{}, &zero, nil)
response, _ := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusAccepted))
Expect(fakeTaskBoshClient.GetConfigsCallCount()).To(Equal(0), "GetConfigs shouldn't be called")
Expect(fakeTaskBoshClient.UpdateConfigCallCount()).To(Equal(0), "UpdateConfig shouldn't be called")
})
It("fails when the adapter generate manifest output includes bosh configs", func() {
response, _ := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusInternalServerError))
Expect(fakeTaskBoshClient.GetConfigsCallCount()).To(Equal(0), "GetConfigs shouldn't be called")
Expect(fakeTaskBoshClient.UpdateConfigCallCount()).To(Equal(0), "UpdateConfig shouldn't be called")
})
})
})
Context("when performing a recreate", func() {
const (
operationType = "recreate"
)
It("responds with the recreate operation data", func() {
taskID := 123
fakeTaskBoshClient.GetDeploymentReturns(nil, true, nil)
fakeTaskBoshClient.RecreateReturns(taskID, nil)
response, bodyContent := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusAccepted))
By("recreates the correct instance")
deploymentName, _, _, _ := fakeTaskBoshClient.RecreateArgsForCall(0)
Expect(deploymentName).To(Equal(fmt.Sprintf("service-instance_%s", instanceID)))
Expect(fakeCommandRunner.RunWithInputParamsCallCount()).To(BeZero())
By("returning the correct operation data")
var operationData broker.OperationData
Expect(json.Unmarshal(bodyContent, &operationData)).To(Succeed())
Expect(operationData).To(Equal(broker.OperationData{
OperationType: broker.OperationTypeRecreate,
BoshTaskID: 123,
BoshContextID: operationData.BoshContextID,
Errands: []brokerConfig.Errand{{
Name: "post-deploy-errand",
Instances: []string{},
}},
}))
})
It("responds with 422 when the request body is empty", func() {
response, _ := doProcessRequest(instanceID, "", operationType)
Expect(response.StatusCode).To(Equal(http.StatusUnprocessableEntity))
})
})
Context("With a valid operation type", func() {
const (
operationType = "upgrade"
)
It("responds with 410 when instance's deployment cannot be found in BOSH", func() {
// This is the default for the fake, but just to be explicit
fakeTaskBoshClient.GetDeploymentReturns(nil, false, nil)
response, _ := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusGone))
})
It("responds with 409 when there are incomplete tasks for the instance's deployment", func() {
fakeTaskBoshClient.GetTasksReturns(boshdirector.BoshTasks{
{State: boshdirector.TaskProcessing},
}, nil)
response, _ := doProcessRequest(instanceID, fmt.Sprintf(`{"plan_id": "%s"}`, dedicatedPlanID), operationType)
Expect(response.StatusCode).To(Equal(http.StatusConflict))
})
})
It("responds with 400 'Bad Request' when operation_type is unknown", func() {
response, _ := doProcessRequest(instanceID, "", "unknown_operation_type")
Expect(response.StatusCode).To(Equal(http.StatusBadRequest))
})
})
})
func doProcessRequest(serviceInstanceID, body, operationType string) (*http.Response, []byte) {
return doRequest(http.MethodPatch, fmt.Sprintf("http://%s/mgmt/service_instances/%s?operation_type=%s", serverURL, serviceInstanceID, operationType), strings.NewReader(body))
}
func doGetRequest(path string) (*http.Response, []byte) {
return doRequest(http.MethodGet, fmt.Sprintf("http://%s/mgmt/%s", serverURL, path), nil)
} | |
filter.ts | ///<reference path="common.ts" />
module Dust.Filter {
export interface Filter {
apply(item: any): any;
}
export class SuppressEscape implements Filter {
apply(item: any) { return item; }
}
export class HtmlEscape implements Filter {
static replacers = Pct.newAssocArray({
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#039;'
});
apply(item: any) {
if (!is_string(item)) return item;
return str_replace(array_keys(HtmlEscape.replacers),
array_values(HtmlEscape.replacers), <string>item);
}
}
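// Illustrative (given the replacers above):
// new HtmlEscape().apply('<a href="x">&</a>') === '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'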
export class JavaScriptEscape implements Filter {
static replacers = Pct.newAssocArray({
'\\': '\\\\',
'\r': '\\r',
'\n': '\\n',
'\f': '\\f',
"'": "\\'",
'"': "\\\"",
'\t': '\\t'
});
apply(item: any) {
if (!is_string(item)) return item;
return str_replace(array_keys(JavaScriptEscape.replacers),
array_values(JavaScriptEscape.replacers), <string>item);
}
}
export class En | mplements Filter {
//ref: http://stackoverflow.com/questions/4929584/encodeuri-in-php
static replacers = Pct.newAssocArray({
//unescaped
'%2D': '-',
'%5F': '_',
'%2E': '.',
'%21': '!',
'%7E': '~',
'%2A': '*',
'%27': "'",
'%28': '(',
'%29': ')',
//reserved
'%3B': ';',
'%2C': ',',
'%2F': '/',
'%3F':'?',
'%3A': ':',
'%40': '@',
'%26': '&',
'%3D': '=',
'%2B': '+',
'%24': '$',
//score
'%23': '#'
});
apply(item: any) {
if (!is_string(item)) return item;
return strtr(rawurlencode(<string>item), EncodeUri.replacers);
}
}
export class EncodeUriComponent implements Filter {
//ref: http://stackoverflow.com/questions/1734250/what-is-the-equivalent-of-javascripts-encodeuricomponent-in-php
static replacers = Pct.newAssocArray({
'%21': '!',
'%2A': '*',
'%27': "'",
'%28': '(',
'%29': ')'
});
apply(item: any) {
if (!is_string(item)) return item;
return strtr(rawurlencode(<string>item), EncodeUriComponent.replacers);
}
}
export class JsonEncode implements Filter {
apply(item: any) { return json_encode(item); }
}
export class JsonDecode implements Filter {
apply(item: any) { return json_decode(item); }
}
} | codeUri i |
0001_initial.py | # Generated by Django 2.1.4 on 2019-03-24 19:19
import datetime
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
| initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=16, null=True, verbose_name='用户名')),
('gender', models.CharField(choices=[('male', '男'), ('female', '女')], default='female', max_length=6, verbose_name='性别')),
('mobile', models.CharField(blank=True, max_length=11, null=True, verbose_name='电话')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='邮箱')),
('top_img', models.ImageField(max_length=200, null=True, upload_to='user/')),
('create_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
|
interactive.go | package internal
import (
"unicode"
"github.com/nsf/termbox-go"
)
type keyDir struct {
key termbox.Key
char rune
dir int
}
var keyDirs = []*keyDir{
{termbox.KeyArrowUp, 'k', Up},
{termbox.KeyArrowDown, 'j', Down},
{termbox.KeyArrowLeft, 'h', Left},
{termbox.KeyArrowRight, 'l', Right},
}
func interactive(maze *InfiniMaze, format *Format) {
events := make(chan termbox.Event)
go func() {
for {
events <- termbox.PollEvent()
}
}()
strwriter := make(chan string)
go printTermbox(strwriter)
maze.CurrentMaze.Write(strwriter, format)
loop:
for {
select {
case event := <-events:
if event.Type == termbox.EventKey {
for _, keydir := range keyDirs {
if event.Key == keydir.key || event.Ch == keydir.char {
maze.Move(keydir.dir)
maze.CurrentMaze.Write(strwriter, format)
continue loop
}
}
if event.Ch == 'q' || event.Ch == 'Q' || event.Key == termbox.KeyCtrlC || event.Key == termbox.KeyCtrlD {
break loop
}
}
}
}
}
func printTermbox(strwriter chan string) {
x, y := 1, 0
for {
str := <-strwriter
switch str {
case "\u0000":
_ = termbox.Flush()
x, y = 1, 0
default:
printString(str, &x, &y)
}
}
}
func printString(str string, x *int, y *int) | {
attr, skip, d0, d1, d := false, false, '0', '0', false
fg, bg := termbox.ColorDefault, termbox.ColorDefault
for _, c := range str {
if c == '\n' {
*x, *y = (*x)+1, 0
} else if c == '\x1b' || attr && c == '[' {
attr = true
} else if attr && unicode.IsDigit(c) {
if !skip {
if d {
d1 = c
} else {
d0, d = c, true
}
}
} else if attr && c == ';' {
skip = true
} else if attr && c == 'm' {
if d0 == '7' && d1 == '0' {
fg, bg = termbox.AttrReverse, termbox.AttrReverse
} else if d0 == '3' {
fg, bg = termbox.Attribute(uint64(d1-'0'+1)), termbox.ColorDefault
} else if d0 == '4' {
fg, bg = termbox.ColorDefault, termbox.Attribute(uint64(d1-'0'+1))
} else {
fg, bg = termbox.ColorDefault, termbox.ColorDefault
}
attr, skip, d0, d1, d = false, false, '0', '0', false
} else {
termbox.SetCell(*y, *x, c, fg, bg)
*y = *y + 1
}
}
} |
|
list.go | package volumes
import (
"context"
"html/template"
"io"
"os"
"strings"
"text/tabwriter"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
volumeLsDescription = `
podman volume ls
List all available volumes. The output of the volumes can be filtered
and the output format can be changed to JSON or a user specified Go template.`
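// Illustrative CLI invocations (flags defined in init() below; which filter keys
// the backend honours is an assumption here):
//   podman volume ls --filter label=web --format "{{.Name}}"
//   podman volume ls -q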
lsCommand = &cobra.Command{
Use: "ls",
Aliases: []string{"list"},
Args: cobra.NoArgs,
Short: "List volumes",
Long: volumeLsDescription,
RunE: list,
}
)
var (
// Temporary struct to hold cli values.
cliOpts = struct {
Filter []string
Format string
Quiet bool
}{}
lsOpts = entities.VolumeListOptions{}
)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Mode: []entities.EngineMode{entities.ABIMode, entities.TunnelMode},
Command: lsCommand,
Parent: volumeCmd,
})
flags := lsCommand.Flags()
flags.StringSliceVarP(&cliOpts.Filter, "filter", "f", []string{}, "Filter volume output")
flags.StringVar(&cliOpts.Format, "format", "{{.Driver}}\t{{.Name}}\n", "Format volume output using Go template")
flags.BoolVarP(&cliOpts.Quiet, "quiet", "q", false, "Print volume output in quiet mode")
}
func | (cmd *cobra.Command, args []string) error {
var w io.Writer = os.Stdout
if cliOpts.Quiet && cmd.Flag("format").Changed {
return errors.New("quiet and format flags cannot be used together")
}
lsOpts.Filter = make(map[string][]string)
for _, f := range cliOpts.Filter {
filterSplit := strings.Split(f, "=")
if len(filterSplit) < 2 {
return errors.Errorf("filter input must be in the form of filter=value: %s is invalid", f)
}
lsOpts.Filter[filterSplit[0]] = append(lsOpts.Filter[filterSplit[0]], filterSplit[1:]...)
}
responses, err := registry.ContainerEngine().VolumeList(context.Background(), lsOpts)
if err != nil {
return err
}
if len(responses) < 1 {
return nil
}
// "\t" from the command line is not being recognized as a tab
// replacing the string "\t" to a tab character if the user passes in "\t"
cliOpts.Format = strings.Replace(cliOpts.Format, `\t`, "\t", -1)
if cliOpts.Quiet {
cliOpts.Format = "{{.Name}}\n"
}
headers := "DRIVER\tVOLUME NAME\n"
row := cliOpts.Format
if !strings.HasSuffix(cliOpts.Format, "\n") {
row += "\n"
}
format := "{{range . }}" + row + "{{end}}"
if !cliOpts.Quiet && !cmd.Flag("format").Changed {
w = tabwriter.NewWriter(os.Stdout, 12, 2, 2, ' ', 0)
format = headers + format
}
tmpl, err := template.New("listVolume").Parse(format)
if err != nil {
return err
}
if err := tmpl.Execute(w, responses); err != nil {
return err
}
if flusher, ok := w.(interface{ Flush() error }); ok {
return flusher.Flush()
}
return nil
}
| list |
relation.js | 'use strict';
var Relation = require('../src/relation');
var CollectionBase = require('../src/collection');
describe('Relation', function () {
describe('Constructor', function () {
it('accepts a type', function () {
expect(new Relation('type').type).to.equal('type');
});
it('accepts a target model', function () {
expect(new Relation(null, 'Target').target).to.equal('Target');
});
it('applies a set of options', function () {
expect(new Relation(null, null, {
foo: 'bar'
})).to.have.property('foo', 'bar');
});
});
var relation;
beforeEach(function () {
relation = new Relation();
});
| relation.target = sinon.spy();
relation.target.prototype.name = 'target';
});
describe('isSingle', function () {
beforeEach(function () {
sinon.stub(relation, 'isSingle').returns(true);
});
it('creates a target model', function () {
expect(relation.initialize({target_id: 0})).to.be.an.instanceOf(relation.target);
expect(relation.target).to.have.been.calledWithNew;
expect(relation.target).to.have.been.calledWithMatch({id: 0});
});
it('can use a custom key', function () {
relation.key = 'foo_id';
relation.initialize({foo_id: 0});
expect(relation.target).to.have.been.calledWithMatch({id: 0});
});
});
describe('!isSingle', function () {
beforeEach(function () {
sinon.stub(relation, 'isSingle').returns(false);
});
it('creates a target collection', function () {
var collection = relation.initialize({name: 'foo', id: 0});
expect(collection).to.be.an.instanceOf(CollectionBase);
expect(collection.model).to.equal(relation.target);
expect(collection.attributes).to.contain({
foo_id: 0
});
});
it('can use a custom foreign key', function () {
relation.foreignKey = 'bar_id';
expect(relation.initialize({name: 'foo', id: 0}).attributes)
.to.have.property('bar_id', 0);
});
});
});
describe('#isSingle', function () {
it('is truthy for belongsTo', function () {
expect(new Relation('belongsTo').isSingle()).to.be.ok;
});
it('is falsy for hasMany', function () {
expect(new Relation('hasMany').isSingle()).to.not.be.ok;
});
});
}); | describe('#initialize', function () {
beforeEach(function () { |
pipelines_builder_test.go | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder
import (
"context"
"testing"
"go.uber.org/zap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
"github.com/open-telemetry/opentelemetry-service/config"
"github.com/open-telemetry/opentelemetry-service/config/configmodels"
"github.com/open-telemetry/opentelemetry-service/consumer/consumerdata"
"github.com/open-telemetry/opentelemetry-service/processor/addattributesprocessor"
)
func TestPipelinesBuilder_Build(t *testing.T) {
tests := []struct {
name string
pipelineName string
exporterNames []string
}{
{
name: "one-exporter",
pipelineName: "traces",
exporterNames: []string{"exampleexporter"},
},
{
name: "multi-exporter",
pipelineName: "traces/2",
exporterNames: []string{"exampleexporter", "exampleexporter/2"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testPipeline(t, test.pipelineName, test.exporterNames)
})
}
}
func testPipeline(t *testing.T, pipelineName string, exporterNames []string) |
func TestPipelinesBuilder_Error(t *testing.T) {
receiverFactories, processorsFactories, exporterFactories, err := config.ExampleComponents()
assert.Nil(t, err)
attrFactory := &addattributesprocessor.Factory{}
processorsFactories[attrFactory.Type()] = attrFactory
cfg, err := config.LoadConfigFile(
t, "testdata/pipelines_builder.yaml", receiverFactories, processorsFactories, exporterFactories,
)
require.Nil(t, err)
	// Corrupt the pipeline, change data type to metrics. We have to forcibly do it here
// since there is no way to have such config loaded by LoadConfigFile, it would not
// pass validation. We are doing this to test failure mode of PipelinesBuilder.
pipeline := cfg.Pipelines["traces"]
pipeline.InputType = configmodels.MetricsDataType
exporters, err := NewExportersBuilder(zap.NewNop(), cfg, exporterFactories).Build()
assert.NoError(t, err)
// This should fail because "attributes" processor defined in the config does
// not support metrics data type.
_, err = NewPipelinesBuilder(zap.NewNop(), cfg, exporters, processorsFactories).Build()
assert.NotNil(t, err)
}
| {
receiverFactories, processorsFactories, exporterFactories, err := config.ExampleComponents()
assert.Nil(t, err)
attrFactory := &addattributesprocessor.Factory{}
processorsFactories[attrFactory.Type()] = attrFactory
cfg, err := config.LoadConfigFile(
t, "testdata/pipelines_builder.yaml", receiverFactories, processorsFactories, exporterFactories,
)
// Load the config
require.Nil(t, err)
// Build the pipeline
allExporters, err := NewExportersBuilder(zap.NewNop(), cfg, exporterFactories).Build()
assert.NoError(t, err)
pipelineProcessors, err := NewPipelinesBuilder(zap.NewNop(), cfg, allExporters, processorsFactories).Build()
assert.NoError(t, err)
require.NotNil(t, pipelineProcessors)
processor := pipelineProcessors[cfg.Pipelines[pipelineName]]
// Ensure pipeline has its fields correctly populated.
require.NotNil(t, processor)
assert.NotNil(t, processor.tc)
assert.Nil(t, processor.mc)
// Compose the list of created exporters.
var exporters []*builtExporter
for _, name := range exporterNames {
// Ensure exporter is created.
exp := allExporters[cfg.Exporters[name]]
require.NotNil(t, exp)
exporters = append(exporters, exp)
}
// Send TraceData via processor and verify that all exporters of the pipeline receive it.
// First check that there are no traces in the exporters yet.
var exporterConsumers []*config.ExampleExporterConsumer
for _, exporter := range exporters {
consumer := exporter.tc.(*config.ExampleExporterConsumer)
exporterConsumers = append(exporterConsumers, consumer)
require.Equal(t, len(consumer.Traces), 0)
}
// Send one trace.
name := tracepb.TruncatableString{Value: "testspanname"}
traceData := consumerdata.TraceData{
SourceFormat: "test-source-format",
Spans: []*tracepb.Span{
{Name: &name},
},
}
processor.tc.ConsumeTraceData(context.Background(), traceData)
// Now verify received data.
for _, consumer := range exporterConsumers {
// Check that the trace is received by exporter.
require.Equal(t, 1, len(consumer.Traces))
assert.Equal(t, traceData, consumer.Traces[0])
// Check that the span was processed by "attributes" processor and an
// attribute was added.
assert.Equal(t, int64(12345),
consumer.Traces[0].Spans[0].Attributes.AttributeMap["attr1"].GetIntValue())
}
} |
accept_txntypes_test.go | package consensus
import (
"testing"
"github.com/uplo-tech/errors"
"github.com/uplo-tech/fastrand"
"github.com/uplo-tech/uplo/crypto"
"github.com/uplo-tech/uplo/types"
)
// testBlockSuite tests a wide variety of blocks.
func (cst *consensusSetTester) testBlockSuite() {
cst.testSimpleBlock()
cst.testSpendUplocoinsBlock()
cst.testValidStorageProofBlocks()
cst.testMissedStorageProofBlocks()
cst.testFileContractRevision()
cst.testSpendUplofunds()
}
// testSimpleBlock mines a simple block (no transactions except those
// automatically added by the miner) and adds it to the consensus set.
func (cst *consensusSetTester) testSimpleBlock() {
	// Get the starting hash of the consensus set.
initialChecksum := cst.cs.dbConsensusChecksum()
initialHeight := cst.cs.dbBlockHeight()
initialBlockID := cst.cs.dbCurrentBlockID()
// Mine and submit a block
block, err := cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Check that the consensus info functions changed as expected.
resultingChecksum := cst.cs.dbConsensusChecksum()
if initialChecksum == resultingChecksum {
panic("checksum is unchanged after mining a block")
}
resultingHeight := cst.cs.dbBlockHeight()
if resultingHeight != initialHeight+1 {
panic("height of consensus set did not increase as expected")
}
currentPB := cst.cs.dbCurrentProcessedBlock()
if currentPB.Block.ParentID != initialBlockID {
panic("new processed block does not have correct information")
}
if currentPB.Block.ID() != block.ID() {
panic("the state's current block is not reporting as the recently mined block.")
}
if currentPB.Height != initialHeight+1 {
panic("the processed block is not reporting the correct height")
}
pathID, err := cst.cs.dbGetPath(currentPB.Height)
if err != nil {
panic(err)
}
if pathID != block.ID() {
panic("current path does not point to the correct block")
}
// Revert the block that was just added to the consensus set and check for
// parity with the original state of consensus.
parent, err := cst.cs.dbGetBlockMap(currentPB.Block.ParentID)
if err != nil {
panic(err)
}
_, _, err = cst.cs.dbForkBlockchain(parent)
if err != nil {
panic(err)
}
if cst.cs.dbConsensusChecksum() != initialChecksum {
panic("adding and reverting a block changed the consensus set")
}
// Re-add the block and check for parity with the first time it was added.
// This test is useful because a different codepath is followed if the
// diffs have already been generated.
_, _, err = cst.cs.dbForkBlockchain(currentPB)
if err != nil {
panic(err)
}
if cst.cs.dbConsensusChecksum() != resultingChecksum {
panic("adding, reverting, and reading a block was inconsistent with just adding the block")
}
}
// TestIntegrationSimpleBlock creates a consensus set tester and uses it to
// call testSimpleBlock.
func TestIntegrationSimpleBlock(t *testing.T) |
// testSpendUplocoinsBlock mines a block with a transaction spending Uplocoins
// and adds it to the consensus set.
func (cst *consensusSetTester) testSpendUplocoinsBlock() {
// Create a random destination address for the output in the transaction.
destAddr := randAddress()
// Create a block containing a transaction with a valid Uplocoin output.
txnValue := types.NewCurrency64(1200)
txnBuilder, err := cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
err = txnBuilder.FundUplocoins(txnValue)
if err != nil {
panic(err)
}
outputIndex := txnBuilder.AddUplocoinOutput(types.UplocoinOutput{Value: txnValue, UnlockHash: destAddr})
txnSet, err := txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
// Mine and apply the block to the consensus set.
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// See that the destination output was created.
outputID := txnSet[len(txnSet)-1].UplocoinOutputID(outputIndex)
sco, err := cst.cs.dbGetUplocoinOutput(outputID)
if err != nil {
panic(err)
}
if !sco.Value.Equals(txnValue) {
panic("output added with wrong value")
}
if sco.UnlockHash != destAddr {
panic("output sent to the wrong address")
}
}
// TestIntegrationSpendUplocoinsBlock creates a consensus set tester and uses it
// to call testSpendUplocoinsBlock.
func TestIntegrationSpendUplocoinsBlock(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
cst.testSpendUplocoinsBlock()
}
// testValidStorageProofBlocks adds a block with a file contract, and then
// submits a storage proof for that file contract.
func (cst *consensusSetTester) testValidStorageProofBlocks() {
// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
// code stops nondeterministic failures when producing storage proofs that
// is related to buggy old code.
for cst.cs.dbBlockHeight() <= 10 {
_, err := cst.miner.AddBlock()
if err != nil {
panic(err)
}
}
// Create a file (as a bytes.Buffer) that will be used for the file
// contract.
filesize := uint64(4e3)
file := fastrand.Bytes(int(filesize))
merkleRoot := crypto.MerkleRoot(file)
// Create a file contract that will be successful.
validProofDest := randAddress()
payout := types.NewCurrency64(400e6)
fc := types.FileContract{
FileSize: filesize,
FileMerkleRoot: merkleRoot,
WindowStart: cst.cs.dbBlockHeight() + 1,
WindowEnd: cst.cs.dbBlockHeight() + 2,
Payout: payout,
ValidProofOutputs: []types.UplocoinOutput{{
UnlockHash: validProofDest,
Value: types.PostTax(cst.cs.dbBlockHeight(), payout),
}},
MissedProofOutputs: []types.UplocoinOutput{{
UnlockHash: types.UnlockHash{},
Value: types.PostTax(cst.cs.dbBlockHeight(), payout),
}},
}
// Submit a transaction with the file contract.
oldUplofundPool := cst.cs.dbGetUplofundPool()
txnBuilder, err := cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
err = txnBuilder.FundUplocoins(payout)
if err != nil {
panic(err)
}
fcIndex := txnBuilder.AddFileContract(fc)
txnSet, err := txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Check that the uplofund pool was increased by the tax on the payout.
uplofundPool := cst.cs.dbGetUplofundPool()
if !uplofundPool.Equals(oldUplofundPool.Add(types.Tax(cst.cs.dbBlockHeight()-1, payout))) {
panic("uplofund pool was not increased correctly")
}
// Check that the file contract made it into the database.
ti := len(txnSet) - 1
fcid := txnSet[ti].FileContractID(fcIndex)
_, err = cst.cs.dbGetFileContract(fcid)
if err != nil {
panic(err)
}
// Create and submit a storage proof for the file contract.
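	// The proof supplies the randomly selected leaf segment together with its
	// Merkle path, which consensus verifies against the contract's file Merkle root.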
segmentIndex, err := cst.cs.StorageProofSegment(fcid)
if err != nil {
panic(err)
}
segment, hashSet := crypto.MerkleProof(file, segmentIndex)
sp := types.StorageProof{
ParentID: fcid,
HashSet: hashSet,
}
copy(sp.Segment[:], segment)
txnBuilder, err = cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
txnBuilder.AddStorageProof(sp)
txnSet, err = txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Check that the file contract has been removed.
_, err = cst.cs.dbGetFileContract(fcid)
if !errors.Contains(err, errNilItem) {
panic("file contract should not exist in the database")
}
// Check that the uplofund pool has not changed.
postProofPool := cst.cs.dbGetUplofundPool()
if !postProofPool.Equals(uplofundPool) {
panic("uplofund pool should not change after submitting a storage proof")
}
// Check that a delayed output was created for the valid proof.
spoid := fcid.StorageProofOutputID(types.ProofValid, 0)
dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, spoid)
if err != nil {
panic(err)
}
if dsco.UnlockHash != fc.ValidRenterOutput().UnlockHash {
panic("wrong unlock hash in dsco")
}
if !dsco.Value.Equals(fc.ValidRenterPayout()) {
panic("wrong sco value in dsco")
}
}
// TestIntegrationValidStorageProofBlocks creates a consensus set tester and
// uses it to call testValidStorageProofBlocks.
func TestIntegrationValidStorageProofBlocks(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
cst.testValidStorageProofBlocks()
}
// testMissedStorageProofBlocks adds a block with a file contract, and then
// fails to submit a storage proof before expiration.
func (cst *consensusSetTester) testMissedStorageProofBlocks() {
// Create a file contract that will be successful.
filesize := uint64(4e3)
payout := types.NewCurrency64(400e6)
missedProofDest := randAddress()
fc := types.FileContract{
FileSize: filesize,
FileMerkleRoot: crypto.Hash{},
WindowStart: cst.cs.dbBlockHeight() + 1,
WindowEnd: cst.cs.dbBlockHeight() + 2,
Payout: payout,
ValidProofOutputs: []types.UplocoinOutput{{
UnlockHash: types.UnlockHash{},
Value: types.PostTax(cst.cs.dbBlockHeight(), payout),
}},
MissedProofOutputs: []types.UplocoinOutput{{
UnlockHash: missedProofDest,
Value: types.PostTax(cst.cs.dbBlockHeight(), payout),
}},
}
// Submit a transaction with the file contract.
oldUplofundPool := cst.cs.dbGetUplofundPool()
txnBuilder, err := cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
err = txnBuilder.FundUplocoins(payout)
if err != nil {
panic(err)
}
fcIndex := txnBuilder.AddFileContract(fc)
txnSet, err := txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Check that the uplofund pool was increased by the tax on the payout.
uplofundPool := cst.cs.dbGetUplofundPool()
if !uplofundPool.Equals(oldUplofundPool.Add(types.Tax(cst.cs.dbBlockHeight()-1, payout))) {
panic("uplofund pool was not increased correctly")
}
// Check that the file contract made it into the database.
ti := len(txnSet) - 1
fcid := txnSet[ti].FileContractID(fcIndex)
_, err = cst.cs.dbGetFileContract(fcid)
if err != nil {
panic(err)
}
// Mine a block to close the storage proof window.
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Check that the file contract has been removed.
_, err = cst.cs.dbGetFileContract(fcid)
if !errors.Contains(err, errNilItem) {
panic("file contract should not exist in the database")
}
// Check that the uplofund pool has not changed.
postProofPool := cst.cs.dbGetUplofundPool()
if !postProofPool.Equals(uplofundPool) {
panic("uplofund pool should not change after submitting a storage proof")
}
// Check that a delayed output was created for the missed proof.
spoid := fcid.StorageProofOutputID(types.ProofMissed, 0)
dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, spoid)
if err != nil {
panic(err)
}
if dsco.UnlockHash != fc.MissedRenterOutput().UnlockHash {
panic("wrong unlock hash in dsco")
}
if !dsco.Value.Equals(fc.MissedRenterOutput().Value) {
panic("wrong sco value in dsco")
}
}
// TestIntegrationMissedStorageProofBlocks creates a consensus set tester and
// uses it to call testMissedStorageProofBlocks.
func TestIntegrationMissedStorageProofBlocks(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
cst.testMissedStorageProofBlocks()
}
// testFileContractRevision creates and revises a file contract on the
// blockchain.
func (cst *consensusSetTester) testFileContractRevision() {
// COMPATv0.4.0 - Step the block height up past the hardfork amount. This
// code stops nondeterministic failures when producing storage proofs that
// is related to buggy old code.
for cst.cs.dbBlockHeight() <= 10 {
_, err := cst.miner.AddBlock()
if err != nil {
panic(err)
}
}
// Create a file (as a bytes.Buffer) that will be used for the file
// contract.
filesize := uint64(4e3)
file := fastrand.Bytes(int(filesize))
merkleRoot := crypto.MerkleRoot(file)
// Create a spendable unlock hash for the file contract.
sk, pk := crypto.GenerateKeyPair()
uc := types.UnlockConditions{
PublicKeys: []types.UploPublicKey{{
Algorithm: types.SignatureEd25519,
Key: pk[:],
}},
SignaturesRequired: 1,
}
// Create a file contract that will be revised.
validProofDest := randAddress()
payout := types.NewCurrency64(400e6)
fc := types.FileContract{
FileSize: filesize,
FileMerkleRoot: crypto.Hash{},
WindowStart: cst.cs.dbBlockHeight() + 2,
WindowEnd: cst.cs.dbBlockHeight() + 3,
Payout: payout,
ValidProofOutputs: []types.UplocoinOutput{{
UnlockHash: validProofDest,
Value: types.PostTax(cst.cs.dbBlockHeight(), payout),
}},
MissedProofOutputs: []types.UplocoinOutput{{
UnlockHash: types.UnlockHash{},
Value: types.PostTax(cst.cs.dbBlockHeight(), payout),
}},
UnlockHash: uc.UnlockHash(),
}
// Submit a transaction with the file contract.
txnBuilder, err := cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
err = txnBuilder.FundUplocoins(payout)
if err != nil {
panic(err)
}
fcIndex := txnBuilder.AddFileContract(fc)
txnSet, err := txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Submit a revision for the file contract.
ti := len(txnSet) - 1
fcid := txnSet[ti].FileContractID(fcIndex)
fcr := types.FileContractRevision{
ParentID: fcid,
UnlockConditions: uc,
NewRevisionNumber: 69292,
NewFileSize: filesize,
NewFileMerkleRoot: merkleRoot,
NewWindowStart: cst.cs.dbBlockHeight() + 1,
NewWindowEnd: cst.cs.dbBlockHeight() + 2,
NewValidProofOutputs: fc.ValidProofOutputs,
NewMissedProofOutputs: fc.MissedProofOutputs,
NewUnlockHash: uc.UnlockHash(),
}
ts := types.TransactionSignature{
ParentID: crypto.Hash(fcid),
CoveredFields: types.CoveredFields{WholeTransaction: true},
PublicKeyIndex: 0,
}
txn := types.Transaction{
FileContractRevisions: []types.FileContractRevision{fcr},
TransactionSignatures: []types.TransactionSignature{ts},
}
encodedSig := crypto.SignHash(txn.SigHash(0, 0), sk)
txn.TransactionSignatures[0].Signature = encodedSig[:]
err = cst.tpool.AcceptTransactionSet([]types.Transaction{txn})
if err != nil {
panic(err)
}
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Create and submit a storage proof for the file contract.
segmentIndex, err := cst.cs.StorageProofSegment(fcid)
if err != nil {
panic(err)
}
segment, hashSet := crypto.MerkleProof(file, segmentIndex)
sp := types.StorageProof{
ParentID: fcid,
HashSet: hashSet,
}
copy(sp.Segment[:], segment)
txnBuilder, err = cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
txnBuilder.AddStorageProof(sp)
txnSet, err = txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// Check that the file contract has been removed.
_, err = cst.cs.dbGetFileContract(fcid)
if !errors.Contains(err, errNilItem) {
panic("file contract should not exist in the database")
}
}
// TestIntegrationFileContractRevision creates a consensus set tester and uses
// it to call testFileContractRevision.
func TestIntegrationFileContractRevision(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
cst.testFileContractRevision()
}
// testSpendUplofunds spends uplofunds on the blockchain.
func (cst *consensusSetTester) testSpendUplofunds() {
// Create a random destination address for the output in the transaction.
destAddr := randAddress()
// Create a block containing a transaction with a valid uplofund output.
txnValue := types.NewCurrency64(3)
txnBuilder, err := cst.wallet.StartTransaction()
if err != nil {
panic(err)
}
err = txnBuilder.FundUplofunds(txnValue)
if err != nil {
panic(err)
}
outputIndex := txnBuilder.AddUplofundOutput(types.UplofundOutput{Value: txnValue, UnlockHash: destAddr})
txnSet, err := txnBuilder.Sign(true)
if err != nil {
panic(err)
}
err = cst.tpool.AcceptTransactionSet(txnSet)
if err != nil {
panic(err)
}
// Find the uplofund inputs used in the txn set.
var claimValues []types.Currency
var claimIDs []types.UplocoinOutputID
for _, txn := range txnSet {
for _, sfi := range txn.UplofundInputs {
sfo, err := cst.cs.dbGetUplofundOutput(sfi.ParentID)
if err != nil {
// It's not in the database because it's in an earlier
// transaction: disregard it - testing the first layer of
// dependencies is sufficient.
continue
}
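			// Each uplofund input's claim is its pro-rata share of the pool growth
			// since the output was created:
			//   claim = (pool - ClaimStart) / UplofundCount * sfo.Value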
poolDiff := cst.cs.dbGetUplofundPool().Sub(sfo.ClaimStart)
value := poolDiff.Div(types.UplofundCount).Mul(sfo.Value)
claimValues = append(claimValues, value)
claimIDs = append(claimIDs, sfi.ParentID.uploclaimOutputID())
}
}
if len(claimValues) == 0 {
panic("no uplofund outputs created?")
}
// Mine and apply the block to the consensus set.
_, err = cst.miner.AddBlock()
if err != nil {
panic(err)
}
// See that the destination output was created.
outputID := txnSet[len(txnSet)-1].UplofundOutputID(outputIndex)
sfo, err := cst.cs.dbGetUplofundOutput(outputID)
if err != nil {
panic(err)
}
if !sfo.Value.Equals(txnValue) {
panic("output added with wrong value")
}
if sfo.UnlockHash != destAddr {
panic("output sent to the wrong address")
}
if !sfo.ClaimStart.Equals(cst.cs.dbGetUplofundPool()) {
panic("ClaimStart is not being set correctly")
}
// Verify that all expected claims were created and added to the set of
// delayed Uplocoin outputs.
for i, id := range claimIDs {
dsco, err := cst.cs.dbGetDSCO(cst.cs.dbBlockHeight()+types.MaturityDelay, id)
if err != nil {
panic(err)
}
if !dsco.Value.Equals(claimValues[i]) {
panic("expected a different claim value on the uploclaim")
}
}
}
// TestIntegrationSpendUplofunds creates a consensus set tester and uses it
// to call testSpendUplofunds.
func TestIntegrationSpendUplofunds(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
cst.testSpendUplofunds()
}
// testDelayedOutputMaturity adds blocks that result in many delayed outputs
// maturing at the same time, verifying that bulk maturity is handled
// correctly.
// TestRegressionDelayedOutputMaturity creates a consensus set tester and uses
// it to call testDelayedOutputMaturity. In the past, bolt's ForEach function
// had been used incorrectly resulting in the incorrect processing of bulk
// delayed outputs.
// testFileContractMaturity adds blocks that result in many file contracts
// being closed at the same time.
// TestRegressionFileContractMaturity creates a consensus set tester and uses
// it to call testFileContractMaturity. In the past, bolt's ForEach function
// had been used incorrectly, resulting in the incorrect processing of bulk
// file contracts.
/*
// testPaymentChannelBlocks submits blocks to set up, use, and close a payment
// channel.
func (cst *consensusSetTester) testPaymentChannelBlocks() error {
// The current method of doing payment channels is gimped because public
// keys do not have timelocks. We will be hardforking to include timelocks
// in public keys in 0.4.0, but in the meantime we need an alternate
// method.
// Gimped payment channels: 2-of-2 multisig where one key is controlled by
// the funding entity, and one key is controlled by the receiving entity. An
// address is created containing both keys, and then the funding entity
// creates, but does not sign, a transaction sending coins to the channel
// address. A second transaction is created that sends all the coins in the
// funding output back to the funding entity. The receiving entity signs the
// transaction with a timelocked signature. The funding entity will get the
// refund after T blocks as long as the output is not double spent. The
// funding entity then signs the first transaction and opens the channel.
//
// Creating the channel:
// 1. Create a 2-of-2 unlock conditions, one key held by each entity.
// 2. Funding entity creates, but does not sign, a transaction sending
// money to the payment channel address. (txn A)
// 3. Funding entity creates and signs a transaction spending the output
// created in txn A that sends all the money back as a refund. (txn B)
// 4. Receiving entity signs txn B with a timelocked signature, so that the
// funding entity cannot get the refund for several days. The funding entity
// is given a fully signed and eventually-spendable txn B.
// 5. The funding entity signs and broadcasts txn A.
//
// Using the channel:
// Each the receiving entity and the funding entity keeps a record of how
// much has been sent down the unclosed channel, and watches the
// blockchain for a channel closing transaction. To send more money down
// the channel, the funding entity creates and signs a transaction sending
// X+y coins to the receiving entity from the channel address. The
// transaction is sent to the receiving entity, who will keep it and
// potentially sign and broadcast it later. The funding entity will only
// send money down the channel if 'work' or some other sort of event has
// completed that indicates the receiving entity should get more money.
//
// Closing the channel:
// The receiving entity will sign the transaction that pays them the most
// money and then broadcast that transaction. This will spend the output
// and close the channel, invalidating txn B and preventing any future
// transactions from being made over the channel. The channel must be
// closed before the timelock expires on the second signature in txn B,
// otherwise the funding entity will be able to get a full refund.
//
// The funding entity should be waiting until either the receiving entity
// closes the channel or the timelock expires. If the receiving entity
// closes the channel, all is good. If not, then the funding entity can
// close the channel and get a full refund.
// Create a 2-of-2 unlock conditions, 1 key for each the sender and the
// receiver in the payment channel.
sk1, pk1, err := crypto.StdKeyGen.Generate() // Funding entity.
if err != nil {
return err
}
sk2, pk2, err := crypto.StdKeyGen.Generate() // Receiving entity.
if err != nil {
return err
}
uc := types.UnlockConditions{
PublicKeys: []types.UploPublicKey{
{
Algorithm: types.SignatureEd25519,
Key: pk1[:],
},
{
Algorithm: types.SignatureEd25519,
Key: pk2[:],
},
},
SignaturesRequired: 2,
}
channelAddress := uc.UnlockHash()
// Funding entity creates but does not sign a transaction that funds the
// channel address. Because the wallet is not very flexible, the channel
// txn needs to be fully custom. To get a custom txn, manually create an
// address and then use the wallet to fund that address.
channelSize := types.NewCurrency64(10e3)
channelFundingSK, channelFundingPK, err := crypto.StdKeyGen.Generate()
if err != nil {
return err
}
channelFundingUC := types.UnlockConditions{
PublicKeys: []types.UploPublicKey{{
Algorithm: types.SignatureEd25519,
Key: channelFundingPK[:],
}},
SignaturesRequired: 1,
}
channelFundingAddr := channelFundingUC.UnlockHash()
fundTxnBuilder := cst.wallet.StartTransaction()
if err != nil {
return err
}
err = fundTxnBuilder.FundUplocoins(channelSize)
if err != nil {
return err
}
scoFundIndex := fundTxnBuilder.AddUplocoinOutput(types.UplocoinOutput{Value: channelSize, UnlockHash: channelFundingAddr})
fundTxnSet, err := fundTxnBuilder.Sign(true)
if err != nil {
return err
}
fundOutputID := fundTxnSet[len(fundTxnSet)-1].UplocoinOutputID(int(scoFundIndex))
channelTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: fundOutputID,
UnlockConditions: channelFundingUC,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: channelAddress,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(fundOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
// Funding entity creates and signs a transaction that spends the full
// channel output.
channelOutputID := channelTxn.UplocoinOutputID(0)
refundUC, err := cst.wallet.NextAddress()
refundAddr := refundUC.UnlockHash()
if err != nil {
return err
}
refundTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: channelOutputID,
UnlockConditions: uc,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: refundAddr,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
sigHash := refundTxn.SigHash(0)
cryptoSig1, err := crypto.SignHash(sigHash, sk1)
if err != nil {
return err
}
refundTxn.TransactionSignatures[0].Signature = cryptoSig1[:]
// Receiving entity signs the transaction that spends the full channel
// output, but with a timelock.
refundTxn.TransactionSignatures = append(refundTxn.TransactionSignatures, types.TransactionSignature{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 1,
Timelock: cst.cs.dbBlockHeight() + 2,
CoveredFields: types.CoveredFields{WholeTransaction: true},
})
sigHash = refundTxn.SigHash(1)
cryptoSig2, err := crypto.SignHash(sigHash, sk2)
if err != nil {
return err
}
refundTxn.TransactionSignatures[1].Signature = cryptoSig2[:]
// Funding entity will now sign and broadcast the funding transaction.
sigHash = channelTxn.SigHash(0)
cryptoSig0, err := crypto.SignHash(sigHash, channelFundingSK)
if err != nil {
return err
}
channelTxn.TransactionSignatures[0].Signature = cryptoSig0[:]
err = cst.tpool.AcceptTransactionSet(append(fundTxnSet, channelTxn))
if err != nil {
return err
}
// Put the txn in a block.
_, err = cst.miner.AddBlock()
if err != nil {
return err
}
// Try to submit the refund transaction before the timelock has expired.
err = cst.tpool.AcceptTransactionSet([]types.Transaction{refundTxn})
if !errors.Contains(err, types.ErrPrematureSignature){
return err
}
// Create a transaction that has partially used the channel, and submit it
// to the blockchain to close the channel.
closeTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: channelOutputID,
UnlockConditions: uc,
}},
UplocoinOutputs: []types.UplocoinOutput{
{
Value: channelSize.Sub(types.NewCurrency64(5)),
UnlockHash: refundAddr,
},
{
Value: types.NewCurrency64(5),
},
},
TransactionSignatures: []types.TransactionSignature{
{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
},
{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 1,
CoveredFields: types.CoveredFields{WholeTransaction: true},
},
},
}
sigHash = closeTxn.SigHash(0)
cryptoSig3, err := crypto.SignHash(sigHash, sk1)
if err != nil {
return err
}
closeTxn.TransactionSignatures[0].Signature = cryptoSig3[:]
sigHash = closeTxn.SigHash(1)
cryptoSig4, err := crypto.SignHash(sigHash, sk2)
if err != nil {
return err
}
closeTxn.TransactionSignatures[1].Signature = cryptoSig4[:]
err = cst.tpool.AcceptTransactionSet([]types.Transaction{closeTxn})
if err != nil {
return err
}
// Mine the block with the transaction.
_, err = cst.miner.AddBlock()
if err != nil {
return err
}
closeRefundID := closeTxn.UplocoinOutputID(0)
closePaymentID := closeTxn.UplocoinOutputID(1)
exists := cst.cs.db.inUplocoinOutputs(closeRefundID)
if !exists {
return errors.New("close txn refund output doesn't exist")
}
exists = cst.cs.db.inUplocoinOutputs(closePaymentID)
if !exists {
return errors.New("close txn payment output doesn't exist")
}
// Create a payment channel where the receiving entity never responds to
// the initial transaction.
{
// Funding entity creates but does not sign a transaction that funds the
// channel address. Because the wallet is not very flexible, the channel
// txn needs to be fully custom. To get a custom txn, manually create an
// address and then use the wallet to fund that address.
channelSize := types.NewCurrency64(10e3)
channelFundingSK, channelFundingPK, err := crypto.StdKeyGen.Generate()
if err != nil {
return err
}
channelFundingUC := types.UnlockConditions{
PublicKeys: []types.UploPublicKey{{
Algorithm: types.SignatureEd25519,
Key: channelFundingPK[:],
}},
SignaturesRequired: 1,
}
channelFundingAddr := channelFundingUC.UnlockHash()
fundTxnBuilder := cst.wallet.StartTransaction()
err = fundTxnBuilder.FundUplocoins(channelSize)
if err != nil {
return err
}
scoFundIndex := fundTxnBuilder.AddUplocoinOutput(types.UplocoinOutput{Value: channelSize, UnlockHash: channelFundingAddr})
fundTxnSet, err := fundTxnBuilder.Sign(true)
if err != nil {
return err
}
fundOutputID := fundTxnSet[len(fundTxnSet)-1].UplocoinOutputID(int(scoFundIndex))
channelTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: fundOutputID,
UnlockConditions: channelFundingUC,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: channelAddress,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(fundOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
// Funding entity creates and signs a transaction that spends the full
// channel output.
channelOutputID := channelTxn.UplocoinOutputID(0)
refundUC, err := cst.wallet.NextAddress()
refundAddr := refundUC.UnlockHash()
if err != nil {
return err
}
refundTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: channelOutputID,
UnlockConditions: uc,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: refundAddr,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
sigHash := refundTxn.SigHash(0)
cryptoSig1, err := crypto.SignHash(sigHash, sk1)
if err != nil {
return err
}
refundTxn.TransactionSignatures[0].Signature = cryptoSig1[:]
		// Receiving entity never communicates, funding entity must reclaim
// the 'channelSize' coins that were intended to go to the channel.
reclaimUC, err := cst.wallet.NextAddress()
reclaimAddr := reclaimUC.UnlockHash()
if err != nil {
return err
}
reclaimTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: fundOutputID,
UnlockConditions: channelFundingUC,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: reclaimAddr,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(fundOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
sigHash = reclaimTxn.SigHash(0)
cryptoSig, err := crypto.SignHash(sigHash, channelFundingSK)
if err != nil {
return err
}
reclaimTxn.TransactionSignatures[0].Signature = cryptoSig[:]
err = cst.tpool.AcceptTransactionSet(append(fundTxnSet, reclaimTxn))
if err != nil {
return err
}
block, _ := cst.miner.FindBlock()
err = cst.cs.AcceptBlock(block)
if err != nil {
return err
}
reclaimOutputID := reclaimTxn.UplocoinOutputID(0)
exists := cst.cs.db.inUplocoinOutputs(reclaimOutputID)
if !exists {
return errors.New("failed to reclaim an output that belongs to the funding entity")
}
}
// Create a channel and the open the channel, but close the channel using
// the timelocked signature.
{
// Funding entity creates but does not sign a transaction that funds the
// channel address. Because the wallet is not very flexible, the channel
// txn needs to be fully custom. To get a custom txn, manually create an
// address and then use the wallet to fund that address.
channelSize := types.NewCurrency64(10e3)
channelFundingSK, channelFundingPK, err := crypto.StdKeyGen.Generate()
if err != nil {
return err
}
channelFundingUC := types.UnlockConditions{
PublicKeys: []types.UploPublicKey{{
Algorithm: types.SignatureEd25519,
Key: channelFundingPK[:],
}},
SignaturesRequired: 1,
}
channelFundingAddr := channelFundingUC.UnlockHash()
fundTxnBuilder := cst.wallet.StartTransaction()
err = fundTxnBuilder.FundUplocoins(channelSize)
if err != nil {
return err
}
scoFundIndex := fundTxnBuilder.AddUplocoinOutput(types.UplocoinOutput{Value: channelSize, UnlockHash: channelFundingAddr})
fundTxnSet, err := fundTxnBuilder.Sign(true)
if err != nil {
return err
}
fundOutputID := fundTxnSet[len(fundTxnSet)-1].UplocoinOutputID(int(scoFundIndex))
channelTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: fundOutputID,
UnlockConditions: channelFundingUC,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: channelAddress,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(fundOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
// Funding entity creates and signs a transaction that spends the full
// channel output.
channelOutputID := channelTxn.UplocoinOutputID(0)
refundUC, err := cst.wallet.NextAddress()
refundAddr := refundUC.UnlockHash()
if err != nil {
return err
}
refundTxn := types.Transaction{
UplocoinInputs: []types.UplocoinInput{{
ParentID: channelOutputID,
UnlockConditions: uc,
}},
UplocoinOutputs: []types.UplocoinOutput{{
Value: channelSize,
UnlockHash: refundAddr,
}},
TransactionSignatures: []types.TransactionSignature{{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 0,
CoveredFields: types.CoveredFields{WholeTransaction: true},
}},
}
sigHash := refundTxn.SigHash(0)
cryptoSig1, err := crypto.SignHash(sigHash, sk1)
if err != nil {
return err
}
refundTxn.TransactionSignatures[0].Signature = cryptoSig1[:]
// Receiving entity signs the transaction that spends the full channel
// output, but with a timelock.
refundTxn.TransactionSignatures = append(refundTxn.TransactionSignatures, types.TransactionSignature{
ParentID: crypto.Hash(channelOutputID),
PublicKeyIndex: 1,
Timelock: cst.cs.dbBlockHeight() + 2,
CoveredFields: types.CoveredFields{WholeTransaction: true},
})
sigHash = refundTxn.SigHash(1)
cryptoSig2, err := crypto.SignHash(sigHash, sk2)
if err != nil {
return err
}
refundTxn.TransactionSignatures[1].Signature = cryptoSig2[:]
// Funding entity will now sign and broadcast the funding transaction.
sigHash = channelTxn.SigHash(0)
cryptoSig0, err := crypto.SignHash(sigHash, channelFundingSK)
if err != nil {
return err
}
channelTxn.TransactionSignatures[0].Signature = cryptoSig0[:]
err = cst.tpool.AcceptTransactionSet(append(fundTxnSet, channelTxn))
if err != nil {
return err
}
// Put the txn in a block.
block, _ := cst.miner.FindBlock()
err = cst.cs.AcceptBlock(block)
if err != nil {
return err
}
// Receiving entity never signs another transaction, so the funding
// entity waits until the timelock is complete, and then submits the
// refundTxn.
for i := 0; i < 3; i++ {
block, _ := cst.miner.FindBlock()
err = cst.cs.AcceptBlock(block)
if err != nil {
return err
}
}
err = cst.tpool.AcceptTransactionSet([]types.Transaction{refundTxn})
if err != nil {
return err
}
block, _ = cst.miner.FindBlock()
err = cst.cs.AcceptBlock(block)
if err != nil {
return err
}
refundOutputID := refundTxn.UplocoinOutputID(0)
exists := cst.cs.db.inUplocoinOutputs(refundOutputID)
if !exists {
return errors.New("timelocked refund transaction did not get spent correctly")
}
}
return nil
}
*/
/*
// TestPaymentChannelBlocks creates a consensus set tester and uses it to call
// testPaymentChannelBlocks.
func TestPaymentChannelBlocks(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
err = cst.testPaymentChannelBlocks()
if err != nil {
t.Fatal(err)
}
}
*/
| {
if testing.Short() {
t.SkipNow()
}
t.Parallel()
cst, err := createConsensusSetTester(t.Name())
if err != nil {
t.Fatal(err)
}
defer func() {
if err := cst.Close(); err != nil {
t.Fatal(err)
}
}()
cst.testSimpleBlock()
} |
user.go | package models
import (
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
"gorm.io/gorm"
)
// User user
type User struct {
gorm.Model
// city
City string `json:"city,omitempty"`
// company name
CompanyName string `json:"companyName,omitempty"`
// country
Country string `json:"country,omitempty"`
// email
Email string `json:"email,omitempty"`
// first name
FirstName string `json:"firstName,omitempty"`
// last name
LastName string `json:"lastName,omitempty"`
// password
Password string `json:"password,omitempty"`
// position
Position string `json:"position,omitempty"`
| // priority
// Enum: [Low Medium High]
Priority string `json:"priority,omitempty"`
// username
Username string `gorm:"unique;not null"`
}
// Validate validates this user
func (m *User) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePriority(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var userTypePriorityPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["Low","Medium","High"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
userTypePriorityPropEnum = append(userTypePriorityPropEnum, v)
}
}
const (
// UserPriorityLow captures enum value "Low"
UserPriorityLow string = "Low"
// UserPriorityMedium captures enum value "Medium"
UserPriorityMedium string = "Medium"
// UserPriorityHigh captures enum value "High"
UserPriorityHigh string = "High"
)
// prop value enum
func (m *User) validatePriorityEnum(path, location string, value string) error {
if err := validate.Enum(path, location, value, userTypePriorityPropEnum); err != nil {
return err
}
return nil
}
func (m *User) validatePriority(formats strfmt.Registry) error {
if swag.IsZero(m.Priority) { // not required
return nil
}
// value enum
if err := m.validatePriorityEnum("priority", "body", m.Priority); err != nil {
return err
}
return nil
}
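// Illustrative sketch (not part of the generated model): validating a user before
// persisting it. strfmt.Default is the standard registry from go-openapi/strfmt,
// which this file already imports; the field values below are made up.
//
//	u := &User{Username: "jdoe", Priority: "Urgent"}
//	if err := u.Validate(strfmt.Default); err != nil {
//		// "Urgent" is rejected because it is not one of Low, Medium or High.
//	}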
// MarshalBinary interface implementation
func (m *User) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *User) UnmarshalBinary(b []byte) error {
var res User
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
} | |
update_results.py | #! /usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
import re
import shutil
import glob
import argparse
import datetime
import json
from scripts.utils import Tree, SortingCriteria, get_system_type
SCENARIO_PERF_RES_METADATA = {
# scenario: (result regex, SortingCriteria)
"Offline": (r"Samples per second: (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Higher),
"Server": (r"99\.00 percentile latency \(ns\) : (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Lower),
"SingleStream": (r"90th percentile latency \(ns\) : (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Lower),
"MultiStream": (r"99\.00 percentile latency \(ns\) : (\d+\.?\d*e?[-+]?\d*)", SortingCriteria.Lower),
}
def sort_perf_list(perf_file_list, scenario):
|
def find_valid_runs(input_list, scenario):
# Check for query constraints documented in https://github.com/mlperf/inference_policies/blob/master/inference_rules.adoc#scenarios
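    # scenario: (regex extracting the relevant query metric from the summary log, minimum value required)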
QUERY_METRIC_CONSTRAINTS = {
"Offline": (r"samples_per_query : (\d+\.?\d*e?[-+]?\d*)", 24576),
"Server": (r"min_query_count : (\d+\.?\d*e?[-+]?\d*)", 270336),
"MultiStream": (r"min_query_count : (\d+\.?\d*e?[-+]?\d*)", 270336),
"SingleStream": (r"min_query_count : (\d+\.?\d*e?[-+]?\d*)", 1024),
}
perf_list = []
accu_list = []
for input_file in input_list:
# Check if this is Accuracy run or Performance run.
if os.path.getsize(input_file) > 4:
accu_list.append(input_file)
# Check for valid perf run
is_valid = False
satisfies_query_constraint = False
summary = input_file.replace("_accuracy.json", "_summary.txt")
with open(summary) as f:
for line in f:
# Result validity check
match = re.match(r"Result is : (VALID|INVALID)", line)
if match is not None and match.group(1) == "VALID":
is_valid = True
# Query constraint check
match = re.match(QUERY_METRIC_CONSTRAINTS[scenario][0], line)
if match is not None and float(match.group(1)) >= QUERY_METRIC_CONSTRAINTS[scenario][1]:
satisfies_query_constraint = True
if is_valid and satisfies_query_constraint:
perf_list.append(input_file)
return perf_list, accu_list
def process_results(args, system_ids, metadata):
time_now = str(datetime.datetime.utcnow())
result_id = args.result_id if args.result_id is not None else "manual-{:}".format(time_now)
for system_id in system_ids:
system_type = get_system_type(system_id)
for benchmark in system_ids[system_id]:
# Skip DLRM and BERT-99.9 for Edge
if system_type == "edge" and (benchmark.startswith("dlrm") or benchmark == "bert-99.9"):
print("{:} is an edge system. Skipping {:}".format(system_id, benchmark))
continue
# Skip SSD MobileNet for datacenter
if system_type == "datacenter" and benchmark == "ssd-mobilenet":
print("{:} is a datacenter system. Skipping {:}".format(system_id, benchmark))
continue
for scenario in system_ids[system_id][benchmark]:
# Skip Server for Edge systems
if system_type == "edge" and scenario in {"Server"}:
print("{:} is an edge system. Skipping Server scenario".format(system_id))
continue
# Skip SingleStream and MultiStream for Datacenter systems
if system_type == "datacenter" and scenario in {"SingleStream", "MultiStream"}:
print("{:} is a datacenter system. Skipping {:} scenario".format(system_id, scenario))
continue
print(">>>>>>>> Processing {:}-{:}-{:} <<<<<<<<".format(system_id, benchmark, scenario))
input_list = system_ids[system_id][benchmark][scenario]
print("Found {:} log files".format(len(input_list)))
perf_list, accu_list = find_valid_runs(input_list, scenario)
# For DLRM and 3d-UNET, the 99.9% and 99% accuracy targets use the same engines. We use the same
# logs here to make it more prominent that they are the same
if benchmark in {"dlrm-99", "3d-unet-99"}:
perf_list, accu_list = find_valid_runs(system_ids[system_id][benchmark + ".9"][scenario], scenario)
print("\t{:} perf logs".format(len(perf_list)))
print("\t{:} acc logs".format(len(accu_list)))
metadata.insert([system_id, benchmark, scenario, "accuracy", "count"], len(accu_list))
metadata.insert([system_id, benchmark, scenario, "performance", "count"], len(perf_list))
# Update accuracy run
if len(accu_list) == 0:
print("WARNING: Cannot find valid accuracy run.")
if args.abort_missing_accuracy:
return
else:
if len(accu_list) > 1:
print("WARNING: Found {:d} accuracy runs, which is more than needed. Empirically choose the last one.".format(len(accu_list)))
print(accu_list)
output_dir = os.path.join(args.output_dir, system_id, benchmark, scenario, "accuracy")
if not args.dry_run:
os.makedirs(output_dir, exist_ok=True)
for suffix in ["_accuracy.json", "_detail.txt", "_summary.txt"]:
input_file = accu_list[-1].replace("_accuracy.json", suffix)
output_file = os.path.join(output_dir, "mlperf_log{:}".format(suffix))
print("Copy {:} -> {:}".format(input_file, output_file))
if not args.dry_run:
shutil.copy(input_file, output_file)
input_file = os.path.join(os.path.dirname(input_file), "accuracy.txt")
output_file = os.path.join(output_dir, "accuracy.txt")
print("Copy {:} -> {:}".format(input_file, output_file))
if not args.dry_run:
shutil.copy(input_file, output_file)
# Update perf run
perf_count = 1
if len(perf_list) < perf_count:
print("WARNING: Cannot find enough passing perf runs. Only found {:d} runs.".format(len(perf_list)))
if args.abort_insufficient_runs:
return
elif len(perf_list) > perf_count:
print("WARNING: Found {:d} passing perf runs, which is more than needed. Choosing the highest perf one(s).".format(len(perf_list)))
perf_list = sort_perf_list(perf_list, scenario)[-perf_count:]
starting_idx = metadata.get([system_id, benchmark, scenario, "performance", "last_updated"])
if starting_idx is None:
starting_idx = 0
else:
# Starting idx is in range 1..perf_count, whereas actual indices are 0..perf_count-1. We wish the
# first index we modify to be the one after Starting idx, so taking (N mod perf_count) works.
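                    # With perf_count == 1 this always reduces to 0 (run_1 is reused); if perf_count
                    # were e.g. 5 and last_updated were 2, the next copy would land in run_3.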
starting_idx = starting_idx % perf_count
for run_idx in range(0, len(perf_list)):
run_num = ((run_idx + starting_idx) % perf_count) + 1
output_dir = os.path.join(args.output_dir, system_id, benchmark, scenario, "performance", "run_{:d}".format(run_num))
if not args.dry_run:
os.makedirs(output_dir, exist_ok=True)
for suffix in ["_accuracy.json", "_detail.txt", "_summary.txt"]:
input_file = perf_list[run_idx].replace("_accuracy.json", suffix)
output_file = os.path.join(output_dir, "mlperf_log{:}".format(suffix))
print("Copy {:} -> {:}".format(input_file, output_file))
if not args.dry_run:
shutil.copy(input_file, output_file)
metadata.insert([system_id, benchmark, scenario, "performance", "last_updated"], run_num)
metadata.insert([system_id, benchmark, scenario, "results_export_timestamp"], time_now)
metadata.insert([system_id, benchmark, scenario, "result_id"], result_id)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", "-d",
help="Specifies the directory containing the logs.",
default="build/logs"
)
parser.add_argument(
"--output_dir", "-o",
help="Specifies the directory to output the results/ entries to",
default="results"
)
parser.add_argument(
"--result_id",
help="Specifies a unique ID to use for this result",
default=None
)
parser.add_argument(
"--abort_insufficient_runs",
help="Abort instead if there are not enough perf runs to be considered valid",
action="store_true"
)
parser.add_argument(
"--abort_missing_accuracy",
help="Abort instead if there isn't a valid accuracy run",
action="store_true"
)
parser.add_argument(
"--dry_run",
help="Don't actually copy files, just log the actions taken.",
action="store_true"
)
parser.add_argument(
"--metadata_file",
help="File that stores metadata about these results",
default="results_metadata.json"
)
parser.add_argument(
"--add_metadata",
help="Save a field as part of metadata to the results directory. Format period.separated.key:value",
action="append"
)
return parser.parse_args()
def main():
args = get_args()
glob_to_logs = os.path.join(args.input_dir, "**", "mlperf_log_accuracy.json")
print("Looking for logs in {:}".format(glob_to_logs))
all_logs = glob.glob(glob_to_logs, recursive=True)
print("Found {:} mlperf_log entries".format(len(all_logs)))
# Loop through input_list to find all the system_ids
system_ids = Tree()
for entry in all_logs:
parts = entry.split("/")
system_id = parts[-4] # [input_dir]/<timestamp>/system_id/benchmark/scenario/*.json
benchmark = parts[-3]
scenario = parts[-2]
system_ids.insert([system_id, benchmark, scenario], entry, append=True)
metadata = None
if os.path.exists(args.metadata_file):
with open(args.metadata_file) as f:
metadata = json.load(f)
metadata = Tree(starting_val=metadata)
process_results(args, system_ids, metadata)
# Write out custom metadata
if args.add_metadata:
for md in args.add_metadata:
tmp = md.split(":")
if len(tmp) != 2:
print("WARNING: Invalid metadata \"{:}\"".format(md))
continue
keyspace = tmp[0].split(".")
value = tmp[1]
metadata.insert(keyspace, value)
if not args.dry_run:
with open(args.metadata_file, 'w') as f:
json.dump(metadata.tree, f, indent=4, sort_keys=True)
else:
print(json.dumps(metadata.tree, indent=4, sort_keys=True))
print("Done!")
if __name__ == '__main__':
main()
| perf_vals = []
for perf_file in perf_file_list:
summary_file = perf_file.replace("_accuracy.json", "_summary.txt")
found_perf = False
with open(summary_file) as f:
log = f.read().split("\n")
for line in log:
matches = re.match(SCENARIO_PERF_RES_METADATA[scenario][0], line)
if matches is None:
continue
perf_vals.append((perf_file, float(matches.group(1))))
found_perf = True
break
if not found_perf:
raise Exception("Could not find perf value in file: " + summary_file)
sorted_perf_vals = sorted(perf_vals, key=lambda k: k[1],
reverse=(SCENARIO_PERF_RES_METADATA[scenario][1] == SortingCriteria.Lower))
return [k[0] for k in sorted_perf_vals] |
servicediscovery.go | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/submariner-io/submariner-operator/apis/submariner/v1alpha1"
scheme "github.com/submariner-io/submariner-operator/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ServiceDiscoveriesGetter has a method to return a ServiceDiscoveryInterface.
// A group's client should implement this interface.
type ServiceDiscoveriesGetter interface {
ServiceDiscoveries(namespace string) ServiceDiscoveryInterface
}
// ServiceDiscoveryInterface has methods to work with ServiceDiscovery resources.
type ServiceDiscoveryInterface interface {
Create(ctx context.Context, serviceDiscovery *v1alpha1.ServiceDiscovery, opts v1.CreateOptions) (*v1alpha1.ServiceDiscovery, error)
Update(ctx context.Context, serviceDiscovery *v1alpha1.ServiceDiscovery, opts v1.UpdateOptions) (*v1alpha1.ServiceDiscovery, error)
UpdateStatus(ctx context.Context, serviceDiscovery *v1alpha1.ServiceDiscovery, opts v1.UpdateOptions) (*v1alpha1.ServiceDiscovery, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceDiscovery, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceDiscoveryList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceDiscovery, err error)
ServiceDiscoveryExpansion
}
// serviceDiscoveries implements ServiceDiscoveryInterface
type serviceDiscoveries struct {
client rest.Interface
ns string
}
// newServiceDiscoveries returns a ServiceDiscoveries
func newServiceDiscoveries(c *SubmarinerV1alpha1Client, namespace string) *serviceDiscoveries |
// Get takes name of the serviceDiscovery, and returns the corresponding serviceDiscovery object, and an error if there is any.
func (c *serviceDiscoveries) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceDiscovery, err error) {
result = &v1alpha1.ServiceDiscovery{}
err = c.client.Get().
Namespace(c.ns).
Resource("servicediscoveries").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ServiceDiscoveries that match those selectors.
func (c *serviceDiscoveries) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceDiscoveryList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.ServiceDiscoveryList{}
err = c.client.Get().
Namespace(c.ns).
Resource("servicediscoveries").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested serviceDiscoveries.
func (c *serviceDiscoveries) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("servicediscoveries").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a serviceDiscovery and creates it. Returns the server's representation of the serviceDiscovery, and an error, if there is any.
func (c *serviceDiscoveries) Create(ctx context.Context, serviceDiscovery *v1alpha1.ServiceDiscovery, opts v1.CreateOptions) (result *v1alpha1.ServiceDiscovery, err error) {
result = &v1alpha1.ServiceDiscovery{}
err = c.client.Post().
Namespace(c.ns).
Resource("servicediscoveries").
VersionedParams(&opts, scheme.ParameterCodec).
Body(serviceDiscovery).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a serviceDiscovery and updates it. Returns the server's representation of the serviceDiscovery, and an error, if there is any.
func (c *serviceDiscoveries) Update(ctx context.Context, serviceDiscovery *v1alpha1.ServiceDiscovery, opts v1.UpdateOptions) (result *v1alpha1.ServiceDiscovery, err error) {
result = &v1alpha1.ServiceDiscovery{}
err = c.client.Put().
Namespace(c.ns).
Resource("servicediscoveries").
Name(serviceDiscovery.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(serviceDiscovery).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *serviceDiscoveries) UpdateStatus(ctx context.Context, serviceDiscovery *v1alpha1.ServiceDiscovery, opts v1.UpdateOptions) (result *v1alpha1.ServiceDiscovery, err error) {
result = &v1alpha1.ServiceDiscovery{}
err = c.client.Put().
Namespace(c.ns).
Resource("servicediscoveries").
Name(serviceDiscovery.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(serviceDiscovery).
Do(ctx).
Into(result)
return
}
// Delete takes name of the serviceDiscovery and deletes it. Returns an error if one occurs.
func (c *serviceDiscoveries) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("servicediscoveries").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *serviceDiscoveries) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("servicediscoveries").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched serviceDiscovery.
func (c *serviceDiscoveries) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceDiscovery, err error) {
result = &v1alpha1.ServiceDiscovery{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("servicediscoveries").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
| {
return &serviceDiscoveries{
client: c.RESTClient(),
ns: namespace,
}
} |
auto_scaling_policy_summary.py | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class AutoScalingPolicySummary(object):
"""
Summary information for an autoscaling policy.
"""
def __init__(self, **kwargs):
"""
Initializes a new AutoScalingPolicySummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this AutoScalingPolicySummary.
:type id: str
:param display_name:
The value to assign to the display_name property of this AutoScalingPolicySummary.
:type display_name: str
:param policy_type:
The value to assign to the policy_type property of this AutoScalingPolicySummary.
:type policy_type: str
:param is_enabled:
The value to assign to the is_enabled property of this AutoScalingPolicySummary.
:type is_enabled: bool
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'policy_type': 'str',
'is_enabled': 'bool'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'policy_type': 'policyType',
'is_enabled': 'isEnabled'
}
self._id = None
self._display_name = None
self._policy_type = None
self._is_enabled = None
@property
def id(self):
"""
**[Required]** Gets the id of this AutoScalingPolicySummary.
The ID of the autoscaling policy that is assigned after creation.
:return: The id of this AutoScalingPolicySummary.
:rtype: str
"""
return self._id
@id.setter
def | (self, id):
"""
Sets the id of this AutoScalingPolicySummary.
The ID of the autoscaling policy that is assigned after creation.
:param id: The id of this AutoScalingPolicySummary.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
Gets the display_name of this AutoScalingPolicySummary.
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:return: The display_name of this AutoScalingPolicySummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this AutoScalingPolicySummary.
A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param display_name: The display_name of this AutoScalingPolicySummary.
:type: str
"""
self._display_name = display_name
@property
def policy_type(self):
"""
**[Required]** Gets the policy_type of this AutoScalingPolicySummary.
The type of autoscaling policy.
:return: The policy_type of this AutoScalingPolicySummary.
:rtype: str
"""
return self._policy_type
@policy_type.setter
def policy_type(self, policy_type):
"""
Sets the policy_type of this AutoScalingPolicySummary.
The type of autoscaling policy.
:param policy_type: The policy_type of this AutoScalingPolicySummary.
:type: str
"""
self._policy_type = policy_type
@property
def is_enabled(self):
"""
Gets the is_enabled of this AutoScalingPolicySummary.
Whether the autoscaling policy is enabled.
:return: The is_enabled of this AutoScalingPolicySummary.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this AutoScalingPolicySummary.
Whether the autoscaling policy is enabled.
:param is_enabled: The is_enabled of this AutoScalingPolicySummary.
:type: bool
"""
self._is_enabled = is_enabled
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
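# A minimal usage sketch; the field values below are hypothetical, not from the source:
#   summary = AutoScalingPolicySummary(
#       id="ocid1.autoscalingpolicy.oc1..example",
#       display_name="scale-on-cpu",
#       policy_type="threshold",
#       is_enabled=True)
#   summary.policy_type  # expected to be "threshold"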
| id |
cache.go | package main
import "sync"
type cache struct {
locks *sync.Map
values *sync.Map
}
func newCache() cache {
return cache{&sync.Map{}, &sync.Map{}}
}
func (c cache) LoadOrStore(key string) (interface{}, func(interface{})) {
if x, ok := c.values.Load(key); ok |
g := &sync.WaitGroup{}
g.Add(1)
if g, ok := c.locks.LoadOrStore(key, g); ok {
g.(*sync.WaitGroup).Wait()
x, _ := c.values.Load(key)
return x, nil
}
return nil, func(x interface{}) {
c.values.Store(key, x)
g.Done()
}
}
| {
return x, nil
} |
gettoken.py | import requests
import threading
import random
import json
usernames = json.loads(open("usernames.json", "r").read())
password = '%4B%65%6E%79%6F%6E%35%25' # A URL-encoded (percent-encoded) password
siteurl = '192.168.122.61'
def run():
username = random.choice(usernames)
token = requests.get('http://' + siteurl + '/login/token.php?username=' + username + '&password=' + password + '&service=moodle_mobile_app').json()["token"]
print(f'{token}')
while True:
#run()
#"""
numthreads = 200
threads = []
for i in range(numthreads):
t = threading.Thread(target = run)
t.daemon = True
threads.append(t)
for i in range(numthreads):
threads[i].start()
for i in range(numthreads):
threads[i].join() | #""" | |
builder.go | package main
type Builder struct {
r Reader
w Writer
}
func NewBuilder(r Reader, w Writer) *Builder {
return &Builder{r, w}
}
func (b *Builder) write(n *Node) {
switch n.Type {
case BlockNode:
b.w.Block(n.Text)
case ListNode:
b.w.List(n.Text)
case SectionNode:
b.w.Section(n.Text)
case TextNode:
b.w.Text(n.Text)
case TextBoldNode:
b.w.TextBold(n.Text)
case TextUnderlineNode:
b.w.TextUnderline(n.Text)
case BreakNode:
b.w.Break(n.Text)
}
for _, c := range n.Childs {
b.write(c)
}
}
func (b *Builder) Build(f *AppInfo) (string, error) {
root, err := b.r.Read(f.Doc)
if err != nil {
return "", err
}
b.w.Meta(f.Name, f.Time)
b.w.Section("Name")
for _, node := range root.Childs {
if node.Type == SectionNode {
break
}
b.write(node) | b.w.Section("Synopsis")
b.w.Text(f.Name)
b.w.TextUnderline("[options...]")
b.w.TextUnderline("[argument...]")
b.w.Section("Options")
for _, opt := range f.Flags {
var doc string
b.w.Flag(opt.Name, opt.Short, opt.IsBoolean)
if opt.Doc != "" {
doc = opt.Doc
} else {
doc = opt.Usage
}
node, err := b.r.Read(doc)
if err != nil {
panic(err)
}
b.write(node)
}
for _, node := range root.Childs {
if node.Type == SectionNode {
b.write(node)
}
}
return b.w.Done(), nil
} | }
|
cassette-calculator.py | #!/usr/bin/python3
import time
def isTime(str):
try:
time.strptime(str, '%M:%S')
return True
except ValueError:
return False
def timeToSec(input):
return int(input.split(":")[0]) * 60 + int(input.split(":")[1])
def secToTime(input):
return str(input // 60) + ":" + '{:02d}'.format(input % 60)
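# Hand-checked examples for the two converters:
#   timeToSec("3:25") == 205
#   secToTime(205) == "3:25"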
def main():
print("Select an option:")
print("1) Enter tracks to find album length")
print("2) Enter tracks to split album evenly")
print(" ) Enter tracks to find ideal side length")
print("4) Convert min:sec to seconds")
print("5) Convert seconds to min:sec")
print("6) Find average track length")
print("Q) Exit calculator")
choice = input("> ")
if choice == "1":
print("(Enter \'q\' to calculate)")
total_time = 0
i = 0
while (True):
i += 1
print("Track " + str(i) + " length (min:sec): ")
choice = input("> ")
if isTime(choice):
total_time += timeToSec(choice)
else:
break
print("Total album length is " + secToTime(total_time) + ".")
elif choice == "2":
print("(Enter \'q\' to calculate)")
tracks = []
i = 0
while (True):
i += 1
print("Track " + str(i) + " length (min:sec): ")
choice = input("> ")
if isTime(choice):
tracks.append(timeToSec(choice))
else:
break
if len(tracks) == 0:
print("It'll be really easy to divide the tracks in your album when there are none.")
elif len(tracks) == 1:
print("It'll be really easy to divide the tracks in your album when there\'s only one.")
elif len(tracks) == 2:
print("The total album length is " + secToTime(sum(tracks)) + ".")
print("The album will most evenly be split if track 1 is on Side A (" + secToTime(tracks[0]) + "),")
print("And track 2 is on Side B (" + secToTime(tracks[1]) + ").")
else:
halfway_time = sum(tracks) // 2
halfway_track = -1
k = 0
while (k != len(tracks)):
if sum(tracks[:k + 1]) >= halfway_time:
break
k += 1
print(str(halfway_time))
print("The total album length is " + secToTime(sum(tracks)) + ".")
print("The album will most evenly be split if tracks 1-" + str(k) + " are on Side A (" + secToTime(sum(tracks[:k])) + "),")
print("And tracks " + str(k+1) + "-" + str(len(tracks)) + " are on Side B (" + secToTime(sum(tracks[k:])) + ").")
elif choice == "4":
print("Input time in format (min:sec): ")
choice = input("> ")
print(timeToSec(choice))
elif choice == "5":
print("Input time in seconds: ")
choice = input("> ")
print(secToTime(int(choice)))
elif choice == "6":
print("(Enter \'q\' to calculate)")
total_time = 0
i = 0
while (True):
i += 1
print("Track " + str(i) + " length (min:sec): ")
choice = input("> ")
if isTime(choice):
total_time += timeToSec(choice)
else:
break
if total_time == 0:
print("It doesn\'t seem as if you've input any tracks!")
else:
print("The average track length (" + secToTime(total_time) + " / " + str(i - 1) + " tracks) is " + secToTime(total_time // (i - 1)) + ".")
elif choice == str.lower("q"):
return 0
| print("")
main()
print("* Cassette Calculator *")
main() | |
mod.rs | extern crate nalgebra as na;
use na::{distance, Matrix2, Point2, Rotation2, Unit, Vector2};
pub type Float = f64;
pub type P2 = Point2<Float>;
pub type V2 = Vector2<Float>;
pub type U2 = Unit<V2>;
pub type Normal = Unit<V2>;
pub type Rot2 = Rotation2<Float>;
pub type Reflection = (P2, Normal);
pub type ClosestReflection = (Reflection, Vec<Reflection>);
pub const EPSILON: Float = 0.000_001;
pub struct OneOrTwo<T: Copy + Clone> {
items: (T, Option<T>),
iter_ix: usize,
}
impl<T: Copy + Clone> OneOrTwo<T> {
pub fn new(item: T) -> OneOrTwo<T> {
OneOrTwo {
items: (item, None),
iter_ix: 0,
}
}
pub fn map<U, F>(self, func: F) -> OneOrTwo<U>
where
F: Fn(T) -> U,
U: Copy + Clone,
{
OneOrTwo {
items: (func(self.items.0), self.items.1.map(func)),
iter_ix: 0,
}
}
pub fn to_vec(&self) -> Vec<T> {
if let Some(item_b) = self.items.1 {
vec![self.items.0, item_b]
} else {
vec![self.items.0]
}
}
pub fn into_vec(self) -> Vec<T> {
if let Some(item_b) = self.items.1 {
vec![self.items.0, item_b]
} else {
vec![self.items.0]
}
}
pub fn add(&mut self, item: T) {
if self.items.1.is_none() {
self.items.1 = Some(item);
}
}
pub fn mappend(&mut self, other: OneOrTwo<T>) {
self.add(other.items.0);
}
pub fn is_full(&self) -> bool {
self.items.1.is_some()
}
pub fn get_items(&self) -> Option<(T, T)> {
if let (item_a, Some(item_b)) = self.items { | } else {
None
}
}
pub fn get_first(&self) -> T {
self.items.0
}
pub fn get_second(&self) -> Option<T> {
self.items.1
}
pub fn swap(&mut self) {
if let Some(two) = self.items.1 {
let new_two = self.items.0;
self.items.0 = two;
self.items.1 = Some(new_two);
}
}
}
impl<T: Copy + Clone> Iterator for OneOrTwo<T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
let res = match self.iter_ix {
0 => Some(self.items.0),
1 => self.items.1,
_ => None,
};
self.iter_ix += 1;
res
}
}
pub fn between(num: Float, a: Float, b: Float) -> bool {
(a..=b).contains(&num)
}
/// The smallest positive value is always in the first position.
/// Returns None if both values are negative
pub fn smallest_positive_sort(a: Float, b: Float) -> Option<(Float, Float)> {
if a >= 0. {
if b > a || b < 0. {
Some((a, b))
} else {
Some((b, a))
}
} else if b >= 0. {
Some((b, a))
} else {
None
}
}
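// Illustrative hand-checked cases (not part of the original source):
//   smallest_positive_sort(3., 1.)   == Some((1., 3.))
//   smallest_positive_sort(-2., 5.)  == Some((5., -2.))
//   smallest_positive_sort(-1., -4.) == None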
pub fn get_smallest_positive_by<F, T>(v: &mut Vec<T>, f: F) -> Option<T>
where
F: Fn(&T) -> Float,
T: Copy,
{
let mut min_eval = Float::MAX;
let mut res = None;
for num in v {
let eval = f(&num);
if eval >= 0. && eval < min_eval {
min_eval = eval;
res = Some(*num);
}
}
res
}
pub fn first<A, B>((a, _): (A, B)) -> A {
a
}
pub fn nearest_option<T: Iterator<Item = Reflection>, U: Iterator<Item = Reflection>>(
p: &P2,
ova: Option<T>,
ovb: Option<U>,
) -> Option<ClosestReflection> {
let oa: Option<ClosestReflection> = ova.and_then(|iter| {
let mut min_distance = Float::MAX;
let mut res = None;
let v: Vec<Reflection> = iter.collect();
for a in v.iter() {
let dist = distance(p, &a.0);
if dist < min_distance {
min_distance = dist;
res = Some(*a);
}
}
res.map(|p| (p, v))
});
let ob: Option<ClosestReflection> = ovb.and_then(|iter| {
let mut min_distance = Float::MAX;
let mut res = None;
let v: Vec<Reflection> = iter.collect();
for b in v.iter() {
let dist = distance(p, &b.0);
if dist < min_distance {
min_distance = dist;
res = Some(*b);
}
}
res.map(|p| (p, v))
});
match (oa, ob) {
(Some((a, va)), Some((b, vb))) => {
if distance(p, &a.0) < distance(p, &b.0) {
Some((a, va))
} else {
Some((b, vb))
}
}
(Some((a, va)), None) => Some((a, va)),
(None, Some((b, vb))) => Some((b, vb)),
(None, None) => None,
}
}
pub fn farthest_option<T: Iterator<Item = Reflection>, U: Iterator<Item = Reflection>>(
p: &P2,
ova: Option<T>,
ovb: Option<U>,
) -> Option<ClosestReflection> {
let oa: Option<ClosestReflection> = ova.and_then(|iter| {
let mut max_distance = 0.;
let mut res = None;
let v: Vec<Reflection> = iter.collect();
for a in v.iter() {
let dist = distance(p, &a.0);
if dist > max_distance {
max_distance = dist;
res = Some(*a);
}
}
res.map(|a| (a, v))
});
let ob: Option<ClosestReflection> = ovb.and_then(|iter| {
let mut max_distance = 0.;
let mut res = None;
let v: Vec<Reflection> = iter.collect();
for b in v.iter() {
let dist = distance(p, &b.0);
if dist > max_distance {
max_distance = dist;
res = Some(*b);
}
}
res.map(|b| (b, v))
});
match (oa, ob) {
(Some((a, va)), Some((b, vb))) => {
if distance(p, &a.0) > distance(p, &b.0) {
Some((a, va))
} else {
Some((b, vb))
}
}
(Some((a, va)), None) => Some((a, va)),
(None, Some((b, vb))) => Some((b, vb)),
(None, None) => None,
}
}
pub fn extend_opt_vec<T>(isa: Option<Vec<T>>, isb: Option<Vec<T>>) -> Option<Vec<T>> {
let mut res;
match (isa, isb) {
(Some(va), Some(vb)) => {
res = va;
if res.is_empty() {
if vb.is_empty() {
None
} else {
Some(vb)
}
} else if vb.is_empty() {
Some(res)
} else {
res.extend(vb.into_iter());
Some(res)
}
}
(Some(va), _) => {
if !va.is_empty() {
Some(va)
} else {
None
}
}
(_, Some(vb)) => {
if !vb.is_empty() {
Some(vb)
} else {
None
}
}
_ => None,
}
}
pub fn inverse(mat: &Matrix2<Float>) -> Option<Matrix2<Float>> {
let a = mat.index(0);
// b <==> c because storage in Matrix2 is column major
let c = mat.index(1);
let b = mat.index(2);
let d = mat.index(3);
let det = a * d - b * c;
if det == 0.0 {
return None;
}
let idet = 1.0 / det;
Some(Matrix2::new(d * idet, -b * idet, -c * idet, a * idet))
}
pub fn is_clockwise_points(p1: &V2, p2: &V2, p3: &V2) -> bool {
let v21: V2 = p1 - p2;
// counterclockwise rotation of diff
let perpendicular = V2::new(-v21.y, v21.x);
(p3 - p2).dot(&perpendicular).is_sign_positive()
}
pub fn is_clockwise_directions(d1: &V2, d2: &V2) -> bool {
// quarter turn counter clockwise
let perpendicular = V2::new(-d1.y, d1.x);
d2.dot(&perpendicular) < 0.
}
/// This function returns the projected points x-value
pub fn separation_axis_projection(origin: &P2, direction: &U2, p: &P2) -> Float {
let rot = Rotation2::rotation_between(direction, &U2::new_unchecked(V2::new(0., 1.)));
(rot * (p - origin)).x
}
/// This function returns the global coordinates of a local point with its rotation and origin
pub fn local_to_global(origin: &P2, rotation: &Rotation2<Float>, p: &V2) -> P2 {
origin + rotation * p
}
pub fn quadratic_roots(a: Float, b: Float) -> Vec<Float> {
let mah = -1. * a * 0.5;
let discriminant = mah * mah - b;
match discriminant {
_ if discriminant.abs() == 0. => vec![mah],
_ if discriminant > 0. => {
let root = discriminant.sqrt();
vec![mah - root, mah + root]
}
_ => vec![],
}
}
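// Hand-checked example: quadratic_roots(-3., 2.) solves the monic equation
// x^2 - 3x + 2 = 0 and returns vec![1., 2.].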
pub fn cubic_roots(steps: u8, a: Float, b: Float, c: Float) -> Vec<Float> {
let big_q = (a * a - 3. * b) / 9.;
let big_r = (2. * a.powi(3) - 9. * a * b + 27. * c) / 54.;
let theta;
let x3;
if big_r * big_r < big_q.powi(3) {
theta = (big_r / big_q.powf(1.5)).acos();
x3 = -2. * big_q.sqrt() * (theta / 3.).cos() - a / 3.;
} else {
let big_a = -big_r.signum() * (big_r.abs() + (big_r.powi(2) - big_q.powi(3)).sqrt()).cbrt();
let big_b = if big_a != 0. { big_q / big_a } else { 0. };
x3 = big_a + big_b - a / 3.;
}
let (mut u1, mut u2);
let mut gamma = -x3;
let mut alpha = a - gamma;
let mut beta = b - gamma * alpha;
let (mut delta1, mut delta2, mut delta3);
let (mut q1, mut q2, mut q3);
let (mut e1, mut e2, mut e3) = (0., 0., c - gamma * beta);
for _ in 0..steps {
u1 = alpha - gamma;
u2 = beta - gamma * u1;
q1 = e1;
q2 = e2 - gamma * q1;
q3 = e3 - gamma * q2;
if u2 == 0. {
delta3 = 0.;
} else {
delta3 = q3 / u2;
}
delta2 = q2 - u1 * delta3;
delta1 = q1 - delta3;
alpha += delta1;
beta += delta2;
gamma += delta3;
e1 = a - gamma - alpha;
e2 = b - alpha * gamma - beta;
e3 = c - gamma * beta;
}
// solve the quadratic equation
let mut res = quadratic_roots(alpha, beta);
res.push(x3);
res
} | Some((item_a, item_b)) |
snippetBuilder.tsx | /// <reference path="../../built/pxtlib.d.ts" />
import * as React from "react";
import * as data from "./data";
import * as sui from "./sui";
import * as md from "./marked";
import * as compiler from './compiler';
import * as ReactDOM from 'react-dom';
import * as pkg from './package';
import * as toolbox from "./toolbox";
import * as core from "./core";
import { InputHandler } from './inputHandler';
type ISettingsProps = pxt.editor.ISettingsProps;
interface SnippetBuilderProps extends ISettingsProps {
mainWorkspace: Blockly.Workspace;
config: pxt.SnippetConfig;
}
interface DefaultAnswersMap {
[answerToken: string]: pxt.SnippetAnswerTypes;
}
interface AnswersMap {
[answerToken: string]: pxt.SnippetAnswerTypes;
}
interface SnippetBuilderState {
visible?: boolean;
tsOutput?: string;
mdOutput?: string;
answers?: AnswersMap;
history: number[];
defaults: DefaultAnswersMap; // Will be typed once more clearly defined
config?: pxt.SnippetConfig; // Will be a config type
actions?: sui.ModalButton[];
}
/**
* Snippet builder takes a static config file and builds a modal with inputs and outputs based on config settings.
 * An output type is attached to the start of your markdown, allowing you to define a number of markdown output types (blocks, lang).
 * An initial output is set, and outputs defined at each question are appended to the initial output.
* answerTokens can be defined and are replaced before being outputted. This allows you to output answers and default values.
*/
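// A rough sketch of what such a config might look like, using only fields referenced in
// this file (values are hypothetical, not taken from a real pxtsnippets.json):
// {
//   "name": "Example snippet",
//   "outputType": "blocks",
//   "initialOutput": "let $spriteName = 0",
//   "questions": [{
//     "title": "Name your variable",
//     "inputs": [{ "answerToken": "spriteName", "defaultAnswer": "mySprite" }],
//     "hint": "The name used in the generated code."
//   }]
// }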
export class SnippetBuilder extends data.Component<SnippetBuilderProps, SnippetBuilderState> {
constructor(props: SnippetBuilderProps) {
super(props);
this.state = {
visible: false,
answers: {},
history: [0], // Index to track current question
defaults: {},
config: props.config,
tsOutput: props.config.initialOutput
};
this.cleanup = this.cleanup.bind(this);
this.hide = this.hide.bind(this);
this.cancel = this.cancel.bind(this);
this.confirm = this.confirm.bind(this);
this.backPage = this.backPage.bind(this);
this.nextPage = this.nextPage.bind(this);
}
/**
* Creates a hashmap with answerToken keys and the default value pair as
* provided by our config file.
*/
buildDefaults() {
const { config } = this.state;
const defaults: DefaultAnswersMap = {};
for (const question of config.questions) {
const { inputs } = question;
for (const input of inputs) {
const { defaultAnswer, answerToken } = input;
defaults[answerToken] = defaultAnswer;
}
}
this.setState({ defaults }, this.generateOutputMarkdown);
}
toggleActionButton() {
let newActionButton: sui.ModalButton;
if (this.isLastQuestion()) {
newActionButton = {
label: lf("Done"),
onclick: this.confirm,
icon: "check",
className: "approve positive"
};
} else {
newActionButton = {
label: lf("Next"),
onclick: this.nextPage,
icon: 'arrow right',
className: 'arrow right',
};
}
if (this.state.actions[1] !== newActionButton) {
this.setState({
actions: [
this.state.actions[0],
newActionButton
]
});
}
}
initializeActionButtons() {
const actions: sui.ModalButton[] = [
{
label: lf("Back"),
onclick: this.backPage,
icon: 'arrow left',
className: 'arrow left',
labelPosition: 'left',
},
{
label: lf("Next"),
onclick: this.nextPage,
icon: 'arrow right',
className: 'arrow right',
},
];
this.setState({ actions });
}
componentDidMount() {
// Sets default values
this.buildDefaults();
}
/**
 * @param tsOutput - Takes in a string and returns the tokenized output
* Loops over each token previously added to defaults and replaces with the answer value if one
* exists. Otherwise it replaces the token with the provided default value.
*/
replaceTokens(tsOutput: string) {
const { answers, defaults } = this.state;
let tokenizedOutput = tsOutput;
const tokens = Object.keys(defaults);
// Replaces output tokens with answer if available or default value
for (let token of tokens) {
const value = answers[token] || defaults[token];
tokenizedOutput = tokenizedOutput.split(`$${token}`).join(value);
}
return tokenizedOutput;
}
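// For example (hypothetical values): with defaults { spriteName: "mySprite" } and no answer
// given yet, the output "$spriteName.x = 10" is tokenized to "mySprite.x = 10".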
/**
*
* This attaches three backticks to the front followed by an output type (blocks, lang)
* The current output is then tokenized and three backticks are appended to the end of the string.
*/
generateOutputMarkdown = pxt.Util.debounce(() => {
const { config, tsOutput } = this.state;
// Attaches starting and ending line based on output type
let md = `\`\`\`${config.outputType}\n`;
md += this.replaceTokens(tsOutput);
md += `\n\`\`\``;
this.setState({ mdOutput: md });
}, 300, false);
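// For instance (hypothetical values): with outputType "blocks" and a tokenized output of
// "mySprite.x = 10", mdOutput becomes a fenced snippet: three backticks plus "blocks",
// then the code line, then three closing backticks.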
hide() {
this.setState({
visible: false
});
}
show() {
pxt.tickEvent('snippet.builder.show', null, { interactiveConsent: true });
this.initializeActionButtons();
this.setState({
visible: true,
});
}
cleanup() {
// Reset state to initial values
this.setState({
answers: {},
history: [0],
tsOutput: this.props.config.initialOutput,
});
Blockly.hideChaff();
}
cancel() {
const { name } = this.state.config;
pxt.tickEvent("snippet.builder.cancel", { snippet: name, page: this.getCurrentPage() }, { interactiveConsent: true });
this.hide();
this.cleanup();
}
findRootBlock(xmlDOM: Element, type?: string): Element {
for (const child in xmlDOM.children) {
const xmlChild = xmlDOM.children[child];
if (xmlChild.tagName === 'block') {
if (type) {
const childType = xmlChild.getAttribute('type');
if (childType && childType === type) {
return xmlChild
// return this.findRootBlock(xmlChild);
}
} else {
return xmlChild;
}
}
const childChildren = this.findRootBlock(xmlChild);
if (childChildren) {
return childChildren;
}
}
return null;
}
getOnStartBlock(mainWorkspace: Blockly.Workspace) {
const topBlocks = mainWorkspace.getTopBlocks(true);
for (const block of topBlocks) {
if (block.type === 'pxt-on-start') {
return block;
}
}
return null;
} | /**
* Takes the output from state, runs replace tokens, decompiles the resulting typescript
* and outputs the result as a Blockly xmlDOM. This then uses appendDomToWorkspace to attach
* our xmlDOM to the mainWorkspaces passed to the component.
*/
injectBlocksToWorkspace() {
const { tsOutput } = this.state;
const { mainWorkspace } = this.props
compiler.getBlocksAsync()
.then(blocksInfo => compiler.decompileBlocksSnippetAsync(this.replaceTokens(tsOutput), blocksInfo))
.then(resp => {
// Convert XML text to xml dom in order to parse
const xmlDOM = Blockly.Xml.textToDom(resp);
// TODO(jb) hard coded in topmost child should be generalized
const xmlOnStartBlock = this.findRootBlock(xmlDOM, 'pxt-on-start');
// Finds the on start blocks children
const toAttach = this.findRootBlock(xmlOnStartBlock);
const rootConnection = Blockly.Xml.domToBlock(toAttach, mainWorkspace);
// Hard coded in top blocks
this.getOnStartBlock(mainWorkspace)
.getInput("HANDLER").connection.connect(rootConnection.previousConnection);
}).catch((e) => {
core.errorNotification(e);
throw new Error(`Failed to decompile snippet output`);
});
}
confirm() {
const { name } = this.state.config;
pxt.tickEvent('snippet.builder.back.page', { snippet: name, page: this.getCurrentPage() }, { interactiveConsent: true });
this.injectBlocksToWorkspace();
Blockly.hideChaff();
this.hide();
}
getCurrentPage() {
const { history } = this.state;
return history[history.length - 1];
}
getCurrentQuestion() {
const { config } = this.state;
return config.questions[this.getCurrentPage()];
}
getNextQuestion() {
const { config } = this.state;
const currentQuestion = this.getCurrentQuestion();
if (currentQuestion.goto) {
return config.questions[currentQuestion.goto.question];
}
return null;
}
isLastQuestion() {
if (this.getCurrentQuestion().goto) {
return false;
}
return true;
}
updateOutput(question: pxt.SnippetQuestions) {
const { tsOutput } = this.state;
if (question.output && tsOutput.indexOf(question.output) === -1) {
this.setState({ tsOutput: `${tsOutput}\n${question.output}`}, this.generateOutputMarkdown);
}
}
/**
* Changes page by 1 if next question exists.
* Looks for output and appends the next questions output if it exists and
* is not already attached to the current output.
*/
nextPage() {
const { config, history } = this.state;
const currentQuestion = this.getCurrentQuestion();
const goto = currentQuestion.goto;
if (this.isLastQuestion()) {
this.confirm();
} else if (goto) {
// Look ahead and update markdown
const nextQuestion = this.getNextQuestion();
this.updateOutput(nextQuestion);
this.setState({ history: [...history, goto.question ]}, this.toggleActionButton)
pxt.tickEvent('snippet.builder.next.page', { snippet: config.name, page: goto.question}, { interactiveConsent: true });
}
}
backPage() {
const { history, config } = this.state;
if (history.length > 1) {
this.setState({ history: history.slice(0, history.length - 1)}, () => {
this.toggleActionButton();
pxt.tickEvent('snippet.builder.back.page', { snippet: config.name, page: this.getCurrentPage() }, { interactiveConsent: true });
});
}
}
onChange = (answerToken: string) => (v: string) => {
this.setState((prevState: SnippetBuilderState) => ({
answers: {
...prevState.answers,
[answerToken]: v,
}
}), this.generateOutputMarkdown);
}
renderCore() {
const { visible, answers, config, mdOutput, actions } = this.state;
const { parent } = this.props;
const currentQuestion = this.getCurrentQuestion();
return (
<sui.Modal isOpen={visible} className={'snippet-builder'} size="large"
closeOnEscape={false} closeIcon={true} closeOnDimmerClick={false} closeOnDocumentClick={false}
dimmer={true} buttons={actions} header={config.name} onClose={this.cancel}
>
<div className="ui equal width grid">
<div className='column snippet-question'>
{currentQuestion &&
<div>
<div className='ui segment raised'>
<h3>{pxt.Util.rlf(currentQuestion.title)}</h3>
<div className='ui equal width grid'>
{currentQuestion.inputs.map((input: pxt.SnippetQuestionInput) =>
<span className='column' key={`span-${input.answerToken}`}>
<InputHandler
onChange={this.onChange(input.answerToken)}
input={input}
value={answers[input.answerToken] || ''}
onEnter={this.nextPage}
key={input.answerToken}
/>
</span>
)}
</div>
{currentQuestion.errorMessage && <p className='snippet-error'>{currentQuestion.errorMessage}</p>}
</div>
{currentQuestion.hint &&
<div className='snippet hint ui segment'>{pxt.Util.rlf(currentQuestion.hint)}</div>}
</div>
}
</div>
<div className='snippet output-section column'>
{mdOutput && <md.MarkedContent className='snippet-markdown-content' markdown={mdOutput} parent={parent} />}
</div>
</div>
</sui.Modal>
)
}
}
function getSnippetExtensions(): pxt.SnippetConfig[] {
const snippetConfigs = pxt.Util.concat(pkg.allEditorPkgs().map(p => p.sortedFiles()))
.filter(file => file.name === 'pxtsnippets.json')
.map(file => pxt.Util.jsonTryParse(file.content)) as pxt.SnippetConfig[][];
return pxt.Util.concat(snippetConfigs);
}
function openSnippetDialog(config: pxt.SnippetConfig, editor: Blockly.WorkspaceSvg, parent: pxt.editor.IProjectView) {
const wrapper = document.body.appendChild(document.createElement('div'));
const props = { parent: parent, mainWorkspace: editor, config };
const snippetBuilder = ReactDOM.render(
React.createElement(SnippetBuilder, props),
wrapper
) as SnippetBuilder;
snippetBuilder.show();
}
export function initializeSnippetExtensions(ns: string, extraBlocks: (toolbox.BlockDefinition | toolbox.ButtonDefinition)[], editor: Blockly.WorkspaceSvg, parent: pxt.editor.IProjectView) {
const snippetExtensions = getSnippetExtensions();
snippetExtensions
.filter(snippet => snippet.namespace == ns)
.forEach(snippet => {
extraBlocks.push({
name: `SNIPPET${snippet.name}_BUTTON`,
type: "button",
attributes: {
blockId: `SNIPPET${snippet.name}_BUTTON`,
label: snippet.label ? pxt.Util.rlf(snippet.label) : pxt.Util.lf("Editor"),
weight: 101,
group: snippet.group && snippet.group,
},
callback: () => {
openSnippetDialog(snippet, editor, parent);
}
});
});
} | |
getDatabaseAccount.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20210115
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// An Azure Cosmos DB database account.
func LookupDatabaseAccount(ctx *pulumi.Context, args *LookupDatabaseAccountArgs, opts ...pulumi.InvokeOption) (*LookupDatabaseAccountResult, error) |
type LookupDatabaseAccountArgs struct {
// Cosmos DB database account name.
AccountName string `pulumi:"accountName"`
// The name of the resource group. The name is case insensitive.
ResourceGroupName string `pulumi:"resourceGroupName"`
}
// An Azure Cosmos DB database account.
type LookupDatabaseAccountResult struct {
// API specific properties.
ApiProperties *ApiPropertiesResponse `pulumi:"apiProperties"`
// The object representing the policy for taking backups on an account.
BackupPolicy interface{} `pulumi:"backupPolicy"`
// List of Cosmos DB capabilities for the account
Capabilities []CapabilityResponse `pulumi:"capabilities"`
// The cassandra connector offer type for the Cosmos DB database C* account.
ConnectorOffer *string `pulumi:"connectorOffer"`
// The consistency policy for the Cosmos DB database account.
ConsistencyPolicy *ConsistencyPolicyResponse `pulumi:"consistencyPolicy"`
// The CORS policy for the Cosmos DB database account.
Cors []CorsPolicyResponse `pulumi:"cors"`
// The offer type for the Cosmos DB database account. Default value: Standard.
DatabaseAccountOfferType string `pulumi:"databaseAccountOfferType"`
// Disable write operations on metadata resources (databases, containers, throughput) via account keys
DisableKeyBasedMetadataWriteAccess *bool `pulumi:"disableKeyBasedMetadataWriteAccess"`
// The connection endpoint for the Cosmos DB database account.
DocumentEndpoint string `pulumi:"documentEndpoint"`
// Flag to indicate whether to enable storage analytics.
EnableAnalyticalStorage *bool `pulumi:"enableAnalyticalStorage"`
// Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage. Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account.
EnableAutomaticFailover *bool `pulumi:"enableAutomaticFailover"`
// Enables the cassandra connector on the Cosmos DB C* account
EnableCassandraConnector *bool `pulumi:"enableCassandraConnector"`
// Flag to indicate whether Free Tier is enabled.
EnableFreeTier *bool `pulumi:"enableFreeTier"`
// Enables the account to write in multiple locations
EnableMultipleWriteLocations *bool `pulumi:"enableMultipleWriteLocations"`
// An array that contains the regions ordered by their failover priorities.
FailoverPolicies []FailoverPolicyResponse `pulumi:"failoverPolicies"`
// The unique resource identifier of the ARM resource.
Id string `pulumi:"id"`
// Identity for the resource.
Identity *ManagedServiceIdentityResponse `pulumi:"identity"`
// List of IpRules.
IpRules []IpAddressOrRangeResponse `pulumi:"ipRules"`
// Flag to indicate whether to enable/disable Virtual Network ACL rules.
IsVirtualNetworkFilterEnabled *bool `pulumi:"isVirtualNetworkFilterEnabled"`
// The URI of the key vault
KeyVaultKeyUri *string `pulumi:"keyVaultKeyUri"`
// Indicates the type of database account. This can only be set at database account creation.
Kind *string `pulumi:"kind"`
// The location of the resource group to which the resource belongs.
Location *string `pulumi:"location"`
// An array that contains all of the locations enabled for the Cosmos DB account.
Locations []LocationResponse `pulumi:"locations"`
// The name of the ARM resource.
Name string `pulumi:"name"`
// Indicates what services are allowed to bypass firewall checks.
NetworkAclBypass *string `pulumi:"networkAclBypass"`
// An array that contains the Resource Ids for Network Acl Bypass for the Cosmos DB account.
NetworkAclBypassResourceIds []string `pulumi:"networkAclBypassResourceIds"`
// List of Private Endpoint Connections configured for the Cosmos DB account.
PrivateEndpointConnections []PrivateEndpointConnectionResponse `pulumi:"privateEndpointConnections"`
// The status of the Cosmos DB account at the time the operation was called. The status can be one of following. 'Creating' – the Cosmos DB account is being created. When an account is in Creating state, only properties that are specified as input for the Create Cosmos DB account operation are returned. 'Succeeded' – the Cosmos DB account is active for use. 'Updating' – the Cosmos DB account is being updated. 'Deleting' – the Cosmos DB account is being deleted. 'Failed' – the Cosmos DB account failed creation. 'DeletionFailed' – the Cosmos DB account deletion failed.
ProvisioningState string `pulumi:"provisioningState"`
// Whether requests from Public Network are allowed
PublicNetworkAccess *string `pulumi:"publicNetworkAccess"`
// An array that contains the read locations enabled for the Cosmos DB account.
ReadLocations []LocationResponse `pulumi:"readLocations"`
// Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
Tags map[string]string `pulumi:"tags"`
// The type of Azure resource.
Type string `pulumi:"type"`
// List of Virtual Network ACL rules configured for the Cosmos DB account.
VirtualNetworkRules []VirtualNetworkRuleResponse `pulumi:"virtualNetworkRules"`
// An array that contains the write location for the Cosmos DB account.
WriteLocations []LocationResponse `pulumi:"writeLocations"`
}
| {
var rv LookupDatabaseAccountResult
err := ctx.Invoke("azure-native:documentdb/v20210115:getDatabaseAccount", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
} |
core.py | # Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module has been relocated to ``dazl.client``, ``dazl.damlast``, ``dazl.protocols``, or
``dazl.query``.
"""
from typing import TYPE_CHECKING, TypeVar, Union
import warnings
from ..client.errors import ConfigurationError, DazlPartyMissingError, UnknownTemplateWarning
from ..client.state import (
ContractContextualData,
ContractContextualDataCollection,
ContractsHistoricalState,
ContractsState,
)
from ..damlast.daml_lf_1 import TypeConName
from ..damlast.pkgfile import Dar
from ..prim import ContractData, ContractId as ContractId_, DazlError, DazlWarning, Party
from ..prim.errors import DazlImportError
from ..protocols.errors import ConnectionTimeoutError, UserTerminateRequest
from ..query import ContractMatch
from ..util.proc_util import ProcessDiedException
if TYPE_CHECKING:
from .types import Type, TypeReference
T = TypeVar("T")
__all__ = [
"ConfigurationError",
"ConnectionTimeoutError",
"ContractContextualData",
"ContractContextualDataCollection",
"ContractData",
"ContractId",
"ContractMatch",
"ContractsHistoricalState",
"ContractsState",
"Dar",
"DazlError",
"DazlImportError",
"DazlPartyMissingError",
"DazlWarning",
"Party",
"ProcessDiedException",
"UnknownTemplateWarning",
"UserTerminateRequest",
]
class ContractId(ContractId_):
__slots__ = ("_value_type_deprecated",)
_value_type_deprecated: "TypeReference"
def __init__(self, contract_id: str, template_id: "Union[str, Type, TypeConName]"):
warnings.warn(
"dazl.model.core.ContractId is deprecated; use dazl.prim.ContractId instead.",
DeprecationWarning,
stacklevel=2,
)
from ..damlast.compat import parse_template
if not isinstance(contract_id, str):
raise ValueError("contract_id must be a string")
value = contract_id
value_type, value_type_deprecated = parse_template(template_id)
super().__init__(value_type, value)
object.__setattr__(self, "_value_type_deprecated", value_type_deprecated)
@property
def contract_id(self) -> str:
"""
Get the raw contract ID value (for example, ``"#4:1"``).
"""
warnings.warn(
"ContractId.contract_id is deprecated; use ContractId.value instead.",
DeprecationWarning,
stacklevel=2,
)
return self.value
@property
def template_id(self) -> "TypeReference":
|
def exercise(self, choice_name, arguments=None):
"""
Create an :class:`ExerciseCommand` that represents the result of exercising a choice on this
contract with the specified choice.
:param choice_name:
The name of the choice to exercise.
:param arguments:
(optional) A ``dict`` of named values to send as parameters to the choice exercise.
"""
warnings.warn(
"ContractId.exercise is deprecated; prefer calling dazl.ledger.Connection.exercise or "
"dazl.client.PartyClient.submit_exercise, or use dazl.ledger.ExerciseCommand instead.",
DeprecationWarning,
stacklevel=2,
)
from .writing import ExerciseCommand
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return ExerciseCommand(self, choice_name, arguments=arguments)
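# Hypothetical usage (the choice name and arguments below are made up for illustration):
#   cmd = cid.exercise("Accept", {"note": "thanks"})
#   # cmd is an ExerciseCommand that can then be submitted through a client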
def replace(self, contract_id=None, template_id=None):
"""
Return a new :class:`ContractId` instance replacing specified fields with values.
"""
warnings.warn(
"ContractId.replace is deprecated; simply construct a ContractId with the desired "
"values instead.",
DeprecationWarning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return ContractId(
contract_id if contract_id is not None else self.value,
template_id if template_id is not None else self.value_type,
)
def for_json(self):
"""
Return the JSON representation of this contract. This is currently just the contract ID
string itself.
"""
return self.value
class CommandTimeoutError(DazlError):
"""
Raised when a corresponding event for a command was not seen in the appropriate time window.
"""
def __init__(self):
warnings.warn(
"This error is never raised; this symbol will be removed in dazl v9",
DeprecationWarning,
stacklevel=2,
)
class ConnectionClosedError(DazlError):
"""
Raised when trying to do something that requires a connection after connection pools have been
closed.
"""
def __init__(self):
warnings.warn(
"This error is never raised; this symbol will be removed in dazl v9",
DeprecationWarning,
stacklevel=2,
)
| """
Get the type of template that is pointed to by this :class:`ContractId` as a
:class:`TypeReference`. Note that usage of :class:`Type` and :class:`TypeReference` are
deprecated, and :meth:`value_type` should be used instead.
As of dazl 7.3.0, the :class:`TemplateId` is always normalized to a :class:`TypeReference`,
regardless of what the :class:`ContractId` was constructed with.
"""
warnings.warn(
"ContractId.template_id is deprecated; use ContractId.value_type instead.",
DeprecationWarning,
stacklevel=2,
)
return self._value_type_deprecated |
ConcatFeedForwardNN.py | from torch import nn
import torch
from ..base import LinkPredictionBase
from .ConcatFeedForwardNNLayer import ConcatFeedForwardNNLayer
class ConcatFeedForwardNN(LinkPredictionBase):
r"""Specific class for link prediction task.
Parameters
----------
input_size : int
The length of input node embeddings
num_class : int
The number of node categories for classification
hidden_size : list of int type values
Example for a two-layer FeedforwardNN: [50, 20]
activation: the activation function class for each fully connected layer
Default: nn.ReLU()
Example: nn.ReLU(),nn.Sigmoid().
"""
def | (self, input_size, hidden_size,num_class,activation=nn.ReLU()):
super(ConcatFeedForwardNN, self).__init__()
self.classifier=ConcatFeedForwardNNLayer(input_size, num_class, hidden_size,activation)
def forward(self, input_graph):
r"""
Forward functions to compute the logits tensor for link prediction.
Parameters
----------
input graph : GraphData
The tensors stored in the node feature field named "node_emb" in the
input_graph are used for link prediction.
Returns
---------
output_graph : GraphData
The computed logit tensor for each pair of nodes in the graph are stored
in the node feature field named "edge_logits".
logit tensor shape is: [num_class]
"""
#get the nod embedding from the graph
node_emb=input_graph.node_features['node_emb']
#add the edges and edge prediction logits into the graph
num_node=node_emb.shape[0]
node_idx_list=[idx for idx in range(num_node)]
src_idx=torch.tensor(node_idx_list).view(-1,1).repeat(1,num_node).view(-1)
dst_idx=torch.tensor(node_idx_list).view(1,-1).repeat(num_node,1).view(-1)
input_graph.add_edges(src_idx,dst_idx)
input_graph.edge_features['logits']=self.classifier(node_emb)
return input_graph
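# A minimal usage sketch; constructing the GraphData object is library-specific and omitted:
#   model = ConcatFeedForwardNN(input_size=64, hidden_size=[32], num_class=2)
#   graph.node_features['node_emb'] = torch.randn(num_nodes, 64)
#   graph = model(graph)  # adds all-pairs edges and an edge feature 'logits'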
| __init__ |
mod.rs | //! Types and traits for generating responses.
//!
//! See [`axum::response`] for more details.
//!
//! [`axum::response`]: https://docs.rs/axum/latest/axum/response/index.html
use crate::{
body::{boxed, BoxBody},
BoxError,
};
use bytes::{buf::Chain, Buf, Bytes, BytesMut};
use http::{
header::{self, HeaderMap, HeaderName, HeaderValue},
StatusCode,
};
use http_body::{
combinators::{MapData, MapErr},
Empty, Full, SizeHint,
};
use std::{
borrow::Cow,
convert::Infallible,
iter,
pin::Pin,
task::{Context, Poll},
};
/// Type alias for [`http::Response`] whose body type defaults to [`BoxBody`], the most common body
/// type used with axum.
pub type Response<T = BoxBody> = http::Response<T>;
/// Trait for generating responses.
///
/// Types that implement `IntoResponse` can be returned from handlers.
///
/// # Implementing `IntoResponse`
///
/// You generally shouldn't have to implement `IntoResponse` manually, as axum
/// provides implementations for many common types.
///
/// However it might be necessary if you have a custom error type that you want
/// to return from handlers:
///
/// ```rust
/// use axum::{
/// Router,
/// body::{self, Bytes},
/// routing::get,
/// http::StatusCode,
/// response::{IntoResponse, Response},
/// };
///
/// enum MyError {
/// SomethingWentWrong,
/// SomethingElseWentWrong,
/// }
///
/// impl IntoResponse for MyError {
/// fn into_response(self) -> Response {
/// let body = match self {
/// MyError::SomethingWentWrong => {
/// body::boxed(body::Full::from("something went wrong"))
/// },
/// MyError::SomethingElseWentWrong => {
/// body::boxed(body::Full::from("something else went wrong"))
/// },
/// };
///
/// Response::builder()
/// .status(StatusCode::INTERNAL_SERVER_ERROR)
/// .body(body)
/// .unwrap()
/// }
/// }
///
/// // `Result<impl IntoResponse, MyError>` can now be returned from handlers
/// let app = Router::new().route("/", get(handler));
///
/// async fn handler() -> Result<(), MyError> {
/// Err(MyError::SomethingWentWrong)
/// }
/// # async {
/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap();
/// # };
/// ```
///
/// Or if you have a custom body type you'll also need to implement
/// `IntoResponse` for it:
///
/// ```rust
/// use axum::{
/// body,
/// routing::get,
/// response::{IntoResponse, Response},
/// Router,
/// };
/// use http_body::Body;
/// use http::HeaderMap;
/// use bytes::Bytes;
/// use std::{
/// convert::Infallible,
/// task::{Poll, Context},
/// pin::Pin,
/// };
///
/// struct MyBody;
///
/// // First implement `Body` for `MyBody`. This could for example use
/// // some custom streaming protocol.
/// impl Body for MyBody {
/// type Data = Bytes;
/// type Error = Infallible;
///
/// fn poll_data(
/// self: Pin<&mut Self>,
/// cx: &mut Context<'_>
/// ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
/// # unimplemented!()
/// // ...
/// }
///
/// fn poll_trailers(
/// self: Pin<&mut Self>,
/// cx: &mut Context<'_>
/// ) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
/// # unimplemented!()
/// // ...
/// }
/// }
///
/// // Now we can implement `IntoResponse` directly for `MyBody`
/// impl IntoResponse for MyBody {
/// fn into_response(self) -> Response {
/// Response::new(body::boxed(self))
/// }
/// }
///
/// // We don't need to implement `IntoResponse for Response<MyBody>` as that is
/// // covered by a blanket implementation in axum.
///
/// // `MyBody` can now be returned from handlers.
/// let app = Router::new().route("/", get(|| async { MyBody }));
/// # async {
/// # hyper::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap();
/// # };
/// ```
pub trait IntoResponse {
/// Create a response.
fn into_response(self) -> Response;
}
/// Trait for generating response headers.
pub trait IntoResponseHeaders {
/// The return type of `into_headers`.
///
/// The iterator item is a [`Result`] to allow the implementation to return a server error
/// instead.
///
/// The header name is optional because [`HeaderMap`]'s iterator doesn't yield it multiple times
/// for headers that have multiple values, to avoid unnecessary copies.
#[doc(hidden)]
type IntoIter: IntoIterator<Item = Result<(Option<HeaderName>, HeaderValue), Response>>;
/// Attempt to turn `self` into a list of headers.
///
/// In practice, only the implementation for `axum::response::Headers` ever returns `Err(_)`.
#[doc(hidden)]
fn into_headers(self) -> Self::IntoIter;
}
impl IntoResponse for () {
fn into_response(self) -> Response {
Response::new(boxed(Empty::new()))
}
}
impl IntoResponse for Infallible {
fn into_response(self) -> Response {
match self {}
}
}
impl<T, E> IntoResponse for Result<T, E>
where
T: IntoResponse,
E: IntoResponse,
{
fn into_response(self) -> Response {
match self {
Ok(value) => value.into_response(),
Err(err) => err.into_response(),
}
}
}
impl<B> IntoResponse for Response<B>
where
B: http_body::Body<Data = Bytes> + Send + 'static,
B::Error: Into<BoxError>,
{
fn into_response(self) -> Response {
self.map(boxed)
}
}
macro_rules! impl_into_response_for_body {
($body:ty) => {
impl IntoResponse for $body {
fn into_response(self) -> Response {
Response::new(boxed(self))
}
}
};
}
impl_into_response_for_body!(Full<Bytes>);
impl_into_response_for_body!(Empty<Bytes>);
impl IntoResponse for http::response::Parts {
fn into_response(self) -> Response {
Response::from_parts(self, boxed(Empty::new()))
}
}
impl<E> IntoResponse for http_body::combinators::BoxBody<Bytes, E>
where
E: Into<BoxError> + 'static,
{
fn into_response(self) -> Response {
Response::new(boxed(self))
}
}
impl<E> IntoResponse for http_body::combinators::UnsyncBoxBody<Bytes, E>
where
E: Into<BoxError> + 'static,
{
fn into_response(self) -> Response {
Response::new(boxed(self))
}
}
impl<B, F> IntoResponse for MapData<B, F>
where
B: http_body::Body + Send + 'static,
F: FnMut(B::Data) -> Bytes + Send + 'static,
B::Error: Into<BoxError>,
{
fn into_response(self) -> Response {
Response::new(boxed(self))
}
}
impl<B, F, E> IntoResponse for MapErr<B, F>
where
B: http_body::Body<Data = Bytes> + Send + 'static,
F: FnMut(B::Error) -> E + Send + 'static,
E: Into<BoxError>,
{
fn into_response(self) -> Response {
Response::new(boxed(self))
}
}
impl IntoResponse for &'static str {
#[inline]
fn into_response(self) -> Response {
Cow::Borrowed(self).into_response()
}
}
impl IntoResponse for String {
#[inline]
fn into_response(self) -> Response {
Cow::<'static, str>::Owned(self).into_response()
}
}
impl IntoResponse for Cow<'static, str> {
fn into_response(self) -> Response {
let mut res = Response::new(boxed(Full::from(self)));
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()),
);
res
}
}
impl IntoResponse for Bytes {
fn into_response(self) -> Response {
let mut res = Response::new(boxed(Full::from(self)));
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()),
);
res
}
}
impl IntoResponse for BytesMut {
fn into_response(self) -> Response |
}
impl<T, U> IntoResponse for Chain<T, U>
where
T: Buf + Unpin + Send + 'static,
U: Buf + Unpin + Send + 'static,
{
fn into_response(self) -> Response {
let (first, second) = self.into_inner();
let mut res = Response::new(boxed(BytesChainBody {
first: Some(first),
second: Some(second),
}));
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()),
);
res
}
}
struct BytesChainBody<T, U> {
first: Option<T>,
second: Option<U>,
}
impl<T, U> http_body::Body for BytesChainBody<T, U>
where
T: Buf + Unpin,
U: Buf + Unpin,
{
type Data = Bytes;
type Error = Infallible;
fn poll_data(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
if let Some(mut buf) = self.first.take() {
let bytes = buf.copy_to_bytes(buf.remaining());
return Poll::Ready(Some(Ok(bytes)));
}
if let Some(mut buf) = self.second.take() {
let bytes = buf.copy_to_bytes(buf.remaining());
return Poll::Ready(Some(Ok(bytes)));
}
Poll::Ready(None)
}
fn poll_trailers(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
Poll::Ready(Ok(None))
}
fn is_end_stream(&self) -> bool {
self.first.is_none() && self.second.is_none()
}
fn size_hint(&self) -> SizeHint {
match (self.first.as_ref(), self.second.as_ref()) {
(Some(first), Some(second)) => {
let total_size = first.remaining() + second.remaining();
SizeHint::with_exact(total_size as u64)
}
(Some(buf), None) => SizeHint::with_exact(buf.remaining() as u64),
(None, Some(buf)) => SizeHint::with_exact(buf.remaining() as u64),
(None, None) => SizeHint::with_exact(0),
}
}
}
impl IntoResponse for &'static [u8] {
fn into_response(self) -> Response {
let mut res = Response::new(boxed(Full::from(self)));
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()),
);
res
}
}
impl IntoResponse for Vec<u8> {
fn into_response(self) -> Response {
let mut res = Response::new(boxed(Full::from(self)));
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()),
);
res
}
}
impl IntoResponse for Cow<'static, [u8]> {
fn into_response(self) -> Response {
let mut res = Response::new(boxed(Full::from(self)));
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static(mime::APPLICATION_OCTET_STREAM.as_ref()),
);
res
}
}
impl IntoResponse for StatusCode {
fn into_response(self) -> Response {
Response::builder()
.status(self)
.body(boxed(Empty::new()))
.unwrap()
}
}
impl IntoResponse for HeaderMap {
fn into_response(self) -> Response {
let mut res = Response::new(boxed(Empty::new()));
*res.headers_mut() = self;
res
}
}
impl<T> IntoResponse for (StatusCode, T)
where
T: IntoResponse,
{
fn into_response(self) -> Response {
let mut res = self.1.into_response();
*res.status_mut() = self.0;
res
}
}
impl<H, T> IntoResponse for (H, T)
where
H: IntoResponseHeaders,
T: IntoResponse,
{
fn into_response(self) -> Response {
let mut res = self.1.into_response();
if let Err(e) = try_extend_headers(res.headers_mut(), self.0.into_headers()) {
return e;
}
res
}
}
impl<H, T> IntoResponse for (StatusCode, H, T)
where
H: IntoResponseHeaders,
T: IntoResponse,
{
fn into_response(self) -> Response {
let mut res = self.2.into_response();
*res.status_mut() = self.0;
if let Err(e) = try_extend_headers(res.headers_mut(), self.1.into_headers()) {
return e;
}
res
}
}
impl IntoResponseHeaders for HeaderMap {
// FIXME: Use type_alias_impl_trait when available
type IntoIter = iter::Map<
http::header::IntoIter<HeaderValue>,
fn(
(Option<HeaderName>, HeaderValue),
) -> Result<(Option<HeaderName>, HeaderValue), Response>,
>;
fn into_headers(self) -> Self::IntoIter {
self.into_iter().map(Ok)
}
}
// Slightly adjusted version of `impl<T> Extend<(Option<HeaderName>, T)> for HeaderMap<T>`.
// Accepts an iterator that returns Results and short-circuits on an `Err`.
fn try_extend_headers(
headers: &mut HeaderMap,
iter: impl IntoIterator<Item = Result<(Option<HeaderName>, HeaderValue), Response>>,
) -> Result<(), Response> {
use http::header::Entry;
let mut iter = iter.into_iter();
// The structure of this is a bit weird, but it is mostly to make the
// borrow checker happy.
let (mut key, mut val) = match iter.next().transpose()? {
Some((Some(key), val)) => (key, val),
Some((None, _)) => panic!("expected a header name, but got None"),
None => return Ok(()),
};
'outer: loop {
let mut entry = match headers.entry(key) {
Entry::Occupied(mut e) => {
// Replace all previous values while maintaining a handle to
// the entry.
e.insert(val);
e
}
Entry::Vacant(e) => e.insert_entry(val),
};
// As long as `HeaderName` is none, keep inserting the value into
// the current entry
loop {
match iter.next().transpose()? {
Some((Some(k), v)) => {
key = k;
val = v;
continue 'outer;
}
Some((None, v)) => {
entry.append(v);
}
None => {
return Ok(());
}
}
}
}
}
| {
self.freeze().into_response()
} |
transaction.go | package dht
import (
"sync"
"time"
)
// Transaction keeps track of a message exchange between nodes, such as a
// query message and a response message.
type Transaction struct {
mu sync.Mutex
remoteAddr Addr
t string
response chan Msg
onResponse func(Msg) // Called with the server locked.
done chan struct{}
queryPacket []byte
timer *time.Timer
s *Server
retries int
lastSend time.Time
userOnResponse func(Msg, bool)
}
// SetResponseHandler sets up a function to be called when a query response
// arrives.
func (t *Transaction) SetResponseHandler(f func(Msg, bool)) {
t.mu.Lock()
defer t.mu.Unlock()
t.userOnResponse = f
t.tryHandleResponse()
}
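// Hypothetical usage sketch: react to the reply, or to the transaction being abandoned.
//   t.SetResponseHandler(func(m Msg, ok bool) {
//       if !ok {
//           return // transaction closed without a response
//       }
//       // inspect m here
//   })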
func (t *Transaction) tryHandleResponse() {
if t.userOnResponse == nil {
return
}
select {
case r, ok := <-t.response:
t.userOnResponse(r, ok)
// Shouldn't be called more than once.
t.userOnResponse = nil
default:
}
}
func (t *Transaction) key() transactionKey {
return transactionKey{
t.remoteAddr.String(),
t.t,
}
}
func (t *Transaction) startTimer() {
t.timer = time.AfterFunc(jitterDuration(queryResendEvery, time.Second), t.timerCallback)
}
func (t *Transaction) timerCallback() {
t.mu.Lock()
defer t.mu.Unlock()
select {
case <-t.done:
return
default:
}
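	// Give up after the initial send and two resends.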
if t.retries == 2 {
t.timeout()
return
}
t.retries++
t.sendQuery()
if t.timer.Reset(jitterDuration(queryResendEvery, time.Second)) {
panic("timer should have fired to get here")
}
}
func (t *Transaction) sendQuery() error {
err := t.s.writeToNode(t.queryPacket, t.remoteAddr)
if err != nil {
return err
}
t.lastSend = time.Now()
return nil | }
func (t *Transaction) timeout() {
go func() {
t.s.mu.Lock()
defer t.s.mu.Unlock()
t.s.nodeTimedOut(t.remoteAddr)
}()
t.close()
}
func (t *Transaction) close() {
if t.closing() {
return
}
t.queryPacket = nil
close(t.response)
t.tryHandleResponse()
close(t.done)
t.timer.Stop()
go func() {
t.s.mu.Lock()
defer t.s.mu.Unlock()
t.s.deleteTransaction(t)
}()
}
func (t *Transaction) closing() bool {
select {
case <-t.done:
return true
default:
return false
}
}
// Close (abandon) the transaction.
func (t *Transaction) Close() {
t.mu.Lock()
defer t.mu.Unlock()
t.close()
}
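// handleResponse delivers a reply to this transaction: it marks the transaction as
// done, invokes the internal callback with the server locked, and hands the message
// to any user-registered response handler.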
func (t *Transaction) handleResponse(m Msg) {
t.mu.Lock()
if t.closing() {
t.mu.Unlock()
return
}
close(t.done)
t.mu.Unlock()
if t.onResponse != nil {
t.s.mu.Lock()
t.onResponse(m)
t.s.mu.Unlock()
}
t.queryPacket = nil
select {
case t.response <- m:
default:
panic("blocked handling response")
}
close(t.response)
t.tryHandleResponse()
} | |
masterwindow.go | package nucular
import (
"bufio"
"fmt"
"image"
"image/draw"
"image/png"
"os"
"sync"
"sync/atomic"
"time"
"github.com/aarzilli/nucular/command"
"github.com/aarzilli/nucular/rect"
nstyle "github.com/aarzilli/nucular/style"
)
type MasterWindow interface {
context() *context
Main()
Changed()
Close()
Closed() bool
OnClose(func())
ActivateEditor(ed *TextEditor)
Style() *nstyle.Style
SetStyle(*nstyle.Style)
GetPerf() bool
SetPerf(bool)
Input() *Input
PopupOpen(title string, flags WindowFlags, rect rect.Rect, scale bool, updateFn UpdateFn)
Walk(WindowWalkFn)
ResetWindows() *DockSplit
Lock()
Unlock()
}
func NewMasterWindow(flags WindowFlags, title string, updatefn UpdateFn) MasterWindow {
return NewMasterWindowSize(flags, title, image.Point{640, 480}, updatefn)
}
type WindowWalkFn func(title string, data interface{}, docked bool, splitSize int, rect rect.Rect)
type masterWindowCommon struct {
ctx *context
layout panel
// show performance counters
Perf bool
uilock sync.Mutex
prevCmds []command.Command
}
func (mw *masterWindowCommon) masterWindowCommonInit(ctx *context, flags WindowFlags, updatefn UpdateFn, wnd MasterWindow) {
ctx.Input.Mouse.valid = true
ctx.DockedWindows.Split.MinSize = 40
mw.layout.Flags = flags
ctx.setupMasterWindow(&mw.layout, updatefn)
mw.ctx = ctx
mw.ctx.mw = wnd
mw.SetStyle(nstyle.FromTheme(nstyle.DefaultTheme, 1.0))
}
func (mw *masterWindowCommon) context() *context {
return mw.ctx
}
func (mw *masterWindowCommon) Walk(fn WindowWalkFn) {
mw.ctx.Walk(fn)
}
func (mw *masterWindowCommon) ResetWindows() *DockSplit {
return mw.ctx.ResetWindows()
}
func (mw *masterWindowCommon) Input() *Input {
return &mw.ctx.Input
}
func (mw *masterWindowCommon) ActivateEditor(ed *TextEditor) {
mw.ctx.activateEditor = ed
}
func (mw *masterWindowCommon) Style() *nstyle.Style {
return &mw.ctx.Style
}
func (mw *masterWindowCommon) SetStyle(style *nstyle.Style) {
mw.ctx.Style = *style
mw.ctx.Style.Defaults()
}
func (mw *masterWindowCommon) GetPerf() bool {
return mw.Perf
}
func (mw *masterWindowCommon) SetPerf(perf bool) {
mw.Perf = perf
}
| atomic.AddInt32(&mw.ctx.changed, 1)
}
func (mw *masterWindowCommon) Lock() {
mw.uilock.Lock()
}
func (mw *masterWindowCommon) Unlock() {
mw.uilock.Unlock()
}
// PopupOpen opens a popup window inside the master window. The popup's contents are
// drawn by updateFn on every frame until the popup is closed.
func (mw *masterWindowCommon) PopupOpen(title string, flags WindowFlags, rect rect.Rect, scale bool, updateFn UpdateFn) {
go func() {
mw.ctx.mw.Lock()
defer mw.ctx.mw.Unlock()
mw.ctx.popupOpen(title, flags, rect, scale, updateFn)
mw.ctx.mw.Changed()
}()
}
var frameCnt int
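// dumpFrame writes the rendered frame to framedump/frameNNN.png together with a
// frameNNN.txt file containing per-command draw timings for that frame.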
func (w *masterWindowCommon) dumpFrame(wimg *image.RGBA, t0, t1, te time.Time, nprimitives int) {
bounds := image.Rect(w.ctx.Input.Mouse.Pos.X, w.ctx.Input.Mouse.Pos.Y, w.ctx.Input.Mouse.Pos.X+10, w.ctx.Input.Mouse.Pos.Y+10)
draw.Draw(wimg, bounds, image.White, bounds.Min, draw.Src)
if fh, err := os.Create(fmt.Sprintf("framedump/frame%03d.png", frameCnt)); err == nil {
png.Encode(fh, wimg)
fh.Close()
}
if fh, err := os.Create(fmt.Sprintf("framedump/frame%03d.txt", frameCnt)); err == nil {
wr := bufio.NewWriter(fh)
fps := 1.0 / te.Sub(t0).Seconds()
tot := time.Duration(0)
fmt.Fprintf(wr, "# Update %0.4fms = %0.4f updatefn + %0.4f draw (%d primitives) [max fps %0.2f]\n", te.Sub(t0).Seconds()*1000, t1.Sub(t0).Seconds()*1000, te.Sub(t1).Seconds()*1000, nprimitives, fps)
for i := range w.prevCmds {
fmt.Fprintf(wr, "%0.2fms %#v\n", w.ctx.cmdstim[i].Seconds()*1000, w.prevCmds[i])
tot += w.ctx.cmdstim[i]
}
fmt.Fprintf(wr, "sanity check %0.2fms\n", tot.Seconds()*1000)
wr.Flush()
fh.Close()
}
frameCnt++
}
// compares cmds to the last draw frame, returns true if there is a change
func (w *masterWindowCommon) drawChanged() bool {
contextAllCommands(w.ctx)
w.ctx.Reset()
cmds := w.ctx.cmds
if len(cmds) != len(w.prevCmds) {
return true
}
for i := range cmds {
if cmds[i].Kind != w.prevCmds[i].Kind {
return true
}
cmd := &cmds[i]
pcmd := &w.prevCmds[i]
switch cmds[i].Kind {
case command.ScissorCmd:
if *pcmd != *cmd {
return true
}
case command.LineCmd:
if *pcmd != *cmd {
return true
}
case command.RectFilledCmd:
if i == 0 {
cmd.RectFilled.Color.A = 0xff
}
if *pcmd != *cmd {
return true
}
case command.TriangleFilledCmd:
if *pcmd != *cmd {
return true
}
case command.CircleFilledCmd:
if *pcmd != *cmd {
return true
}
case command.ImageCmd:
if *pcmd != *cmd {
return true
}
case command.TextCmd:
if *pcmd != *cmd {
return true
}
default:
panic(UnknownCommandErr)
}
}
return false
} | // Forces an update of the window.
func (mw *masterWindowCommon) Changed() { |
pgn.go | package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"sync"
"time"
"github.com/urfave/cli/v2"
)
func main() {
tbPath := "/etc/default/mnm"
if runtime.GOOS == "darwin" {
tbPath = "/etc/defaults/mnm"
}
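	// The token file is optional: if it is missing or empty, the default --token value is blank.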
tb, _ := os.ReadFile(tbPath)
token := strings.TrimSpace(string(tb))
app := &cli.App{
Name: "mnm",
Usage: "monitor & notifier to messenger",
Version: "0.0.1",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "api",
Value: "https://mnm.sh",
Usage: "The webhook api",
},
&cli.StringFlag{
Name: "token",
				Value: token,
Usage: fmt.Sprintf("The webhook token (%s)", tbPath),
},
},
EnableBashCompletion: true,
Commands: []*cli.Command{
{
Name: "run",
Usage: "Run a command",
Action: action,
},
},
}
err := app.Run(os.Args)
if err != nil {
fmt.Println(err)
}
}
func | (c *cli.Context) error {
startAt := time.Now()
api := c.String("api")
token := c.String("token")
prog := c.Args().First()
parts := strings.Split(prog, " ")
name, args := parts[0], parts[1:]
name = strings.TrimSpace(name)
if name == "" {
return fmt.Errorf("invalid command to run %s", prog)
}
cmd := exec.Command(name, args...)
stderr, err := cmd.StderrPipe()
if err != nil {
return err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return err
}
err = cmd.Start()
if err != nil {
return err
}
var wg sync.WaitGroup
for _, p := range []io.ReadCloser{stdout, stderr} {
wg.Add(1)
go func(pipe io.ReadCloser) {
defer wg.Done()
io.Copy(os.Stdout, pipe)
}(p)
}
wg.Wait()
result, err := "OK", cmd.Wait()
if err != nil {
result = err.Error()
}
return notify(api, token, prog, result, startAt)
}
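// notify posts a plain-text summary of the finished run (command, result, runtime) to
// the mnm webhook endpoint as a base64-encoded payload.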
func notify(api, token, run, result string, startAt time.Time) error {
endpoint := api + "/in/" + token
	elapsed := time.Since(startAt).String()
	info := fmt.Sprintf("RUN: %s\r\nRESULT: %s\r\nRUNTIME: %s", run, result, elapsed)
body, _ := json.Marshal(map[string]string{
"category": "PLAIN_TEXT",
"data": base64.URLEncoding.EncodeToString([]byte(info)),
})
req, err := http.NewRequest("POST", endpoint, bytes.NewReader(body))
if err != nil {
return err
}
req.Close = true
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
return nil
}
| action |
models.py | import copy
import os
import re
import json
import sys
import warnings
from collections import namedtuple
from datetime import datetime
from enum import Enum, unique
from json import JSONDecodeError
from operator import lt, le, eq, ge, gt
from boto3 import Session
from collections import OrderedDict
from moto.core.exceptions import JsonRESTError
from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel, BaseModel
from moto.core.utils import unix_time, iso_8601_datetime_without_milliseconds
from moto.events.exceptions import (
ValidationException,
ResourceNotFoundException,
ResourceAlreadyExistsException,
InvalidEventPatternException,
IllegalStatusException,
)
from moto.utilities.paginator import paginate
from moto.utilities.tagging_service import TaggingService
from uuid import uuid4
from .utils import PAGINATION_MODEL
class Rule(CloudFormationModel):
Arn = namedtuple("Arn", ["service", "resource_type", "resource_id"])
def __init__(
self,
name,
region_name,
description,
event_pattern,
schedule_exp,
role_arn,
event_bus_name,
state,
managed_by=None,
targets=None,
):
self.name = name
self.region_name = region_name
self.description = description
self.event_pattern = EventPattern.load(event_pattern)
self.scheduled_expression = schedule_exp
self.role_arn = role_arn
self.event_bus_name = event_bus_name
self.state = state or "ENABLED"
self.managed_by = managed_by # can only be set by AWS services
self.created_by = ACCOUNT_ID
self.targets = targets or []
@property
def arn(self):
event_bus_name = (
""
if self.event_bus_name == "default"
else "{}/".format(self.event_bus_name)
)
return "arn:aws:events:{region}:{account_id}:rule/{event_bus_name}{name}".format(
region=self.region_name,
account_id=ACCOUNT_ID,
event_bus_name=event_bus_name,
name=self.name,
)
@property
def physical_resource_id(self):
return self.name
# This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts
# with Python 2.6, so tracking it with an array it is.
def _check_target_exists(self, target_id):
for i in range(0, len(self.targets)):
if target_id == self.targets[i]["Id"]:
return i
return None
def enable(self):
self.state = "ENABLED"
def disable(self):
self.state = "DISABLED"
def delete(self, region_name):
event_backend = events_backends[region_name]
event_backend.delete_rule(name=self.name)
def put_targets(self, targets):
# Not testing for valid ARNs.
for target in targets:
index = self._check_target_exists(target["Id"])
if index is not None:
self.targets[index] = target
else:
self.targets.append(target)
def remove_targets(self, ids):
for target_id in ids:
index = self._check_target_exists(target_id)
if index is not None:
self.targets.pop(index)
def send_to_targets(self, event_bus_name, event):
event_bus_name = event_bus_name.split("/")[-1]
if event_bus_name != self.event_bus_name.split("/")[-1]:
return
if not self.event_pattern.matches_event(event):
return
# supported targets
# - CloudWatch Log Group
# - EventBridge Archive
# - SQS Queue + FIFO Queue
for target in self.targets:
arn = self._parse_arn(target["Arn"])
if arn.service == "logs" and arn.resource_type == "log-group":
self._send_to_cw_log_group(arn.resource_id, event)
elif arn.service == "events" and not arn.resource_type:
input_template = json.loads(target["InputTransformer"]["InputTemplate"])
archive_arn = self._parse_arn(input_template["archive-arn"])
self._send_to_events_archive(archive_arn.resource_id, event)
elif arn.service == "sqs":
group_id = target.get("SqsParameters", {}).get("MessageGroupId")
self._send_to_sqs_queue(arn.resource_id, event, group_id)
else:
raise NotImplementedError("Expr not defined for {0}".format(type(self)))
def _parse_arn(self, arn):
# http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
        # this method probably needs some more fine tuning,
# when also other targets are supported
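        # Illustrative example:
        #   "arn:aws:logs:us-east-1:123456789012:log-group:/my/group"
        #   -> Arn(service="logs", resource_type="log-group", resource_id="/my/group")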
elements = arn.split(":", 5)
service = elements[2]
resource = elements[5]
if ":" in resource and "/" in resource:
if resource.index(":") < resource.index("/"):
resource_type, resource_id = resource.split(":", 1)
else:
resource_type, resource_id = resource.split("/", 1)
elif ":" in resource:
resource_type, resource_id = resource.split(":", 1)
elif "/" in resource:
resource_type, resource_id = resource.split("/", 1)
else:
resource_type = None
resource_id = resource
return self.Arn(
service=service, resource_type=resource_type, resource_id=resource_id
)
def _send_to_cw_log_group(self, name, event):
from moto.logs import logs_backends
event_copy = copy.deepcopy(event)
event_copy["time"] = iso_8601_datetime_without_milliseconds(
datetime.utcfromtimestamp(event_copy["time"])
)
log_stream_name = str(uuid4())
log_events = [
{
"timestamp": unix_time(datetime.utcnow()),
"message": json.dumps(event_copy),
}
]
logs_backends[self.region_name].create_log_stream(name, log_stream_name)
logs_backends[self.region_name].put_log_events(
name, log_stream_name, log_events, None
)
def _send_to_events_archive(self, resource_id, event):
archive_name, archive_uuid = resource_id.split(":")
archive = events_backends[self.region_name].archives.get(archive_name)
if archive.uuid == archive_uuid:
archive.events.append(event)
def _send_to_sqs_queue(self, resource_id, event, group_id=None):
from moto.sqs import sqs_backends
event_copy = copy.deepcopy(event)
event_copy["time"] = iso_8601_datetime_without_milliseconds(
datetime.utcfromtimestamp(event_copy["time"])
)
if group_id:
queue_attr = sqs_backends[self.region_name].get_queue_attributes(
queue_name=resource_id, attribute_names=["ContentBasedDeduplication"]
)
if queue_attr["ContentBasedDeduplication"] == "false":
warnings.warn(
"To let EventBridge send messages to your SQS FIFO queue, "
"you must enable content-based deduplication."
)
return
sqs_backends[self.region_name].send_message(
queue_name=resource_id,
message_body=json.dumps(event_copy),
group_id=group_id,
)
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["Arn"]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return "Name"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html
return "AWS::Events::Rule"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
properties.setdefault("EventBusName", "default")
if "EventPattern" in properties:
properties["EventPattern"] = json.dumps(properties["EventPattern"])
event_name = resource_name
event_pattern = properties.get("EventPattern")
scheduled_expression = properties.get("ScheduleExpression")
state = properties.get("State")
desc = properties.get("Description")
role_arn = properties.get("RoleArn")
event_bus_name = properties.get("EventBusName")
tags = properties.get("Tags")
backend = events_backends[region_name]
return backend.put_rule(
event_name,
scheduled_expression=scheduled_expression,
event_pattern=event_pattern,
state=state,
description=desc,
role_arn=role_arn,
event_bus_name=event_bus_name,
tags=tags,
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
original_resource.delete(region_name)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
event_backend = events_backends[region_name]
event_backend.delete_rule(resource_name)
def describe(self):
attributes = {
"Arn": self.arn,
"CreatedBy": self.created_by,
"Description": self.description,
"EventBusName": self.event_bus_name,
"EventPattern": self.event_pattern.dump(),
"ManagedBy": self.managed_by,
"Name": self.name,
"RoleArn": self.role_arn,
"ScheduleExpression": self.scheduled_expression,
"State": self.state,
}
attributes = {
attr: value for attr, value in attributes.items() if value is not None
}
return attributes
class EventBus(CloudFormationModel):
def __init__(self, region_name, name, tags=None):
self.region = region_name
self.name = name
self.tags = tags or []
self._statements = {}
@property
def arn(self):
return "arn:aws:events:{region}:{account_id}:event-bus/{name}".format(
region=self.region, account_id=ACCOUNT_ID, name=self.name
)
@property
def policy(self):
if self._statements:
policy = {
"Version": "2012-10-17",
"Statement": [stmt.describe() for stmt in self._statements.values()],
}
return json.dumps(policy)
return None
def has_permissions(self):
return len(self._statements) > 0
def delete(self, region_name):
event_backend = events_backends[region_name]
event_backend.delete_event_bus(name=self.name)
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["Arn", "Name", "Policy"]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
elif attribute_name == "Name":
return self.name
elif attribute_name == "Policy":
return self.policy
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return "Name"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbus.html
return "AWS::Events::EventBus"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
event_name = resource_name
event_source_name = properties.get("EventSourceName")
return event_backend.create_event_bus(
name=event_name, event_source_name=event_source_name
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
original_resource.delete(region_name)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
event_backend = events_backends[region_name]
event_bus_name = resource_name
event_backend.delete_event_bus(event_bus_name)
def _remove_principals_statements(self, *principals):
statements_to_delete = set()
for principal in principals:
for sid, statement in self._statements.items():
if statement.principal == principal:
statements_to_delete.add(sid)
# This is done separately to avoid:
# RuntimeError: dictionary changed size during iteration
for sid in statements_to_delete:
del self._statements[sid]
def add_permission(self, statement_id, action, principal, condition):
self._remove_principals_statements(principal)
statement = EventBusPolicyStatement(
sid=statement_id,
action=action,
principal=principal,
condition=condition,
resource=self.arn,
)
self._statements[statement_id] = statement
def add_policy(self, policy):
policy_statements = policy["Statement"]
principals = [stmt["Principal"] for stmt in policy_statements]
self._remove_principals_statements(*principals)
for new_statement in policy_statements:
sid = new_statement["Sid"]
self._statements[sid] = EventBusPolicyStatement.from_dict(new_statement)
def remove_statement(self, sid):
return self._statements.pop(sid, None)
def remove_statements(self):
self._statements.clear()
class EventBusPolicyStatement:
def __init__(
self, sid, principal, action, resource, effect="Allow", condition=None
):
self.sid = sid
self.principal = principal
self.action = action
self.resource = resource
self.effect = effect
self.condition = condition
def describe(self):
statement = dict(
Sid=self.sid,
Effect=self.effect,
Principal=self.principal,
Action=self.action,
Resource=self.resource,
)
if self.condition:
statement["Condition"] = self.condition
return statement
@classmethod
def from_dict(cls, statement_dict):
params = dict(
sid=statement_dict["Sid"],
effect=statement_dict["Effect"],
principal=statement_dict["Principal"],
action=statement_dict["Action"],
resource=statement_dict["Resource"],
)
condition = statement_dict.get("Condition")
if condition:
params["condition"] = condition
return cls(**params)
class Archive(CloudFormationModel):
# https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListArchives.html#API_ListArchives_RequestParameters
VALID_STATES = [
"ENABLED",
"DISABLED",
"CREATING",
"UPDATING",
"CREATE_FAILED",
"UPDATE_FAILED",
]
def | (
self, region_name, name, source_arn, description, event_pattern, retention
):
self.region = region_name
self.name = name
self.source_arn = source_arn
self.description = description
self.event_pattern = EventPattern.load(event_pattern)
self.retention = retention if retention else 0
self.creation_time = unix_time(datetime.utcnow())
self.state = "ENABLED"
self.uuid = str(uuid4())
self.events = []
self.event_bus_name = source_arn.split("/")[-1]
@property
def arn(self):
return "arn:aws:events:{region}:{account_id}:archive/{name}".format(
region=self.region, account_id=ACCOUNT_ID, name=self.name
)
def describe_short(self):
return {
"ArchiveName": self.name,
"EventSourceArn": self.source_arn,
"State": self.state,
"RetentionDays": self.retention,
"SizeBytes": sys.getsizeof(self.events) if len(self.events) > 0 else 0,
"EventCount": len(self.events),
"CreationTime": self.creation_time,
}
def describe(self):
result = {
"ArchiveArn": self.arn,
"Description": self.description,
"EventPattern": self.event_pattern.dump(),
}
result.update(self.describe_short())
return result
def update(self, description, event_pattern, retention):
if description:
self.description = description
if event_pattern:
self.event_pattern = EventPattern.load(event_pattern)
if retention:
self.retention = retention
def delete(self, region_name):
event_backend = events_backends[region_name]
event_backend.archives.pop(self.name)
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["Arn", "ArchiveName"]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "ArchiveName":
return self.name
elif attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return "ArchiveName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-archive.html
return "AWS::Events::Archive"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
source_arn = properties.get("SourceArn")
description = properties.get("Description")
event_pattern = properties.get("EventPattern")
retention = properties.get("RetentionDays")
return event_backend.create_archive(
resource_name, source_arn, description, event_pattern, retention
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
if new_resource_name == original_resource.name:
properties = cloudformation_json["Properties"]
original_resource.update(
properties.get("Description"),
properties.get("EventPattern"),
properties.get("Retention"),
)
return original_resource
else:
original_resource.delete(region_name)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@unique
class ReplayState(Enum):
# https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListReplays.html#API_ListReplays_RequestParameters
STARTING = "STARTING"
RUNNING = "RUNNING"
CANCELLING = "CANCELLING"
COMPLETED = "COMPLETED"
CANCELLED = "CANCELLED"
FAILED = "FAILED"
class Replay(BaseModel):
def __init__(
self,
region_name,
name,
description,
source_arn,
start_time,
end_time,
destination,
):
self.region = region_name
self.name = name
self.description = description
self.source_arn = source_arn
self.event_start_time = start_time
self.event_end_time = end_time
self.destination = destination
self.state = ReplayState.STARTING
self.start_time = unix_time(datetime.utcnow())
self.end_time = None
@property
def arn(self):
return "arn:aws:events:{region}:{account_id}:replay/{name}".format(
region=self.region, account_id=ACCOUNT_ID, name=self.name
)
def describe_short(self):
return {
"ReplayName": self.name,
"EventSourceArn": self.source_arn,
"State": self.state.value,
"EventStartTime": self.event_start_time,
"EventEndTime": self.event_end_time,
"ReplayStartTime": self.start_time,
"ReplayEndTime": self.end_time,
}
def describe(self):
result = {
"ReplayArn": self.arn,
"Description": self.description,
"Destination": self.destination,
}
result.update(self.describe_short())
return result
def replay_events(self, archive):
event_bus_name = self.destination["Arn"].split("/")[-1]
for event in archive.events:
for rule in events_backends[self.region].rules.values():
rule.send_to_targets(
event_bus_name,
dict(event, **{"id": str(uuid4()), "replay-name": self.name}),
)
self.state = ReplayState.COMPLETED
self.end_time = unix_time(datetime.utcnow())
class Connection(BaseModel):
def __init__(
self, name, region_name, description, authorization_type, auth_parameters,
):
self.uuid = uuid4()
self.name = name
self.region = region_name
self.description = description
self.authorization_type = authorization_type
self.auth_parameters = auth_parameters
self.creation_time = unix_time(datetime.utcnow())
self.state = "AUTHORIZED"
@property
def arn(self):
return "arn:aws:events:{0}:{1}:connection/{2}/{3}".format(
self.region, ACCOUNT_ID, self.name, self.uuid
)
def describe_short(self):
"""
Create the short description for the Connection object.
        Taken from the Response Syntax of this API doc:
- https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DeleteConnection.html
Something to consider:
- The original response also has
- LastAuthorizedTime (number)
- LastModifiedTime (number)
        - At the time of implementing this, there was no way to set or get
        those attributes. That is why they are not in the response.
Returns:
dict
"""
return {
"ConnectionArn": self.arn,
"ConnectionState": self.state,
"CreationTime": self.creation_time,
}
def describe(self):
"""
Create a complete description for the Connection object.
        Taken from the Response Syntax of this API doc:
- https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeConnection.html
Something to consider:
- The original response also has:
- LastAuthorizedTime (number)
- LastModifiedTime (number)
- SecretArn (string)
- StateReason (string)
        - At the time of implementing this, there was no way to set or get
        those attributes. That is why they are not in the response.
Returns:
dict
"""
return {
"AuthorizationType": self.authorization_type,
"AuthParameters": self.auth_parameters,
"ConnectionArn": self.arn,
"ConnectionState": self.state,
"CreationTime": self.creation_time,
"Description": self.description,
"Name": self.name,
}
class Destination(BaseModel):
def __init__(
self,
name,
region_name,
description,
connection_arn,
invocation_endpoint,
invocation_rate_limit_per_second,
http_method,
):
self.uuid = uuid4()
self.name = name
self.region = region_name
self.description = description
self.connection_arn = connection_arn
self.invocation_endpoint = invocation_endpoint
self.invocation_rate_limit_per_second = invocation_rate_limit_per_second
self.creation_time = unix_time(datetime.utcnow())
self.http_method = http_method
self.state = "ACTIVE"
@property
def arn(self):
return "arn:aws:events:{0}:{1}:api-destination/{2}/{3}".format(
self.region, ACCOUNT_ID, self.name, self.uuid
)
def describe(self):
"""
Describes the Destination object as a dict
Docs:
Response Syntax in
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeApiDestination.html
Something to consider:
- The response also has [InvocationRateLimitPerSecond] which was not
available when implementing this method
Returns:
dict
"""
return {
"ApiDestinationArn": self.arn,
"ApiDestinationState": self.state,
"ConnectionArn": self.connection_arn,
"CreationTime": self.creation_time,
"Description": self.description,
"HttpMethod": self.http_method,
"InvocationEndpoint": self.invocation_endpoint,
"InvocationRateLimitPerSecond": self.invocation_rate_limit_per_second,
"LastModifiedTime": self.creation_time,
"Name": self.name,
}
def describe_short(self):
return {
"ApiDestinationArn": self.arn,
"ApiDestinationState": self.state,
"CreationTime": self.creation_time,
"LastModifiedTime": self.creation_time,
}
class EventPattern:
def __init__(self, raw_pattern, pattern):
self._raw_pattern = raw_pattern
self._pattern = pattern
def matches_event(self, event):
if not self._pattern:
return True
event = json.loads(json.dumps(event))
return self._does_event_match(event, self._pattern)
def _does_event_match(self, event, pattern):
items_and_filters = [(event.get(k), v) for k, v in pattern.items()]
nested_filter_matches = [
self._does_event_match(item, nested_filter)
for item, nested_filter in items_and_filters
if isinstance(nested_filter, dict)
]
filter_list_matches = [
self._does_item_match_filters(item, filter_list)
for item, filter_list in items_and_filters
if isinstance(filter_list, list)
]
return all(nested_filter_matches + filter_list_matches)
def _does_item_match_filters(self, item, filters):
allowed_values = [value for value in filters if isinstance(value, str)]
allowed_values_match = item in allowed_values if allowed_values else True
named_filter_matches = [
self._does_item_match_named_filter(item, pattern)
for pattern in filters
if isinstance(pattern, dict)
]
return allowed_values_match and all(named_filter_matches)
@staticmethod
def _does_item_match_named_filter(item, pattern):
filter_name, filter_value = list(pattern.items())[0]
if filter_name == "exists":
is_leaf_node = not isinstance(item, dict)
leaf_exists = is_leaf_node and item is not None
should_exist = filter_value
return leaf_exists if should_exist else not leaf_exists
if filter_name == "prefix":
prefix = filter_value
return item.startswith(prefix)
if filter_name == "numeric":
as_function = {"<": lt, "<=": le, "=": eq, ">=": ge, ">": gt}
operators_and_values = zip(filter_value[::2], filter_value[1::2])
numeric_matches = [
as_function[operator](item, value)
for operator, value in operators_and_values
]
return all(numeric_matches)
else:
warnings.warn(
"'{}' filter logic unimplemented. defaulting to True".format(
filter_name
)
)
return True
@classmethod
def load(cls, raw_pattern):
parser = EventPatternParser(raw_pattern)
pattern = parser.parse()
return cls(raw_pattern, pattern)
def dump(self):
return self._raw_pattern
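# Illustrative usage of EventPattern (the pattern and event below are made up):
#
#   pattern = EventPattern.load(
#       '{"detail": {"state": ["terminated"], "count": [{"numeric": [">=", 1, "<", 10]}]}}'
#   )
#   pattern.matches_event({"detail": {"state": "terminated", "count": 3}})  # True
#   pattern.matches_event({"detail": {"state": "running", "count": 3}})     # False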
class EventPatternParser:
def __init__(self, pattern):
self.pattern = pattern
def _validate_event_pattern(self, pattern):
# values in the event pattern have to be either a dict or an array
for attr, value in pattern.items():
if isinstance(value, dict):
self._validate_event_pattern(value)
elif isinstance(value, list):
if len(value) == 0:
raise InvalidEventPatternException(
reason="Empty arrays are not allowed"
)
else:
raise InvalidEventPatternException(
reason=f"'{attr}' must be an object or an array"
)
def parse(self):
try:
parsed_pattern = json.loads(self.pattern) if self.pattern else dict()
self._validate_event_pattern(parsed_pattern)
return parsed_pattern
except JSONDecodeError:
raise InvalidEventPatternException(reason="Invalid JSON")
class EventsBackend(BaseBackend):
ACCOUNT_ID = re.compile(r"^(\d{1,12}|\*)$")
STATEMENT_ID = re.compile(r"^[a-zA-Z0-9-_]{1,64}$")
_CRON_REGEX = re.compile(r"^cron\(.*\)")
_RATE_REGEX = re.compile(r"^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)")
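    # Accepted schedule expressions look like "cron(0 12 * * ? *)" or "rate(5 minutes)".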
def __init__(self, region_name):
self.rules = OrderedDict()
self.next_tokens = {}
self.region_name = region_name
self.event_buses = {}
self.event_sources = {}
self.archives = {}
self.replays = {}
self.tagger = TaggingService()
self._add_default_event_bus()
self.connections = {}
self.destinations = {}
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""Default VPC endpoint service."""
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "events"
)
def _add_default_event_bus(self):
self.event_buses["default"] = EventBus(self.region_name, "default")
def _gen_next_token(self, index):
        token = os.urandom(128).hex()  # opaque random pagination token
self.next_tokens[token] = index
return token
def _process_token_and_limits(self, array_len, next_token=None, limit=None):
start_index = 0
end_index = array_len
new_next_token = None
if next_token:
start_index = self.next_tokens.pop(next_token, 0)
if limit is not None:
new_end_index = start_index + int(limit)
if new_end_index < end_index:
end_index = new_end_index
new_next_token = self._gen_next_token(end_index)
return start_index, end_index, new_next_token
def _get_event_bus(self, name):
event_bus_name = name.split("/")[-1]
event_bus = self.event_buses.get(event_bus_name)
if not event_bus:
raise ResourceNotFoundException(
"Event bus {} does not exist.".format(event_bus_name)
)
return event_bus
def _get_replay(self, name):
replay = self.replays.get(name)
if not replay:
raise ResourceNotFoundException("Replay {} does not exist.".format(name))
return replay
def put_rule(
self,
name,
*,
description=None,
event_bus_name=None,
event_pattern=None,
role_arn=None,
scheduled_expression=None,
state=None,
managed_by=None,
tags=None,
):
event_bus_name = event_bus_name or "default"
if not event_pattern and not scheduled_expression:
raise JsonRESTError(
"ValidationException",
"Parameter(s) EventPattern or ScheduleExpression must be specified.",
)
if scheduled_expression:
if event_bus_name != "default":
raise ValidationException(
"ScheduleExpression is supported only on the default event bus."
)
if not (
self._CRON_REGEX.match(scheduled_expression)
or self._RATE_REGEX.match(scheduled_expression)
):
raise ValidationException("Parameter ScheduleExpression is not valid.")
existing_rule = self.rules.get(name)
targets = existing_rule.targets if existing_rule else list()
rule = Rule(
name,
self.region_name,
description,
event_pattern,
scheduled_expression,
role_arn,
event_bus_name,
state,
managed_by,
targets=targets,
)
self.rules[name] = rule
if tags:
self.tagger.tag_resource(rule.arn, tags)
return rule
def delete_rule(self, name):
arn = self.rules.get(name).arn
if self.tagger.has_tags(arn):
self.tagger.delete_all_tags_for_resource(arn)
return self.rules.pop(name) is not None
def describe_rule(self, name):
rule = self.rules.get(name)
if not rule:
raise ResourceNotFoundException("Rule {} does not exist.".format(name))
return rule
def disable_rule(self, name):
if name in self.rules:
self.rules[name].disable()
return True
return False
def enable_rule(self, name):
if name in self.rules:
self.rules[name].enable()
return True
return False
@paginate(pagination_model=PAGINATION_MODEL)
def list_rule_names_by_target(self, target_arn, next_token=None, limit=None):
matching_rules = []
for _, rule in self.rules.items():
for target in rule.targets:
if target["Arn"] == target_arn:
matching_rules.append(rule)
return matching_rules
@paginate(pagination_model=PAGINATION_MODEL)
def list_rules(self, prefix=None, next_token=None, limit=None):
match_string = ".*"
if prefix is not None:
match_string = "^" + prefix + match_string
match_regex = re.compile(match_string)
matching_rules = []
for name, rule in self.rules.items():
if match_regex.match(name):
matching_rules.append(rule)
return matching_rules
def list_targets_by_rule(self, rule, next_token=None, limit=None):
# We'll let a KeyError exception be thrown for response to handle if
# rule doesn't exist.
rule = self.rules[rule]
start_index, end_index, new_next_token = self._process_token_and_limits(
len(rule.targets), next_token, limit
)
returned_targets = []
return_obj = {}
for i in range(start_index, end_index):
returned_targets.append(rule.targets[i])
return_obj["Targets"] = returned_targets
if new_next_token is not None:
return_obj["NextToken"] = new_next_token
return return_obj
def put_targets(self, name, event_bus_name, targets):
# super simple ARN check
invalid_arn = next(
(
target["Arn"]
for target in targets
if not re.match(r"arn:[\d\w:\-/]*", target["Arn"])
),
None,
)
if invalid_arn:
raise ValidationException(
"Parameter {} is not valid. "
"Reason: Provided Arn is not in correct format.".format(invalid_arn)
)
for target in targets:
arn = target["Arn"]
if (
":sqs:" in arn
and arn.endswith(".fifo")
and not target.get("SqsParameters")
):
raise ValidationException(
"Parameter(s) SqsParameters must be specified for target: {}.".format(
target["Id"]
)
)
rule = self.rules.get(name)
if not rule:
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus {1}.".format(name, event_bus_name)
)
rule.put_targets(targets)
def put_events(self, events):
num_events = len(events)
if num_events > 10:
# the exact error text is longer, the Value list consists of all the put events
raise ValidationException(
"1 validation error detected: "
"Value '[PutEventsRequestEntry]' at 'entries' failed to satisfy constraint: "
"Member must have length less than or equal to 10"
)
entries = []
for event in events:
if "Source" not in event:
entries.append(
{
"ErrorCode": "InvalidArgument",
"ErrorMessage": "Parameter Source is not valid. Reason: Source is a required argument.",
}
)
elif "DetailType" not in event:
entries.append(
{
"ErrorCode": "InvalidArgument",
"ErrorMessage": "Parameter DetailType is not valid. Reason: DetailType is a required argument.",
}
)
elif "Detail" not in event:
entries.append(
{
"ErrorCode": "InvalidArgument",
"ErrorMessage": "Parameter Detail is not valid. Reason: Detail is a required argument.",
}
)
else:
try:
json.loads(event["Detail"])
except ValueError: # json.JSONDecodeError exists since Python 3.5
entries.append(
{
"ErrorCode": "MalformedDetail",
"ErrorMessage": "Detail is malformed.",
}
)
continue
event_id = str(uuid4())
entries.append({"EventId": event_id})
# if 'EventBusName' is not especially set, it will be sent to the default one
event_bus_name = event.get("EventBusName", "default")
for rule in self.rules.values():
rule.send_to_targets(
event_bus_name,
{
"version": "0",
"id": event_id,
"detail-type": event["DetailType"],
"source": event["Source"],
"account": ACCOUNT_ID,
"time": event.get("Time", unix_time(datetime.utcnow())),
"region": self.region_name,
"resources": event.get("Resources", []),
"detail": json.loads(event["Detail"]),
},
)
return entries
def remove_targets(self, name, event_bus_name, ids):
rule = self.rules.get(name)
if not rule:
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus {1}.".format(name, event_bus_name)
)
rule.remove_targets(ids)
def test_event_pattern(self):
raise NotImplementedError()
@staticmethod
def _put_permission_from_policy(event_bus, policy):
try:
policy_doc = json.loads(policy)
event_bus.add_policy(policy_doc)
except JSONDecodeError:
raise JsonRESTError(
"ValidationException", "This policy contains invalid Json"
)
@staticmethod
def _condition_param_to_stmt_condition(condition):
if condition:
key = condition["Key"]
value = condition["Value"]
condition_type = condition["Type"]
return {condition_type: {key: value}}
return None
def _put_permission_from_params(
self, event_bus, action, principal, statement_id, condition
):
if principal is None:
raise JsonRESTError(
"ValidationException", "Parameter Principal must be specified."
)
if condition and principal != "*":
raise JsonRESTError(
"InvalidParameterValue",
"Value of the parameter 'principal' must be '*' when the parameter 'condition' is set.",
)
if not condition and self.ACCOUNT_ID.match(principal) is None:
raise JsonRESTError(
"InvalidParameterValue",
f"Value {principal} at 'principal' failed to satisfy constraint: "
r"Member must satisfy regular expression pattern: (\d{12}|\*)",
)
if action is None or action != "events:PutEvents":
raise JsonRESTError(
"ValidationException",
"Provided value in parameter 'action' is not supported.",
)
if statement_id is None or self.STATEMENT_ID.match(statement_id) is None:
raise JsonRESTError(
"InvalidParameterValue", r"StatementId must match ^[a-zA-Z0-9-_]{1,64}$"
)
principal = {"AWS": f"arn:aws:iam::{principal}:root"}
stmt_condition = self._condition_param_to_stmt_condition(condition)
event_bus.add_permission(statement_id, action, principal, stmt_condition)
def put_permission(
self, event_bus_name, action, principal, statement_id, condition, policy
):
if not event_bus_name:
event_bus_name = "default"
event_bus = self.describe_event_bus(event_bus_name)
if policy:
self._put_permission_from_policy(event_bus, policy)
else:
self._put_permission_from_params(
event_bus, action, principal, statement_id, condition
)
def remove_permission(self, event_bus_name, statement_id, remove_all_permissions):
if not event_bus_name:
event_bus_name = "default"
event_bus = self.describe_event_bus(event_bus_name)
if remove_all_permissions:
event_bus.remove_statements()
else:
if not event_bus.has_permissions():
raise JsonRESTError(
"ResourceNotFoundException", "EventBus does not have a policy."
)
statement = event_bus.remove_statement(statement_id)
if not statement:
raise JsonRESTError(
"ResourceNotFoundException",
"Statement with the provided id does not exist.",
)
def describe_event_bus(self, name):
if not name:
name = "default"
event_bus = self._get_event_bus(name)
return event_bus
def create_event_bus(self, name, event_source_name=None, tags=None):
if name in self.event_buses:
raise JsonRESTError(
"ResourceAlreadyExistsException",
"Event bus {} already exists.".format(name),
)
if not event_source_name and "/" in name:
raise JsonRESTError(
"ValidationException", "Event bus name must not contain '/'."
)
if event_source_name and event_source_name not in self.event_sources:
raise JsonRESTError(
"ResourceNotFoundException",
"Event source {} does not exist.".format(event_source_name),
)
event_bus = EventBus(self.region_name, name, tags=tags)
self.event_buses[name] = event_bus
if tags:
self.tagger.tag_resource(event_bus.arn, tags)
return self.event_buses[name]
def list_event_buses(self, name_prefix):
if name_prefix:
return [
event_bus
for event_bus in self.event_buses.values()
if event_bus.name.startswith(name_prefix)
]
return list(self.event_buses.values())
def delete_event_bus(self, name):
if name == "default":
raise JsonRESTError(
"ValidationException", "Cannot delete event bus default."
)
event_bus = self.event_buses.pop(name, None)
if event_bus:
self.tagger.delete_all_tags_for_resource(event_bus.arn)
def list_tags_for_resource(self, arn):
name = arn.split("/")[-1]
registries = [self.rules, self.event_buses]
for registry in registries:
if name in registry:
return self.tagger.list_tags_for_resource(registry[name].arn)
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus default.".format(name)
)
def tag_resource(self, arn, tags):
name = arn.split("/")[-1]
registries = [self.rules, self.event_buses]
for registry in registries:
if name in registry:
self.tagger.tag_resource(registry[name].arn, tags)
return {}
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus default.".format(name)
)
def untag_resource(self, arn, tag_names):
name = arn.split("/")[-1]
registries = [self.rules, self.event_buses]
for registry in registries:
if name in registry:
self.tagger.untag_resource_using_names(registry[name].arn, tag_names)
return {}
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus default.".format(name)
)
def create_archive(self, name, source_arn, description, event_pattern, retention):
if len(name) > 48:
raise ValidationException(
" 1 validation error detected: "
"Value '{}' at 'archiveName' failed to satisfy constraint: "
"Member must have length less than or equal to 48".format(name)
)
event_bus = self._get_event_bus(source_arn)
if name in self.archives:
raise ResourceAlreadyExistsException(
"Archive {} already exists.".format(name)
)
archive = Archive(
self.region_name, name, source_arn, description, event_pattern, retention
)
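        # Archiving is backed by an internally managed rule on the source event bus:
        # events that are not replays (no "replay-name" field) are routed to the
        # archive through an input-transformer target.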
rule_event_pattern = json.loads(event_pattern or "{}")
rule_event_pattern["replay-name"] = [{"exists": False}]
rule_name = "Events-Archive-{}".format(name)
rule = self.put_rule(
rule_name,
event_pattern=json.dumps(rule_event_pattern),
event_bus_name=event_bus.name,
managed_by="prod.vhs.events.aws.internal",
)
self.put_targets(
rule.name,
rule.event_bus_name,
[
{
"Id": rule.name,
"Arn": "arn:aws:events:{}:::".format(self.region_name),
"InputTransformer": {
"InputPathsMap": {},
"InputTemplate": json.dumps(
{
"archive-arn": "{0}:{1}".format(
archive.arn, archive.uuid
),
"event": "<aws.events.event.json>",
"ingestion-time": "<aws.events.event.ingestion-time>",
}
),
},
}
],
)
self.archives[name] = archive
return archive
def describe_archive(self, name):
archive = self.archives.get(name)
if not archive:
raise ResourceNotFoundException("Archive {} does not exist.".format(name))
return archive.describe()
def list_archives(self, name_prefix, source_arn, state):
if [name_prefix, source_arn, state].count(None) < 2:
raise ValidationException(
"At most one filter is allowed for ListArchives. "
"Use either : State, EventSourceArn, or NamePrefix."
)
if state and state not in Archive.VALID_STATES:
raise ValidationException(
"1 validation error detected: "
"Value '{0}' at 'state' failed to satisfy constraint: "
"Member must satisfy enum value set: "
"[{1}]".format(state, ", ".join(Archive.VALID_STATES))
)
if [name_prefix, source_arn, state].count(None) == 3:
return [archive.describe_short() for archive in self.archives.values()]
result = []
for archive in self.archives.values():
if name_prefix and archive.name.startswith(name_prefix):
result.append(archive.describe_short())
elif source_arn and archive.source_arn == source_arn:
result.append(archive.describe_short())
elif state and archive.state == state:
result.append(archive.describe_short())
return result
def update_archive(self, name, description, event_pattern, retention):
archive = self.archives.get(name)
if not archive:
raise ResourceNotFoundException("Archive {} does not exist.".format(name))
archive.update(description, event_pattern, retention)
return {
"ArchiveArn": archive.arn,
"CreationTime": archive.creation_time,
"State": archive.state,
}
def delete_archive(self, name):
archive = self.archives.get(name)
if not archive:
raise ResourceNotFoundException("Archive {} does not exist.".format(name))
archive.delete(self.region_name)
def start_replay(
self, name, description, source_arn, start_time, end_time, destination
):
event_bus_arn = destination["Arn"]
event_bus_arn_pattern = r"^arn:aws:events:[a-zA-Z0-9-]+:\d{12}:event-bus/"
if not re.match(event_bus_arn_pattern, event_bus_arn):
raise ValidationException(
"Parameter Destination.Arn is not valid. "
"Reason: Must contain an event bus ARN."
)
self._get_event_bus(event_bus_arn)
archive_name = source_arn.split("/")[-1]
archive = self.archives.get(archive_name)
if not archive:
raise ValidationException(
"Parameter EventSourceArn is not valid. "
"Reason: Archive {} does not exist.".format(archive_name)
)
if event_bus_arn != archive.source_arn:
raise ValidationException(
"Parameter Destination.Arn is not valid. "
"Reason: Cross event bus replay is not permitted."
)
if start_time > end_time:
raise ValidationException(
"Parameter EventEndTime is not valid. "
"Reason: EventStartTime must be before EventEndTime."
)
if name in self.replays:
raise ResourceAlreadyExistsException(
"Replay {} already exists.".format(name)
)
replay = Replay(
self.region_name,
name,
description,
source_arn,
start_time,
end_time,
destination,
)
self.replays[name] = replay
replay.replay_events(archive)
return {
"ReplayArn": replay.arn,
"ReplayStartTime": replay.start_time,
"State": ReplayState.STARTING.value, # the replay will be done before returning the response
}
def describe_replay(self, name):
replay = self._get_replay(name)
return replay.describe()
def list_replays(self, name_prefix, source_arn, state):
if [name_prefix, source_arn, state].count(None) < 2:
raise ValidationException(
"At most one filter is allowed for ListReplays. "
"Use either : State, EventSourceArn, or NamePrefix."
)
valid_states = sorted([item.value for item in ReplayState])
if state and state not in valid_states:
raise ValidationException(
"1 validation error detected: "
"Value '{0}' at 'state' failed to satisfy constraint: "
"Member must satisfy enum value set: "
"[{1}]".format(state, ", ".join(valid_states))
)
if [name_prefix, source_arn, state].count(None) == 3:
return [replay.describe_short() for replay in self.replays.values()]
result = []
for replay in self.replays.values():
if name_prefix and replay.name.startswith(name_prefix):
result.append(replay.describe_short())
elif source_arn and replay.source_arn == source_arn:
result.append(replay.describe_short())
elif state and replay.state == state:
result.append(replay.describe_short())
return result
def cancel_replay(self, name):
replay = self._get_replay(name)
# replays in the state 'COMPLETED' can't be canceled,
# but the implementation is done synchronously,
# so they are done right after the start
if replay.state not in [
ReplayState.STARTING,
ReplayState.RUNNING,
ReplayState.COMPLETED,
]:
raise IllegalStatusException(
"Replay {} is not in a valid state for this operation.".format(name)
)
replay.state = ReplayState.CANCELLED
return {"ReplayArn": replay.arn, "State": ReplayState.CANCELLING.value}
def create_connection(self, name, description, authorization_type, auth_parameters):
connection = Connection(
name, self.region_name, description, authorization_type, auth_parameters
)
self.connections[name] = connection
return connection
def update_connection(self, *, name, **kwargs):
connection = self.connections.get(name)
if not connection:
raise ResourceNotFoundException(
"Connection '{}' does not exist.".format(name)
)
for attr, value in kwargs.items():
if value is not None and hasattr(connection, attr):
setattr(connection, attr, value)
return connection.describe_short()
def list_connections(self):
return self.connections.values()
def describe_connection(self, name):
"""
Retrieves details about a connection.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeConnection.html
Args:
name: The name of the connection to retrieve.
Raises:
ResourceNotFoundException: When the connection is not present.
Returns:
dict
"""
connection = self.connections.get(name)
if not connection:
raise ResourceNotFoundException(
"Connection '{}' does not exist.".format(name)
)
return connection.describe()
def delete_connection(self, name):
"""
Deletes a connection.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DeleteConnection.html
Args:
name: The name of the connection to delete.
Raises:
ResourceNotFoundException: When the connection is not present.
Returns:
dict
"""
connection = self.connections.pop(name, None)
if not connection:
raise ResourceNotFoundException(
"Connection '{}' does not exist.".format(name)
)
return connection.describe_short()
def create_api_destination(
self,
name,
description,
connection_arn,
invocation_endpoint,
invocation_rate_limit_per_second,
http_method,
):
"""
Creates an API destination, which is an HTTP invocation endpoint configured as a target for events.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_CreateApiDestination.html
Returns:
dict
"""
destination = Destination(
name=name,
region_name=self.region_name,
description=description,
connection_arn=connection_arn,
invocation_endpoint=invocation_endpoint,
invocation_rate_limit_per_second=invocation_rate_limit_per_second,
http_method=http_method,
)
self.destinations[name] = destination
return destination.describe_short()
def list_api_destinations(self):
return self.destinations.values()
def describe_api_destination(self, name):
"""
Retrieves details about an API destination.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeApiDestination.html
Args:
name: The name of the API destination to retrieve.
Returns:
dict
"""
destination = self.destinations.get(name)
if not destination:
raise ResourceNotFoundException(
"An api-destination '{}' does not exist.".format(name)
)
return destination.describe()
def update_api_destination(self, *, name, **kwargs):
"""
Creates an API destination, which is an HTTP invocation endpoint configured as a target for events.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_UpdateApiDestination.html
Returns:
dict
"""
destination = self.destinations.get(name)
if not destination:
raise ResourceNotFoundException(
"An api-destination '{}' does not exist.".format(name)
)
for attr, value in kwargs.items():
if value is not None and hasattr(destination, attr):
setattr(destination, attr, value)
return destination.describe_short()
def delete_api_destination(self, name):
"""
Deletes the specified API destination.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DeleteApiDestination.html
Args:
name: The name of the destination to delete.
Raises:
ResourceNotFoundException: When the destination is not present.
Returns:
dict
"""
destination = self.destinations.pop(name, None)
if not destination:
raise ResourceNotFoundException(
"An api-destination '{}' does not exist.".format(name)
)
return {}
events_backends = {}
for region in Session().get_available_regions("events"):
events_backends[region] = EventsBackend(region)
for region in Session().get_available_regions("events", partition_name="aws-us-gov"):
events_backends[region] = EventsBackend(region)
for region in Session().get_available_regions("events", partition_name="aws-cn"):
events_backends[region] = EventsBackend(region)
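# Illustrative usage (names made up): the mocked API resolves the regional backend
# from this mapping, e.g.
#   events_backends["us-east-1"].put_rule("my-rule", event_pattern='{"source": ["my.app"]}')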
| __init__ |
20.js | /**
* Copyright IBM Corp. 2019, 2020
*
* This source code is licensed under the Apache-2.0 license found in the
* LICENSE file in the root directory of this source tree.
*
* Code generated by @carbon/icon-build-helpers. DO NOT EDIT.
*/
import { _ as _objectWithoutProperties, I as Icon, a as _extends } from '../Icon-63ed8f4f.js';
import '@carbon/icon-helpers';
import 'prop-types';
import React from 'react';
var _ref2 =
/*#__PURE__*/
/*#__PURE__*/
React.createElement("path", {
d: "M12.4531,25A8.7775,8.7775,0,0,0,14,20a10.6,10.6,0,0,0-.18-2H22V16H13.2175c-.0842-.2109-.17-.4194-.2556-.624A9.8586,9.8586,0,0,1,12,11a4.792,4.792,0,0,1,5-5,6.1234,6.1234,0,0,1,5.2222,2.6279l1.5556-1.2558A8.11,8.11,0,0,0,17,4a6.7781,6.7781,0,0,0-7,7,11.65,11.65,0,0,0,1.0559,5H8v2h3.7729A8.209,8.209,0,0,1,12,20c0,2.5234-1.4858,5-3,5v2H24V25Z"
});
var CurrencyPound20 = /*#__PURE__*/React.forwardRef(function CurrencyPound20(_ref, ref) {
var children = _ref.children,
rest = _objectWithoutProperties(_ref, ["children"]); |
return /*#__PURE__*/React.createElement(Icon, _extends({
width: 20,
height: 20,
viewBox: "0 0 32 32",
xmlns: "http://www.w3.org/2000/svg",
fill: "currentColor",
ref: ref
}, rest), _ref2, children);
});
export default CurrencyPound20; | |
CardReaderNames.rs | // This file is part of security-keys-rust. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/raphaelcohn/security-keys-rust/master/COPYRIGHT. No part of security-keys-rust, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2021 The developers of security-keys-rust. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/raphaelcohn/security-keys-rust/master/COPYRIGHT.
/// Card reader names.
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct CardReaderNames(CardReaderNamesBuffer);
impl CardReaderNames
{
const ArrayEndMarkerIsEmptyCString: u8 = 0x00;
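	// The backing buffer uses the PC/SC "multi-string" layout: each reader name is a
	// NUL-terminated C string, and the array itself is terminated by one extra empty
	// C string (a lone NUL byte).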
#[inline(always)]
pub(in crate::pcsc) fn from_valid_buffer(mut reader_names: CardReaderNamesBuffer, reader_names_length: DWORD) -> Self
{
let reader_names_length = reader_names_length as usize;
debug_assert_ne!(reader_names_length, 0);
unsafe { reader_names.set_len(reader_names_length) };
debug_assert_eq!(reader_names.get_unchecked_value_safe(reader_names_length - 1), Self::ArrayEndMarkerIsEmptyCString, "reader_names array of CStrings is not terminated by an empty CString");
Self(reader_names)
}
#[inline(always)] | {
debug_assert_eq!(reader_names.len(), 0);
unsafe { reader_names.set_len(1) };
reader_names.set_unchecked_mut_safe(0, Self::ArrayEndMarkerIsEmptyCString);
return Self(reader_names)
}
/// Iterate.
#[inline(always)]
pub fn iterate(&self) -> CardReaderNamesIterator
{
CardReaderNamesIterator
{
slice: self.slice(),
next_c_string_index: 0,
}
}
/// Create card reader states from all card reader names.
#[inline(always)]
pub fn create_card_reader_states(&self) -> CardReaderStates<()>
{
let mut card_reader_states = CardReaderStates::new();
self.use_all_card_reader_names(|card_reader_name|
{
card_reader_states.push_reader_state(CardReaderEventName::StateChange(card_reader_name), None, false)
});
card_reader_states
}
/// Iterate, efficiently.
#[inline(always)]
pub fn use_all_card_reader_names<'buffer, CardReaderNameUser: FnMut(CardReaderName<'buffer>)>(&'buffer self, mut card_reader_name_user: CardReaderNameUser)
{
let mut slice = self.slice();
let mut null_index = Self::null_index(slice);
while likely!(null_index != 0)
{
let reader_name = Self::wrap_reader_name(slice, null_index);
card_reader_name_user(reader_name);
slice = slice.get_unchecked_range_safe(Self::next_c_string_index(null_index) .. );
null_index = Self::null_index(slice);
}
}
#[inline(always)]
fn slice(&self) -> &[u8]
{
self.0.as_slice()
}
#[inline(always)]
fn null_index(slice: &[u8]) -> usize
{
memchr(b'\0', slice).expect("The final item should be an empty CString, not just empty")
}
#[inline(always)]
fn wrap_reader_name(slice: &[u8], null_index: usize) -> CardReaderName
{
CardReaderName::wrap_buffer(slice, null_index)
}
#[inline(always)]
const fn next_c_string_index(null_index: usize) -> usize
{
null_index + 1
}
} | pub(in crate::pcsc) fn from_empty_buffer(mut reader_names: CardReaderNamesBuffer) -> Self |
config.py | import os
from dataclasses import dataclass
from typing import List
import yaml
from ikfs_anomaly_detector.core.format.telemetry import TelemetryAttrs, Counters
from ikfs_anomaly_detector.intellectual.autoencoder import SignalsGroup
DEFAULT_CONFIG_PATH = os.path.join(os.getcwd(), 'default_config.yml')
DEFAULT_CONFIG = {
'models_dir': '',
'tensorboard_dir': '',
'analysis_result_dir': '/tmp/ikfs_anomaly_detector/results',
'predictor_for': [
TelemetryAttrs.ppt_ripple,
TelemetryAttrs.ppt_sample_count,
TelemetryAttrs.scanner_angle,
TelemetryAttrs.str_power,
TelemetryAttrs.tu1_temperature,
TelemetryAttrs.tu2_temperature,
],
'autoencoder_for': {
'bfk': [
# TelemetryAttrs.channel_bfk,
# TelemetryAttrs.state_bfk,
Counters.bfk_cnt_err_crc,
Counters.bfk_cnt_err_rx_buf_alloc,
Counters.bfk_cnt_err_rx_packet,
Counters.bfk_cnt_err_too_big_can_tx,
Counters.bfk_cnt_lost_interf,
Counters.bfk_cnt_marker_bpop,
Counters.bfk_cnt_marker_bud,
Counters.bfk_cnt_timeout_marker_bpop,
Counters.bfk_cnt_timeout_marker_bud,
],
'bpop': [
# TelemetryAttrs.channel_bpop,
# TelemetryAttrs.power_bpop15v,
# TelemetryAttrs.power_bpop5v,
# TelemetryAttrs.state_bpop,
Counters.bpop_cnt_err_adc_spi_overrun,
Counters.bpop_cnt_err_crc,
Counters.bpop_cnt_err_marker_access,
Counters.bpop_cnt_err_rx_pkt,
Counters.bpop_cnt_marker,
Counters.bpop_cnt_marker_other,
],
'bud': [
# TelemetryAttrs.channel_bud,
# TelemetryAttrs.power_bud10v,
# TelemetryAttrs.power_bud27vi,
# TelemetryAttrs.power_bud27vo,
# TelemetryAttrs.state_bud,
Counters.bud_cnt_err_crc,
Counters.bud_cnt_err_kachalka_brake,
Counters.bud_cnt_err_kachalka_timeout,
Counters.bud_cnt_err_marker_access,
Counters.bud_cnt_err_ref_missed_impulses,
Counters.bud_cnt_err_rx_overflow,
Counters.bud_cnt_err_rx_packet,
Counters.bud_cnt_err_sp_tx_alloc,
Counters.bud_cnt_marker,
Counters.bud_cnt_marker_other,
Counters.bud_cnt_mbx_cmd_busy,
],
'bud_board': [
TelemetryAttrs.power_bpop15v,
TelemetryAttrs.power_bpop5v,
TelemetryAttrs.power_bud10v,
TelemetryAttrs.power_bud27vo,
TelemetryAttrs.power_bud27vi,
],
'fp': [
TelemetryAttrs.tu2_temperature,
TelemetryAttrs.fp_temperature,
],
'mi': [
TelemetryAttrs.mi1_temperature,
TelemetryAttrs.mi2_temperature,
TelemetryAttrs.mi1_heater_state,
TelemetryAttrs.mi2_heater_state,
],
'mk': [
TelemetryAttrs.mk1_temperature,
TelemetryAttrs.mk2_temperature,
TelemetryAttrs.mk_heater_state,
],
'ppt': [
TelemetryAttrs.ppt_zone,
TelemetryAttrs.ppt_ref,
TelemetryAttrs.ppt_ripple,
TelemetryAttrs.ppt_in_zone,
TelemetryAttrs.scanner_angle,
],
'ppt_direction': [
TelemetryAttrs.ppt_direction,
TelemetryAttrs.ifg_max_index,
],
'str': [
TelemetryAttrs.str_power,
TelemetryAttrs.tu1_temperature
],
},
'thresholds': {
'default': {
'rules': 0.55,
'bfk': 0.2,
'bpop': 0.4,
'bud': 6.,
'bud_board': 15.,
'fp': 0.7,
'mi': 0.4,
'mk': 0.09,
'ppt': 0.27,
'ppt_direction': 0.1,
'str': 0.05,
'PptRiple': 100,
'PptSampleCount': 100,
'ScannerAngle': 610,
'Str27V': 210,
'StrSensorTu1': 100,
'StrSensorTu2': 100,
},
},
}
@dataclass
class Config:
data: dict
@property
def models_dir(self) -> str:
return self.data['models_dir']
@property
def tensorboard_dir(self) -> str:
return self.data['tensorboard_dir']
@property
def analysis_result_dir(self) -> str:
return self.data['analysis_result_dir']
@property
def signals_for_predictor(self) -> List[str]:
return self.data['predictor_for'] or []
@property
def signals_groups(self) -> List[SignalsGroup]:
return [
SignalsGroup(name=group_name, signals=signals)
for group_name, signals in (self.data['autoencoder_for'] or {}).items()
]
@property
def thresholds(self) -> dict:
return self.data['thresholds'] or {}
def dump_default_config() -> str:
with open(DEFAULT_CONFIG_PATH, 'w') as f:
yaml.dump(DEFAULT_CONFIG, stream=f, indent=2, explicit_start=True, sort_keys=False)
return DEFAULT_CONFIG_PATH
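# A minimal round-trip sketch (shown as a comment, not executed): dump the default
# config to disk, load it back with the loader defined below, and read a threshold.
# The threshold key is taken from DEFAULT_CONFIG above; nothing else is assumed.
#
#   path = dump_default_config()
#   cfg = load_config(path)
#   assert cfg.thresholds['default']['bfk'] == 0.2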
def | (path: str) -> Config:
with open(path, 'r') as f:
return Config(data=yaml.load(f, Loader=yaml.FullLoader))
| load_config |
__init__.py | from .client import Client # noqa
from .params import Params, ListSymbolsFormat # noqa
from .jsonrpc_client import MsgpackRpcClient # noqa
from .grpc_client import GRPCClient # noqa
# alias
Param = Params # noqa
from .stream import StreamConn # noqa | __version__ = '0.18' | |
test_subscribing.py | # Test some subscription scenarios
from typing import List, Tuple, Dict, Union
from numbers import Number
import pytest
from numpy import ndarray
from qcodes.dataset.param_spec import ParamSpec
# pylint: disable=unused-import
from qcodes.tests.dataset.temporary_databases import (empty_temp_db,
experiment,
dataset)
VALUE = Union[str, Number, List, ndarray, bool]
@pytest.fixture(scope='function')
def basic_subscriber():
|
def test_basic_subscription(dataset, basic_subscriber):
xparam = ParamSpec(name='x', paramtype='numeric', label='x parameter',
unit='V')
yparam = ParamSpec(name='y', paramtype='numeric', label='y parameter',
unit='Hz', depends_on=[xparam])
dataset.add_parameter(xparam)
dataset.add_parameter(yparam)
sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1,
state={})
assert len(dataset.subscribers) == 1
assert list(dataset.subscribers.keys()) == [sub_id]
expected_state = {}
for x in range(10):
y = -x**2
dataset.add_result({'x': x, 'y': y})
expected_state[x+1] = [(x, y)]
assert dataset.subscribers[sub_id].state == expected_state
| """
A basic subscriber that just puts results and length into
state
"""
def subscriber(results: List[Tuple[VALUE]], length: int,
state: Dict) -> None:
state[length] = results
return subscriber |
host_cgo.go | package host
/*
#include <unistd.h>
*/
import "C"
var (
clockTicks *int64
bootTime *int64
)
func ClockTicks() (int64, error) {
if clockTicks == nil |
return *clockTicks, nil
}
| {
ticks := int64(C.sysconf(C._SC_CLK_TCK))
clockTicks = &ticks
} |
_cpnative_server.py | """Native adapter for serving CherryPy via its builtin server."""
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from cherrypy import wsgiserver
class | (wsgiserver.Gateway):
recursive = False
def respond(self):
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr
local = httputil.Host(local[0], local[1], "")
remote = req.conn.remote_addr, req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], "")
scheme = req.scheme
sn = cherrypy.tree.script_name(req.uri or "/")
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.path
qs = req.qs or ""
headers = req.inheaders.items()
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local, remote, scheme, "HTTP/1.1")
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, req.request_protocol, headers, rfile)
break
except cherrypy.InternalRedirect, ir:
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % ir.path)
else:
# Add the *previous* path_info + qs to redirections.
if qs:
qs = "?" + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = "GET"
path = ir.path
qs = ir.query_string
rfile = StringIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except:
tb = format_exc()
#print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
req = self.req
# Set response status
req.status = str(status or "500 Server Error")
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
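# Illustrative only: one way to serve a CherryPy application through this native
# (non-WSGI) gateway is to hand a CPHTTPServer instance (defined below) to the
# server adapter before starting the engine. `Root` is a placeholder application
# class, not part of this module.
#
#   import cherrypy
#   from cherrypy._cpnative_server import CPHTTPServer
#   cherrypy.server.httpserver = CPHTTPServer(cherrypy.server)
#   cherrypy.quickstart(Root())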
class CPHTTPServer(wsgiserver.HTTPServer):
"""Wrapper for wsgiserver.HTTPServer.
wsgiserver has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications.
Therefore, we wrap it here, so we can apply some attributes
from config -> cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
wsgiserver.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = self.server_adapter.max_request_header_size or 0
self.max_request_body_size = self.server_adapter.max_request_body_size or 0
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
| NativeGateway |
telegraf_config_handler.py | #!/usr/bin/env python
#
# Azure Linux extension
#
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# future imports have no effect on python 3 (verified in official docs)
# importing from source causes import errors on python 3, let's skip the import
import sys
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import json
import os
from telegraf_utils.telegraf_name_map import name_map
import subprocess
import signal
import urllib.request, urllib.error, urllib.parse
from shutil import copyfile, rmtree
import time
import metrics_ext_utils.metrics_constants as metrics_constants
import metrics_ext_utils.metrics_common_utils as metrics_utils
"""
Sample input data received by this script
[
{
"displayName" : "Network->Packets sent",
"interval" : "15s",
"sink" : ["mdsd" , "me"]
},
{
"displayName" : "Network->Packets recieved",
"interval" : "15s",
"sink" : ["mdsd" , "me"]
}
]
"""
def parse_config(data, me_url, mdsd_url, is_lad, az_resource_id, subscription_id, resource_group, region, virtual_machine_name):
"""
Main parser method to convert Metrics config from extension configuration to telegraf configuration
:param data: Parsed Metrics Configuration from which telegraf config is created
:param me_url: The url to which telegraf will send metrics to for MetricsExtension
:param mdsd_url: The url to which telegraf will send metrics to for MDSD
:param is_lad: Boolean value for whether the extension is Lad or not (AMA)
:param az_resource_id: Azure Resource ID value for the VM
:param subscription_id: Azure Subscription ID value for the VM
:param resource_group: Azure Resource Group value for the VM
:param region: Azure Region value for the VM
:param virtual_machine_name: Azure Virtual Machine Name value (Only in the case for VMSS) for the VM
"""
storage_namepass_list = []
storage_namepass_str = ""
vmi_rate_counters_list = ["LogicalDisk\\BytesPerSecond", "LogicalDisk\\ReadBytesPerSecond", "LogicalDisk\\ReadsPerSecond", "LogicalDisk\\WriteBytesPerSecond", "LogicalDisk\\WritesPerSecond", "LogicalDisk\\TransfersPerSecond", "Network\\ReadBytesPerSecond", "Network\\WriteBytesPerSecond"]
MetricsExtensionNamepsace = metrics_constants.metrics_extension_namespace
has_mdsd_output = False
if len(data) == 0:
raise Exception("Empty config data received.")
if me_url is None or mdsd_url is None:
raise Exception("No url provided for Influxdb output plugin to ME, AMA.")
telegraf_json = {}
for item in data:
sink = item["sink"]
if "mdsd" in sink:
has_mdsd_output = True
counter = item["displayName"]
if counter in name_map:
plugin = name_map[counter]["plugin"]
omiclass = ""
if is_lad:
omiclass = counter.split("->")[0]
else:
omiclass = name_map[counter]["module"]
if omiclass not in telegraf_json:
telegraf_json[omiclass] = {}
if plugin not in telegraf_json[omiclass]:
telegraf_json[omiclass][plugin] = {}
telegraf_json[omiclass][plugin][name_map[counter]["field"]] = {}
if is_lad:
telegraf_json[omiclass][plugin][name_map[counter]["field"]]["displayName"] = counter.split("->")[1]
else:
telegraf_json[omiclass][plugin][name_map[counter]["field"]]["displayName"] = counter
telegraf_json[omiclass][plugin][name_map[counter]["field"]]["interval"] = item["interval"]
if is_lad:
telegraf_json[omiclass][plugin][name_map[counter]["field"]]["ladtablekey"] = name_map[counter]["ladtablekey"]
if "op" in name_map[counter]:
telegraf_json[omiclass][plugin][name_map[counter]["field"]]["op"] = name_map[counter]["op"]
"""
Sample converted telegraf conf dict -
"network": {
"net": {
"bytes_total": {"interval": "15s","displayName": "Network total bytes","ladtablekey": "/builtin/network/bytestotal"},
"drop_total": {"interval": "15s","displayName": "Network collisions","ladtablekey": "/builtin/network/totalcollisions"},
"err_in": {"interval": "15s","displayName": "Packets received errors","ladtablekey": "/builtin/network/totalrxerrors"},
"packets_sent": {"interval": "15s","displayName": "Packets sent","ladtablekey": "/builtin/network/packetstransmitted"},
}
},
"filesystem": {
"disk": {
"used_percent": {"interval": "15s","displayName": "Filesystem % used space","ladtablekey": "/builtin/filesystem/percentusedspace"},
"used": {"interval": "15s","displayName": "Filesystem used space","ladtablekey": "/builtin/filesystem/usedspace"},
"free": {"interval": "15s","displayName": "Filesystem free space","ladtablekey": "/builtin/filesystem/freespace"},
"inodes_free_percent": {"interval": "15s","displayName": "Filesystem % free inodes","ladtablekey": "/builtin/filesystem/percentfreeinodes"},
},
"diskio": {
"writes_filesystem": {"interval": "15s","displayName": "Filesystem writes/sec","ladtablekey": "/builtin/filesystem/writespersecond","op": "rate"},
"total_transfers_filesystem": {"interval": "15s","displayName": "Filesystem transfers/sec","ladtablekey": "/builtin/filesystem/transferspersecond","op": "rate"},
"reads_filesystem": {"interval": "15s","displayName": "Filesystem reads/sec","ladtablekey": "/builtin/filesystem/readspersecond","op": "rate"},
}
},
"""
if len(telegraf_json) == 0:
raise Exception("Unable to parse telegraf config into intermediate dictionary.")
excess_diskio_plugin_list_lad = ["total_transfers_filesystem", "read_bytes_filesystem", "total_bytes_filesystem", "write_bytes_filesystem", "reads_filesystem", "writes_filesystem"]
excess_diskio_field_drop_list_str = ""
int_file = {"filename":"intermediate.json", "data": json.dumps(telegraf_json)}
output = []
output.append(int_file)
for omiclass in telegraf_json:
input_str = ""
ama_rename_str = ""
metricsext_rename_str = ""
lad_specific_rename_str = ""
rate_specific_aggregator_str = ""
aggregator_str = ""
for plugin in telegraf_json[omiclass]:
config_file = {"filename" : omiclass+".conf"}
# Arbitrary max value for finding min
min_interval = "999999999s"
is_vmi = plugin.endswith("_vmi")
is_vmi_rate_counter = False
for field in telegraf_json[omiclass][plugin]:
if not is_vmi_rate_counter:
is_vmi_rate_counter = telegraf_json[omiclass][plugin][field]["displayName"] in vmi_rate_counters_list
# if is_vmi_rate_counter:
# min_interval = "1s"
if is_vmi or is_vmi_rate_counter:
splitResult = plugin.split('_')
telegraf_plugin = splitResult[0]
input_str += "[[inputs." + telegraf_plugin + "]]\n"
# plugin = plugin[:-4]
else:
input_str += "[[inputs." + plugin + "]]\n"
# input_str += " "*2 + "name_override = \"" + omiclass + "\"\n"
# If it's a lad config then add the namepass fields for sending totals to storage
# always skip lad plugin names as they should be dropped from ME
lad_plugin_name = plugin + "_total"
if lad_plugin_name not in storage_namepass_list:
storage_namepass_list.append(lad_plugin_name)
if is_lad:
lad_specific_rename_str += "\n[[processors.rename]]\n"
lad_specific_rename_str += " "*2 + "namepass = [\"" + lad_plugin_name + "\"]\n"
elif is_vmi or is_vmi_rate_counter:
if plugin not in storage_namepass_list:
storage_namepass_list.append(plugin + "_mdsd")
else:
ama_plugin_name = plugin + "_mdsd_la_perf"
ama_rename_str += "\n[[processors.rename]]\n"
ama_rename_str += " "*2 + "namepass = [\"" + ama_plugin_name + "\"]\n"
if ama_plugin_name not in storage_namepass_list:
storage_namepass_list.append(ama_plugin_name)
namespace = MetricsExtensionNamepsace
if is_vmi or is_vmi_rate_counter:
namespace = "insights.virtualmachine"
if is_vmi_rate_counter:
# Adding "_rated" as a substring for vmi rate metrics to avoid renaming collisions
plugin_name = plugin + "_rated"
else:
plugin_name = plugin
metricsext_rename_str += "\n[[processors.rename]]\n"
metricsext_rename_str += " "*2 + "namepass = [\"" + plugin_name + "\"]\n"
metricsext_rename_str += "\n" + " "*2 + "[[processors.rename.replace]]\n"
metricsext_rename_str += " "*4 + "measurement = \"" + plugin_name + "\"\n"
metricsext_rename_str += " "*4 + "dest = \"" + namespace + "\"\n"
fields = ""
ops_fields = ""
non_ops_fields = ""
non_rate_aggregate = False
ops = ""
min_agg_period = ""
rate_aggregate = False
for field in telegraf_json[omiclass][plugin]:
fields += "\"" + field + "\", "
if is_vmi or is_vmi_rate_counter :
if "MB" in field:
fields += "\"" + field.replace('MB','Bytes') + "\", "
#Use the shortest interval time for the whole plugin
new_interval = telegraf_json[omiclass][plugin][field]["interval"]
if int(new_interval[:-1]) < int(min_interval[:-1]):
min_interval = new_interval
#compute values for aggregator options
if "op" in telegraf_json[omiclass][plugin][field]:
if telegraf_json[omiclass][plugin][field]["op"] == "rate":
rate_aggregate = True
ops = "\"rate\", \"rate_min\", \"rate_max\", \"rate_count\", \"rate_sum\", \"rate_mean\""
if is_lad:
ops_fields += "\"" + telegraf_json[omiclass][plugin][field]["ladtablekey"] + "\", "
else:
ops_fields += "\"" + telegraf_json[omiclass][plugin][field]["displayName"] + "\", "
else:
non_rate_aggregate = True
if is_lad:
non_ops_fields += "\"" + telegraf_json[omiclass][plugin][field]["ladtablekey"] + "\", "
else:
non_ops_fields += "\"" + telegraf_json[omiclass][plugin][field]["displayName"] + "\", "
                # The aggregation period needs to be double the interval/polling period for rate aggregation to work properly.
                # This only applies to metrics going to MDSD. For VMI metrics aggregation,
                # the requirement is to have ALL the metrics at 60 seconds; that is handled later by sourcing the VMI metrics that need to be aggregated at 30 seconds.
if int(min_interval[:-1]) > 30:
min_agg_period = str(int(min_interval[:-1])*2) #if the min interval is greater than 30, use the double value
else:
min_agg_period = "60" #else use 60 as mininum so that we can maintain 1 event per minute
#Add respective rename processor plugin based on the displayname
if is_lad:
lad_specific_rename_str += "\n" + " "*2 + "[[processors.rename.replace]]\n"
lad_specific_rename_str += " "*4 + "field = \"" + field + "\"\n"
lad_specific_rename_str += " "*4 + "dest = \"" + telegraf_json[omiclass][plugin][field]["ladtablekey"] + "\"\n"
elif not is_vmi and not is_vmi_rate_counter:
# no rename of fields as they are set in telegraf directly
ama_rename_str += "\n" + " "*2 + "[[processors.rename.replace]]\n"
ama_rename_str += " "*4 + "field = \"" + field + "\"\n"
ama_rename_str += " "*4 + "dest = \"" + telegraf_json[omiclass][plugin][field]["displayName"] + "\"\n"
# Avoid adding the rename logic for the redundant *_filesystem fields for diskio which were added specifically for OMI parity in LAD
# Had to re-use these six fields to avoid renaming issues since both Filesystem and Disk in OMI-LAD use them
# AMA only uses them once so only need this for LAD
if is_lad:
if field in excess_diskio_plugin_list_lad:
excess_diskio_field_drop_list_str += "\"" + field + "\", "
else:
metricsext_rename_str += "\n" + " "*2 + "[[processors.rename.replace]]\n"
metricsext_rename_str += " "*4 + "field = \"" + field + "\"\n"
metricsext_rename_str += " "*4 + "dest = \"" + plugin + "/" + field + "\"\n"
elif not is_vmi and not is_vmi_rate_counter:
# no rename of fields as they are set in telegraf directly
metricsext_rename_str += "\n" + " "*2 + "[[processors.rename.replace]]\n"
metricsext_rename_str += " "*4 + "field = \"" + field + "\"\n"
metricsext_rename_str += " "*4 + "dest = \"" + plugin + "/" + field + "\"\n"
#Add respective operations for aggregators
# if is_lad:
if not is_vmi and not is_vmi_rate_counter:
suffix = ""
if is_lad:
suffix = "_total\"]\n"
else:
suffix = "_mdsd_la_perf\"]\n"
if rate_aggregate:
aggregator_str += "[[aggregators.basicstats]]\n"
aggregator_str += " "*2 + "namepass = [\"" + plugin + suffix
aggregator_str += " "*2 + "period = \"" + min_agg_period + "s\"\n"
aggregator_str += " "*2 + "drop_original = true\n"
aggregator_str += " "*2 + "fieldpass = [" + ops_fields[:-2] + "]\n" #-2 to strip the last comma and space
aggregator_str += " "*2 + "stats = [" + ops + "]\n"
if non_rate_aggregate:
aggregator_str += "[[aggregators.basicstats]]\n"
aggregator_str += " "*2 + "namepass = [\"" + plugin + suffix
aggregator_str += " "*2 + "period = \"" + min_agg_period + "s\"\n"
aggregator_str += " "*2 + "drop_original = true\n"
aggregator_str += " "*2 + "fieldpass = [" + non_ops_fields[:-2] + "]\n" #-2 to strip the last comma and space
aggregator_str += " "*2 + "stats = [\"mean\", \"max\", \"min\", \"sum\", \"count\"]\n\n"
elif is_vmi_rate_counter:
# Aggregator config for MDSD
aggregator_str += "[[aggregators.basicstats]]\n"
aggregator_str += " "*2 + "namepass = [\"" + plugin + "_mdsd\"]\n"
aggregator_str += " "*2 + "period = \"" + min_interval + "\"\n"
aggregator_str += " "*2 + "drop_original = true\n"
aggregator_str += " "*2 + "fieldpass = [" + ops_fields[:-2].replace('\\','\\\\\\\\') + "]\n" #-2 to strip the last comma and space
aggregator_str += " "*2 + "stats = [" + ops + "]\n\n"
# Aggregator config for ME
aggregator_str += "[[aggregators.mdmratemetrics]]\n"
aggregator_str += " "*2 + "namepass = [\"" + plugin + "\"]\n"
aggregator_str += " "*2 + "period = \"" + min_interval + "\"\n"
aggregator_str += " "*2 + "drop_original = true\n"
aggregator_str += " "*2 + "fieldpass = [" + ops_fields[:-2].replace('\\','\\\\\\\\') + "]\n" #-2 to strip the last comma and space
aggregator_str += " "*2 + "stats = [\"rate\"]\n\n"
if is_lad:
lad_specific_rename_str += "\n"
elif not is_vmi and not is_vmi_rate_counter:
# no rename of fields as they are set in telegraf directly
ama_rename_str += "\n"
# Using fields[: -2] here to get rid of the last ", " at the end of the string
input_str += " "*2 + "fieldpass = ["+fields[:-2]+"]\n"
if plugin == "cpu":
input_str += " "*2 + "report_active = true\n"
if is_vmi_rate_counter:
                # The rate interval needs to be at least twice the regular sourcing interval for aggregation to work.
                # Since we want all the VMI metrics to be sent at the interval selected by the customer, to overcome the
                # "twice the minimum interval" limitation we source the VMI metrics that need to be aggregated at half the selected frequency.
rated_min_interval = str(int(min_interval[:-1]) // 2) + "s"
input_str += " "*2 + "interval = " + "\"" + rated_min_interval + "\"\n\n"
else:
input_str += " "*2 + "interval = " + "\"" + min_interval + "\"\n\n"
config_file["data"] = input_str + "\n" + metricsext_rename_str + "\n" + ama_rename_str + "\n" + lad_specific_rename_str + "\n" +aggregator_str
output.append(config_file)
config_file = {}
"""
Sample telegraf TOML file output
[[inputs.net]]
fieldpass = ["err_out", "packets_sent", "err_in", "bytes_sent", "packets_recv"]
interval = "5s"
[[inputs.cpu]]
fieldpass = ["usage_nice", "usage_user", "usage_idle", "usage_active", "usage_irq", "usage_system"]
interval = "15s"
[[processors.rename]]
[[processors.rename.replace]]
measurement = "net"
dest = "network"
[[processors.rename.replace]]
field = "err_out"
dest = "Packets sent errors"
[[aggregators.basicstats]]
period = "30s"
drop_original = false
fieldpass = ["Disk reads", "Disk writes", "Filesystem write bytes/sec"]
stats = ["rate"]
"""
## Get the log folder directory from HandlerEnvironment.json and use that for the telegraf default logging
logFolder, _ = get_handler_vars()
for measurement in storage_namepass_list:
storage_namepass_str += "\"" + measurement + "\", "
# Telegraf basic agent and output config
agentconf = "[agent]\n"
agentconf += " interval = \"10s\"\n"
agentconf += " round_interval = true\n"
agentconf += " metric_batch_size = 1000\n"
agentconf += " metric_buffer_limit = 1000000\n"
agentconf += " collection_jitter = \"0s\"\n"
agentconf += " flush_interval = \"10s\"\n"
agentconf += " flush_jitter = \"0s\"\n"
agentconf += " logtarget = \"file\"\n"
agentconf += " quiet = true\n"
agentconf += " logfile = \"" + logFolder + "/telegraf.log\"\n"
agentconf += " logfile_rotation_max_size = \"100MB\"\n"
agentconf += " logfile_rotation_max_archives = 5\n"
agentconf += "\n# Configuration for adding gloabl tags\n"
agentconf += "[global_tags]\n"
if is_lad:
agentconf += " DeploymentId= \"${DeploymentId}\"\n"
agentconf += " \"microsoft.subscriptionId\"= \"" + subscription_id + "\"\n"
agentconf += " \"microsoft.resourceGroupName\"= \"" + resource_group + "\"\n"
agentconf += " \"microsoft.regionName\"= \"" + region + "\"\n"
agentconf += " \"microsoft.resourceId\"= \"" + az_resource_id + "\"\n"
if virtual_machine_name != "":
agentconf += " \"VMInstanceId\"= \"" + virtual_machine_name + "\"\n"
agentconf += "\n# Configuration for sending metrics to MetricsExtension\n"
agentconf += "[[outputs.influxdb]]\n"
agentconf += " namedrop = [" + storage_namepass_str[:-2] + "]\n"
if is_lad:
agentconf += " fielddrop = [" + excess_diskio_field_drop_list_str[:-2] + "]\n"
agentconf += " urls = [\"" + str(me_url) + "\"]\n\n"
agentconf += " udp_payload = \"1024B\"\n\n"
if has_mdsd_output:
agentconf += "\n# Configuration for sending metrics to MDSD\n"
agentconf += "[[outputs.socket_writer]]\n"
agentconf += " namepass = [" + storage_namepass_str[:-2] + "]\n"
agentconf += " data_format = \"influx\"\n"
agentconf += " address = \"" + str(mdsd_url) + "\"\n\n"
agentconf += "\n# Configuration for outputing metrics to file. Uncomment to enable.\n"
agentconf += "#[[outputs.file]]\n"
agentconf += "# files = [\"./metrics_to_file.out\"]\n\n"
agent_file = {"filename":"telegraf.conf", "data": agentconf}
output.append(agent_file)
return output, storage_namepass_list
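"""
Illustrative flow (a sketch, not executed here): feed a config shaped like the
sample at the top of this file through parse_config and hand the result to
write_configs. The urls, resource values and paths below are placeholders.
    configs, namespaces = parse_config(
        sample_config_data,                        # list of counter dicts, as in the sample above
        "udp://127.0.0.1:8089",                    # placeholder MetricsExtension influxdb url
        "unix:///run/mdsd/default_influx.socket",  # placeholder MDSD socket url
        False,                                     # is_lad
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
        "<sub>", "<rg>", "<region>", "")
    write_configs(configs, "/etc/telegraf_configs/", "/etc/telegraf_configs/telegraf.d/")
"""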
def write_configs(configs, telegraf_conf_dir, telegraf_d_conf_dir):
"""
Write the telegraf config created by config parser method to disk at the telegraf config location
:param configs: Telegraf config data parsed by the parse_config method above
:param telegraf_conf_dir: Path where the telegraf.conf is written to on the disk
:param telegraf_d_conf_dir: Path where the individual module telegraf configs are written to on the disk
"""
# Delete the older config folder to prevent telegraf from loading older configs
if os.path.exists(telegraf_conf_dir):
rmtree(telegraf_conf_dir)
os.mkdir(telegraf_conf_dir)
os.mkdir(telegraf_d_conf_dir)
for configfile in configs:
if configfile["filename"] == "telegraf.conf" or configfile["filename"] == "intermediate.json":
path = telegraf_conf_dir + configfile["filename"]
else:
path = telegraf_d_conf_dir + configfile["filename"]
with open(path, "w") as f:
f.write(configfile["data"])
def get_handler_vars():
"""
    This method is taken from the Waagent code. It is used to grab the log and config folder locations from the extension's HandlerEnvironment.json
"""
logFolder = ""
configFolder = ""
handler_env_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'HandlerEnvironment.json'))
if os.path.exists(handler_env_path):
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
if "handlerEnvironment" in handler_env:
if "logFolder" in handler_env["handlerEnvironment"]:
logFolder = handler_env["handlerEnvironment"]["logFolder"]
if "configFolder" in handler_env["handlerEnvironment"]:
configFolder = handler_env["handlerEnvironment"]["configFolder"]
return logFolder, configFolder
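"""
Shape of the HandlerEnvironment.json that the parser above expects (the folder
values are illustrative placeholders; only the keys are taken from this code):
    [{
        "handlerEnvironment": {
            "logFolder": "/var/log/azure/<extension>",
            "configFolder": "/var/lib/waagent/<extension>/config"
        }
    }]
"""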
def is_running(is_lad):
"""
    This method is used to check whether the telegraf binary is currently running on the system,
    so that the watcher daemon can decide whether it needs to be restarted.
"""
if is_lad:
telegraf_bin = metrics_constants.lad_telegraf_bin
else:
telegraf_bin = metrics_constants.ama_telegraf_bin
proc = subprocess.Popen(["ps aux | grep telegraf | grep -v grep"], stdout=subprocess.PIPE, shell=True)
output = proc.communicate()[0]
if telegraf_bin in output.decode('utf-8', 'ignore'):
return True
else:
return False
def stop_telegraf_service(is_lad):
"""
    Stop the telegraf service if the VM is using systemd; otherwise check if the pid file exists,
    and if the pid belongs to the Telegraf process, kill that process.
This method is called before remove_telegraf_service by the main extension code
:param is_lad: boolean whether the extension is LAD or not (AMA)
"""
if is_lad:
telegraf_bin = metrics_constants.lad_telegraf_bin
else:
telegraf_bin = metrics_constants.ama_telegraf_bin
# If the VM has systemd, then we will use that to stop
if metrics_utils.is_systemd():
code = 1
telegraf_service_path = get_telegraf_service_path()
if os.path.isfile(telegraf_service_path):
code = os.system("sudo systemctl stop metrics-sourcer")
else:
return False, "Telegraf service file does not exist. Failed to stop telegraf service: metrics-sourcer.service."
if code != 0:
return False, "Unable to stop telegraf service: metrics-sourcer.service. Run systemctl status metrics-sourcer.service for more info."
# Whether or not VM has systemd, let's check if we have any telegraf pids saved and if so, terminate the associated process
_, configFolder = get_handler_vars()
telegraf_conf_dir = configFolder + "/telegraf_configs/"
telegraf_pid_path = telegraf_conf_dir + "telegraf_pid.txt"
if os.path.isfile(telegraf_pid_path):
with open(telegraf_pid_path, "r") as f:
for pid in f.readlines():
# Verify the pid actually belongs to telegraf
cmd_path = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
if os.path.exists(cmd_path):
with open(cmd_path, "r") as cmd_f:
cmdline = cmd_f.readlines()
if cmdline[0].find(telegraf_bin) >= 0:
os.kill(int(pid), signal.SIGKILL)
os.remove(telegraf_pid_path)
elif not metrics_utils.is_systemd():
return False, "Could not find telegraf service nor process to stop."
return True, "Successfully stopped metrics-sourcer service"
def remove_telegraf_service():
"""
    Remove the telegraf service (the systemd unit file) if it is present on the VM
This method is called after stop_telegraf_service by the main extension code during Extension uninstall
"""
telegraf_service_path = get_telegraf_service_path()
if os.path.isfile(telegraf_service_path):
os.remove(telegraf_service_path)
else:
return True, "Unable to remove the Telegraf service as the file doesn't exist."
    # Check to see if the file was successfully removed, since os.remove doesn't return an error code
if os.path.isfile(telegraf_service_path):
return False, "Unable to remove telegraf service: metrics-sourcer.service at {0}.".format(telegraf_service_path)
return True, "Successfully removed metrics-sourcer service"
def setup_telegraf_service(telegraf_bin, telegraf_d_conf_dir, telegraf_agent_conf):
"""
Add the metrics-sourcer service if the VM is using systemd
This method is called in handle_config
:param telegraf_bin: path to the telegraf binary
:param telegraf_d_conf_dir: path to telegraf .d conf subdirectory
:param telegraf_agent_conf: path to telegraf .conf file
"""
telegraf_service_path = get_telegraf_service_path()
telegraf_service_template_path = os.getcwd() + "/services/metrics-sourcer.service"
if not os.path.exists(telegraf_d_conf_dir):
raise Exception("Telegraf config directory does not exist. Failed to setup telegraf service.")
if not os.path.isfile(telegraf_agent_conf):
raise Exception("Telegraf agent config does not exist. Failed to setup telegraf service.")
if os.path.isfile(telegraf_service_template_path):
copyfile(telegraf_service_template_path, telegraf_service_path)
if os.path.isfile(telegraf_service_path):
os.system(r"sed -i 's+%TELEGRAF_BIN%+{1}+' {0}".format(telegraf_service_path, telegraf_bin))
os.system(r"sed -i 's+%TELEGRAF_AGENT_CONFIG%+{1}+' {0}".format(telegraf_service_path, telegraf_agent_conf))
os.system(r"sed -i 's+%TELEGRAF_CONFIG_DIR%+{1}+' {0}".format(telegraf_service_path, telegraf_d_conf_dir))
daemon_reload_status = os.system("sudo systemctl daemon-reload")
if daemon_reload_status != 0:
raise Exception("Unable to reload systemd after Telegraf service file change. Failed to setup telegraf service.")
else:
raise Exception("Unable to copy Telegraf service template file to {0}. Failed to setup telegraf service.".format(telegraf_service_path))
else:
raise Exception("Telegraf service template file does not exist at {0}. Failed to setup telegraf service.".format(telegraf_service_template_path))
return True
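"""
The template at ./services/metrics-sourcer.service is assumed to carry the
placeholders substituted by the sed calls above; a minimal sketch of such a
unit (only the placeholder names are taken from this code, the surrounding
fields are illustrative):
    [Service]
    ExecStart=%TELEGRAF_BIN% --config %TELEGRAF_AGENT_CONFIG% --config-directory %TELEGRAF_CONFIG_DIR%
"""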
def start_telegraf(is_lad):
"""
    Start the telegraf service if the VM is using systemd; otherwise start the binary as a process and store the pid
    in a file in the telegraf config directory
This method is called after config setup is completed by the main extension code
:param is_lad: boolean whether the extension is LAD or not (AMA)
"""
    # Reusing the code that grabs the config directories and imds values because start will be called from the Enable process outside this script
log_messages = ""
if is_lad:
telegraf_bin = metrics_constants.lad_telegraf_bin
else:
telegraf_bin = metrics_constants.ama_telegraf_bin
if not os.path.isfile(telegraf_bin):
log_messages += "Telegraf binary does not exist. Failed to start telegraf service."
return False, log_messages
# Ensure that any old telegraf processes are cleaned up to avoid duplication
stop_telegraf_service(is_lad)
# If the VM has systemd, telegraf will be managed as a systemd service
if metrics_utils.is_systemd():
|
# Otherwise, start telegraf as a process and save the pid to a file so that we can terminate it while disabling/uninstalling
else:
_, configFolder = get_handler_vars()
telegraf_conf_dir = configFolder + "/telegraf_configs/"
telegraf_agent_conf = telegraf_conf_dir + "telegraf.conf"
telegraf_d_conf_dir = telegraf_conf_dir + "telegraf.d/"
telegraf_pid_path = telegraf_conf_dir + "telegraf_pid.txt"
binary_exec_command = "{0} --config {1} --config-directory {2}".format(telegraf_bin, telegraf_agent_conf, telegraf_d_conf_dir)
proc = subprocess.Popen(binary_exec_command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Sleeping for 3 seconds before checking if the process is still running, to give it ample time to relay crash info
time.sleep(3)
p = proc.poll()
# Process is running successfully
if p is None:
telegraf_pid = proc.pid
# Write this pid to a file for future use
try:
with open(telegraf_pid_path, "a") as f:
f.write(str(telegraf_pid) + '\n')
except Exception as e:
log_messages += "Successfully started telegraf binary, but could not save telegraf pidfile."
else:
out, err = proc.communicate()
log_messages += "Unable to run telegraf binary as a process due to error - {0}. Failed to start telegraf.".format(err)
return False, log_messages
return True, log_messages
def get_telegraf_service_path():
"""
    Utility method to get the service path in case /lib/systemd/system doesn't exist on the OS
"""
if os.path.exists("/lib/systemd/system/"):
return metrics_constants.telegraf_service_path
elif os.path.exists("/usr/lib/systemd/system/"):
return metrics_constants.telegraf_service_path_usr_lib
else:
raise Exception("Systemd unit files do not exist at /lib/systemd/system or /usr/lib/systemd/system/. Failed to setup telegraf service.")
def handle_config(config_data, me_url, mdsd_url, is_lad):
"""
    The main method to perform the tasks of parsing the config, writing it to disk, and setting up, stopping, removing and starting telegraf
:param config_data: Parsed Metrics Configuration from which telegraf config is created
:param me_url: The url to which telegraf will send metrics to for MetricsExtension
:param mdsd_url: The url to which telegraf will send metrics to for MDSD
:param is_lad: Boolean value for whether the extension is Lad or not (AMA)
"""
# Making the imds call to get resource id, sub id, resource group and region for the dimensions for telegraf metrics
retries = 1
max_retries = 3
sleep_time = 5
imdsurl = ""
is_arc = False
if is_lad:
imdsurl = "http://169.254.169.254/metadata/instance?api-version=2019-03-11"
else:
if metrics_utils.is_arc_installed():
imdsurl = metrics_utils.get_arc_endpoint()
imdsurl += "/metadata/instance?api-version=2019-11-01"
is_arc = True
else:
imdsurl = "http://169.254.169.254/metadata/instance?api-version=2019-03-11"
data = None
while retries <= max_retries:
req = urllib.request.Request(imdsurl, headers={'Metadata':'true'})
res = urllib.request.urlopen(req)
data = json.loads(res.read().decode('utf-8', 'ignore'))
if "compute" not in data:
retries += 1
else:
break
time.sleep(sleep_time)
if retries > max_retries:
raise Exception("Unable to find 'compute' key in imds query response. Reached max retry limit of - {0} times. Failed to setup Telegraf.".format(max_retries))
if "resourceId" not in data["compute"]:
raise Exception("Unable to find 'resourceId' key in imds query response. Failed to setup Telegraf.")
az_resource_id = data["compute"]["resourceId"]
# If the instance is VMSS then trim the last two values from the resource id ie - "/virtualMachines/0"
    # Since ME expects the resource id in a particular format. For example -
# IMDS returned ID - /subscriptions/<sub-id>/resourceGroups/<rg_name>/providers/Microsoft.Compute/virtualMachineScaleSets/<VMSSName>/virtualMachines/0
# ME expected ID- /subscriptions/<sub-id>/resourceGroups/<rg_name>/providers/Microsoft.Compute/virtualMachineScaleSets/<VMSSName>
if "virtualMachineScaleSets" in az_resource_id:
az_resource_id = "/".join(az_resource_id.split("/")[:-2])
if "subscriptionId" not in data["compute"]:
raise Exception("Unable to find 'subscriptionId' key in imds query response. Failed to setup Telegraf.")
subscription_id = data["compute"]["subscriptionId"]
if "resourceGroupName" not in data["compute"]:
raise Exception("Unable to find 'resourceGroupName' key in imds query response. Failed to setup Telegraf.")
resource_group = data["compute"]["resourceGroupName"]
if "location" not in data["compute"]:
raise Exception("Unable to find 'location' key in imds query response. Failed to setup Telegraf.")
region = data["compute"]["location"]
virtual_machine_name = ""
if "vmScaleSetName" in data["compute"] and data["compute"]["vmScaleSetName"] != "":
virtual_machine_name = data["compute"]["name"]
#call the method to first parse the configs
output, namespaces = parse_config(config_data, me_url, mdsd_url, is_lad, az_resource_id, subscription_id, resource_group, region, virtual_machine_name)
_, configFolder = get_handler_vars()
if is_lad:
telegraf_bin = metrics_constants.lad_telegraf_bin
else:
telegraf_bin = metrics_constants.ama_telegraf_bin
telegraf_conf_dir = configFolder + "/telegraf_configs/"
telegraf_agent_conf = telegraf_conf_dir + "telegraf.conf"
telegraf_d_conf_dir = telegraf_conf_dir + "telegraf.d/"
#call the method to write the configs
write_configs(output, telegraf_conf_dir, telegraf_d_conf_dir)
# Setup Telegraf service.
# If the VM has systemd, then we will copy over the systemd unit file and use that to start/stop
if metrics_utils.is_systemd():
telegraf_service_setup = setup_telegraf_service(telegraf_bin, telegraf_d_conf_dir, telegraf_agent_conf)
if not telegraf_service_setup:
return False, []
return True, namespaces
| service_restart_status = os.system("sudo systemctl restart metrics-sourcer")
if service_restart_status != 0:
log_messages += "Unable to start Telegraf service. Failed to start telegraf service."
return False, log_messages |
magick.rs | /*
* Copyright 2016 Mattis Marjak
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use libc::c_void;
use std::ffi::{CStr, CString};
use std::{fmt, ptr, slice};
use super::{DrawingWand, PixelWand};
use bindings;
use conversions::*;
#[cfg(target_os = "freebsd")]
use libc::{size_t, ssize_t};
#[cfg(not(target_os = "freebsd"))]
use {size_t, ssize_t};
wand_common!(
MagickWand,
NewMagickWand,
ClearMagickWand,
IsMagickWand,
CloneMagickWand,
DestroyMagickWand,
MagickClearException,
MagickGetExceptionType,
MagickGetException
);
/// MagickWand is a Rustic wrapper to the Rust bindings to ImageMagick.
///
/// Instantiating a `MagickWand` will construct an ImageMagick "wand"
/// on which operations can be performed via the `MagickWand` functions.
/// When the `MagickWand` is dropped, the ImageMagick wand will be
/// destroyed as well.
impl MagickWand {
pub fn new_image(
&self,
columns: size_t,
rows: size_t,
pixel_wand: &PixelWand,
) -> Result<(), &'static str> {
match unsafe { bindings::MagickNewImage(self.wand, columns, rows, pixel_wand.wand) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("Could not create image"),
}
}
pub fn set_option(&mut self, key: &str, value: &str) -> Result<(), &'static str> {
let c_key = CString::new(key).unwrap();
let c_value = CString::new(value).unwrap();
let result =
unsafe { bindings::MagickSetOption(self.wand, c_key.as_ptr(), c_value.as_ptr()) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to set option"),
}
}
pub fn annotate_image(
&mut self,
drawing_wand: &DrawingWand,
x: f64,
y: f64,
angle: f64,
text: &str,
) -> Result<(), &'static str> {
let c_string = try!(CString::new(text).map_err(|_| "could not convert to cstring"));
match unsafe {
bindings::MagickAnnotateImage(
self.wand,
drawing_wand.wand,
x,
y,
angle,
c_string.as_ptr() as *const _,
)
} {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("unable to annotate image"),
}
}
/// Add all images from another wand to this wand at the current index.
pub fn add_image(&mut self, other_wand: &MagickWand) -> Result<(), &'static str> {
match unsafe { bindings::MagickAddImage(self.wand, other_wand.wand) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("unable to add images from another wand"),
}
}
pub fn append_all(&mut self, stack: bool) -> MagickWand {
unsafe { bindings::MagickResetIterator(self.wand) };
MagickWand {
wand: unsafe { bindings::MagickAppendImages(self.wand, stack.to_magick()) },
}
}
pub fn label_image(&self, label: &str) -> Result<(), &'static str> {
let c_label = CString::new(label).unwrap();
let result = unsafe { bindings::MagickLabelImage(self.wand, c_label.as_ptr()) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to add label"),
}
}
pub fn write_images(&self, path: &str, adjoin: bool) -> Result<(), &'static str> {
let c_name = CString::new(path).unwrap();
let result =
unsafe { bindings::MagickWriteImages(self.wand, c_name.as_ptr(), adjoin.to_magick()) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to write images"),
}
}
/// Read the image data from the named file.
pub fn read_image(&self, path: &str) -> Result<(), &'static str> {
let c_name = CString::new(path).unwrap();
let result = unsafe { bindings::MagickReadImage(self.wand, c_name.as_ptr()) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to read image"),
}
}
/// Read the image data from the vector of bytes.
pub fn read_image_blob<T: AsRef<[u8]>>(&self, data: T) -> Result<(), &'static str> {
let int_slice = data.as_ref();
let size = int_slice.len();
let result = unsafe {
bindings::MagickReadImageBlob(
self.wand,
int_slice.as_ptr() as *const c_void,
size as size_t,
)
};
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to read image"),
}
}
/// Same as read_image, but reads only the width, height, size and format of an image,
/// without reading data.
pub fn ping_image(&self, path: &str) -> Result<(), &'static str> {
let c_name = CString::new(path).unwrap();
let result = unsafe { bindings::MagickPingImage(self.wand, c_name.as_ptr()) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to ping image"),
}
}
    /// Same as read_image_blob, but reads only the width, height, size and format of an image,
/// without reading data.
pub fn | <T: AsRef<[u8]>>(&self, data: T) -> Result<(), &'static str> {
let int_slice = data.as_ref();
let size = int_slice.len();
let result = unsafe {
bindings::MagickPingImageBlob(
self.wand,
int_slice.as_ptr() as *const c_void,
size as size_t,
)
};
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to ping image"),
}
}
/// Compare two images and return tuple `(distortion, diffImage)`
/// `diffImage` is `None` if `distortion == 0`
pub fn compare_images(
&self,
reference: &MagickWand,
metric: bindings::MetricType,
) -> (f64, Option<MagickWand>) {
let mut distortion: f64 = 0.0;
let result = unsafe {
bindings::MagickCompareImages(self.wand, reference.wand, metric, &mut distortion)
};
let wand = if result.is_null() {
None
} else {
Some(MagickWand { wand: result })
};
(distortion, wand)
}
/// Compose another image onto self at (x, y) using composition_operator
pub fn compose_images(
&self,
reference: &MagickWand,
composition_operator: bindings::CompositeOperator,
clip_to_self: bool,
x: isize,
y: isize,
) -> Result<(), &'static str> {
let native_clip_to_self = if clip_to_self {
bindings::MagickBooleanType_MagickTrue
} else {
bindings::MagickBooleanType_MagickFalse
};
let result = unsafe {
bindings::MagickCompositeImage(
self.wand,
reference.wand,
composition_operator,
native_clip_to_self,
x,
y,
)
};
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to compose images"),
}
}
// Replaces colors in the image from a color lookup table.
pub fn clut_image(
&self,
clut_wand: &MagickWand,
method: bindings::PixelInterpolateMethod,
) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickClutImage(self.wand, clut_wand.wand, method) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to replace colors in the image from color lookup table"),
}
}
pub fn set_size(&self, columns: size_t, rows: size_t) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickSetSize(self.wand, columns, rows) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to set size of wand"),
}
}
/// Extend the image as defined by the geometry, gravity, and wand background color. Set the
/// (x,y) offset of the geometry to move the original wand relative to the extended wand.
pub fn extend_image(
&self,
width: usize,
height: usize,
x: isize,
y: isize,
) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickExtentImage(self.wand, width, height, x, y) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to extend image"),
}
}
pub fn profile_image<'a, T: Into<Option<&'a [u8]>>>(
&self,
name: &str,
profile: T,
) -> Result<(), &'static str> {
let c_name = CString::new(name).unwrap();
let result = unsafe {
let profile = profile.into();
let profile_ptr = match profile {
Some(data) => data.as_ptr(),
None => ptr::null(),
} as *const c_void;
let profile_len = match profile {
Some(data) => data.len(),
None => 0,
};
bindings::MagickProfileImage(self.wand, c_name.as_ptr(), profile_ptr, profile_len)
};
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to profile image"),
}
}
pub fn flip_image(&self) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickFlipImage(self.wand) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to flip image"),
}
}
pub fn flop_image(&self) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickFlopImage(self.wand) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to flip image"),
}
}
pub fn gaussian_blur_image(&self, radius: f64, sigma: f64) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickGaussianBlurImage(self.wand, radius, sigma) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to blur image"),
}
}
/// Adaptively resize the currently selected image.
pub fn adaptive_resize_image(&self, width: usize, height: usize) -> Result<(), &'static str> {
match unsafe { bindings::MagickAdaptiveResizeImage(self.wand, width, height) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to adaptive-resize image"),
}
}
/// Rotate the currently selected image by the given number of degrees,
/// filling any empty space with the background color of a given PixelWand
pub fn rotate_image(&self, background: &PixelWand, degrees: f64) -> Result<(), &'static str> {
match unsafe { bindings::MagickRotateImage(self.wand, background.wand, degrees) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to rotate image"),
}
}
    /// Trim the image removing the background color from the edges.
pub fn trim_image(&self, fuzz: f64) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickTrimImage(self.wand, fuzz) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to trim image"),
}
}
/// Retrieve the width of the image.
pub fn get_image_width(&self) -> usize {
unsafe { bindings::MagickGetImageWidth(self.wand) as usize }
}
/// Retrieve the height of the image.
pub fn get_image_height(&self) -> usize {
unsafe { bindings::MagickGetImageHeight(self.wand) as usize }
}
/// Retrieve the page geometry (width, height, x offset, y offset) of the image.
pub fn get_image_page(&self) -> (usize, usize, isize, isize) {
let (mut width, mut height, mut x, mut y) = (0usize, 0usize, 0isize, 0isize);
unsafe {
bindings::MagickGetImagePage(self.wand, &mut width, &mut height, &mut x, &mut y);
}
(width, height, x, y)
}
/// Reset the Wand page canvas and position.
pub fn reset_image_page(&self, page_geometry: &str) -> Result<(), &'static str> {
let c_page_geometry = CString::new(page_geometry).unwrap();
let result = unsafe { bindings::MagickResetImagePage(self.wand, c_page_geometry.as_ptr()) };
if result == bindings::MagickBooleanType_MagickTrue {
Ok(())
} else {
Err("Resetting page geometry failed.")
}
}
/// Retrieve the named image property value.
pub fn get_image_property(&self, name: &str) -> Result<String, &'static str> {
let c_name = CString::new(name).unwrap();
let result = unsafe { bindings::MagickGetImageProperty(self.wand, c_name.as_ptr()) };
let value = if result.is_null() {
Err("missing property")
} else {
// convert (and copy) the C string to a Rust string
let cstr = unsafe { CStr::from_ptr(result) };
Ok(cstr.to_string_lossy().into_owned())
};
unsafe {
bindings::MagickRelinquishMemory(result as *mut c_void);
}
value
}
/// Set the named image property.
pub fn set_image_property(&self, name: &str, value: &str) -> Result<(), &'static str> {
let c_name = CString::new(name).unwrap();
let c_value = CString::new(value).unwrap();
let result = unsafe {
bindings::MagickSetImageProperty(self.wand, c_name.as_ptr(), c_value.as_ptr())
};
if result == bindings::MagickBooleanType_MagickTrue {
Ok(())
} else {
Err("Setting image property failed.")
}
}
    /// Returns a `PixelWand` instance for the pixel specified by x and y offsets.
pub fn get_image_pixel_color(&self, x: isize, y: isize) -> Option<PixelWand> {
let pw = PixelWand::new();
unsafe {
if bindings::MagickGetImagePixelColor(self.wand, x, y, pw.wand)
== bindings::MagickBooleanType_MagickTrue
{
Some(pw)
} else {
None
}
}
}
/// Sets the image sampling factors.
///
/// samplingFactors: An array of floats representing the sampling factor for each color component (in RGB order).
pub fn set_sampling_factors(&self, samplingFactors: &[f64]) -> Result<(), &'static str> {
match unsafe {
bindings::MagickSetSamplingFactors(
self.wand,
samplingFactors.len(),
&samplingFactors[0],
)
} {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("SetSamplingFactors returned false"),
}
}
/// Returns the image histogram as a vector of `PixelWand` instances for every unique color.
pub fn get_image_histogram(&self) -> Option<Vec<PixelWand>> {
let mut color_count: size_t = 0;
unsafe {
bindings::MagickGetImageHistogram(self.wand, &mut color_count)
.as_mut()
.map(|ptrs| {
slice::from_raw_parts(ptrs, color_count)
.iter()
.map(|raw_wand| PixelWand { wand: *raw_wand })
.collect()
})
}
}
/// Sharpens an image. We convolve the image with a Gaussian operator of the
/// given radius and standard deviation (sigma). For reasonable results, the
/// radius should be larger than sigma. Use a radius of 0 and SharpenImage()
/// selects a suitable radius for you.
///
/// radius: the radius of the Gaussian, in pixels, not counting the center pixel.
///
/// sigma: the standard deviation of the Gaussian, in pixels.
///
pub fn sharpen_image(&self, radius: f64, sigma: f64) -> Result<(), &'static str> {
match unsafe { bindings::MagickSharpenImage(self.wand, radius, sigma) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("SharpenImage returned false"),
}
}
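    // Minimal usage sketch (added for illustration; the file name is an assumption):
    // a radius of 0.0 lets ImageMagick pick a suitable kernel size for the sigma.
    //
    //     let wand = MagickWand::new();
    //     wand.read_image("input.png")?;
    //     wand.sharpen_image(0.0, 1.0)?;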
/// Set the background color.
pub fn set_background_color(&self, pixel_wand: &PixelWand) -> Result<(), &'static str> {
match unsafe { bindings::MagickSetBackgroundColor(self.wand, pixel_wand.wand) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("SetBackgroundColor returned false"),
}
}
/// Set the image background color.
pub fn set_image_background_color(&self, pixel_wand: &PixelWand) -> Result<(), &'static str> {
match unsafe { bindings::MagickSetImageBackgroundColor(self.wand, pixel_wand.wand) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("SetImageBackgroundColor returned false"),
}
}
/// Returns the image resolution as a pair (horizontal resolution, vertical resolution)
pub fn get_image_resolution(&self) -> Result<(f64, f64), &'static str> {
let mut x_resolution = 0f64;
let mut y_resolution = 0f64;
unsafe {
if bindings::MagickGetImageResolution(self.wand, &mut x_resolution, &mut y_resolution)
== bindings::MagickBooleanType_MagickTrue
{
Ok((x_resolution, y_resolution))
} else {
Err("GetImageResolution returned false")
}
}
}
/// Sets the image resolution
pub fn set_image_resolution(
&self,
x_resolution: f64,
y_resolution: f64,
) -> Result<(), &'static str> {
unsafe {
if bindings::MagickSetImageResolution(self.wand, x_resolution, y_resolution)
== bindings::MagickBooleanType_MagickTrue
{
Ok(())
} else {
Err("SetImageResolution returned false")
}
}
}
/// Sets the wand resolution
pub fn set_resolution(&self, x_resolution: f64, y_resolution: f64) -> Result<(), &'static str> {
unsafe {
if bindings::MagickSetResolution(self.wand, x_resolution, y_resolution)
== bindings::MagickBooleanType_MagickTrue
{
Ok(())
} else {
Err("SetResolution returned false")
}
}
}
    /// Applies a sepia-tone special effect to the image, similar to the effect achieved
    /// in a photo darkroom by sepia toning. The threshold is given as a fraction of
    /// QuantumRange (a value around 0.8 is a reasonable starting point).
pub fn sepia_tone_image(&self, threshold: f64) -> Result<(), &'static str> {
unsafe {
if bindings::MagickSepiaToneImage(self.wand, threshold * bindings::QuantumRange)
== bindings::MagickBooleanType_MagickTrue
{
Ok(())
} else {
Err("SepiaToneImage returned false")
}
}
}
/// Extracts pixel data from the image as a vector of 0..255 values defined by `map`.
    /// See <https://www.imagemagick.org/api/magick-image.php#MagickExportImagePixels> for more information.
pub fn export_image_pixels(
&self,
x: isize,
y: isize,
width: usize,
height: usize,
map: &str,
) -> Option<Vec<u8>> {
let c_map = CString::new(map).unwrap();
let capacity = width * height * map.len();
let mut pixels = Vec::with_capacity(capacity);
unsafe {
pixels.set_len(capacity as usize);
if bindings::MagickExportImagePixels(
self.wand,
x,
y,
width,
height,
c_map.as_ptr(),
bindings::StorageType_CharPixel,
pixels.as_mut_ptr() as *mut c_void,
) == bindings::MagickBooleanType_MagickTrue
{
Some(pixels)
} else {
None
}
}
}
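    // Illustrative sketch (an assumption, not part of the original source): exporting
    // the top-left 2x2 block as interleaved RGB yields 2 * 2 * 3 = 12 bytes, one u8
    // per channel in the order given by `map`.
    //
    //     let rgb = wand.export_image_pixels(0, 0, 2, 2, "RGB");
    //     assert_eq!(rgb.map(|v| v.len()), Some(12));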
/// Resize the image to the specified width and height, using the
/// specified filter type.
pub fn resize_image(&self, width: usize, height: usize, filter: bindings::FilterType) {
unsafe {
bindings::MagickResizeImage(self.wand, width as size_t, height as size_t, filter);
}
}
    /// Extract a region of the image. The width and height are used as the size
    /// of the region; x and y give the offset.
pub fn crop_image(
&self,
width: usize,
height: usize,
x: isize,
y: isize,
) -> Result<(), &'static str> {
let result = unsafe { bindings::MagickCropImage(self.wand, width, height, x, y) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to crop image"),
}
}
/// Resample the image to the specified horizontal and vertical resolution, using the
/// specified filter type.
pub fn resample_image(
&self,
x_resolution: f64,
y_resolution: f64,
filter: bindings::FilterType,
) {
unsafe {
bindings::MagickResampleImage(self.wand, x_resolution, y_resolution, filter);
}
}
/// Resize the image to fit within the given dimensions, maintaining
/// the current aspect ratio.
pub fn fit(&self, width: size_t, height: size_t) {
let mut width_ratio = width as f64;
width_ratio /= self.get_image_width() as f64;
let mut height_ratio = height as f64;
height_ratio /= self.get_image_height() as f64;
let (new_width, new_height) = if width_ratio < height_ratio {
(
width,
(self.get_image_height() as f64 * width_ratio) as size_t,
)
} else {
(
(self.get_image_width() as f64 * height_ratio) as size_t,
height,
)
};
unsafe {
bindings::MagickResetIterator(self.wand);
while bindings::MagickNextImage(self.wand) != bindings::MagickBooleanType_MagickFalse {
bindings::MagickResizeImage(
self.wand,
new_width,
new_height,
bindings::FilterType_LanczosFilter,
);
}
}
}
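    // Worked example of the ratio test above (illustrative numbers only): fitting a
    // 4000x3000 image into a 1000x1000 box gives width_ratio = 0.25 and
    // height_ratio ~= 0.33; the smaller width_ratio wins, so every frame is resized
    // to 1000x750 and the aspect ratio is preserved.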
/// Detect if the loaded image is not in top-left orientation, and
/// hence should be "auto" oriented so it is suitable for viewing.
pub fn requires_orientation(&self) -> bool {
unsafe {
bindings::MagickGetImageOrientation(self.wand)
!= bindings::OrientationType_TopLeftOrientation
}
}
/// Automatically adjusts the loaded image so that its orientation is
/// suitable for viewing (i.e. top-left orientation).
///
/// Returns `true` if successful or `false` if an error occurred.
pub fn auto_orient(&self) -> bool {
unsafe {
bindings::MagickAutoOrientImage(self.wand) == bindings::MagickBooleanType_MagickTrue
}
}
/// Write the current image to the provided path.
pub fn write_image(&self, path: &str) -> Result<(), &'static str> {
let c_name = CString::new(path).unwrap();
let result = unsafe { bindings::MagickWriteImage(self.wand, c_name.as_ptr()) };
match result {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err("failed to write image"),
}
}
/// Write the image in the desired format to a new blob.
///
/// The `format` argument may be any ImageMagick supported image
/// format (e.g. GIF, JPEG, PNG, etc).
pub fn write_image_blob(&self, format: &str) -> Result<Vec<u8>, &'static str> {
let c_format = CString::new(format).unwrap();
let mut length: size_t = 0;
let blob = unsafe {
bindings::MagickResetIterator(self.wand);
bindings::MagickSetImageFormat(self.wand, c_format.as_ptr());
bindings::MagickGetImageBlob(self.wand, &mut length)
};
let mut bytes = Vec::with_capacity(length as usize);
unsafe {
bytes.set_len(length as usize);
ptr::copy_nonoverlapping(blob, bytes.as_mut_ptr(), length as usize);
bindings::MagickRelinquishMemory(blob as *mut c_void);
};
Ok(bytes)
}
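    // Hedged usage sketch (output path and format are assumptions for illustration):
    //
    //     let png_bytes = wand.write_image_blob("png")?;
    //     std::fs::write("out.png", &png_bytes).expect("could not write blob");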
/// Write the images in the desired format to a new blob.
///
/// The `format` argument may be any ImageMagick supported image
/// format (e.g. GIF, JPEG, PNG, etc).
pub fn write_images_blob(&self, format: &str) -> Result<Vec<u8>, &'static str> {
let c_format = CString::new(format).unwrap();
let mut length: size_t = 0;
let blob = unsafe {
bindings::MagickSetIteratorIndex(self.wand, 0);
bindings::MagickSetImageFormat(self.wand, c_format.as_ptr());
bindings::MagickGetImagesBlob(self.wand, &mut length)
};
let mut bytes = Vec::with_capacity(length as usize);
unsafe {
bytes.set_len(length as usize);
ptr::copy_nonoverlapping(blob, bytes.as_mut_ptr(), length as usize);
bindings::MagickRelinquishMemory(blob as *mut c_void);
};
Ok(bytes)
}
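    // Note (added commentary): the macro invocations below are defined earlier in this
    // crate and appear to expand to thin wrappers around the corresponding MagickWand
    // C calls -- `mutations!` yields methods returning Result<(), &'static str>, `get!`
    // a plain getter, and the `*_set_get!` forms matching getter/setter pairs for the
    // listed properties.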
mutations!(
/// Set the image colorspace, transforming (unlike `set_image_colorspace`) image data in
/// the process.
MagickTransformImageColorspace => transform_image_colorspace(
colorspace: bindings::ColorspaceType)
/// Set the image alpha channel mode.
MagickSetImageAlphaChannel => set_image_alpha_channel(
alpha_channel: bindings::AlphaChannelOption)
/// Reduce the number of colors in the image.
MagickQuantizeImage => quantize_image(
number_of_colors: size_t, colorspace: bindings::ColorspaceType,
tree_depth: size_t, dither_method: bindings::DitherMethod, measure_error: bindings::MagickBooleanType)
/// Reduce the number of colors in the image.
MagickQuantizeImages => quantize_images(
number_of_colors: size_t, colorspace: bindings::ColorspaceType,
tree_depth: size_t, dither_method: bindings::DitherMethod, measure_error: bindings::MagickBooleanType)
/// Discard all but one of any pixel color.
MagickUniqueImageColors => unique_image_colors()
);
get!(get_image_colors, MagickGetImageColors, size_t);
string_set_get!(
get_filename, set_filename, MagickGetFilename, MagickSetFilename
get_font, set_font, MagickGetFont, MagickSetFont
get_format, set_format, MagickGetFormat, MagickSetFormat
get_image_filename, set_image_filename, MagickGetImageFilename, MagickSetImageFilename
get_image_format, set_image_format, MagickGetImageFormat, MagickSetImageFormat
);
set_get!(
get_colorspace, set_colorspace, MagickGetColorspace, MagickSetColorspace, bindings::ColorspaceType
get_compression, set_compression, MagickGetCompression, MagickSetCompression, bindings::CompressionType
get_compression_quality, set_compression_quality, MagickGetCompressionQuality, MagickSetCompressionQuality, size_t
get_gravity, set_gravity, MagickGetGravity, MagickSetGravity, bindings::GravityType
get_image_colorspace, set_image_colorspace, MagickGetImageColorspace, MagickSetImageColorspace, bindings::ColorspaceType
get_image_compose, set_image_compose, MagickGetImageCompose, MagickSetImageCompose, bindings::CompositeOperator
get_image_compression, set_image_compression, MagickGetImageCompression, MagickSetImageCompression, bindings::CompressionType
get_image_compression_quality, set_image_compression_quality, MagickGetImageCompressionQuality, MagickSetImageCompressionQuality, size_t
get_image_delay, set_image_delay, MagickGetImageDelay, MagickSetImageDelay, size_t
get_image_depth, set_image_depth, MagickGetImageDepth, MagickSetImageDepth, size_t
get_image_dispose, set_image_dispose, MagickGetImageDispose, MagickSetImageDispose, bindings::DisposeType
get_image_endian, set_image_endian, MagickGetImageEndian, MagickSetImageEndian, bindings::EndianType
get_image_fuzz, set_image_fuzz, MagickGetImageFuzz, MagickSetImageFuzz, f64
get_image_gamma, set_image_gamma, MagickGetImageGamma, MagickSetImageGamma, f64
get_image_gravity, set_image_gravity, MagickGetImageGravity, MagickSetImageGravity, bindings::GravityType
get_image_interlace_scheme, set_image_interlace_scheme, MagickGetImageInterlaceScheme, MagickSetImageInterlaceScheme, bindings::InterlaceType
get_image_interpolate_method, set_image_interpolate_method, MagickGetImageInterpolateMethod, MagickSetImageInterpolateMethod, bindings::PixelInterpolateMethod
get_image_iterations, set_image_iterations, MagickGetImageIterations, MagickSetImageIterations, size_t
get_image_orientation, set_image_orientation, MagickGetImageOrientation, MagickSetImageOrientation, bindings::OrientationType
get_image_rendering_intent, set_image_rendering_intent, MagickGetImageRenderingIntent, MagickSetImageRenderingIntent, bindings::RenderingIntent
get_image_scene, set_image_scene, MagickGetImageScene, MagickSetImageScene, size_t
get_image_type, set_image_type, MagickGetImageType, MagickSetImageType, bindings::ImageType
get_image_units, set_image_units, MagickGetImageUnits, MagickSetImageUnits, bindings::ResolutionType
get_interlace_scheme, set_interlace_scheme, MagickGetInterlaceScheme, MagickSetInterlaceScheme, bindings::InterlaceType
get_interpolate_method, set_interpolate_method, MagickGetInterpolateMethod, MagickSetInterpolateMethod, bindings::PixelInterpolateMethod
get_iterator_index, set_iterator_index, MagickGetIteratorIndex, MagickSetIteratorIndex, ssize_t
get_orientation, set_orientation, MagickGetOrientation, MagickSetOrientation, bindings::OrientationType
get_pointsize, set_pointsize, MagickGetPointsize, MagickSetPointsize, f64
get_type, set_type, MagickGetType, MagickSetType, bindings::ImageType
);
}
impl fmt::Debug for MagickWand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(writeln!(f, "MagickWand {{"));
try!(writeln!(f, " Exception: {:?}", self.get_exception()));
try!(writeln!(f, " IsWand: {:?}", self.is_wand()));
try!(self.fmt_string_settings(f, " "));
try!(self.fmt_checked_settings(f, " "));
writeln!(f, "}}")
}
}
| ping_image_blob |
fake_worker_lifecycle.go | // Code generated by counterfeiter. DO NOT EDIT.
package dbfakes
import (
"sync"
"github.com/concourse/atc/db"
)
type FakeWorkerLifecycle struct {
StallUnresponsiveWorkersStub func() ([]string, error)
stallUnresponsiveWorkersMutex sync.RWMutex
stallUnresponsiveWorkersArgsForCall []struct{}
stallUnresponsiveWorkersReturns struct {
result1 []string
result2 error
}
stallUnresponsiveWorkersReturnsOnCall map[int]struct {
result1 []string
result2 error
}
LandFinishedLandingWorkersStub func() ([]string, error)
landFinishedLandingWorkersMutex sync.RWMutex
landFinishedLandingWorkersArgsForCall []struct{}
landFinishedLandingWorkersReturns struct {
result1 []string
result2 error
}
landFinishedLandingWorkersReturnsOnCall map[int]struct {
result1 []string
result2 error
}
DeleteFinishedRetiringWorkersStub func() ([]string, error)
deleteFinishedRetiringWorkersMutex sync.RWMutex
deleteFinishedRetiringWorkersArgsForCall []struct{}
deleteFinishedRetiringWorkersReturns struct {
result1 []string
result2 error
}
deleteFinishedRetiringWorkersReturnsOnCall map[int]struct {
result1 []string
result2 error
}
invocations map[string][][]interface{}
invocationsMutex sync.RWMutex
}
func (fake *FakeWorkerLifecycle) StallUnresponsiveWorkers() ([]string, error) {
fake.stallUnresponsiveWorkersMutex.Lock()
ret, specificReturn := fake.stallUnresponsiveWorkersReturnsOnCall[len(fake.stallUnresponsiveWorkersArgsForCall)]
fake.stallUnresponsiveWorkersArgsForCall = append(fake.stallUnresponsiveWorkersArgsForCall, struct{}{})
fake.recordInvocation("StallUnresponsiveWorkers", []interface{}{})
fake.stallUnresponsiveWorkersMutex.Unlock()
if fake.StallUnresponsiveWorkersStub != nil {
return fake.StallUnresponsiveWorkersStub()
}
if specificReturn {
return ret.result1, ret.result2
}
return fake.stallUnresponsiveWorkersReturns.result1, fake.stallUnresponsiveWorkersReturns.result2
}
func (fake *FakeWorkerLifecycle) StallUnresponsiveWorkersCallCount() int {
fake.stallUnresponsiveWorkersMutex.RLock()
defer fake.stallUnresponsiveWorkersMutex.RUnlock()
return len(fake.stallUnresponsiveWorkersArgsForCall)
}
func (fake *FakeWorkerLifecycle) StallUnresponsiveWorkersReturns(result1 []string, result2 error) {
fake.StallUnresponsiveWorkersStub = nil
fake.stallUnresponsiveWorkersReturns = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeWorkerLifecycle) StallUnresponsiveWorkersReturnsOnCall(i int, result1 []string, result2 error) {
fake.StallUnresponsiveWorkersStub = nil
if fake.stallUnresponsiveWorkersReturnsOnCall == nil {
fake.stallUnresponsiveWorkersReturnsOnCall = make(map[int]struct {
result1 []string
result2 error
})
}
fake.stallUnresponsiveWorkersReturnsOnCall[i] = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeWorkerLifecycle) LandFinishedLandingWorkers() ([]string, error) {
fake.landFinishedLandingWorkersMutex.Lock()
ret, specificReturn := fake.landFinishedLandingWorkersReturnsOnCall[len(fake.landFinishedLandingWorkersArgsForCall)]
fake.landFinishedLandingWorkersArgsForCall = append(fake.landFinishedLandingWorkersArgsForCall, struct{}{})
fake.recordInvocation("LandFinishedLandingWorkers", []interface{}{})
fake.landFinishedLandingWorkersMutex.Unlock()
if fake.LandFinishedLandingWorkersStub != nil {
return fake.LandFinishedLandingWorkersStub()
}
if specificReturn {
return ret.result1, ret.result2
}
return fake.landFinishedLandingWorkersReturns.result1, fake.landFinishedLandingWorkersReturns.result2
}
func (fake *FakeWorkerLifecycle) LandFinishedLandingWorkersCallCount() int {
fake.landFinishedLandingWorkersMutex.RLock()
defer fake.landFinishedLandingWorkersMutex.RUnlock()
return len(fake.landFinishedLandingWorkersArgsForCall)
}
func (fake *FakeWorkerLifecycle) LandFinishedLandingWorkersReturns(result1 []string, result2 error) {
fake.LandFinishedLandingWorkersStub = nil
fake.landFinishedLandingWorkersReturns = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeWorkerLifecycle) LandFinishedLandingWorkersReturnsOnCall(i int, result1 []string, result2 error) {
fake.LandFinishedLandingWorkersStub = nil
if fake.landFinishedLandingWorkersReturnsOnCall == nil |
fake.landFinishedLandingWorkersReturnsOnCall[i] = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeWorkerLifecycle) DeleteFinishedRetiringWorkers() ([]string, error) {
fake.deleteFinishedRetiringWorkersMutex.Lock()
ret, specificReturn := fake.deleteFinishedRetiringWorkersReturnsOnCall[len(fake.deleteFinishedRetiringWorkersArgsForCall)]
fake.deleteFinishedRetiringWorkersArgsForCall = append(fake.deleteFinishedRetiringWorkersArgsForCall, struct{}{})
fake.recordInvocation("DeleteFinishedRetiringWorkers", []interface{}{})
fake.deleteFinishedRetiringWorkersMutex.Unlock()
if fake.DeleteFinishedRetiringWorkersStub != nil {
return fake.DeleteFinishedRetiringWorkersStub()
}
if specificReturn {
return ret.result1, ret.result2
}
return fake.deleteFinishedRetiringWorkersReturns.result1, fake.deleteFinishedRetiringWorkersReturns.result2
}
func (fake *FakeWorkerLifecycle) DeleteFinishedRetiringWorkersCallCount() int {
fake.deleteFinishedRetiringWorkersMutex.RLock()
defer fake.deleteFinishedRetiringWorkersMutex.RUnlock()
return len(fake.deleteFinishedRetiringWorkersArgsForCall)
}
func (fake *FakeWorkerLifecycle) DeleteFinishedRetiringWorkersReturns(result1 []string, result2 error) {
fake.DeleteFinishedRetiringWorkersStub = nil
fake.deleteFinishedRetiringWorkersReturns = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeWorkerLifecycle) DeleteFinishedRetiringWorkersReturnsOnCall(i int, result1 []string, result2 error) {
fake.DeleteFinishedRetiringWorkersStub = nil
if fake.deleteFinishedRetiringWorkersReturnsOnCall == nil {
fake.deleteFinishedRetiringWorkersReturnsOnCall = make(map[int]struct {
result1 []string
result2 error
})
}
fake.deleteFinishedRetiringWorkersReturnsOnCall[i] = struct {
result1 []string
result2 error
}{result1, result2}
}
func (fake *FakeWorkerLifecycle) Invocations() map[string][][]interface{} {
fake.invocationsMutex.RLock()
defer fake.invocationsMutex.RUnlock()
fake.stallUnresponsiveWorkersMutex.RLock()
defer fake.stallUnresponsiveWorkersMutex.RUnlock()
fake.landFinishedLandingWorkersMutex.RLock()
defer fake.landFinishedLandingWorkersMutex.RUnlock()
fake.deleteFinishedRetiringWorkersMutex.RLock()
defer fake.deleteFinishedRetiringWorkersMutex.RUnlock()
copiedInvocations := map[string][][]interface{}{}
for key, value := range fake.invocations {
copiedInvocations[key] = value
}
return copiedInvocations
}
func (fake *FakeWorkerLifecycle) recordInvocation(key string, args []interface{}) {
fake.invocationsMutex.Lock()
defer fake.invocationsMutex.Unlock()
if fake.invocations == nil {
fake.invocations = map[string][][]interface{}{}
}
if fake.invocations[key] == nil {
fake.invocations[key] = [][]interface{}{}
}
fake.invocations[key] = append(fake.invocations[key], args)
}
var _ db.WorkerLifecycle = new(FakeWorkerLifecycle)
| {
fake.landFinishedLandingWorkersReturnsOnCall = make(map[int]struct {
result1 []string
result2 error
})
} |
issue-78721.rs | // edition:2018
#![feature(impl_trait_in_bindings)]
//~^ WARN the feature `impl_trait_in_bindings` is incomplete
struct Bug { | 1
}],
}
fn main() {} | V1: [(); {
let f: impl core::future::Future<Output = u8> = async { 1 };
//~^ ERROR `async` blocks are not allowed in constants
//~| ERROR destructors cannot be evaluated at compile-time |
dispatch.rs | use std::str::FromStr;
use paste::paste;
use proc_macro2::Literal;
macro_rules! fixed_to_literal {
($int_bits:expr, $frac_bits:expr, $signed:expr, $s:expr, $w:expr, $i:expr, $f:expr) => {
if ($int_bits, $frac_bits, $signed) == ($i, $f, true) {
return paste![fixed::types::[<I $i F $f>]::from_str]($s)
.map(|x| x.to_bits()).map(paste![Literal::[<i $w _unsuffixed>]])
} else if ($int_bits, $frac_bits, $signed) == ($i, $f, false) {
return paste![fixed::types::[<U $i F $f>]::from_str]($s)
.map(|x| x.to_bits()).map(paste![Literal::[<u $w _unsuffixed>]])
}
};
}
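// Sketch of what a single invocation expands to (added for illustration, derived from
// the macro above): `fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 8, 8)`
// becomes two guarded branches that, on a match, parse `s` as fixed::types::I8F8 or
// U8F8 and return the raw bits wrapped in Literal::i16_unsuffixed / u16_unsuffixed.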
pub fn fixed_to_literal(
int_bits: u8,
frac_bits: u8,
signed: bool,
s: &str,
) -> Result<Literal, fixed::ParseFixedError> | {
// 8-bit
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 8, 0);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 7, 1);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 6, 2);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 5, 3);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 4, 4);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 3, 5);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 2, 6);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 1, 7);
fixed_to_literal!(int_bits, frac_bits, signed, s, 8, 0, 8);
// 16-bit
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 16, 0);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 15, 1);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 14, 2);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 13, 3);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 12, 4);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 11, 5);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 10, 6);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 9, 7);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 8, 8);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 7, 9);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 6, 10);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 5, 11);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 4, 12);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 3, 13);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 2, 14);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 1, 15);
fixed_to_literal!(int_bits, frac_bits, signed, s, 16, 0, 16);
// 32-bit
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 32, 0);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 31, 1);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 30, 2);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 29, 3);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 28, 4);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 27, 5);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 26, 6);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 25, 7);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 24, 8);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 23, 9);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 22, 10);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 21, 11);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 20, 12);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 19, 13);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 18, 14);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 17, 15);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 16, 16);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 15, 17);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 14, 18);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 13, 19);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 12, 20);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 11, 21);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 10, 22);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 9, 23);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 8, 24);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 7, 25);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 6, 26);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 5, 27);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 4, 28);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 3, 29);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 2, 30);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 1, 31);
fixed_to_literal!(int_bits, frac_bits, signed, s, 32, 0, 32);
// 64-bit
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 64, 0);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 63, 1);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 62, 2);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 61, 3);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 60, 4);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 59, 5);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 58, 6);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 57, 7);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 56, 8);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 55, 9);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 54, 10);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 53, 11);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 52, 12);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 51, 13);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 50, 14);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 49, 15);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 48, 16);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 47, 17);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 46, 18);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 45, 19);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 44, 20);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 43, 21);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 42, 22);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 41, 23);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 40, 24);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 39, 25);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 38, 26);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 37, 27);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 36, 28);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 35, 29);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 34, 30);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 33, 31);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 32, 32);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 31, 33);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 30, 34);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 29, 35);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 28, 36);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 27, 37);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 26, 38);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 25, 39);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 24, 40);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 23, 41);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 22, 42);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 21, 43);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 20, 44);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 19, 45);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 18, 46);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 17, 47);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 16, 48);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 15, 49);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 14, 50);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 13, 51);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 12, 52);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 11, 53);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 10, 54);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 9, 55);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 8, 56);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 7, 57);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 6, 58);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 5, 59);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 4, 60);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 3, 61);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 2, 62);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 1, 63);
fixed_to_literal!(int_bits, frac_bits, signed, s, 64, 0, 64);
// 128-bit
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 128, 0);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 127, 1);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 126, 2);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 125, 3);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 124, 4);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 123, 5);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 122, 6);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 121, 7);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 120, 8);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 119, 9);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 118, 10);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 117, 11);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 116, 12);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 115, 13);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 114, 14);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 113, 15);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 112, 16);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 111, 17);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 110, 18);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 109, 19);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 108, 20);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 107, 21);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 106, 22);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 105, 23);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 104, 24);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 103, 25);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 102, 26);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 101, 27);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 100, 28);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 99, 29);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 98, 30);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 97, 31);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 96, 32);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 95, 33);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 94, 34);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 93, 35);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 92, 36);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 91, 37);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 90, 38);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 89, 39);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 88, 40);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 87, 41);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 86, 42);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 85, 43);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 84, 44);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 83, 45);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 82, 46);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 81, 47);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 80, 48);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 79, 49);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 78, 50);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 77, 51);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 76, 52);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 75, 53);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 74, 54);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 73, 55);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 72, 56);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 71, 57);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 70, 58);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 69, 59);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 68, 60);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 67, 61);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 66, 62);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 65, 63);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 64, 64);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 63, 65);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 62, 66);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 61, 67);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 60, 68);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 59, 69);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 58, 70);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 57, 71);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 56, 72);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 55, 73);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 54, 74);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 53, 75);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 52, 76);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 51, 77);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 50, 78);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 49, 79);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 48, 80);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 47, 81);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 46, 82);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 45, 83);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 44, 84);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 43, 85);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 42, 86);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 41, 87);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 40, 88);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 39, 89);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 38, 90);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 37, 91);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 36, 92);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 35, 93);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 34, 94);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 33, 95);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 32, 96);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 31, 97);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 30, 98);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 29, 99);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 28, 100);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 27, 101);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 26, 102);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 25, 103);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 24, 104);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 23, 105);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 22, 106);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 21, 107);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 20, 108);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 19, 109);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 18, 110);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 17, 111);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 16, 112);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 15, 113);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 14, 114);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 13, 115);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 12, 116);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 11, 117);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 10, 118);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 9, 119);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 8, 120);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 7, 121);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 6, 122);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 5, 123);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 4, 124);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 3, 125);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 2, 126);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 1, 127);
fixed_to_literal!(int_bits, frac_bits, signed, s, 128, 0, 128);
    // every supported (int_bits, frac_bits, signed) combination for the 8- to 128-bit
    // fixed-point types is dispatched above, so reaching this point means an
    // unsupported layout was requested
    unreachable!("unsupported fixed-point layout");
} |
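// Worked example (illustrative only): fixed_to_literal(8, 8, false, "1.5") selects the
// U8F8 branch above; 1.5 in U8F8 has raw bits 1.5 * 2^8 = 384, so the call returns
// Literal::u16_unsuffixed(384).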
|
TransactionLedger.tsx | import React from 'react'
import {Table} from "antd";
import './Components.css';
import {Transaction} from "../models/Transaction";
const TransactionLedger: React.FunctionComponent<{
currentVaspId: string
transactions: Transaction[]
onTransactionSelected: (transaction: Transaction) => void
}> = (props) => {
let columns = [ | title: 'Timestamp',
dataIndex: 'timestamp',
key: 'timestamp'
},
{
title: 'Transaction ID',
dataIndex: 'transactionId',
key: 'transactionId'
},
{
title: 'Direction',
dataIndex: 'direction',
key: 'direction'
},
{
title: 'Details',
dataIndex: 'details',
key: 'details'
}
]
let dataSource = props.transactions.map(transaction => {
return {
'timestamp': formatTransactionTime(transaction),
'transactionId': transaction.transaction_id,
'direction': formatTransactionDirection(transaction, props.currentVaspId),
'details': formatTransactionDetails(transaction)
}
})
    function handleRow(transactionId: string) {
let selectedTransaction = props.transactions
.filter(transaction => transaction.transaction_id === transactionId)[0]
props.onTransactionSelected(selectedTransaction)
}
return <div className="transaction-ledger-container">
<Table
dataSource={dataSource}
columns={columns}
pagination={false}
size="small"
bordered={false}
scroll={{y: 200}}
onRow={(record, rowIndex) => {
return {
onClick: event => {
handleRow(record.transactionId)
}
};
}}
/></div>
}
export function formatTransactionTime(transaction: Transaction): string {
return transaction.timestamp
// if (transaction.timestamp == 0) {
// return ''
// }
// let date = new Date(transaction.timestamp)
//
// let dateOptions = {day: '2-digit', month: '2-digit', year: 'numeric'}
// let timeOptions = {hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit'}
//
// let dateFormat = new Intl.DateTimeFormat([], dateOptions).format(date)
// let timeFormat = new Intl.DateTimeFormat([], timeOptions).format(date)
//
// return dateFormat + ' ' + timeFormat
}
function formatTransactionDirection(transaction: Transaction, current_vasp_id: string): string {
    return current_vasp_id === transaction.beneficiary_vasp_id ? "Incoming" : "Outgoing"
}
function formatTransactionDetails(transaction: Transaction): string {
return transaction.originating_wallet + ' => ' + transaction.beneficiary_wallet
}
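// Example of the rendered cells (wallet ids below are made up for illustration):
// formatTransactionDetails({ originating_wallet: "walletA", beneficiary_wallet: "walletB", ... })
// yields "walletA => walletB", and formatTransactionDirection marks a row "Incoming"
// when the current VASP is the beneficiary of the transaction, otherwise "Outgoing".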
export default TransactionLedger; | { |
node.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: node.proto
package node
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ServerNodeMsgID int32
const (
ServerNodeMsgID_SERVER_NODE_MSG_ID ServerNodeMsgID = 0
ServerNodeMsgID_GATE_ROUTE_TO_LOGIN ServerNodeMsgID = 10001
ServerNodeMsgID_LOGIN_ROUTE_TO_GATE ServerNodeMsgID = 10002
ServerNodeMsgID_GATE_ROUTE_TO_GAME ServerNodeMsgID = 10003
ServerNodeMsgID_GAME_ROUTE_TO_GATE ServerNodeMsgID = 10004
ServerNodeMsgID_GATE_ROUTE_TO_CHAT ServerNodeMsgID = 10005
ServerNodeMsgID_CHAT_ROUTE_TO_GATE ServerNodeMsgID = 10006
ServerNodeMsgID_WORLD_ROUTE_TO_GATE ServerNodeMsgID = 10007
ServerNodeMsgID_GATE_ROUTE_TO_WORLD ServerNodeMsgID = 10008
ServerNodeMsgID_GAME_ROUTE_TO_CHAT ServerNodeMsgID = 10009
ServerNodeMsgID_CHAT_ROUTE_TO_GAME ServerNodeMsgID = 10010
ServerNodeMsgID_GAME_ROUTE_TO_WORLD ServerNodeMsgID = 10011
ServerNodeMsgID_WORLD_ROUTE_TO_GAME ServerNodeMsgID = 10012
ServerNodeMsgID_LOGIN_ROUTE_TO_WORLD ServerNodeMsgID = 10013
ServerNodeMsgID_WORLD_ROUTE_TO_LOGIN ServerNodeMsgID = 10014
ServerNodeMsgID_WORLD_ROUTE_TO_DB ServerNodeMsgID = 10015
ServerNodeMsgID_DB_ROUTE_TO_WORLD ServerNodeMsgID = 10016
ServerNodeMsgID_PLAYER_ROUTE_TO_GATE ServerNodeMsgID = 10017
ServerNodeMsgID_GATE_ROUTE_TO_PLAYER ServerNodeMsgID = 10018
ServerNodeMsgID_GAME_PLAYER_OFFLINE ServerNodeMsgID = 10051
ServerNodeMsgID_REPORT_CLIENT_INFO_TO_SERVER ServerNodeMsgID = 10054
ServerNodeMsgID_MASTER_REPORT_SERVER_INFO_TO_SERVER ServerNodeMsgID = 10053
)
var ServerNodeMsgID_name = map[int32]string{
0: "SERVER_NODE_MSG_ID",
10001: "GATE_ROUTE_TO_LOGIN",
10002: "LOGIN_ROUTE_TO_GATE",
10003: "GATE_ROUTE_TO_GAME",
10004: "GAME_ROUTE_TO_GATE",
10005: "GATE_ROUTE_TO_CHAT",
10006: "CHAT_ROUTE_TO_GATE",
10007: "WORLD_ROUTE_TO_GATE",
10008: "GATE_ROUTE_TO_WORLD",
10009: "GAME_ROUTE_TO_CHAT",
10010: "CHAT_ROUTE_TO_GAME",
10011: "GAME_ROUTE_TO_WORLD",
10012: "WORLD_ROUTE_TO_GAME",
10013: "LOGIN_ROUTE_TO_WORLD",
10014: "WORLD_ROUTE_TO_LOGIN",
10015: "WORLD_ROUTE_TO_DB",
10016: "DB_ROUTE_TO_WORLD",
10017: "PLAYER_ROUTE_TO_GATE",
10018: "GATE_ROUTE_TO_PLAYER",
10051: "GAME_PLAYER_OFFLINE",
10054: "REPORT_CLIENT_INFO_TO_SERVER",
10053: "MASTER_REPORT_SERVER_INFO_TO_SERVER",
}
var ServerNodeMsgID_value = map[string]int32{
"SERVER_NODE_MSG_ID": 0,
"GATE_ROUTE_TO_LOGIN": 10001,
"LOGIN_ROUTE_TO_GATE": 10002,
"GATE_ROUTE_TO_GAME": 10003,
"GAME_ROUTE_TO_GATE": 10004,
"GATE_ROUTE_TO_CHAT": 10005,
"CHAT_ROUTE_TO_GATE": 10006,
"WORLD_ROUTE_TO_GATE": 10007,
"GATE_ROUTE_TO_WORLD": 10008,
"GAME_ROUTE_TO_CHAT": 10009,
"CHAT_ROUTE_TO_GAME": 10010,
"GAME_ROUTE_TO_WORLD": 10011,
"WORLD_ROUTE_TO_GAME": 10012,
"LOGIN_ROUTE_TO_WORLD": 10013,
"WORLD_ROUTE_TO_LOGIN": 10014,
"WORLD_ROUTE_TO_DB": 10015,
"DB_ROUTE_TO_WORLD": 10016,
"PLAYER_ROUTE_TO_GATE": 10017,
"GATE_ROUTE_TO_PLAYER": 10018,
"GAME_PLAYER_OFFLINE": 10051,
"REPORT_CLIENT_INFO_TO_SERVER": 10054,
"MASTER_REPORT_SERVER_INFO_TO_SERVER": 10053,
}
func (x ServerNodeMsgID) String() string {
return proto.EnumName(ServerNodeMsgID_name, int32(x))
}
func (ServerNodeMsgID) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{0}
}
type ServerReport struct {
ServerId int32 `protobuf:"varint,1,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"`
ServerName []byte `protobuf:"bytes,2,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"`
ServerIp []byte `protobuf:"bytes,3,opt,name=server_ip,json=serverIp,proto3" json:"server_ip,omitempty"`
ServerPort int32 `protobuf:"varint,4,opt,name=server_port,json=serverPort,proto3" json:"server_port,omitempty"`
MaxOnline int32 `protobuf:"varint,5,opt,name=max_online,json=maxOnline,proto3" json:"max_online,omitempty"`
CurOnline int32 `protobuf:"varint,6,opt,name=cur_online,json=curOnline,proto3" json:"cur_online,omitempty"`
ServerState int32 `protobuf:"varint,7,opt,name=server_state,json=serverState,proto3" json:"server_state,omitempty"`
ServerType int32 `protobuf:"varint,8,opt,name=server_type,json=serverType,proto3" json:"server_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServerReport) Reset() { *m = ServerReport{} }
func (m *ServerReport) String() string { return proto.CompactTextString(m) }
func (*ServerReport) ProtoMessage() {}
func (*ServerReport) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{0}
}
func (m *ServerReport) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerReport.Unmarshal(m, b)
}
func (m *ServerReport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerReport.Marshal(b, m, deterministic)
}
func (m *ServerReport) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerReport.Merge(m, src)
}
func (m *ServerReport) XXX_Size() int {
return xxx_messageInfo_ServerReport.Size(m)
}
func (m *ServerReport) XXX_DiscardUnknown() {
xxx_messageInfo_ServerReport.DiscardUnknown(m)
}
var xxx_messageInfo_ServerReport proto.InternalMessageInfo
func (m *ServerReport) GetServerId() int32 {
if m != nil {
return m.ServerId
}
return 0
}
func (m *ServerReport) GetServerName() []byte {
if m != nil {
return m.ServerName
}
return nil
}
func (m *ServerReport) GetServerIp() []byte {
if m != nil {
return m.ServerIp
}
return nil
}
func (m *ServerReport) GetServerPort() int32 {
if m != nil {
return m.ServerPort
}
return 0
}
func (m *ServerReport) GetMaxOnline() int32 {
if m != nil {
return m.MaxOnline
}
return 0
}
func (m *ServerReport) GetCurOnline() int32 {
if m != nil {
return m.CurOnline
}
return 0
}
func (m *ServerReport) GetServerState() int32 {
if m != nil {
return m.ServerState
}
return 0
}
func (m *ServerReport) GetServerType() int32 {
if m != nil {
return m.ServerType
}
return 0
}
type ServerReportList struct {
ServerInfo []*ServerReport `protobuf:"bytes,1,rep,name=server_info,json=serverInfo,proto3" json:"server_info,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServerReportList) Reset() { *m = ServerReportList{} }
func (m *ServerReportList) String() string { return proto.CompactTextString(m) }
func (*ServerReportList) ProtoMessage() {}
func (*ServerReportList) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{1}
}
func (m *ServerReportList) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerReportList.Unmarshal(m, b)
}
func (m *ServerReportList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerReportList.Marshal(b, m, deterministic)
}
func (m *ServerReportList) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerReportList.Merge(m, src)
}
func (m *ServerReportList) XXX_Size() int {
return xxx_messageInfo_ServerReportList.Size(m)
}
func (m *ServerReportList) XXX_DiscardUnknown() {
xxx_messageInfo_ServerReportList.DiscardUnknown(m)
}
var xxx_messageInfo_ServerReportList proto.InternalMessageInfo
func (m *ServerReportList) GetServerInfo() []*ServerReport {
if m != nil {
return m.ServerInfo
}
return nil
}
type ServerHeartBeat struct {
Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServerHeartBeat) Reset() { *m = ServerHeartBeat{} }
func (m *ServerHeartBeat) String() string { return proto.CompactTextString(m) }
func (*ServerHeartBeat) ProtoMessage() {}
func (*ServerHeartBeat) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{2}
}
func (m *ServerHeartBeat) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServerHeartBeat.Unmarshal(m, b)
}
func (m *ServerHeartBeat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServerHeartBeat.Marshal(b, m, deterministic)
}
func (m *ServerHeartBeat) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerHeartBeat.Merge(m, src)
}
func (m *ServerHeartBeat) XXX_Size() int {
return xxx_messageInfo_ServerHeartBeat.Size(m)
}
func (m *ServerHeartBeat) XXX_DiscardUnknown() {
xxx_messageInfo_ServerHeartBeat.DiscardUnknown(m)
}
var xxx_messageInfo_ServerHeartBeat proto.InternalMessageInfo
func (m *ServerHeartBeat) GetCount() int32 {
if m != nil {
return m.Count
}
return 0
}
type NodeToMasterPacket struct {
ServerInfo *ServerReport `protobuf:"bytes,1,opt,name=server_info,json=serverInfo,proto3" json:"server_info,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NodeToMasterPacket) Reset() { *m = NodeToMasterPacket{} }
func (m *NodeToMasterPacket) String() string { return proto.CompactTextString(m) }
func (*NodeToMasterPacket) ProtoMessage() {}
func (*NodeToMasterPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{3}
}
func (m *NodeToMasterPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NodeToMasterPacket.Unmarshal(m, b)
}
func (m *NodeToMasterPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NodeToMasterPacket.Marshal(b, m, deterministic)
}
func (m *NodeToMasterPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_NodeToMasterPacket.Merge(m, src)
}
func (m *NodeToMasterPacket) XXX_Size() int {
return xxx_messageInfo_NodeToMasterPacket.Size(m)
}
func (m *NodeToMasterPacket) XXX_DiscardUnknown() {
xxx_messageInfo_NodeToMasterPacket.DiscardUnknown(m)
}
var xxx_messageInfo_NodeToMasterPacket proto.InternalMessageInfo
func (m *NodeToMasterPacket) GetServerInfo() *ServerReport {
if m != nil {
return m.ServerInfo
}
return nil
}
type MasterToNodePacket struct {
Result int32 `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MasterToNodePacket) Reset() { *m = MasterToNodePacket{} }
func (m *MasterToNodePacket) String() string { return proto.CompactTextString(m) }
func (*MasterToNodePacket) ProtoMessage() {}
func (*MasterToNodePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{4}
}
func (m *MasterToNodePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MasterToNodePacket.Unmarshal(m, b)
}
func (m *MasterToNodePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MasterToNodePacket.Marshal(b, m, deterministic)
}
func (m *MasterToNodePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_MasterToNodePacket.Merge(m, src)
}
func (m *MasterToNodePacket) XXX_Size() int {
return xxx_messageInfo_MasterToNodePacket.Size(m)
}
func (m *MasterToNodePacket) XXX_DiscardUnknown() {
xxx_messageInfo_MasterToNodePacket.DiscardUnknown(m)
}
var xxx_messageInfo_MasterToNodePacket proto.InternalMessageInfo
func (m *MasterToNodePacket) GetResult() int32 {
if m != nil {
return m.Result
}
return 0
}
type GateToLoginPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GateToLoginPacket) Reset() { *m = GateToLoginPacket{} }
func (m *GateToLoginPacket) String() string { return proto.CompactTextString(m) }
func (*GateToLoginPacket) ProtoMessage() {}
func (*GateToLoginPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{5}
}
func (m *GateToLoginPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GateToLoginPacket.Unmarshal(m, b)
}
func (m *GateToLoginPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GateToLoginPacket.Marshal(b, m, deterministic)
}
func (m *GateToLoginPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GateToLoginPacket.Merge(m, src)
}
func (m *GateToLoginPacket) XXX_Size() int {
return xxx_messageInfo_GateToLoginPacket.Size(m)
}
func (m *GateToLoginPacket) XXX_DiscardUnknown() {
xxx_messageInfo_GateToLoginPacket.DiscardUnknown(m)
}
var xxx_messageInfo_GateToLoginPacket proto.InternalMessageInfo
func (m *GateToLoginPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GateToLoginPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GateToLoginPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type LoginToGatePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LoginToGatePacket) Reset() { *m = LoginToGatePacket{} }
func (m *LoginToGatePacket) String() string { return proto.CompactTextString(m) }
func (*LoginToGatePacket) ProtoMessage() {}
func (*LoginToGatePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{6}
}
func (m *LoginToGatePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LoginToGatePacket.Unmarshal(m, b)
}
func (m *LoginToGatePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LoginToGatePacket.Marshal(b, m, deterministic)
}
func (m *LoginToGatePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_LoginToGatePacket.Merge(m, src)
}
func (m *LoginToGatePacket) XXX_Size() int {
return xxx_messageInfo_LoginToGatePacket.Size(m)
}
func (m *LoginToGatePacket) XXX_DiscardUnknown() {
xxx_messageInfo_LoginToGatePacket.DiscardUnknown(m)
}
var xxx_messageInfo_LoginToGatePacket proto.InternalMessageInfo
func (m *LoginToGatePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *LoginToGatePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *LoginToGatePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type GateToGamePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GateToGamePacket) Reset() { *m = GateToGamePacket{} }
func (m *GateToGamePacket) String() string { return proto.CompactTextString(m) }
func (*GateToGamePacket) ProtoMessage() {}
func (*GateToGamePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{7}
}
func (m *GateToGamePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GateToGamePacket.Unmarshal(m, b)
}
func (m *GateToGamePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GateToGamePacket.Marshal(b, m, deterministic)
}
func (m *GateToGamePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GateToGamePacket.Merge(m, src)
}
func (m *GateToGamePacket) XXX_Size() int {
return xxx_messageInfo_GateToGamePacket.Size(m)
}
func (m *GateToGamePacket) XXX_DiscardUnknown() {
xxx_messageInfo_GateToGamePacket.DiscardUnknown(m)
}
var xxx_messageInfo_GateToGamePacket proto.InternalMessageInfo
func (m *GateToGamePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GateToGamePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GateToGamePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type GameToGatePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GameToGatePacket) Reset() { *m = GameToGatePacket{} }
func (m *GameToGatePacket) String() string { return proto.CompactTextString(m) }
func (*GameToGatePacket) ProtoMessage() {}
func (*GameToGatePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{8}
}
func (m *GameToGatePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GameToGatePacket.Unmarshal(m, b)
}
func (m *GameToGatePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GameToGatePacket.Marshal(b, m, deterministic)
}
func (m *GameToGatePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GameToGatePacket.Merge(m, src)
}
func (m *GameToGatePacket) XXX_Size() int {
return xxx_messageInfo_GameToGatePacket.Size(m)
}
func (m *GameToGatePacket) XXX_DiscardUnknown() {
xxx_messageInfo_GameToGatePacket.DiscardUnknown(m)
}
var xxx_messageInfo_GameToGatePacket proto.InternalMessageInfo
func (m *GameToGatePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GameToGatePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GameToGatePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type GateToChatPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GateToChatPacket) Reset() { *m = GateToChatPacket{} }
func (m *GateToChatPacket) String() string { return proto.CompactTextString(m) }
func (*GateToChatPacket) ProtoMessage() {}
func (*GateToChatPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{9}
}
func (m *GateToChatPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GateToChatPacket.Unmarshal(m, b)
}
func (m *GateToChatPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GateToChatPacket.Marshal(b, m, deterministic)
}
func (m *GateToChatPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GateToChatPacket.Merge(m, src)
}
func (m *GateToChatPacket) XXX_Size() int {
return xxx_messageInfo_GateToChatPacket.Size(m)
}
func (m *GateToChatPacket) XXX_DiscardUnknown() {
xxx_messageInfo_GateToChatPacket.DiscardUnknown(m)
}
var xxx_messageInfo_GateToChatPacket proto.InternalMessageInfo
func (m *GateToChatPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GateToChatPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GateToChatPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type ChatToGatePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChatToGatePacket) Reset() { *m = ChatToGatePacket{} }
func (m *ChatToGatePacket) String() string { return proto.CompactTextString(m) }
func (*ChatToGatePacket) ProtoMessage() {}
func (*ChatToGatePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{10}
}
func (m *ChatToGatePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ChatToGatePacket.Unmarshal(m, b)
}
func (m *ChatToGatePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ChatToGatePacket.Marshal(b, m, deterministic)
}
func (m *ChatToGatePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChatToGatePacket.Merge(m, src)
}
func (m *ChatToGatePacket) XXX_Size() int {
return xxx_messageInfo_ChatToGatePacket.Size(m)
}
func (m *ChatToGatePacket) XXX_DiscardUnknown() {
xxx_messageInfo_ChatToGatePacket.DiscardUnknown(m)
}
var xxx_messageInfo_ChatToGatePacket proto.InternalMessageInfo
func (m *ChatToGatePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *ChatToGatePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *ChatToGatePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type WorldToGatePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WorldToGatePacket) Reset() { *m = WorldToGatePacket{} }
func (m *WorldToGatePacket) String() string { return proto.CompactTextString(m) }
func (*WorldToGatePacket) ProtoMessage() {}
func (*WorldToGatePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{11}
}
func (m *WorldToGatePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WorldToGatePacket.Unmarshal(m, b)
}
func (m *WorldToGatePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WorldToGatePacket.Marshal(b, m, deterministic)
}
func (m *WorldToGatePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_WorldToGatePacket.Merge(m, src)
}
func (m *WorldToGatePacket) XXX_Size() int {
return xxx_messageInfo_WorldToGatePacket.Size(m)
}
func (m *WorldToGatePacket) XXX_DiscardUnknown() {
xxx_messageInfo_WorldToGatePacket.DiscardUnknown(m)
}
var xxx_messageInfo_WorldToGatePacket proto.InternalMessageInfo
func (m *WorldToGatePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *WorldToGatePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *WorldToGatePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type GateToWorldPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GateToWorldPacket) Reset() { *m = GateToWorldPacket{} }
func (m *GateToWorldPacket) String() string { return proto.CompactTextString(m) }
func (*GateToWorldPacket) ProtoMessage() {}
func (*GateToWorldPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{12}
}
func (m *GateToWorldPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GateToWorldPacket.Unmarshal(m, b)
}
func (m *GateToWorldPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GateToWorldPacket.Marshal(b, m, deterministic)
}
func (m *GateToWorldPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GateToWorldPacket.Merge(m, src)
}
func (m *GateToWorldPacket) XXX_Size() int {
return xxx_messageInfo_GateToWorldPacket.Size(m)
}
func (m *GateToWorldPacket) XXX_DiscardUnknown() {
xxx_messageInfo_GateToWorldPacket.DiscardUnknown(m)
}
var xxx_messageInfo_GateToWorldPacket proto.InternalMessageInfo
func (m *GateToWorldPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GateToWorldPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GateToWorldPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type GameToChatPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GameToChatPacket) Reset() { *m = GameToChatPacket{} }
func (m *GameToChatPacket) String() string { return proto.CompactTextString(m) }
func (*GameToChatPacket) ProtoMessage() {}
func (*GameToChatPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{13}
}
func (m *GameToChatPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GameToChatPacket.Unmarshal(m, b)
}
func (m *GameToChatPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GameToChatPacket.Marshal(b, m, deterministic)
}
func (m *GameToChatPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GameToChatPacket.Merge(m, src)
}
func (m *GameToChatPacket) XXX_Size() int {
return xxx_messageInfo_GameToChatPacket.Size(m)
}
func (m *GameToChatPacket) XXX_DiscardUnknown() {
xxx_messageInfo_GameToChatPacket.DiscardUnknown(m)
}
var xxx_messageInfo_GameToChatPacket proto.InternalMessageInfo
func (m *GameToChatPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GameToChatPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GameToChatPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type ChatToGamePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ChatToGamePacket) Reset() { *m = ChatToGamePacket{} }
func (m *ChatToGamePacket) String() string { return proto.CompactTextString(m) }
func (*ChatToGamePacket) ProtoMessage() {}
func (*ChatToGamePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{14}
}
func (m *ChatToGamePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ChatToGamePacket.Unmarshal(m, b)
}
func (m *ChatToGamePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ChatToGamePacket.Marshal(b, m, deterministic)
}
func (m *ChatToGamePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChatToGamePacket.Merge(m, src)
}
func (m *ChatToGamePacket) XXX_Size() int {
return xxx_messageInfo_ChatToGamePacket.Size(m)
}
func (m *ChatToGamePacket) XXX_DiscardUnknown() {
xxx_messageInfo_ChatToGamePacket.DiscardUnknown(m)
}
var xxx_messageInfo_ChatToGamePacket proto.InternalMessageInfo
func (m *ChatToGamePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *ChatToGamePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *ChatToGamePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type GameToWorldPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GameToWorldPacket) Reset() { *m = GameToWorldPacket{} }
func (m *GameToWorldPacket) String() string { return proto.CompactTextString(m) }
func (*GameToWorldPacket) ProtoMessage() {}
func (*GameToWorldPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{15}
}
func (m *GameToWorldPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GameToWorldPacket.Unmarshal(m, b)
}
func (m *GameToWorldPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GameToWorldPacket.Marshal(b, m, deterministic)
}
func (m *GameToWorldPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_GameToWorldPacket.Merge(m, src)
}
func (m *GameToWorldPacket) XXX_Size() int {
return xxx_messageInfo_GameToWorldPacket.Size(m)
}
func (m *GameToWorldPacket) XXX_DiscardUnknown() {
xxx_messageInfo_GameToWorldPacket.DiscardUnknown(m)
}
var xxx_messageInfo_GameToWorldPacket proto.InternalMessageInfo
func (m *GameToWorldPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *GameToWorldPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *GameToWorldPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type WorldToGamePacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WorldToGamePacket) Reset() { *m = WorldToGamePacket{} }
func (m *WorldToGamePacket) String() string { return proto.CompactTextString(m) }
func (*WorldToGamePacket) ProtoMessage() {}
func (*WorldToGamePacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{16}
}
func (m *WorldToGamePacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WorldToGamePacket.Unmarshal(m, b)
}
func (m *WorldToGamePacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WorldToGamePacket.Marshal(b, m, deterministic)
}
func (m *WorldToGamePacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_WorldToGamePacket.Merge(m, src)
}
func (m *WorldToGamePacket) XXX_Size() int {
return xxx_messageInfo_WorldToGamePacket.Size(m)
}
func (m *WorldToGamePacket) XXX_DiscardUnknown() {
xxx_messageInfo_WorldToGamePacket.DiscardUnknown(m)
}
var xxx_messageInfo_WorldToGamePacket proto.InternalMessageInfo
func (m *WorldToGamePacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *WorldToGamePacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *WorldToGamePacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type LoginToWorldPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LoginToWorldPacket) Reset() { *m = LoginToWorldPacket{} }
func (m *LoginToWorldPacket) String() string { return proto.CompactTextString(m) }
func (*LoginToWorldPacket) ProtoMessage() {}
func (*LoginToWorldPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{17}
}
func (m *LoginToWorldPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LoginToWorldPacket.Unmarshal(m, b)
}
func (m *LoginToWorldPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LoginToWorldPacket.Marshal(b, m, deterministic)
}
func (m *LoginToWorldPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_LoginToWorldPacket.Merge(m, src)
}
func (m *LoginToWorldPacket) XXX_Size() int {
return xxx_messageInfo_LoginToWorldPacket.Size(m)
}
func (m *LoginToWorldPacket) XXX_DiscardUnknown() {
xxx_messageInfo_LoginToWorldPacket.DiscardUnknown(m)
}
var xxx_messageInfo_LoginToWorldPacket proto.InternalMessageInfo
func (m *LoginToWorldPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *LoginToWorldPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *LoginToWorldPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type WorldToLoginPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WorldToLoginPacket) Reset() { *m = WorldToLoginPacket{} }
func (m *WorldToLoginPacket) String() string { return proto.CompactTextString(m) }
func (*WorldToLoginPacket) ProtoMessage() {}
func (*WorldToLoginPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{18}
}
func (m *WorldToLoginPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WorldToLoginPacket.Unmarshal(m, b)
}
func (m *WorldToLoginPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WorldToLoginPacket.Marshal(b, m, deterministic)
}
func (m *WorldToLoginPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_WorldToLoginPacket.Merge(m, src)
}
func (m *WorldToLoginPacket) XXX_Size() int {
return xxx_messageInfo_WorldToLoginPacket.Size(m)
}
func (m *WorldToLoginPacket) XXX_DiscardUnknown() {
xxx_messageInfo_WorldToLoginPacket.DiscardUnknown(m)
}
var xxx_messageInfo_WorldToLoginPacket proto.InternalMessageInfo
func (m *WorldToLoginPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *WorldToLoginPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *WorldToLoginPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type WorldToDBPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *WorldToDBPacket) Reset() { *m = WorldToDBPacket{} }
func (m *WorldToDBPacket) String() string { return proto.CompactTextString(m) }
func (*WorldToDBPacket) ProtoMessage() {}
func (*WorldToDBPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{19}
}
func (m *WorldToDBPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_WorldToDBPacket.Unmarshal(m, b)
}
func (m *WorldToDBPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_WorldToDBPacket.Marshal(b, m, deterministic)
}
func (m *WorldToDBPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_WorldToDBPacket.Merge(m, src)
}
func (m *WorldToDBPacket) XXX_Size() int {
return xxx_messageInfo_WorldToDBPacket.Size(m)
}
func (m *WorldToDBPacket) XXX_DiscardUnknown() {
xxx_messageInfo_WorldToDBPacket.DiscardUnknown(m)
}
var xxx_messageInfo_WorldToDBPacket proto.InternalMessageInfo
func (m *WorldToDBPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *WorldToDBPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *WorldToDBPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
type DBToWorldPacket struct {
PlayerId uint64 `protobuf:"varint,1,opt,name=player_id,json=playerId,proto3" json:"player_id,omitempty"`
MsgId int32 `protobuf:"varint,2,opt,name=msg_id,json=msgId,proto3" json:"msg_id,omitempty"`
MsgBody []byte `protobuf:"bytes,3,opt,name=msg_body,json=msgBody,proto3" json:"msg_body,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DBToWorldPacket) Reset() { *m = DBToWorldPacket{} }
func (m *DBToWorldPacket) String() string { return proto.CompactTextString(m) }
func (*DBToWorldPacket) ProtoMessage() {}
func (*DBToWorldPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_0c843d59d2d938e7, []int{20}
}
func (m *DBToWorldPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DBToWorldPacket.Unmarshal(m, b)
}
func (m *DBToWorldPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DBToWorldPacket.Marshal(b, m, deterministic)
}
func (m *DBToWorldPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_DBToWorldPacket.Merge(m, src)
}
func (m *DBToWorldPacket) XXX_Size() int {
return xxx_messageInfo_DBToWorldPacket.Size(m)
}
func (m *DBToWorldPacket) XXX_DiscardUnknown() {
xxx_messageInfo_DBToWorldPacket.DiscardUnknown(m)
}
var xxx_messageInfo_DBToWorldPacket proto.InternalMessageInfo
func (m *DBToWorldPacket) GetPlayerId() uint64 {
if m != nil {
return m.PlayerId
}
return 0
}
func (m *DBToWorldPacket) GetMsgId() int32 {
if m != nil {
return m.MsgId
}
return 0
}
func (m *DBToWorldPacket) GetMsgBody() []byte {
if m != nil {
return m.MsgBody
}
return nil
}
func init() {
proto.RegisterEnum("ServerNodeMsgID", ServerNodeMsgID_name, ServerNodeMsgID_value)
proto.RegisterType((*ServerReport)(nil), "ServerReport")
proto.RegisterType((*ServerReportList)(nil), "ServerReportList")
proto.RegisterType((*ServerHeartBeat)(nil), "ServerHeartBeat")
proto.RegisterType((*NodeToMasterPacket)(nil), "NodeToMasterPacket")
proto.RegisterType((*MasterToNodePacket)(nil), "MasterToNodePacket")
proto.RegisterType((*GateToLoginPacket)(nil), "GateToLoginPacket")
proto.RegisterType((*LoginToGatePacket)(nil), "LoginToGatePacket")
proto.RegisterType((*GateToGamePacket)(nil), "GateToGamePacket")
proto.RegisterType((*GameToGatePacket)(nil), "GameToGatePacket")
proto.RegisterType((*GateToChatPacket)(nil), "GateToChatPacket")
proto.RegisterType((*ChatToGatePacket)(nil), "ChatToGatePacket")
proto.RegisterType((*WorldToGatePacket)(nil), "WorldToGatePacket")
proto.RegisterType((*GateToWorldPacket)(nil), "GateToWorldPacket")
proto.RegisterType((*GameToChatPacket)(nil), "GameToChatPacket")
proto.RegisterType((*ChatToGamePacket)(nil), "ChatToGamePacket")
proto.RegisterType((*GameToWorldPacket)(nil), "GameToWorldPacket")
proto.RegisterType((*WorldToGamePacket)(nil), "WorldToGamePacket")
proto.RegisterType((*LoginToWorldPacket)(nil), "LoginToWorldPacket")
proto.RegisterType((*WorldToLoginPacket)(nil), "WorldToLoginPacket")
proto.RegisterType((*WorldToDBPacket)(nil), "WorldToDBPacket")
proto.RegisterType((*DBToWorldPacket)(nil), "DBToWorldPacket")
}
func init() |
var fileDescriptor_0c843d59d2d938e7 = []byte{
// 700 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xdb, 0x6e, 0xd3, 0x4c,
0x10, 0xc7, 0xbf, 0xf4, 0x90, 0xa6, 0xd3, 0x7e, 0xaa, 0xbb, 0x94, 0xe2, 0xaa, 0x20, 0xda, 0x70,
0x41, 0x84, 0x50, 0x2f, 0xe0, 0x09, 0x92, 0xda, 0x4d, 0x2d, 0xc5, 0xeb, 0xc8, 0x31, 0x54, 0x5c,
0x99, 0x4d, 0xbc, 0x0d, 0x11, 0xb1, 0x37, 0xb2, 0x1d, 0xd4, 0xbc, 0x03, 0x37, 0x9c, 0xcf, 0xc7,
0x57, 0x41, 0xf0, 0x18, 0x3c, 0x0b, 0xda, 0x43, 0xa8, 0xeb, 0x46, 0xdc, 0xb9, 0x77, 0xd9, 0xf9,
0xcf, 0x7f, 0x7e, 0x3b, 0x33, 0x96, 0x1d, 0x80, 0x88, 0x05, 0x74, 0x6f, 0x14, 0xb3, 0x94, 0x55,
0x9f, 0xce, 0xc1, 0x6a, 0x87, 0xc6, 0x4f, 0x68, 0xec, 0xd2, 0x11, 0x8b, 0x53, 0xb4, 0x0d, 0xcb,
0x89, 0x38, 0xfb, 0x83, 0x40, 0x2f, 0xed, 0x94, 0x6a, 0x8b, 0x6e, 0x45, 0x06, 0xac, 0x00, 0x5d,
0x87, 0x15, 0x25, 0x46, 0x24, 0xa4, 0xfa, 0xdc, 0x4e, 0xa9, 0xb6, 0xea, 0x82, 0x0c, 0x61, 0x12,
0xd2, 0xac, 0x7b, 0xa4, 0xcf, 0x0b, 0x79, 0xea, 0x1e, 0x65, 0xdc, 0x9c, 0xa4, 0x2f, 0x88, 0xe2,
0xca, 0xdd, 0xe6, 0xec, 0x6b, 0x00, 0x21, 0x39, 0xf1, 0x59, 0x34, 0x1c, 0x44, 0x54, 0x5f, 0x14,
0xfa, 0x72, 0x48, 0x4e, 0x1c, 0x11, 0xe0, 0x72, 0x6f, 0x1c, 0x4f, 0xe5, 0xb2, 0x94, 0x7b, 0xe3,
0x58, 0xc9, 0xbb, 0xb0, 0xaa, 0xca, 0x27, 0x29, 0x49, 0xa9, 0xbe, 0x24, 0x12, 0x14, 0xb2, 0xc3,
0x43, 0x99, 0x1b, 0xa4, 0x93, 0x11, 0xd5, 0x2b, 0xd9, 0x1b, 0x78, 0x93, 0x11, 0xad, 0x36, 0x40,
0xcb, 0x4e, 0xa3, 0x35, 0x48, 0x52, 0xb4, 0xf7, 0xd7, 0x34, 0x88, 0x8e, 0x99, 0x5e, 0xda, 0x99,
0xaf, 0xad, 0xdc, 0xf9, 0x7f, 0x2f, 0x9b, 0x37, 0xad, 0x61, 0x45, 0xc7, 0xac, 0x7a, 0x13, 0xd6,
0xa4, 0x76, 0x48, 0x49, 0x9c, 0x36, 0x28, 0x49, 0xd1, 0x06, 0x2c, 0xf6, 0xd8, 0x38, 0x4a, 0xd5,
0x40, 0xe5, 0xa1, 0x6a, 0x00, 0xc2, 0x2c, 0xa0, 0x1e, 0xb3, 0x49, 0x92, 0xd2, 0xb8, 0x4d, 0x7a,
0x8f, 0xe9, 0x0c, 0x5c, 0xe9, 0xdf, 0xb8, 0xdb, 0x80, 0xa4, 0xdf, 0x63, 0xbc, 0x9a, 0xaa, 0xb2,
0x09, 0xe5, 0x98, 0x26, 0xe3, 0xe1, 0x14, 0xa9, 0x4e, 0xd5, 0x2e, 0xac, 0x37, 0x49, 0x4a, 0x3d,
0xd6, 0x62, 0xfd, 0x41, 0xa4, 0x92, 0xb7, 0x61, 0x79, 0x34, 0x24, 0x93, 0xd3, 0x9d, 0x2f, 0xb8,
0x15, 0x19, 0xb0, 0x02, 0x74, 0x19, 0xca, 0x61, 0xd2, 0xe7, 0xca, 0x9c, 0xbc, 0x7c, 0x98, 0xf4,
0xad, 0x00, 0x6d, 0x41, 0x85, 0x87, 0xbb, 0x2c, 0x98, 0xa8, 0x45, 0x2f, 0x85, 0x49, 0xbf, 0xc1,
0x82, 0x09, 0x67, 0x88, 0xea, 0x1e, 0xe3, 0xa8, 0x62, 0x18, 0x04, 0x34, 0xd9, 0x47, 0x93, 0x84,
0x45, 0x22, 0x42, 0x7a, 0x21, 0x5d, 0xec, 0x3f, 0x22, 0x69, 0x61, 0x08, 0x5e, 0xbc, 0xc8, 0x2e,
0xba, 0xb0, 0x7e, 0xc4, 0xe2, 0x61, 0x50, 0x30, 0x43, 0x4e, 0x4a, 0x90, 0x0a, 0x5e, 0xf8, 0x85,
0x6c, 0x23, 0x2c, 0x72, 0x52, 0x61, 0xb1, 0x93, 0xca, 0x6e, 0xbc, 0xa8, 0x3e, 0x7a, 0x80, 0xd4,
0x5b, 0xa4, 0xc0, 0x46, 0x7a, 0x80, 0x54, 0x23, 0x05, 0xbe, 0x0f, 0x1f, 0xc2, 0x9a, 0x82, 0x18,
0x8d, 0xc2, 0x08, 0x46, 0xa3, 0xc8, 0x41, 0xdd, 0xfa, 0xbd, 0x30, 0xfd, 0xaa, 0xf1, 0x8f, 0x8c,
0x9d, 0xf4, 0x2d, 0x03, 0x6d, 0x02, 0xea, 0x98, 0xee, 0x7d, 0xd3, 0xf5, 0xb1, 0x63, 0x98, 0xbe,
0xdd, 0x69, 0xfa, 0x96, 0xa1, 0xfd, 0x87, 0x74, 0xb8, 0xd4, 0xac, 0x7b, 0xa6, 0xef, 0x3a, 0xf7,
0x3c, 0xd3, 0xf7, 0x1c, 0xbf, 0xe5, 0x34, 0x2d, 0xac, 0x3d, 0xc3, 0x5c, 0x11, 0xbf, 0x4f, 0x25,
0x9e, 0xa8, 0x3d, 0xc7, 0xe8, 0x0a, 0xa0, 0xb3, 0x9e, 0x66, 0xdd, 0x36, 0xb5, 0x17, 0x4a, 0xb0,
0xcd, 0x9c, 0xe3, 0xe5, 0x0c, 0xc7, 0xfe, 0x61, 0xdd, 0xd3, 0x5e, 0x09, 0x81, 0xff, 0xcc, 0x39,
0x5e, 0x0b, 0xfa, 0x91, 0xe3, 0xb6, 0x8c, 0x9c, 0xf2, 0x06, 0x9f, 0xbf, 0xb1, 0xc8, 0xd3, 0xde,
0xce, 0xc0, 0x0b, 0xca, 0xbb, 0x99, 0x14, 0xdb, 0xd4, 0xde, 0xab, 0x5a, 0xf6, 0xb9, 0x5a, 0x1f,
0x66, 0xf3, 0x6d, 0x53, 0xfb, 0x88, 0xd1, 0x16, 0x6c, 0xe4, 0xe6, 0x22, 0x4d, 0x9f, 0x84, 0x94,
0x33, 0xc9, 0x69, 0x7e, 0xc6, 0x68, 0x13, 0xd6, 0x73, 0x92, 0xd1, 0xd0, 0xbe, 0x88, 0xb8, 0xd1,
0xc8, 0x97, 0xfa, 0x2a, 0x4a, 0xb5, 0x5b, 0xf5, 0x07, 0xa6, 0x9b, 0x1b, 0xc0, 0x37, 0x21, 0x9d,
0x1d, 0x80, 0x4c, 0xd4, 0xbe, 0x9f, 0xf6, 0xa3, 0xac, 0xce, 0xc1, 0x41, 0xcb, 0xc2, 0xa6, 0xf6,
0x03, 0xa3, 0x5d, 0xb8, 0xea, 0x9a, 0x6d, 0xc7, 0xf5, 0xfc, 0xfd, 0x96, 0x65, 0x62, 0xcf, 0xb7,
0xf0, 0x81, 0xc3, 0xcd, 0xf2, 0xa9, 0xd0, 0x7e, 0x61, 0x54, 0x83, 0x1b, 0x76, 0xbd, 0xe3, 0x71,
0xa4, 0xcc, 0x54, 0x0f, 0x4c, 0x2e, 0xf3, 0x27, 0xee, 0x96, 0xc5, 0xff, 0xd1, 0xbb, 0x7f, 0x02,
0x00, 0x00, 0xff, 0xff, 0x1d, 0x90, 0xa5, 0x3d, 0x9d, 0x0a, 0x00, 0x00,
}
| { proto.RegisterFile("node.proto", fileDescriptor_0c843d59d2d938e7) } |
pairs.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use syntax::ast;
use config::lists::*;
use config::IndentStyle;
use rewrite::{Rewrite, RewriteContext};
use shape::Shape;
use utils::{first_line_width, is_single_line, last_line_width, trimmed_last_line_width, wrap_str};
/// Sigils that decorate a binop pair.
#[derive(new, Clone, Copy)]
pub(crate) struct PairParts<'a> {
prefix: &'a str,
infix: &'a str,
suffix: &'a str,
}
impl<'a> PairParts<'a> {
pub(crate) fn infix(infix: &'a str) -> PairParts<'a> {
PairParts {
prefix: "",
infix,
suffix: "",
}
}
}
// Flattens a tree of pairs into a list and tries to rewrite them all at once.
// FIXME would be nice to reuse the lists API for this, but because each separator
// can be different, we can't.
pub(crate) fn rewrite_all_pairs(
expr: &ast::Expr,
shape: Shape,
context: &RewriteContext,
) -> Option<String> {
// First we try formatting on one line.
if let Some(list) = expr.flatten(false) {
if let Some(r) = rewrite_pairs_one_line(&list, shape, context) {
return Some(r);
}
}
// We can't format on one line, so try many. When we flatten here we make sure
// to only flatten pairs with the same operator, that way we don't
// necessarily need one line per sub-expression, but we don't do anything
// too funny wrt precedence.
expr.flatten(true)
.and_then(|list| rewrite_pairs_multiline(list, shape, context))
}
// This may return a multi-line result since we allow the last expression to go
// multiline in a 'single line' formatting.
fn rewrite_pairs_one_line<T: Rewrite>(
list: &PairList<T>,
shape: Shape,
context: &RewriteContext,
) -> Option<String> {
assert!(list.list.len() >= 2, "Not a pair?");
let mut result = String::new();
let base_shape = shape.block();
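// Write "<expr> <sep> " for every element except the last; give up if any
// rewrite goes multi-line or the accumulated result exceeds the target width.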
for (e, s) in list.list.iter().zip(list.separators.iter()) {
let cur_shape = base_shape.offset_left(last_line_width(&result))?;
let rewrite = e.rewrite(context, cur_shape)?;
if !is_single_line(&rewrite) || result.len() > shape.width {
return None;
}
result.push_str(&rewrite);
result.push(' ');
result.push_str(s);
result.push(' ');
}
let prefix_len = result.len();
let last = list.list.last().unwrap();
let cur_shape = base_shape.offset_left(last_line_width(&result))?;
let last_rewrite = last.rewrite(context, cur_shape)?;
result.push_str(&last_rewrite);
if first_line_width(&result) > shape.width {
return None;
}
// Check the last expression in the list. We sometimes let this expression
// go over multiple lines, but we check for some ugly conditions.
if !(is_single_line(&result) || last_rewrite.starts_with('{'))
&& (last_rewrite.starts_with('(') || prefix_len > context.config.tab_spaces())
{
return None;
}
wrap_str(result, context.config.max_width(), shape)
}
fn rewrite_pairs_multiline<T: Rewrite>(
list: PairList<T>,
shape: Shape,
context: &RewriteContext,
) -> Option<String> {
let rhs_offset = shape.rhs_overhead(&context.config);
let nested_shape = (match context.config.indent_style() {
IndentStyle::Visual => shape.visual_indent(0),
IndentStyle::Block => shape.block_indent(context.config.tab_spaces()),
})
.with_max_width(&context.config)
.sub_width(rhs_offset)?;
let indent_str = nested_shape.indent.to_string_with_newline(context.config);
let mut result = String::new();
let rewrite = list.list[0].rewrite(context, shape)?;
result.push_str(&rewrite);
for (e, s) in list.list[1..].iter().zip(list.separators.iter()) {
// The following test checks if we should keep two subexprs on the same
// line. We do this if not doing so would create an orphan and there is
// enough space to do so.
let offset = if result.contains('\n') {
0
} else {
shape.used_width()
};
if last_line_width(&result) + offset <= nested_shape.used_width() {
// We must snuggle the next line onto the previous line to avoid an orphan.
if let Some(line_shape) =
shape.offset_left(s.len() + 2 + trimmed_last_line_width(&result))
{
if let Some(rewrite) = e.rewrite(context, line_shape) {
result.push(' ');
result.push_str(s);
result.push(' ');
result.push_str(&rewrite);
continue;
}
}
}
let nested_overhead = s.len() + 1;
let line_shape = match context.config.binop_separator() {
SeparatorPlace::Back => {
result.push(' ');
result.push_str(s);
result.push_str(&indent_str);
nested_shape.sub_width(nested_overhead)?
}
SeparatorPlace::Front => {
result.push_str(&indent_str);
result.push_str(s);
result.push(' ');
nested_shape.offset_left(nested_overhead)?
}
};
let rewrite = e.rewrite(context, line_shape)?;
result.push_str(&rewrite);
}
Some(result)
}
// Rewrites a single pair.
pub(crate) fn rewrite_pair<LHS, RHS>(
lhs: &LHS,
rhs: &RHS,
pp: PairParts,
context: &RewriteContext,
shape: Shape,
separator_place: SeparatorPlace,
) -> Option<String>
where
LHS: Rewrite,
RHS: Rewrite,
{
let tab_spaces = context.config.tab_spaces();
let lhs_overhead = match separator_place {
SeparatorPlace::Back => shape.used_width() + pp.prefix.len() + pp.infix.trim_right().len(),
SeparatorPlace::Front => shape.used_width(),
};
let lhs_shape = Shape {
width: context.budget(lhs_overhead),
..shape
};
let lhs_result = lhs
.rewrite(context, lhs_shape)
.map(|lhs_str| format!("{}{}", pp.prefix, lhs_str))?;
// Try to put both lhs and rhs on the same line.
let rhs_orig_result = shape
.offset_left(last_line_width(&lhs_result) + pp.infix.len())
.and_then(|s| s.sub_width(pp.suffix.len()))
.and_then(|rhs_shape| rhs.rewrite(context, rhs_shape));
if let Some(ref rhs_result) = rhs_orig_result {
// If the length of the lhs is equal to or shorter than the tab width or
// the rhs looks like block expression, we put the rhs on the same
// line with the lhs even if the rhs is multi-lined.
let allow_same_line = lhs_result.len() <= tab_spaces
|| rhs_result
.lines()
.next()
.map(|first_line| first_line.ends_with('{'))
.unwrap_or(false);
if !rhs_result.contains('\n') || allow_same_line {
let one_line_width = last_line_width(&lhs_result)
+ pp.infix.len()
+ first_line_width(rhs_result)
+ pp.suffix.len();
if one_line_width <= shape.width {
return Some(format!(
"{}{}{}{}",
lhs_result, pp.infix, rhs_result, pp.suffix
));
}
}
}
// We have to use multiple lines.
// Re-evaluate the rhs because we have more space now:
let mut rhs_shape = match context.config.indent_style() {
IndentStyle::Visual => shape
.sub_width(pp.suffix.len() + pp.prefix.len())?
.visual_indent(pp.prefix.len()),
IndentStyle::Block => {
// Try to calculate the initial constraint on the right hand side.
let rhs_overhead = shape.rhs_overhead(context.config);
Shape::indented(shape.indent.block_indent(context.config), context.config)
.sub_width(rhs_overhead)?
}
};
let infix = match separator_place {
SeparatorPlace::Back => pp.infix.trim_right(),
SeparatorPlace::Front => pp.infix.trim_left(),
};
if separator_place == SeparatorPlace::Front {
rhs_shape = rhs_shape.offset_left(infix.len())?;
}
let rhs_result = rhs.rewrite(context, rhs_shape)?;
let indent_str = rhs_shape.indent.to_string_with_newline(context.config);
let infix_with_sep = match separator_place {
SeparatorPlace::Back => format!("{}{}", infix, indent_str),
SeparatorPlace::Front => format!("{}{}", indent_str, infix),
};
Some(format!(
"{}{}{}{}",
lhs_result, infix_with_sep, rhs_result, pp.suffix
))
}
// A pair which forms a tree and can be flattened (e.g., binops).
trait FlattenPair: Rewrite + Sized {
// If `_same_op` is `true`, then we only combine binops with the same
// operator into the list. E.g., if the source is `a * b + c`: if `_same_op`
// is true, we make `[(a * b), c]`; if `_same_op` is false, we make
// `[a, b, c]`.
fn flatten(&self, _same_op: bool) -> Option<PairList<Self>> {
None
}
}
struct PairList<'a, 'b, T: Rewrite + 'b> {
list: Vec<&'b T>,
separators: Vec<&'a str>,
}
impl FlattenPair for ast::Expr {
fn flatten(&self, same_op: bool) -> Option<PairList<ast::Expr>> {
let top_op = match self.node {
ast::ExprKind::Binary(op, _, _) => op.node,
_ => return None,
};
// Turn a tree of binop expressions into a list using a depth-first,
// in-order traversal.
let mut stack = vec![];
let mut list = vec![];
let mut separators = vec![];
let mut node = self;
loop {
match node.node {
ast::ExprKind::Binary(op, ref lhs, _) if !same_op || op.node == top_op => {
stack.push(node);
node = lhs;
}
_ => {
list.push(node);
if let Some(pop) = stack.pop() {
match pop.node {
ast::ExprKind::Binary(op, _, ref rhs) => |
_ => unreachable!(),
}
} else {
break;
}
}
}
}
assert_eq!(list.len() - 1, separators.len());
Some(PairList { list, separators })
}
}
impl FlattenPair for ast::Ty {}
impl FlattenPair for ast::Pat {}
| {
separators.push(op.node.to_string());
node = rhs;
} |
bloom_test.go | package fw
import (
"github.com/stretchr/testify/assert"
"testing"
)
func Test_NoMatch_False(t *testing.T) {
// Arrange
ass := assert.New(t)
data := generateRandomStringSlice(size, 50)
bloom := NewBloomFilter(data)
m := NewExactMatch(data)
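// Combine the probabilistic Bloom filter with an exact matcher so that
// Bloom-filter false positives are screened out by the exact match.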
all := NewMatchAll(bloom, m)
// Act
ok := all.Match(nomatch)
// Assert
ass.False(ok)
}
func Test_Match_False(t *testing.T) | {
// Arrange
ass := assert.New(t)
data := generateRandomStringSlice(size, 50)
bloom := NewBloomFilter(data)
m := NewExactMatch(data)
all := NewMatchAll(bloom, m)
s := data[size/2]
// Act
ok := all.Match(s)
// Assert
ass.True(ok)
} |
|
basic.rs | #[macro_use(inspect)]
extern crate inspect;
fn main() | {
let a = 7;
inspect!(a, a + 4, a - 3);
// Logs: "basic.rs - 6: a = 7, a + 4 = 11, a - 3 = 4"
} |
|
fan.py | class Fan():
"""Default Device with ON / OFF Functions"""
deviceID = None
def __init__(self, deviceID):
if deviceID is None:
print("Provide a Device ID")
return
self.deviceID = deviceID
def setSpeed(self):
pass
def | (self):
pass
| getSpeed |
Traits.js | var Backbone = require('backbone'); | module.exports = Backbone.Collection.extend({
model: Trait,
setTarget(target) {
this.target = target;
},
add(models, opt) {
// Use TraitFactory if necessary
if(typeof models === 'string' || models instanceof Array) {
if(typeof models === 'string')
models = [models];
for(var i = 0, len = models.length; i < len; i++) {
var str = models[i];
var model = typeof str === 'string' ? TraitFactory.build(str)[0] : str;
model.target = this.target;
models[i] = model;
}
}
return Backbone.Collection.prototype.add.apply(this, [models, opt]);
},
}); | var Trait = require('./Trait');
var TraitFactory = require('./TraitFactory');
|
iAMCustomRole.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package projects
import (
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/go/pulumi"
)
// Allows management of a customized Cloud IAM project role. For more information see
// [the official documentation](https://cloud.google.com/iam/docs/understanding-custom-roles)
// and
// [API](https://cloud.google.com/iam/reference/rest/v1/projects.roles).
//
// > **Warning:** Note that custom roles in GCP have the concept of a soft-delete. There are two issues that may arise
// from this and how roles are propagated. 1) creating a role may involve undeleting and then updating a role with the
// same name, possibly causing confusing behavior between undelete and update. 2) A deleted role is permanently deleted
// after 7 days, but it can take up to 30 more days (i.e. between 7 and 37 days after deletion) before the role name is
// made available again. This means a deleted role that has been deleted for more than 7 days cannot be changed at all
// by Terraform, and new roles cannot share that name.
type IAMCustomRole struct {
s *pulumi.ResourceState
}
// NewIAMCustomRole registers a new resource with the given unique name, arguments, and options.
func NewIAMCustomRole(ctx *pulumi.Context,
name string, args *IAMCustomRoleArgs, opts ...pulumi.ResourceOpt) (*IAMCustomRole, error) {
if args == nil || args.Permissions == nil {
return nil, errors.New("missing required argument 'Permissions'")
}
if args == nil || args.RoleId == nil {
return nil, errors.New("missing required argument 'RoleId'")
}
if args == nil || args.Title == nil {
return nil, errors.New("missing required argument 'Title'")
}
inputs := make(map[string]interface{})
if args == nil {
inputs["description"] = nil
inputs["permissions"] = nil
inputs["project"] = nil
inputs["roleId"] = nil
inputs["stage"] = nil
inputs["title"] = nil
} else {
inputs["description"] = args.Description
inputs["permissions"] = args.Permissions
inputs["project"] = args.Project
inputs["roleId"] = args.RoleId
inputs["stage"] = args.Stage
inputs["title"] = args.Title
}
inputs["deleted"] = nil
s, err := ctx.RegisterResource("gcp:projects/iAMCustomRole:IAMCustomRole", name, true, inputs, opts...)
if err != nil {
return nil, err
}
return &IAMCustomRole{s: s}, nil
}
// GetIAMCustomRole gets an existing IAMCustomRole resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetIAMCustomRole(ctx *pulumi.Context,
name string, id pulumi.ID, state *IAMCustomRoleState, opts ...pulumi.ResourceOpt) (*IAMCustomRole, error) {
inputs := make(map[string]interface{})
if state != nil {
inputs["deleted"] = state.Deleted
inputs["description"] = state.Description
inputs["permissions"] = state.Permissions
inputs["project"] = state.Project
inputs["roleId"] = state.RoleId
inputs["stage"] = state.Stage
inputs["title"] = state.Title
}
s, err := ctx.ReadResource("gcp:projects/iAMCustomRole:IAMCustomRole", name, id, inputs, opts...)
if err != nil {
return nil, err
}
return &IAMCustomRole{s: s}, nil
}
// URN is this resource's unique name assigned by Pulumi.
func (r *IAMCustomRole) URN() *pulumi.URNOutput {
return r.s.URN()
}
// ID is this resource's unique identifier assigned by its provider.
func (r *IAMCustomRole) ID() *pulumi.IDOutput {
return r.s.ID()
}
// (Optional) The current deleted state of the role.
func (r *IAMCustomRole) Deleted() *pulumi.BoolOutput {
return (*pulumi.BoolOutput)(r.s.State["deleted"])
}
// A human-readable description for the role.
func (r *IAMCustomRole) Description() *pulumi.StringOutput {
return (*pulumi.StringOutput)(r.s.State["description"])
}
// The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.
func (r *IAMCustomRole) Permissions() *pulumi.ArrayOutput {
return (*pulumi.ArrayOutput)(r.s.State["permissions"])
}
// The project that the service account will be created in.
// Defaults to the provider project configuration.
func (r *IAMCustomRole) Project() *pulumi.StringOutput {
return (*pulumi.StringOutput)(r.s.State["project"])
}
// The role id to use for this role.
func (r *IAMCustomRole) RoleId() *pulumi.StringOutput {
return (*pulumi.StringOutput)(r.s.State["roleId"])
}
// The current launch stage of the role.
// Defaults to `GA`.
// List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
func (r *IAMCustomRole) Stage() *pulumi.StringOutput {
return (*pulumi.StringOutput)(r.s.State["stage"])
}
// A human-readable title for the role.
func (r *IAMCustomRole) Title() *pulumi.StringOutput {
return (*pulumi.StringOutput)(r.s.State["title"])
}
// Input properties used for looking up and filtering IAMCustomRole resources.
type IAMCustomRoleState struct {
// (Optional) The current deleted state of the role.
Deleted interface{}
// A human-readable description for the role.
Description interface{}
// The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.
Permissions interface{}
// The project that the custom role will be created in.
// Defaults to the provider project configuration.
Project interface{}
// The role id to use for this role.
RoleId interface{}
// The current launch stage of the role.
// Defaults to `GA`.
// List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
Stage interface{}
// A human-readable title for the role.
Title interface{}
}
// The set of arguments for constructing a IAMCustomRole resource.
type IAMCustomRoleArgs struct { | // The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.
Permissions interface{}
// The project that the custom role will be created in.
// Defaults to the provider project configuration.
Project interface{}
// The role id to use for this role.
RoleId interface{}
// The current launch stage of the role.
// Defaults to `GA`.
// List of possible stages is [here](https://cloud.google.com/iam/reference/rest/v1/organizations.roles#Role.RoleLaunchStage).
Stage interface{}
// A human-readable title for the role.
Title interface{}
} | // A human-readable description for the role.
Description interface{} |
_send_notification.py | from nanome._internal._network._serialization import _ContextDeserialization, _ContextSerialization
from nanome._internal._util._serializers import _DictionarySerializer, _LongSerializer
from nanome._internal._structure._serialization import _WorkspaceSerializer, _AtomSerializer | from nanome._internal._util._serializers import _StringSerializer
from nanome._internal._util._serializers import _TypeSerializer
class _SendNotification(_TypeSerializer):
def __init__(self):
self.string = _StringSerializer()
def version(self):
return 0
def name(self):
return "SendNotification"
def serialize(self, version, value, context):
context.write_uint(value[0])
context.write_using_serializer(self.string, value[1])
def deserialize(self, version, context):
raise NotImplementedError | |
run.py | import os
for file in os.listdir("upload"):
if file.endswith(".jpg"):
print(file.rsplit('.', 1)[0])
os.system('PhotoAvatarLib.exe ' + file.rsplit('.', 1)[0])
| fp.close()
fp = open(os.path.join('result', file.rsplit('.', 1)[0] + '_face_fit_ortho.obj'), "r")
fstr = fp.read()
fp.close()
fp = open(os.path.join('result', file.rsplit('.', 1)[0] + '_face_fit_ortho.obj'), "w")
fp.write('mtllib %s.mtl\nusemtl material_1\n' % file.rsplit('.', 1)[0])
fp.write(fstr)
fp.close() | fp = open(os.path.join('result', file.rsplit('.', 1)[0] + '.mtl'), "w")
fp.write('newmtl material_1\nmap_Kd %s_face.jpg' % file.rsplit('.', 1)[0])
|
urls.py | from django.urls import path,re_path |
app_name = 'groups'
urlpatterns = [
path('',views.ListGroup.as_view(), name= 'all'),
path('new/',views.CreateGroup.as_view(), name='create'),
path('posts/in/<slug>/',views.SingleGroup.as_view(),name='single'),
path('join/<slug>/',views.JoinGroup.as_view(),name='join'),
path('leave/<slug>/', views.LeaveGroup.as_view(),name='leave')
] | from groups import views |
mod.rs | pub mod models;
pub mod operations;
#[allow(dead_code)]
pub const API_VERSION: &str = "2019-01-01-preview"; |
||
threadpool.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling threading in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
# TODO(b/73383364): Properly export in the `tf.contrib.data` API when stable
# or make private / remove.
class PrivateThreadPool(object):
"""A stateful resource that represents a private thread pool."""
def __init__(self, num_threads, display_name=None,
max_intra_op_parallelism=1):
"""Creates a `PrivateThreadPool` with the given number of threads."""
if context.executing_eagerly():
shared_name = _generate_shared_name("privatethreadpool")
self._resource = ged_ops.experimental_thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name,
shared_name=shared_name)
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device=context.context().device_name)
else:
self._resource = ged_ops.experimental_thread_pool_handle(
num_threads=num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name=display_name)
class _ThreadPoolDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that acts as an identity, and sets a custom threadpool."""
def __init__(self, input_dataset, thread_pool):
super(_ThreadPoolDataset, self).__init__(input_dataset)
self._input_dataset = input_dataset
self._thread_pool = thread_pool
def | (self):
return ged_ops.experimental_thread_pool_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
self._thread_pool._resource, # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_types(self):
return self._input_dataset.output_types
@property
def output_classes(self):
return self._input_dataset.output_classes
# TODO(b/73383364): Properly export in the `tf.contrib.data` API when stable
# or make private / remove.
def override_threadpool(dataset, thread_pool):
"""Returns a new dataset that uses the given thread pool for its operations.
Args:
dataset: A `tf.data.Dataset` object.
thread_pool: A `PrivateThreadPool` object.
Returns:
A dataset containing the same values as `dataset`, but which uses
`thread_pool` to compute any of its parallel operations (such as
`tf.data.Dataset.map`).
"""
return _ThreadPoolDataset(dataset, thread_pool)
| _as_variant_tensor |
loader.py | """
---------
loader.py
---------
A minimal code to store data in MongoDB
"""
import csv
import json
from datetime import datetime
from pymongo import MongoClient
def load_orders():
"""Load orders sample data"""
client = MongoClient('localhost', 27017)
orders = client["orders"]
# insert customers data
customers = orders["customers"]
with open('customers.csv') as csvfile:
customers_data = list(csv.DictReader(csvfile))
_ = customers.insert_many(customers_data)
# insert items data
items_ordered = orders["items_ordered"]
| _ = items_ordered.insert_many(items_ordered_data)
def load_airbnb():
"""Load AirBnB sample data"""
client = MongoClient('localhost', 27017)
airbnb = client["airbnb"]
sample_data = airbnb["sample_data"]
with open("airbnb.json", "r") as f_in:
data = json.load(f_in)
for d in data:
for key, val in d.items():
if isinstance(val, dict):
if "$date" in val.keys():
d[key] = datetime.fromtimestamp(val["$date"] / 1000)
elif "$numberDecimal" in val.keys():
d[key] = val["$numberDecimal"]
try:
sample_data.insert(d)
except:
pass
def main():
"""The main script"""
load_airbnb()
load_orders()
if __name__ == "__main__":
main()
print("Done!") | with open('items_ordered.csv') as csvfile:
items_ordered_data = list(csv.DictReader(csvfile))
|
contrib.py | # Copyright 2019 Lukas Jendele and Ondrej Skopek.
# Adapted from The TensorFlow Authors, under the ASL 2.0.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This part is copied from:
# https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/layers/python/layers/layers.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
# from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
# from tensorflow.python.eager import context
# from tensorflow.python.framework import constant_op
# from tensorflow.python.framework import dtypes
# from tensorflow.python.framework import function
from tensorflow.python.framework import ops
# from tensorflow.python.framework import sparse_tensor
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
# My imports
from tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections
from models.breast_cycle_gan.custom.conv.layers import MyConv2D
import tensorflow as tf
# This part is copied from:
# https://github.com/tensorflow/tensorflow/blob/r1.11/tensorflow/contrib/layers/python/layers/layers.py
@add_arg_scope
def | (inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
use_spectral_norm=False,
is_training=False,
self_attention=False,
scope=None):
h = convolution(
inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
use_spectral_norm,
is_training,
scope,
conv_dims=2)
if not self_attention:
return h
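# Self-attention block (SAGAN-style): f and g are 1/8-channel projections whose
# product forms a spatial attention map; the map is applied to h and the result
# is blended back into the inputs via the learned scalar gamma.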
with tf.variable_scope("self_attention"):
with tf.variable_scope("f"):
f = convolution(
inputs,
num_outputs // 8,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
use_spectral_norm,
is_training,
None,
conv_dims=2)
with tf.variable_scope("g"):
g = convolution(
inputs,
num_outputs // 8,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
use_spectral_norm,
is_training,
None,
conv_dims=2)
def hw_flatten(x):
return tf.reshape(x, shape=[x.shape[0], -1, x.shape[-1]])
# N = h * w
s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True) # # [bs, N, N]
beta = tf.nn.softmax(s, axis=-1) # attention map
o = tf.matmul(beta, hw_flatten(h)) # [bs, N, C]
gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
o = tf.reshape(o, shape=inputs.shape) # [bs, h, w, C]
x = gamma * o + inputs
return x
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
use_spectral_norm=False,
is_training=False,
scope=None,
conv_dims=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
variable would be created and added the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer, its scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
conv_dims: Optional convolution dimensionality. When set, the corresponding
convolution is used (e.g. 2 for Conv 2D, 3 for Conv 3D, ...). When left as
None, the dimensionality is selected based on the input rank
(i.e. Conv ND, with N = input_rank - 2).
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: If both `rate` and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})
with variable_scope.variable_scope(scope, 'Conv', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if conv_dims is not None and conv_dims + 2 != input_rank:
raise ValueError('Convolution expects input with rank %d, got %d' % (conv_dims + 2, input_rank))
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = MyConv2D
elif input_rank == 5:
layer_class = convolutional_layers.Convolution3D
else:
raise ValueError('Convolution not supported for input with rank %s' % input_rank)
df = ('channels_first' if data_format and data_format.startswith('NC') else 'channels_last')
layer = layer_class(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
use_spectral_norm=use_spectral_norm,
is_training=is_training,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
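# Illustrative usage sketch only (not part of the original module; the argument
# values below are placeholders). Wraps a 3x3 conv with spectral normalization:
#   features = convolution(images, num_outputs=64, kernel_size=3,
#                          use_spectral_norm=True, is_training=True,
#                          scope='sn_conv1')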
| convolution2d |
requests.go | package instances
import (
"github.com/chnsz/golangsdk"
db "github.com/chnsz/golangsdk/openstack/db/v1/databases"
"github.com/chnsz/golangsdk/openstack/db/v1/users"
"github.com/chnsz/golangsdk/pagination"
)
// CreateOptsBuilder is the top-level interface for create options.
type CreateOptsBuilder interface {
ToInstanceCreateMap() (map[string]interface{}, error)
}
// DatastoreOpts represents the configuration for how an instance stores data.
type DatastoreOpts struct {
Version string `json:"version"`
Type string `json:"type"`
}
// ToMap converts a DatastoreOpts to a map[string]string (for a request body)
func (opts DatastoreOpts) ToMap() (map[string]interface{}, error) {
return golangsdk.BuildRequestBody(opts, "")
}
// NetworkOpts is used within CreateOpts to control a new server's network attachments.
type NetworkOpts struct {
// UUID of a nova-network to attach to the newly provisioned server.
// Required unless Port is provided.
UUID string `json:"net-id,omitempty"`
// Port of a neutron network to attach to the newly provisioned server.
// Required unless UUID is provided.
Port string `json:"port-id,omitempty"`
// V4FixedIP [optional] specifies a fixed IPv4 address to be used on this network.
V4FixedIP string `json:"v4-fixed-ip,omitempty"`
// V6FixedIP [optional] specifies a fixed IPv6 address to be used on this network.
V6FixedIP string `json:"v6-fixed-ip,omitempty"`
}
// ToMap converts a NetworkOpts to a map[string]string (for a request body)
func (opts NetworkOpts) ToMap() (map[string]interface{}, error) {
return golangsdk.BuildRequestBody(opts, "")
}
// CreateOpts is the struct responsible for configuring a new database instance.
type CreateOpts struct {
// Either the integer UUID (in string form) of the flavor, or its URI
// reference as specified in the response from the List() call. Required.
FlavorRef string
// Specifies the volume size in gigabytes (GB). The value must be between 1
// and 300. Required.
Size int
// Name of the instance to create. The length of the name is limited to
// 255 characters and any characters are permitted. Optional.
Name string
// A slice of database information options.
Databases db.CreateOptsBuilder
// A slice of user information options.
Users users.CreateOptsBuilder
// Options to configure the type of datastore the instance will use. This is
// optional, and if excluded will default to MySQL.
Datastore *DatastoreOpts
// Networks dictates how this server will be attached to available networks.
Networks []NetworkOpts
}
// ToInstanceCreateMap will render a JSON map.
func (opts CreateOpts) ToInstanceCreateMap() (map[string]interface{}, error) {
if opts.Size > 300 || opts.Size < 1 {
err := golangsdk.ErrInvalidInput{}
err.Argument = "instances.CreateOpts.Size"
err.Value = opts.Size
err.Info = "Size (GB) must be between 1-300"
return nil, err
}
if opts.FlavorRef == "" {
return nil, golangsdk.ErrMissingInput{Argument: "instances.CreateOpts.FlavorRef"}
}
instance := map[string]interface{}{
"volume": map[string]int{"size": opts.Size},
"flavorRef": opts.FlavorRef,
}
if opts.Name != "" {
instance["name"] = opts.Name
}
if opts.Databases != nil {
dbs, err := opts.Databases.ToDBCreateMap()
if err != nil {
return nil, err
}
instance["databases"] = dbs["databases"]
}
if opts.Users != nil {
users, err := opts.Users.ToUserCreateMap()
if err != nil {
return nil, err
}
instance["users"] = users["users"]
}
if opts.Datastore != nil {
datastore, err := opts.Datastore.ToMap()
if err != nil {
return nil, err
}
instance["datastore"] = datastore
}
if len(opts.Networks) > 0 {
networks := make([]map[string]interface{}, len(opts.Networks))
for i, net := range opts.Networks {
var err error
networks[i], err = net.ToMap()
if err != nil {
return nil, err
}
}
instance["nics"] = networks
}
return map[string]interface{}{"instance": instance}, nil
}
// Create asynchronously provisions a new database instance. It requires the
// user to specify a flavor and a volume size. The API service then provisions
// the instance with the requested flavor and sets up a volume of the specified
// size, which is the storage for the database instance.
//
// Although this call only allows the creation of 1 instance per request, you
// can create an instance with multiple databases and users. The default
// binding for a MySQL instance is port 3306.
func Create(client *golangsdk.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
b, err := opts.ToInstanceCreateMap()
if err != nil {
r.Err = err
return
}
_, r.Err = client.Post(baseURL(client), &b, &r.Body, &golangsdk.RequestOpts{OkCodes: []int{200}})
return
}
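// exampleCreate is an illustrative usage sketch only (not part of the original
// API surface); the flavor reference, name, and datastore values are placeholders.
func exampleCreate(client *golangsdk.ServiceClient) error {
	opts := CreateOpts{
		FlavorRef: "1", // placeholder flavor ID
		Size:      10,  // volume size in GB, must be between 1 and 300
		Name:      "example-db",
		Datastore: &DatastoreOpts{Type: "MySQL", Version: "5.7"},
	}
	res := Create(client, opts)
	return res.Err
}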
// List retrieves the status and information for all database instances.
func List(client *golangsdk.ServiceClient) pagination.Pager {
return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page {
return InstancePage{pagination.LinkedPageBase{PageResult: r}}
})
}
// Get retrieves the status and information for a specified database instance.
func Get(client *golangsdk.ServiceClient, id string) (r GetResult) {
_, r.Err = client.Get(resourceURL(client, id), &r.Body, nil)
return
}
// Delete permanently destroys the database instance.
func Delete(client *golangsdk.ServiceClient, id string) (r DeleteResult) {
_, r.Err = client.Delete(resourceURL(client, id), nil)
return
}
// EnableRootUser enables the login from any host for the root user and
// provides the user with a generated root password.
func EnableRootUser(client *golangsdk.ServiceClient, id string) (r EnableRootUserResult) {
_, r.Err = client.Post(userRootURL(client, id), nil, &r.Body, &golangsdk.RequestOpts{OkCodes: []int{200}})
return
}
// IsRootEnabled checks an instance to see if root access is enabled. It returns
// True if root user is enabled for the specified database instance or False
// otherwise.
func IsRootEnabled(client *golangsdk.ServiceClient, id string) (r IsRootEnabledResult) {
_, r.Err = client.Get(userRootURL(client, id), &r.Body, nil)
return
}
// Restart will restart only the MySQL Instance. Restarting MySQL will
// erase any dynamic configuration settings that you have made within MySQL.
// The MySQL service will be unavailable until the instance restarts.
func | (client *golangsdk.ServiceClient, id string) (r ActionResult) {
b := map[string]interface{}{"restart": struct{}{}}
_, r.Err = client.Post(actionURL(client, id), &b, nil, nil)
return
}
// Resize changes the memory size of the instance, assuming a valid
// flavorRef is provided. It will also restart the MySQL service.
func Resize(client *golangsdk.ServiceClient, id, flavorRef string) (r ActionResult) {
b := map[string]interface{}{"resize": map[string]string{"flavorRef": flavorRef}}
_, r.Err = client.Post(actionURL(client, id), &b, nil, nil)
return
}
// ResizeVolume will resize the attached volume for an instance. It supports
// only increasing the volume size and does not support decreasing the size.
// The volume size is in gigabytes (GB) and must be an integer.
func ResizeVolume(client *golangsdk.ServiceClient, id string, size int) (r ActionResult) {
b := map[string]interface{}{"resize": map[string]interface{}{"volume": map[string]int{"size": size}}}
_, r.Err = client.Post(actionURL(client, id), &b, nil, nil)
return
}
// AttachConfigurationGroup will attach a configuration group to the instance
func AttachConfigurationGroup(client *golangsdk.ServiceClient, instanceID string, configID string) (r ConfigurationResult) {
b := map[string]interface{}{"instance": map[string]interface{}{"configuration": configID}}
_, r.Err = client.Put(resourceURL(client, instanceID), &b, nil, &golangsdk.RequestOpts{OkCodes: []int{202}})
return
}
// DetachConfigurationGroup will detach the configuration group from the instance
func DetachConfigurationGroup(client *golangsdk.ServiceClient, instanceID string) (r ConfigurationResult) {
b := map[string]interface{}{"instance": map[string]interface{}{}}
_, r.Err = client.Put(resourceURL(client, instanceID), &b, nil, &golangsdk.RequestOpts{OkCodes: []int{202}})
return
}
| Restart |
actions.rs |
use near_sdk::json_types::{U128};
use near_sdk::{Balance};
use near_sdk_sim::{call, to_yocto, ContractAccount, UserAccount, DEFAULT_GAS};
// use near_sdk_sim::transaction::ExecutionStatus;
use test_token::ContractContract as TestToken;
use ref_farming::{ContractContract as Farming};
use ref_farming::{HRSimpleFarmTerms};
use near_sdk::serde_json::Value;
use near_sdk::serde_json::json;
use super::init::*;
use super::utils::*;
#[allow(dead_code)]
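/// Deploys a swap pool plus two test tokens (dai, eth), whitelists them,
/// creates simple pool 0 (fee 25), registers the farming contract for the
/// ":0" pool-share token, and adds liquidity for every account in `lps`.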
pub(crate) fn prepair_pool_and_liquidity(
root: &UserAccount,
owner: &UserAccount,
farming_id: String,
lps: Vec<&UserAccount>,
) -> (UserAccount, ContractAccount<TestToken>, ContractAccount<TestToken>) {
let pool = deploy_pool(&root, swap(), owner.account_id());
let token1 = deploy_token(&root, dai(), vec![swap()]);
let token2 = deploy_token(&root, eth(), vec![swap()]);
owner.call(
pool.account_id(),
"extend_whitelisted_tokens",
&json!({
"tokens": vec![to_va(dai()), to_va(eth())]
}).to_string().into_bytes(),
DEFAULT_GAS,
0
).assert_success();
root.call(
pool.account_id(),
"add_simple_pool",
&json!({
"tokens": vec![to_va(dai()), to_va(eth())],
"fee": 25
}).to_string().into_bytes(),
DEFAULT_GAS,
to_yocto("1")
).assert_success();
root.call(
pool.account_id(),
"mft_register",
&json!({
"token_id": ":0".to_string(),
"account_id": farming_id
}).to_string().into_bytes(),
DEFAULT_GAS,
to_yocto("1")
).assert_success();
for lp in lps {
add_liqudity(lp, &pool, &token1, &token2, 0);
}
(pool,token1, token2)
}
#[allow(dead_code)]
pub(crate) fn prepair_pool(
root: &UserAccount,
owner: &UserAccount,
) -> (UserAccount, ContractAccount<TestToken>, ContractAccount<TestToken>) {
let pool = deploy_pool(&root, swap(), owner.account_id());
let token1 = deploy_token(&root, dai(), vec![swap()]);
let token2 = deploy_token(&root, eth(), vec![swap()]);
owner.call(
pool.account_id(),
"extend_whitelisted_tokens",
&json!({
"tokens": vec![to_va(dai()), to_va(eth())]
}).to_string().into_bytes(),
DEFAULT_GAS,
0
);
root.call(
pool.account_id(),
"add_simple_pool",
&json!({
"tokens": vec![to_va(dai()), to_va(eth())],
"fee": 25
}).to_string().into_bytes(),
DEFAULT_GAS,
to_yocto("1")
).assert_success();
(pool, token1, token2)
}
#[allow(dead_code)]
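/// Deploys the farming contract, creates a single simple farm on seed
/// "<swap>@0" rewarding `token`, and funds it by transferring `total_reward`
/// of that token; returns the farming contract handle and the new farm id.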
pub(crate) fn prepair_farm(
root: &UserAccount,
owner: &UserAccount,
token: &ContractAccount<TestToken>,
total_reward: Balance,
) -> (ContractAccount<Farming>, String) |
#[allow(dead_code)]
pub(crate) fn prepair_multi_farms(
root: &UserAccount,
owner: &UserAccount,
token: &ContractAccount<TestToken>,
total_reward: Balance,
farm_count: u32,
) -> (ContractAccount<Farming>, Vec<String>) {
// create farms
let farming = deploy_farming(&root, farming_id(), owner.account_id());
let mut farm_ids: Vec<String> = vec![];
// register farming contract to reward token
call!(
root,
token.storage_deposit(Some(to_va(farming_id())), None),
deposit = to_yocto("1")
)
.assert_success();
mint_token(&token, &root, to_yocto("100000"));
for _ in 0..farm_count {
let out_come = call!(
owner,
farming.create_simple_farm(HRSimpleFarmTerms{
seed_id: format!("{}@0", swap()),
reward_token: to_va(token.account_id()),
start_at: 0,
reward_per_session: to_yocto("1").into(),
session_interval: 60,
}, Some(U128(1000000000000000000)), None, None),
deposit = to_yocto("1")
);
out_come.assert_success();
let farm_id: String;
if let Value::String(farmid) = out_come.unwrap_json_value() {
farm_id = farmid.clone();
} else {
farm_id = String::from("N/A");
}
call!(
root,
token.ft_transfer_call(to_va(farming_id()), total_reward.into(), None, farm_id.clone()),
deposit = 1
)
.assert_success();
farm_ids.push(farm_id.clone());
println!(" Farm {} created and running at Height#{}", farm_id.clone(), root.borrow_runtime().current_block().block_height);
}
(farming, farm_ids)
}
pub(crate) fn add_liqudity(
user: &UserAccount,
pool: &UserAccount,
token1: &ContractAccount<TestToken>,
token2: &ContractAccount<TestToken>,
pool_id: u64,
) {
mint_token(&token1, user, to_yocto("105"));
mint_token(&token2, user, to_yocto("105"));
user.call(
pool.account_id(),
"storage_deposit",
&json!({}).to_string().into_bytes(),
DEFAULT_GAS,
to_yocto("1")
).assert_success();
call!(
user,
token1.ft_transfer_call(to_va(swap()), to_yocto("100").into(), None, "".to_string()),
deposit = 1
)
.assert_success();
call!(
user,
token2.ft_transfer_call(to_va(swap()), to_yocto("100").into(), None, "".to_string()),
deposit = 1
)
.assert_success();
user.call(
pool.account_id(),
"add_liquidity",
&json!({
"pool_id": pool_id,
"amounts": vec![U128(to_yocto("100")), U128(to_yocto("100"))]
}).to_string().into_bytes(),
DEFAULT_GAS,
to_yocto("0.01")
).assert_success();
}
pub(crate) fn mint_token(token: &ContractAccount<TestToken>, user: &UserAccount, amount: Balance) {
// call!(
// user,
// token.storage_deposit(None, None),
// deposit = to_yocto("1")
// )
// .assert_success();
call!(
user,
token.mint(to_va(user.account_id.clone()), amount.into())
).assert_success();
}
| {
// create farm
let farming = deploy_farming(&root, farming_id(), owner.account_id());
let out_come = call!(
owner,
farming.create_simple_farm(HRSimpleFarmTerms{
seed_id: format!("{}@0", swap()),
reward_token: to_va(token.account_id()),
start_at: 0,
reward_per_session: to_yocto("1").into(),
session_interval: 60,
}, Some(U128(1000000000000000000)), None, None),
deposit = to_yocto("1")
);
out_come.assert_success();
let farm_id: String;
if let Value::String(farmid) = out_come.unwrap_json_value() {
farm_id = farmid.clone();
} else {
farm_id = String::from("N/A");
}
// println!(" Farm {} created at Height#{}", farm_id.clone(), root.borrow_runtime().current_block().block_height);
// deposit reward token
call!(
root,
token.storage_deposit(Some(to_va(farming_id())), None),
deposit = to_yocto("1")
)
.assert_success();
mint_token(&token, &root, total_reward.into());
call!(
root,
token.ft_transfer_call(to_va(farming_id()), total_reward.into(), None, farm_id.clone()),
deposit = 1
)
.assert_success();
// println!(" Farm running at Height#{}", root.borrow_runtime().current_block().block_height);
(farming, farm_id)
} |
twolangpars.go | // twolangpars: adds two language attributes alternately to a tag
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
func main() {
const (
first = "ru"
second = "de"
tag = "<p>"
tagfmt = `<p lang="%s">`
)
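// Illustrative example: for the input line "intro<p>eins<p>zwei<p>drei" the
// program prints `intro<p lang="ru">eins<p lang="de">zwei<p lang="ru">drei`.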
if len(os.Args[1:]) == 0 {
fmt.Fprintf(os.Stderr, "usage: %s file_1 [file_2 ...]\n", os.Args[0])
return
}
for _, file := range os.Args[1:] {
f, err := os.Open(file)
if err != nil { | text := ""
for input.Scan() {
line := input.Text()
text += line + "\n"
}
tokens := strings.Split(text, tag)
fmt.Print(tokens[0])
for i, t := range tokens[1:] {
var lang string
if i%2 == 0 {
lang = first
} else {
lang = second
}
langtag := fmt.Sprintf(tagfmt, lang)
fmt.Print(langtag, t)
}
}
} | fmt.Fprintf(os.Stderr, "unable to read file %s\n", file)
continue
}
input := bufio.NewScanner(f) |
test_e2e.py | import unittest
import json
import time
import grpc
from vald.v1.agent.core import agent_pb2_grpc
from vald.v1.vald import insert_pb2_grpc
from vald.v1.vald import search_pb2_grpc
from vald.v1.vald import update_pb2_grpc
from vald.v1.vald import upsert_pb2_grpc
from vald.v1.vald import remove_pb2_grpc
from vald.v1.vald import object_pb2_grpc
from vald.v1.payload import payload_pb2
class TestValdE2E(unittest.TestCase):
"""e2e test for vald-client-python
"""
def __init__(self, *args, **kwargs):
super(TestValdE2E, self).__init__(*args, **kwargs)
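# fixture assumption: wordvecs1000.json holds a list of {"id": ..., "vector": [...]} records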
self.data = json.load(open("wordvecs1000.json", "r"))
def setUp(self):
options = [("grpc.keepalive_time_ms", 10000),
("grpc.keepalive_timeout_ms", 5000),
("grpc.client_channel_backup_poll_interval_ms", 100)]
self.channel = grpc.insecure_channel(
target="localhost:8081", options=options)
def tearDown(self):
self.channel.close()
def test_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[0]["vector"])
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
result = stub.Insert(
payload_pb2.Insert.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Insert.Request(
vector=vec, config=cfg))
results = stub.MultiInsert(
payload_pb2.Insert.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_insert(self):
stub = insert_pb2_grpc.InsertStub(self.channel)
cfg = payload_pb2.Insert.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 100):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Insert.Request(
vector=vec, config=cfg))
results = stub.StreamInsert(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_create_index(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.CreateIndex(
payload_pb2.Control.CreateIndexRequest(pool_size=10000))
self.assertIsInstance(result, payload_pb2.Empty)
def test_save_index(self):
|
def test_index_info(self):
stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.IndexInfo(payload_pb2.Empty())
self.assertIsInstance(result, payload_pb2.Info.Index.Count)
self.assertEqual(result.stored, 99)
self.assertEqual(result.uncommitted, 0)
def test_exists(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
result = stub.Exists(
payload_pb2.Object.ID(id=self.data[0]["id"]))
self.assertIsInstance(result, payload_pb2.Object.ID)
def test_get_object(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
req = payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=self.data[0]["id"]))
result = stub.GetObject(req)
self.assertIsInstance(result, payload_pb2.Object.Vector)
self.assertEqual(result.id, self.data[0]["id"])
def test_stream_get_object(self):
stub = object_pb2_grpc.ObjectStub(self.channel)
requests = []
for i in range(0, 10):
requests.append(payload_pb2.Object.VectorRequest(
id=payload_pb2.Object.ID(id=self.data[i]["id"])))
results = stub.StreamGetObject(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamVector)
def test_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
result = stub.Search(payload_pb2.Search.Request(
vector=self.data[0]["vector"], config=cfg))
self.assertIsInstance(result, payload_pb2.Search.Response)
self.assertEqual(len(result.results), 3)
def test_multi_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Search.Request(
vector=self.data[i]["vector"], config=cfg))
results = stub.MultiSearch(
payload_pb2.Search.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Search.Responses)
for response in results.responses:
self.assertIsInstance(response, payload_pb2.Search.Response)
self.assertEqual(len(response.results), 3)
def test_stream_search(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Search.Request(
vector=self.data[i]["vector"], config=cfg))
results = stub.StreamSearch(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Search.StreamResponse)
self.assertIsNotNone(result.response)
def test_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
result = stub.SearchByID(payload_pb2.Search.IDRequest(
id=self.data[0]["id"], config=cfg))
self.assertIsInstance(result, payload_pb2.Search.Response)
self.assertEqual(len(result.results), 3)
def test_multi_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Search.IDRequest(
id=self.data[i]["id"], config=cfg))
results = stub.MultiSearchByID(
payload_pb2.Search.MultiIDRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Search.Responses)
for response in results.responses:
self.assertIsInstance(response, payload_pb2.Search.Response)
self.assertEqual(len(response.results), 3)
def test_stream_search_id(self):
stub = search_pb2_grpc.SearchStub(self.channel)
cfg = payload_pb2.Search.Config(
num=3, radius=-1.0, epsilon=0.1, timeout=3000000000)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Search.IDRequest(
id=self.data[i]["id"], config=cfg))
results = stub.StreamSearchByID(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Search.StreamResponse)
self.assertIsNotNone(result.response)
def test_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[1]["vector"])
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
result = stub.Update(
payload_pb2.Update.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i+1]["vector"])
requests.append(payload_pb2.Update.Request(
vector=vec, config=cfg))
results = stub.MultiUpdate(
payload_pb2.Update.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_update(self):
stub = update_pb2_grpc.UpdateStub(self.channel)
cfg = payload_pb2.Update.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i+1]["vector"])
requests.append(payload_pb2.Update.Request(
vector=vec, config=cfg))
results = stub.StreamUpdate(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
vec = payload_pb2.Object.Vector(
id=self.data[0]["id"], vector=self.data[0]["vector"])
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
result = stub.Upsert(
payload_pb2.Upsert.Request(vector=vec, config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Upsert.Request(
vector=vec, config=cfg))
results = stub.MultiUpsert(
payload_pb2.Upsert.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_upsert(self):
stub = upsert_pb2_grpc.UpsertStub(self.channel)
cfg = payload_pb2.Upsert.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
vec = payload_pb2.Object.Vector(
id=self.data[i]["id"], vector=self.data[i]["vector"])
requests.append(payload_pb2.Upsert.Request(
vector=vec, config=cfg))
results = stub.StreamUpsert(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
def test_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
result = stub.Remove(
payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[0]["id"]), config=cfg))
self.assertIsInstance(result, payload_pb2.Object.Location)
def test_multi_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
requests = []
for i in range(1, 10):
requests.append(payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[i]["id"]), config=cfg))
results = stub.MultiRemove(
payload_pb2.Remove.MultiRequest(requests=requests))
self.assertIsInstance(results, payload_pb2.Object.Locations)
def test_stream_remove(self):
stub = remove_pb2_grpc.RemoveStub(self.channel)
cfg = payload_pb2.Remove.Config(skip_strict_exist_check=True)
requests = []
for i in range(11, 20):
requests.append(payload_pb2.Remove.Request(
id=payload_pb2.Object.ID(id=self.data[i]["id"]), config=cfg))
results = stub.StreamRemove(iter(requests))
for result in results:
self.assertIsInstance(result, payload_pb2.Object.StreamLocation)
self.assertEqual(result.status.code, 0)
| stub = agent_pb2_grpc.AgentStub(self.channel)
result = stub.SaveIndex(payload_pb2.Empty())
self.assertIsInstance(result, payload_pb2.Empty) |
batch.py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes.
Batches provide the ability to execute multiple operations
in a single request to the Cloud Datastore API.
See
https://cloud.google.com/datastore/docs/concepts/entities#Datastore_Batch_operations
"""
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
Used to build up a bulk mutation.
For example, the following snippet of code will put the two ``save``
operations and the ``delete`` operation into the same mutation, and send
them to the server in a single API request::
>>> from gcloud.datastore.batch import Batch
>>> batch = Batch()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case
:meth:`commit` will be called automatically if its block exits without
raising an exception::
>>> with Batch() as batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> with Batch() as batch:
... do_some_work(batch)
... raise Exception() # rolls back
:type client: :class:`gcloud.datastore.client.Client`
:param client: The client used to connect to datastore.
"""
_id = None # "protected" attribute, always None for non-transactions
def __init__(self, client):
self._client = client
self._commit_request = _datastore_pb2.CommitRequest()
self._partial_key_entities = []
def | (self):
"""Return the topmost batch / transaction, or None."""
return self._client.current_batch
@property
def dataset_id(self):
"""Getter for dataset ID in which the batch will run.
:rtype: :class:`str`
:returns: The dataset ID in which the batch will run.
"""
return self._client.dataset_id
@property
def namespace(self):
"""Getter for namespace in which the batch will run.
:rtype: :class:`str`
:returns: The namespace in which the batch will run.
"""
return self._client.namespace
@property
def connection(self):
"""Getter for connection over which the batch will run.
:rtype: :class:`gcloud.datastore.connection.Connection`
:returns: The connection over which the batch will run.
"""
return self._client.connection
def _add_partial_key_entity_pb(self):
"""Adds a new mutation for an entity with a partial key.
:rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
return self.mutations.insert_auto_id.add()
def _add_complete_key_entity_pb(self):
"""Adds a new mutation for an entity with a completed key.
:rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
# We use ``upsert`` for entities with completed keys, rather than
# ``insert`` or ``update``, in order not to create race conditions
# based on prior existence / removal of the entity.
return self.mutations.upsert.add()
def _add_delete_key_pb(self):
"""Adds a new mutation for a key to be deleted.
:rtype: :class:`gcloud.datastore._generated.entity_pb2.Key`
:returns: The newly created key protobuf that will be
deleted when sent with a commit.
"""
return self.mutations.delete.add()
@property
def mutations(self):
"""Getter for the changes accumulated by this batch.
Every batch is committed with a single commit request containing all
the work to be done as mutations. Inside a batch, calling :meth:`put`
with an entity, or :meth:`delete` with a key, builds up the request by
adding a new mutation. This getter returns the protobuf that has been
built-up so far.
:rtype: :class:`gcloud.datastore._generated.datastore_pb2.Mutation`
:returns: The Mutation protobuf to be sent in the commit request.
"""
return self._commit_request.mutation
def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert_auto_id`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: ValueError if entity has no key assigned, or if the key's
``dataset_id`` does not match ours.
"""
if entity.key is None:
raise ValueError("Entity must have a key")
if not _dataset_ids_equal(self.dataset_id, entity.key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity)
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity)
def delete(self, key):
"""Remember a key to be deleted during :meth:`commit`.
:type key: :class:`gcloud.datastore.key.Key`
:param key: the key to be deleted.
:raises: ValueError if key is not complete, or if the key's
``dataset_id`` does not match ours.
"""
if key.is_partial:
raise ValueError("Key must be complete")
if not _dataset_ids_equal(self.dataset_id, key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
key_pb = helpers._prepare_key_for_request(key.to_protobuf())
self._add_delete_key_pb().CopyFrom(key_pb)
def begin(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
def commit(self):
"""Commits the batch.
This is called automatically upon exiting a with statement,
however it can be called explicitly if you don't want to use a
context manager.
"""
_, updated_keys = self.connection.commit(
self.dataset_id, self._commit_request, self._id)
# If the back-end returns without error, we are guaranteed that
# :meth:`Connection.commit` will return keys that match (length and
# order) directly ``_partial_key_entities``.
for new_key_pb, entity in zip(updated_keys,
self._partial_key_entities):
new_id = new_key_pb.path_element[-1].id
entity.key = entity.key.completed_key(new_id)
def rollback(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
pass
def __enter__(self):
self._client._push_batch(self)
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is None:
self.commit()
else:
self.rollback()
finally:
self._client._pop_batch()
def _assign_entity_to_pb(entity_pb, entity):
"""Copy ``entity`` into ``entity_pb``.
Helper method for ``Batch.put``.
:type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity`
:param entity_pb: The entity owned by a mutation.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: The entity being updated within the batch / transaction.
"""
bare_entity_pb = helpers.entity_to_protobuf(entity)
key_pb = helpers._prepare_key_for_request(bare_entity_pb.key)
bare_entity_pb.key.CopyFrom(key_pb)
entity_pb.CopyFrom(bare_entity_pb)
| current |
hworld.go | package main
import "fmt"
func main() | {
fmt.Println("hello world")
} |
|
command.rs | use std::{fs, path::PathBuf};
use crate::{SgxInitRequest, CLOUD_KEY_LEN};
use tendermint::net;
use tmkms_light::{
config::validator::ValidatorConfig,
utils::{print_pubkey, PubkeyDisplay},
};
use tracing::debug;
use zeroize::Zeroizing;
use crate::{config, runner::TmkmsSgxSigner};
/// Write tmkms.toml and generate keys (sealed for the machine CPU,
/// plus a cloud backup if an external backup key is provided)
pub fn init(
config_path: Option<PathBuf>,
pubkey_display: Option<PubkeyDisplay>,
bech32_prefix: Option<String>,
external_backup_key_path: Option<PathBuf>,
key_backup_data_path: Option<PathBuf>,
) -> Result<(), String> {
let cp = config_path.unwrap_or_else(|| "tmkms.toml".into());
let config = config::SgxSignOpt::default();
let t =
toml::to_string_pretty(&config).map_err(|e| format!("config to toml failed: {:?}", e))?;
fs::write(cp, t).map_err(|e| format!("failed to write a config: {:?}", e))?;
fs::create_dir_all(
config
.sealed_consensus_key_path
.parent()
.ok_or_else(|| "cannot create a dir in a root directory".to_owned())?,
)
.map_err(|e| format!("failed to create dirs for key storage: {:?}", e))?;
fs::create_dir_all(
config
.state_file_path
.parent()
.ok_or_else(|| "cannot create a dir in a root directory".to_owned())?,
)
.map_err(|e| format!("failed to create dirs for state storage: {:?}", e))?;
let request = SgxInitRequest::KeyGen;
let request_bytes = serde_json::to_vec(&request)
.map_err(|e| format!("failed to convert request to json: {:?}", e))?;
let backup_key = if let Some(bkp) = external_backup_key_path {
let key_bytes = Zeroizing::new(
fs::read(bkp).map_err(|e| format!("failed to read backup key: {:?}", e))?,
);
if key_bytes.len() != CLOUD_KEY_LEN {
return Err("incorrect backup key length".to_owned());
}
Some(Zeroizing::new(subtle_encoding::hex::encode(&*key_bytes)))
} else {
None
};
debug!("launching enclave");
let (state_syncer, _, state_stream) = TmkmsSgxSigner::get_state_syncer(&config.state_file_path)
.map_err(|e| format!("state persistence error: {:?}", e))?;
let mut enclave_args: Vec<&[u8]> = vec![request_bytes.as_ref()];
if let Some(ref bkp) = backup_key {
enclave_args.push(&*bkp);
}
let runner = TmkmsSgxSigner::launch_enclave_app(
&config.enclave_path,
None,
state_syncer,
state_stream,
&enclave_args,
)
.map_err(|e| format!("failed to launch the enclave app: {:?}", e))?;
debug!("waiting for keygen");
let sealed_key = runner
.get_init_response()
.map_err(|e| format!("failed to generate consensus key: {:?}", e))?;
config::write_sealed_file(
config.sealed_consensus_key_path,
&sealed_key.sealed_key_data,
)
.map_err(|e| format!("failed to write consensus key: {:?}", e))?;
let public_key =
ed25519_dalek::PublicKey::from_bytes(&sealed_key.sealed_key_data.seal_key_request.keyid)
.map_err(|e| format!("invalid keyid: {:?}", e))?;
print_pubkey(bech32_prefix, pubkey_display, public_key);
let base_backup_path = key_backup_data_path.unwrap_or_else(|| "".into());
if let Some(bkp) = sealed_key.cloud_backup_key_data {
config::write_backup_file(base_backup_path.join("consensus-key.backup"), &bkp)
.map_err(|e| format!("failed to write consensus key backup: {:?}", e))?;
}
if let Some(id_path) = config.sealed_id_key_path {
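// the optional id key comes from a second enclave run that re-uses the same
// keygen request (and cloud backup key, if one was provided)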
let (state_syncer, _, state_stream) =
TmkmsSgxSigner::get_state_syncer(&config.state_file_path)
.map_err(|e| format!("state persistence error: {:?}", e))?;
let runner = TmkmsSgxSigner::launch_enclave_app(
&config.enclave_path,
None,
state_syncer,
state_stream,
&enclave_args,
)
.map_err(|e| format!("failed to launch the enclave app: {:?}", e))?;
let sealed_key = runner
.get_init_response()
.map_err(|e| format!("failed to generate id key: {:?}", e))?;
config::write_sealed_file(id_path, &sealed_key.sealed_key_data)
.map_err(|e| format!("failed to write id key: {:?}", e))?;
if let Some(bkp) = sealed_key.cloud_backup_key_data {
config::write_backup_file(base_backup_path.join("id-key.backup"), &bkp)
.map_err(|e| format!("failed to write id key backup: {:?}", e))?;
}
}
Ok(())
}
/// Start up the enclave with Unix socket pairs for retrieving state updates and persisting them on the host
pub fn start(config_path: Option<PathBuf>) -> Result<(), String> {
let cp = config_path.unwrap_or_else(|| "tmkms.toml".into());
if !cp.exists() {
Err("missing tmkms.toml file".to_owned())
} else {
let toml_string = fs::read_to_string(cp)
.map_err(|e| format!("toml config file failed to read: {:?}", e))?;
let config: config::SgxSignOpt = toml::from_str(&toml_string)
.map_err(|e| format!("toml config file failed to parse: {:?}", e))?;
let tm_conn = match &config.address {
net::Address::Unix { path } => {
debug!(
"{}: Connecting to socket at {}...",
&config.chain_id, &config.address
);
Some(path.clone())
}
_ => None,
};
let remote = if let (None, Some(path)) = (&tm_conn, config.sealed_id_key_path) {
Some((config.address, path))
} else {
None
};
let (state_syncer, state, state_stream) =
TmkmsSgxSigner::get_state_syncer(&config.state_file_path)
.map_err(|e| format!("state persistence error: {:?}", e))?;
let start_request_bytes = TmkmsSgxSigner::get_start_request_bytes(
config.sealed_consensus_key_path,
ValidatorConfig {
chain_id: config.chain_id,
max_height: config.max_height,
},
state,
remote,
)
.map_err(|e| format!("failed to get enclave request: {:?}", e))?;
let runner = TmkmsSgxSigner::launch_enclave_app(
&config.enclave_path,
tm_conn,
state_syncer,
state_stream,
&[&start_request_bytes],
)
.map_err(|e| format!("failed to launch the enclave app: {:?}", e))?;
runner
.start()
.map_err(|e| format!("enclave running failed: {:?}", e))?;
Ok(())
}
}
/// recover the previously backed up id/consensus key (e.g. in cloud settings where
/// physical CPU-affinity isn't guaranteed)
pub fn recover(
config_path: Option<PathBuf>,
pubkey_display: Option<PubkeyDisplay>,
bech32_prefix: Option<String>,
external_backup_key_path: PathBuf,
key_backup_data_path: PathBuf,
recover_consensus_key: bool,
) -> Result<(), String> {
let cp = config_path.unwrap_or_else(|| "tmkms.toml".into());
if !cp.exists() {
Err("missing tmkms.toml file".to_owned())
} else {
let toml_string = fs::read_to_string(cp)
.map_err(|e| format!("toml config file failed to read: {:?}", e))?;
let config: config::SgxSignOpt = toml::from_str(&toml_string)
.map_err(|e| format!("toml config file failed to parse: {:?}", e))?;
if !recover_consensus_key && config.sealed_id_key_path.is_none() {
return Err("empty id key path in config".to_owned());
}
let backup_key = {
let key_bytes = Zeroizing::new(
fs::read(external_backup_key_path)
.map_err(|e| format!("failed to read backup key: {:?}", e))?,
);
if key_bytes.len() != CLOUD_KEY_LEN {
return Err("incorrect backup key length".to_owned());
}
Zeroizing::new(subtle_encoding::hex::encode(&*key_bytes))
};
let key_data = serde_json::from_str(
&fs::read_to_string(key_backup_data_path.join("consensus-key.backup"))
.map_err(|e| format!("failed to read backup data: {:?}", e))?,
)
.map_err(|e| format!("failed to parse backup data: {:?}", e))?;
let request = SgxInitRequest::CloudRecover { key_data };
let request_bytes = serde_json::to_vec(&request)
.map_err(|e| format!("failed to convert request to json: {:?}", e))?;
debug!("launching enclave");
let (state_syncer, _, state_stream) =
TmkmsSgxSigner::get_state_syncer(&config.state_file_path)
.map_err(|e| format!("state persistence error: {:?}", e))?;
let runner = TmkmsSgxSigner::launch_enclave_app(
&config.enclave_path, | state_syncer,
state_stream,
&[request_bytes.as_ref(), &*backup_key],
)
.map_err(|e| format!("failed to launch the enclave app: {:?}", e))?;
debug!("waiting for recover");
let sealed_key = runner
.get_init_response()
.map_err(|e| format!("failed to recover key: {:?}", e))?;
if recover_consensus_key {
config::write_sealed_file(
config.sealed_consensus_key_path,
&sealed_key.sealed_key_data,
)
.map_err(|e| format!("failed to write consensus key: {:?}", e))?;
let public_key = ed25519_dalek::PublicKey::from_bytes(
&sealed_key.sealed_key_data.seal_key_request.keyid,
)
.map_err(|e| format!("ivalid keyid: {:?}", e))?;
println!("recovered key");
print_pubkey(bech32_prefix, pubkey_display, public_key);
} else {
// checked above after config parsing
let id_path = config.sealed_id_key_path.unwrap();
config::write_sealed_file(id_path, &sealed_key.sealed_key_data)
.map_err(|e| format!("failed to write id key: {:?}", e))?;
}
Ok(())
}
} | None, |
test_dbu64.rs | //
// these tests implemente from:
// ref.) https://github.com/rust-lang/rust/blob/master/library/std/src/collections/hash/map/tests.rs
//
mod test_dbu64 {
use abyssiniandb::filedb::{FileDbParams, HashBucketsParam};
use abyssiniandb::{DbMap, DbXxx, DbXxxBase};
//use std::cell::RefCell;
//
#[test]
#[should_panic]
fn test_create_capacity_zero() {
let db_name = "target/tmp/test_dbu64/test_create_capacity_zero.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let _db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(0),
..Default::default()
},
)
.unwrap();
}
#[test]
fn test_insert() {
let db_name = "target/tmp/test_dbu64/test_insert.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
assert_eq!(db_map.len().unwrap(), 0);
db_map.put(&1, &[2]).unwrap();
assert_eq!(db_map.len().unwrap(), 1);
db_map.put(&2, &[4]).unwrap();
assert_eq!(db_map.len().unwrap(), 2);
//
assert_eq!(db_map.get(&1).unwrap(), Some(vec![2]));
assert_eq!(db_map.get(&2).unwrap(), Some(vec![4]));
}
#[test]
fn test_clone() {
let db_name = "target/tmp/test_dbu64/test_clone.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
assert_eq!(db_map.len().unwrap(), 0);
db_map.put(&1, &[2]).unwrap();
assert_eq!(db_map.len().unwrap(), 1);
db_map.put(&2, &[4]).unwrap();
assert_eq!(db_map.len().unwrap(), 2);
//
let mut db_map2 = db_map.clone();
//
assert_eq!(db_map2.get(&1).unwrap(), Some(vec![2]));
assert_eq!(db_map2.get(&2).unwrap(), Some(vec![4]));
assert_eq!(db_map2.len().unwrap(), 2);
}
/* #[test] fn test_empty_entry() {
let db_name = "target/tmp/test_dbu64/test_empty_entry.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
match db_map.entry(0) {
Occupied(_) => panic!(),
Vacant(_) => {}
}
assert!(*db_map.entry(0).or_insert(&[1]));
assert_eq!(db_map.len(), 1);
}
*/
#[test]
fn test_empty_iter() {
let db_name = "target/tmp/test_dbu64/test_empty_iter.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
assert_eq!(db_map.len().unwrap(), 0);
assert!(db_map.is_empty().unwrap());
//
//assert_eq!(db_map.drain().next(), None);
assert_eq!(db_map.keys().next(), None);
assert_eq!(db_map.values().next(), None);
//assert_eq!(db_map.values_mut().next(), None);
assert_eq!(db_map.iter().next(), None);
assert_eq!(db_map.iter_mut().next(), None);
//assert_eq!(db_map.into_iter().next(), None);
}
#[cfg(feature = "large_test")]
#[test]
fn test_lots_of_insertions() {
let db_name = "target/tmp/test_dbu64/test_lots_of_insertions.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(10000),
..Default::default()
},
)
.unwrap();
//
// Try this a few times to make sure we never screw up the hashmap's
// internal state.
for _ in 0..10 {
assert!(db_map.is_empty().unwrap());
//
for i in 1..1001 {
db_map.put(&i, &i.to_le_bytes()).unwrap();
//
for j in 1..=i {
let r = db_map.get(&j).unwrap();
assert_eq!(r, Some(j.to_le_bytes().to_vec()));
}
for j in i + 1..1001 {
let r = db_map.get(&j).unwrap();
assert_eq!(r, None);
}
}
for i in 1001..2001 {
assert!(!db_map.includes_key(&i).unwrap());
}
//
// remove forwards
for i in 1..1001 {
assert!(db_map.delete(&i).unwrap().is_some());
for j in 1..=i {
assert!(!db_map.includes_key(&j).unwrap());
}
for j in i + 1..1001 {
assert!(db_map.includes_key(&j).unwrap());
}
}
for i in 1..1001 {
assert!(!db_map.includes_key(&i).unwrap());
}
//
for i in 1..1001 {
db_map.put(&i, &i.to_le_bytes()).unwrap();
}
//
// remove backwards
for i in (1..1001).rev() {
assert!(db_map.delete(&i).unwrap().is_some());
for j in i..1001 {
assert!(!db_map.includes_key(&j).unwrap());
}
for j in 1..i {
assert!(db_map.includes_key(&j).unwrap());
}
}
}
}
/* #[test] fn test_find_mut() {
let mut m = HashMap::new();
assert!(m.insert(1, 12).is_none());
assert!(m.insert(2, 8).is_none());
assert!(m.insert(5, 14).is_none());
let new = 100;
match m.get_mut(&5) {
None => panic!(),
Some(x) => *x = new,
}
assert_eq!(m.get(&5), Some(&new));
}
*/
/* #[test] fn test_insert_overwrite() {
let mut m = HashMap::new();
assert!(m.insert(1, 2).is_none());
assert_eq!(*m.get(&1).unwrap(), 2);
assert!(!m.insert(1, 3).is_none());
assert_eq!(*m.get(&1).unwrap(), 3);
}
*/
#[test]
fn test_insert_conflicts() {
let db_name = "target/tmp/test_dbu64/test_insert_conflicts.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
db_map.put(&1, &[2_u8]).unwrap();
db_map.put(&5, &[3_u8]).unwrap();
db_map.put(&9, &[4_u8]).unwrap();
//
assert_eq!(db_map.get(&9).unwrap(), Some(vec![4_u8]));
assert_eq!(db_map.get(&5).unwrap(), Some(vec![3_u8]));
assert_eq!(db_map.get(&1).unwrap(), Some(vec![2_u8]));
}
#[test]
fn test_delete_conflicts() {
let db_name = "target/tmp/test_dbu64/test_delete_conflicts.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
db_map.put(&1, &[2_u8]).unwrap();
assert_eq!(db_map.get(&1).unwrap(), Some(vec![2_u8]));
//
db_map.put(&5, &[3_u8]).unwrap();
assert_eq!(db_map.get(&1).unwrap(), Some(vec![2_u8]));
assert_eq!(db_map.get(&5).unwrap(), Some(vec![3_u8]));
//
db_map.put(&9, &[4_u8]).unwrap();
assert_eq!(db_map.get(&1).unwrap(), Some(vec![2_u8]));
assert_eq!(db_map.get(&5).unwrap(), Some(vec![3_u8]));
assert_eq!(db_map.get(&9).unwrap(), Some(vec![4_u8]));
//
assert_eq!(db_map.delete(&1).unwrap(), Some(vec![2_u8]));
assert_eq!(db_map.get(&9).unwrap(), Some(vec![4_u8]));
assert_eq!(db_map.get(&5).unwrap(), Some(vec![3_u8]));
assert_eq!(db_map.get(&1).unwrap(), None);
}
#[test]
fn test_is_empty() {
let db_name = "target/tmp/test_dbu64/test_is_empty.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
db_map.put(&1, &[2_u8]).unwrap();
assert!(!db_map.is_empty().unwrap());
assert_eq!(db_map.delete(&1).unwrap(), Some(vec![2_u8]));
assert!(db_map.is_empty().unwrap());
}
#[test]
fn test_delete() {
let db_name = "target/tmp/test_dbu64/test_delete.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
db_map.put(&1, &[2_u8]).unwrap();
assert!(!db_map.is_empty().unwrap());
assert_eq!(db_map.delete(&1).unwrap(), Some(vec![2_u8]));
assert_eq!(db_map.delete(&1).unwrap(), None);
}
/* #[test] fn test_remove_entry() {
let mut m = HashMap::new();
m.insert(1, 2);
assert_eq!(m.remove_entry(&1), Some((1, 2)));
assert_eq!(m.remove(&1), None);
}
*/
#[test]
fn test_iterate() {
let db_name = "target/tmp/test_dbu64/test_iterate.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
for i in 0..32 {
db_map.put(&i, &[i as u8 * 2]).unwrap();
}
assert_eq!(db_map.len().unwrap(), 32);
//
let mut observed: u32 = 0;
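// each visited key sets its own bit; observing 0xFFFF_FFFF below means the
// iterator yielded all 32 keys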
for (k, v) in &db_map {
let n: u64 = k.into();
assert_eq!(v[0], n as u8 * 2);
observed |= 1 << n;
}
assert_eq!(observed, 0xFFFF_FFFF);
}
#[test]
fn test_keys() {
let db_name = "target/tmp/test_dbu64/test_keys.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
let xs = [(1, b'a'), (2, b'b'), (3, b'c')];
db_map
.put_from_iter(xs.iter().map(|&(k, v)| (k.into(), vec![v as u8])))
.unwrap();
assert_eq!(db_map.len().unwrap(), 3);
//
let keys: Vec<u64> = db_map.keys().map(|i| i.into()).collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_values() {
let db_name = "target/tmp/test_dbu64/test_values.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
let xs = [(1, b'a'), (2, b'b'), (3, b'c')];
db_map
.put_from_iter(xs.iter().map(|&(k, v)| (k.into(), vec![v as u8])))
.unwrap();
assert_eq!(db_map.len().unwrap(), 3);
//
let values: Vec<String> = db_map
.values()
.map(|v| String::from_utf8_lossy(&v).to_string())
.collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&"a".to_string()));
assert!(values.contains(&"b".to_string()));
assert!(values.contains(&"c".to_string()));
}
/* #[test] fn test_values_mut() {
let pairs = [(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = pairs.into_iter().collect();
for value in map.values_mut() {
*value = (*value) * 2
}
let values: Vec<_> = map.values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
assert!(values.contains(&6));
}
#[test]
fn test_into_keys() {
let pairs = [(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = pairs.into_iter().collect();
let keys: Vec<_> = map.into_keys().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_into_values() {
let pairs = [(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = pairs.into_iter().collect();
let values: Vec<_> = map.into_values().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
assert!(values.contains(&'c'));
}
*/
#[test]
fn test_find() {
let db_name = "target/tmp/test_dbu64/test_find.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
assert_eq!(db_map.get(&1).unwrap(), None);
db_map.put(&1, &[2_u8]).unwrap();
match db_map.get(&1).unwrap() {
None => panic!(),
Some(v) => assert_eq!(*v, vec![2_u8]),
}
}
/* #[test] fn test_eq() {
let db_name = "target/tmp/test_dbu64/test_find.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map1 = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
let mut db_map2 = db
.db_map_u64_with_params(
"some_u64_2",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
db_map1.put(&1, &[2_u8]).unwrap();
db_map1.put(&2, &[3_u8]).unwrap();
db_map1.put(&3, &[4_u8]).unwrap();
//
db_map2.put(&1, &[2_u8]).unwrap();
db_map2.put(&2, &[3_u8]).unwrap();
//
assert!(db_map1 != db_map2);
//
db_map2.put(&3, &[4_u8]).unwrap();
//
assert_eq!(db_map1, db_map2);
}
*/
/* #[test] fn test_show() {
let db_name = "target/tmp/test_dbu64/test_show.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map1 = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
let mut db_map2 = db
.db_map_u64_with_params(
"some_u64_2",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
db_map1.put(&1, &[2_u8]).unwrap();
db_map1.put(&3, &[4_u8]).unwrap();
//
let map_str = format!("{:?}", db_map1);
assert_eq!(map_str, "");
assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}");
assert_eq!(format!("{:?}", db_map2), "{}");
}
*/
/* #[test] fn test_reserve_shrink_to_fit() {
let mut m = HashMap::new();
m.insert(0, 0);
m.remove(&0);
assert!(m.capacity() >= m.len());
for i in 0..128 {
m.insert(i, i);
}
m.reserve(256);
let usable_cap = m.capacity();
for i in 128..(128 + 256) {
m.insert(i, i);
assert_eq!(m.capacity(), usable_cap);
}
for i in 100..(128 + 256) {
assert_eq!(m.remove(&i), Some(i));
}
m.shrink_to_fit();
assert_eq!(m.len(), 100);
assert!(!m.is_empty());
assert!(m.capacity() >= m.len());
for i in 0..100 {
assert_eq!(m.remove(&i), Some(i));
}
m.shrink_to_fit();
m.insert(0, 0);
assert_eq!(m.len(), 1);
assert!(m.capacity() >= m.len());
assert_eq!(m.remove(&0), Some(0));
}
*/
#[test]
fn test_put_from_iter() |
#[test]
fn test_size_hint() {
let db_name = "target/tmp/test_dbu64/test_size_hint.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
db_map
.put_from_iter(xs.iter().map(|&(k, v)| (k.into(), vec![v as u8])))
.unwrap();
assert_eq!(db_map.len().unwrap(), 6);
//
let mut iter = db_map.iter();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_iter_len() {
let db_name = "target/tmp/test_dbu64/test_iter_len.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
db_map
.put_from_iter(xs.iter().map(|&(k, v)| (k.into(), vec![v as u8])))
.unwrap();
assert_eq!(db_map.len().unwrap(), 6);
//
let mut iter = db_map.iter();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.len(), 3);
}
/* #[test] fn test_mut_size_hint() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let mut map: HashMap<_, _> = xs.iter().cloned().collect();
let mut iter = map.iter_mut();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.size_hint(), (3, Some(3)));
}
*/
/* #[test] fn test_iter_mut_len() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let mut map: HashMap<_, _> = xs.iter().cloned().collect();
let mut iter = map.iter_mut();
for _ in iter.by_ref().take(3) {}
assert_eq!(iter.len(), 3);
}
*/
/* #[test] fn test_index() {
let mut map = HashMap::new();
map.insert(1, 2);
map.insert(2, 1);
map.insert(3, 4);
assert_eq!(map[&2], 1);
}
*/
}
| {
let db_name = "target/tmp/test_dbu64/test_from_iter.abyssiniandb";
let _ = std::fs::remove_dir_all(db_name);
let db = abyssiniandb::open_file(db_name).unwrap();
let mut db_map = db
.db_map_u64_with_params(
"some_u64_1",
FileDbParams {
buckets_size: HashBucketsParam::Capacity(4),
..Default::default()
},
)
.unwrap();
//
let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
db_map
.put_from_iter(xs.iter().map(|&(k, v)| (k.into(), vec![v as u8])))
.unwrap();
assert_eq!(db_map.len().unwrap(), 6);
//
for &(k, v) in &xs {
assert_eq!(db_map.get(&k).unwrap(), Some(vec![v as u8]));
}
assert_eq!(db_map.len().unwrap() as usize, xs.len() - 1);
} |
mod.rs | //! Collectors will receive events from the contextual shard, check if the
//! filter lets them pass, and collect them if the receive, collect, or time limits
//! are not reached yet.
#[cfg(feature = "unstable_discord_api")]
pub mod component_interaction_collector;
pub mod message_collector;
pub mod reaction_collector;
use std::sync::Arc;
#[cfg(feature = "unstable_discord_api")]
pub use component_interaction_collector::*;
pub use message_collector::*;
pub use reaction_collector::*;
/// Wraps a &T and clones the value into an Arc<T> lazily. Used with collectors to allow inspecting
/// the value in filters while only cloning values that actually match.
#[derive(Debug)]
pub(crate) struct LazyArc<'a, T> {
value: &'a T,
arc: Option<Arc<T>>,
}
impl<'a, T: Clone> LazyArc<'a, T> {
pub fn new(value: &'a T) -> Self |
pub fn as_arc(&mut self) -> Arc<T> {
let value = self.value;
self.arc.get_or_insert_with(|| Arc::new(value.clone())).clone()
}
}
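// A minimal usage sketch (illustrative only; the module and test names below
// are not part of the original code): filters inspect the borrowed value
// through `Deref`, and the `Arc` is only allocated once `as_arc` is called
// for a value that actually matches.
#[cfg(test)]
mod lazy_arc_sketch {
    use super::LazyArc;

    #[test]
    fn clones_only_on_demand() {
        let message = String::from("hello");
        let mut lazy = LazyArc::new(&message);
        // Inspect through `Deref` without cloning, as a filter would.
        assert_eq!(lazy.len(), 5);
        // Only now is the value cloned into an `Arc`.
        let shared = lazy.as_arc();
        assert_eq!(*shared, "hello");
    }
}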
impl<'a, T> std::ops::Deref for LazyArc<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.value
}
}
| {
LazyArc {
value,
arc: None,
}
} |
executor.rs | //! Async executor
use crate::custom_event::RuffleEvent;
use crate::task::Task;
use generational_arena::{Arena, Index};
use glutin::event_loop::EventLoopProxy;
use ruffle_core::backend::navigator::{Error, OwnedFuture};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex, Weak};
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
/// Executor context passed to event sources.
///
/// All task handles are identical and interchangeable. Cloning a `TaskHandle`
/// does not clone the underlying task.
#[derive(Clone)]
struct TaskHandle {
/// The arena handle for a given task.
handle: Index,
/// The executor the task belongs to.
executor: Arc<Mutex<GlutinAsyncExecutor>>,
}
impl TaskHandle {
/// Construct a handle to a given task.
fn for_task(task: Index, executor: Arc<Mutex<GlutinAsyncExecutor>>) -> Self {
Self {
handle: task,
executor,
}
}
/// Construct a new `RawWaker` for this task handle.
///
/// This function clones the underlying task handle.
fn raw_waker(&self) -> RawWaker {
let clone = Box::new(self.clone());
RawWaker::new(Box::into_raw(clone) as *const (), &Self::VTABLE)
}
/// Construct a new waker for this task handle.
fn waker(&self) -> Waker {
unsafe { Waker::from_raw(self.raw_waker()) }
}
/// Wake the task this context refers to.
fn wake(&self) {
self.executor
.lock()
.expect("able to lock executor")
.wake(self.handle);
}
    /// Convert a voidptr into a `TaskHandle` reference, if non-null.
///
/// This function is unsafe because the pointer can refer to any resource
/// in memory. It also can belong to any lifetime. Use of this function on
    /// a pointer *not* ultimately derived from a `TaskHandle` in memory
/// constitutes undefined behavior.
unsafe fn from_const_ptr<'a>(almost_self: *const ()) -> Option<&'a Self> {
if almost_self.is_null() {
return None;
}
Some(&*(almost_self as *const Self))
}
    /// Convert a voidptr into an owned, boxed `TaskHandle`, if non-null.
///
/// This function is unsafe because the pointer can refer to any resource
/// in memory. It also can belong to any lifetime. Use of this function on
    /// a pointer *not* ultimately derived from a `TaskHandle` in memory
/// constitutes undefined behavior.
///
    /// It is additionally unsound to call this function while other
/// references to the same `TaskHandle` exist.
unsafe fn box_from_const_ptr(almost_self: *const ()) -> Option<Box<Self>> {
if almost_self.is_null() {
return None;
}
Some(Box::from_raw(almost_self as *mut Self))
}
/// Construct a new `RawWaker` that wakes the same task.
///
/// This is part of the vtable methods of our `RawWaker` impl.
unsafe fn clone_as_ptr(almost_self: *const ()) -> RawWaker |
/// Wake the given task, then drop it.
unsafe fn wake_as_ptr(almost_self: *const ()) {
let selfish = TaskHandle::box_from_const_ptr(almost_self).expect("non-null context ptr");
selfish.wake();
}
/// Wake the given task.
unsafe fn wake_by_ref_as_ptr(almost_self: *const ()) {
let selfish = TaskHandle::from_const_ptr(almost_self).expect("non-null context ptr");
selfish.wake();
}
    /// Drop the boxed task handle behind the given pointer.
unsafe fn drop_as_ptr(almost_self: *const ()) {
let _ = TaskHandle::box_from_const_ptr(almost_self).expect("non-null context ptr");
}
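    // The four helpers above are wired into the raw-waker vtable below:
    // `clone_as_ptr` allocates a fresh boxed handle, `wake_as_ptr` consumes
    // its box after waking, `wake_by_ref_as_ptr` wakes without taking
    // ownership, and `drop_as_ptr` releases the box without waking.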
const VTABLE: RawWakerVTable = RawWakerVTable::new(
Self::clone_as_ptr,
Self::wake_as_ptr,
Self::wake_by_ref_as_ptr,
Self::drop_as_ptr,
);
}
pub struct GlutinAsyncExecutor {
/// List of all spawned tasks.
task_queue: Arena<Task>,
/// Source of tasks sent to us by the `NavigatorBackend`.
channel: Receiver<OwnedFuture<(), Error>>,
/// Weak reference to ourselves.
self_ref: Weak<Mutex<Self>>,
/// Event injector for the main thread event loop.
event_loop: EventLoopProxy<RuffleEvent>,
/// Whether or not we have already queued a `TaskPoll` event.
waiting_for_poll: bool,
}
impl GlutinAsyncExecutor {
/// Construct a new executor for the Glutin event loop.
///
/// This function returns the executor itself, plus the `Sender` necessary
/// to spawn new tasks.
pub fn new(
event_loop: EventLoopProxy<RuffleEvent>,
) -> (Arc<Mutex<Self>>, Sender<OwnedFuture<(), Error>>) {
let (send, recv) = channel();
let new_self = Arc::new(Mutex::new(Self {
task_queue: Arena::new(),
channel: recv,
self_ref: Weak::new(),
event_loop,
waiting_for_poll: false,
}));
let self_ref = Arc::downgrade(&new_self);
new_self.lock().expect("locked self").self_ref = self_ref;
(new_self, send)
}
/// Poll all `Ready` futures.
pub fn poll_all(&mut self) {
self.waiting_for_poll = false;
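        // Drain any futures handed over by the navigator backend and register
        // them as tasks before polling.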
while let Ok(fut) = self.channel.try_recv() {
self.task_queue.insert(Task::from_future(fut));
}
let self_ref = self.self_ref.upgrade().expect("active self-reference");
let mut completed_tasks = vec![];
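        // Poll every ready task and remember the finished ones so they can be
        // removed once iteration is over.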
for (index, task) in self.task_queue.iter_mut() {
if task.is_ready() {
let handle = TaskHandle::for_task(index, self_ref.clone());
let waker = handle.waker();
let mut context = Context::from_waker(&waker);
match task.poll(&mut context) {
Poll::Pending => {}
Poll::Ready(r) => {
if let Err(e) = r {
log::error!("Async error: {}", e);
}
completed_tasks.push(index);
}
}
}
}
for index in completed_tasks {
self.task_queue.remove(index);
}
}
/// Mark a task as ready to proceed.
fn wake(&mut self, task: Index) {
if let Some(task) = self.task_queue.get_mut(task) {
if !task.is_completed() {
if !self.waiting_for_poll {
self.waiting_for_poll = true;
if self.event_loop.send_event(RuffleEvent::TaskPoll).is_err() {
log::warn!("A task was queued on an event loop that has already ended. It will not be polled.");
}
} else {
log::info!("Double polling");
}
} else {
log::warn!("A Waker was invoked after the task it was attached to was completed.");
}
} else {
log::warn!("Attempted to wake an already-finished task");
}
}
}
| {
let selfish = TaskHandle::from_const_ptr(almost_self).expect("non-null context ptr");
selfish.raw_waker()
} |
rose_build_bytecode.rs | use super::{RoseBuild, RoseResources, RoseRuntimeImpl};
use crate::util::CompileContext; | #[derive(Default)]
pub(super) struct BuildContext {
/// Resources in use (tracked as programs are added).
pub(super) resources: RoseResources,
}
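/// A build is "pure floating" when the only runtime work left is scanning the
/// floating literal table: no anchored matcher, no outfixes, suffixes or
/// leftfixes, no EOD processing, no stored state, no literal delay, no literal
/// checks while streaming, and no group checks.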
fn is_pure_floating(resources: &RoseResources, cc: &CompileContext) -> bool {
if !resources.has_floating {
return false;
}
if resources.has_outfixes || resources.has_suffixes || resources.has_leftfixes {
return false;
}
if resources.has_anchored {
return false;
}
if resources.has_eod {
return false;
}
if resources.has_states {
return false;
}
if resources.has_lit_delay {
return false;
}
if cc.streaming && resources.has_lit_check {
return false;
}
if resources.checks_groups {
return false;
}
true
}
fn is_single_outfix(_tbi: &RoseBuild) -> bool {
false
}
pub(super) fn pick_runtime_impl(build: &RoseBuild, resources: &RoseResources) -> RoseRuntimeImpl {
if is_pure_floating(resources, build.cc) {
return RoseRuntimeImpl::PureLiteral;
}
if is_single_outfix(build) {
return RoseRuntimeImpl::SingleOutfix;
}
RoseRuntimeImpl::FullRose
}
pub(super) struct DerivedBoundaryReports {} | |
api.py | #Django Imports
from django.conf import settings
#Python Imports
import requests, os
#Local Imports
from .at_utils import AfricasTalkingException
#Import Africa's Talking Settings
AFRICAS_TALKING_SETTINGS = getattr(settings,'AFRICAS_TALKING',{})
API_KEY = AFRICAS_TALKING_SETTINGS.get('API_KEY',None)
USERNAME = AFRICAS_TALKING_SETTINGS.get('USERNAME',None)
SHORTCODE = AFRICAS_TALKING_SETTINGS.get('SHORTCODE',None)
AFRICAS_TALKING_SEND = AFRICAS_TALKING_SETTINGS.get('SEND',False)
AFRICAS_TALKING_API_BASE = 'http://api.africastalking.com/version1'
HEADERS = {'Accept': 'application/json','apikey':API_KEY}
PARAMS = {'username':USERNAME,'bulkSMSMode':1}
if SHORTCODE:
PARAMS['from'] = SHORTCODE
def send_raw(to,message):
if not AFRICAS_TALKING_SEND:
raise AfricasTalkingException("Africas Talking called when send not set to True")
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'to':to,'message':message}
params.update(PARAMS)
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
post = requests.post(send_url,data=params,headers=HEADERS)
#Raise requests.exceptions.HTTPError if 4XX or 5XX
post.raise_for_status()
return post.json()
def send(to,message):
data = send_raw(to,message)
'''
Example of JSON Response
{u'SMSMessageData':
{u'Message': u'Sent to 1/1 Total Cost: USD 0.0109',
u'Recipients': [{
u'status': u'Success', #u'status': u'Invalid Phone Number',
u'cost': u'KES 1.0000',
u'number': u'+254708054321',
u'messageId': u'ATXid_b50fada5b1af078f2277cacb58ef2447'
}] | '''
# Return tuple (messageId, messageSuccess, extra_data)
recipients = data['SMSMessageData']['Recipients']
if len(recipients) == 1:
msg_id = recipients[0]['messageId']
msg_success = recipients[0]['status'] == 'Success'
return msg_id, msg_success, {'status':recipients[0]['status']}
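# Illustrative usage only (the number and text below are placeholders; requires
# the AFRICAS_TALKING settings above with SEND set to True):
#   msg_id, ok, extra = send('+254700000000', 'Appointment reminder')
#   print(msg_id, ok, extra['status'])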
def balance():
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'username':USERNAME}
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'user')
post = requests.get(send_url,params=params,headers=HEADERS)
#Raise requests.exceptions.HTTPError if 4XX or 5XX
post.raise_for_status()
data = post.json()
return data['UserData']['balance']
def fetch(last_received_id=0):
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'username':USERNAME,'lastReceivedId':last_received_id}
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
post = requests.get(send_url,params=params,headers=HEADERS)
return post | }
} |
OCPPCommonTests.ts | import { ChargePointErrorCode, ChargePointStatus, OCPP15TransactionData, OCPPAuthorizationStatus, OCPPMeterValue, OCPPReadingContext, OCPPStatusNotificationRequest, OCPPVersion } from '../../src/types/ocpp/OCPPServer';
import Transaction, { InactivityStatus } from '../../src/types/Transaction';
import chai, { expect } from 'chai';
import CentralServerService from './client/CentralServerService';
import ChargingStationContext from './context/ChargingStationContext';
import Constants from '../../src/utils/Constants';
import Factory from '../factories/Factory';
import { OCPPStatus } from '../../src/types/ocpp/OCPPClient';
import { PricingSettingsType } from '../../src/types/Setting';
import { StatusCodes } from 'http-status-codes';
import Tag from '../../src/types/Tag';
import TenantContext from './context/TenantContext';
import User from '../../src/types/User';
import Utils from '../../src/utils/Utils';
import chaiSubset from 'chai-subset';
import { fail } from 'assert';
import faker from 'faker';
import moment from 'moment';
import responseHelper from '../helpers/responseHelper';
chai.use(chaiSubset);
chai.use(responseHelper);
export default class | {
public tenantContext: TenantContext;
public chargingStationContext: ChargingStationContext;
public centralUserContext: any;
public centralUserService: CentralServerService;
public currentPricingSetting;
public pricekWh = 2;
public chargingStationConnector1: OCPPStatusNotificationRequest;
public chargingStationConnector2: OCPPStatusNotificationRequest;
public transactionStartUser;
public transactionStartUserService: CentralServerService;
public transactionStopUser;
public energyActiveImportStartMeterValue: number;
public energyActiveImportEndMeterValue: number;
public energyActiveImportMeterValues: number[];
public socMeterValues: number[];
public powerImportMeterValues: number[];
public powerImportL1MeterValues: number[];
public powerImportL2MeterValues: number[];
public powerImportL3MeterValues: number[];
public voltageMeterValues: number[];
public voltageL1MeterValues: number[];
public voltageL2MeterValues: number[];
public voltageL3MeterValues: number[];
public amperageMeterValues: number[];
public amperageL1MeterValues: number[];
public amperageL2MeterValues: number[];
public amperageL3MeterValues: number[];
public transactionStartSignedData: string;
public transactionEndSignedData: string;
public totalInactivities: number[];
public meterValueIntervalSecs: number;
public transactionStartTime: Date;
public transactionTotalConsumptionWh: number;
public transactionTotalInactivitySecs: number;
public totalPrice: number;
public newTransaction: Transaction;
public transactionCurrentTime: Date;
public createAnyUser = false;
public numberTag: number;
public validTag: string;
public invalidTag: string;
public anyUser: User;
public anyTag: Tag;
public createdUsers: User[] = [];
public createdTags: Tag[] = [];
public constructor(tenantContext: TenantContext, centralUserContext, createAnyUser = false) {
expect(tenantContext).to.exist;
this.tenantContext = tenantContext;
this.centralUserContext = centralUserContext;
expect(centralUserContext).to.exist;
// Avoid double login for identical user contexts
const centralAdminUserService = this.tenantContext.getAdminCentralServerService();
if (this.centralUserContext.email === centralAdminUserService.getAuthenticatedUserEmail()) {
this.centralUserService = centralAdminUserService;
} else {
this.centralUserService = new CentralServerService(this.tenantContext.getTenant().subdomain, this.centralUserContext);
}
this.createAnyUser = createAnyUser;
}
public setChargingStation(chargingStationContext) {
expect(chargingStationContext).to.exist;
this.chargingStationContext = chargingStationContext;
}
public setUsers(startUserContext, stopUserContext?) {
expect(startUserContext).to.exist;
this.transactionStartUser = startUserContext;
if (stopUserContext) {
this.transactionStopUser = stopUserContext;
} else {
this.transactionStopUser = this.transactionStartUser;
}
// Avoid double login for identical user contexts
if (this.transactionStartUser === this.centralUserContext) {
this.transactionStartUserService = this.centralUserService;
} else {
this.transactionStartUserService = new CentralServerService(
this.tenantContext.getTenant().subdomain, this.transactionStartUser);
}
}
public async assignAnyUserToSite(siteContext) {
expect(siteContext).to.exist;
if (this.anyUser) {
await this.centralUserService.siteApi.addUsersToSite(siteContext.getSite().id, [this.anyUser.id]);
}
}
public async before() {
const allSettings = await this.centralUserService.settingApi.readAll({});
this.currentPricingSetting = allSettings.data.result.find((s) => s.identifier === 'pricing');
if (this.currentPricingSetting) {
await this.centralUserService.updatePriceSetting(this.pricekWh, 'EUR');
}
// Default Connector values
this.chargingStationConnector1 = {
connectorId: 1,
status: ChargePointStatus.AVAILABLE,
errorCode: ChargePointErrorCode.NO_ERROR,
timestamp: new Date().toISOString()
};
this.chargingStationConnector2 = {
connectorId: 2,
status: ChargePointStatus.AVAILABLE,
errorCode: ChargePointErrorCode.NO_ERROR,
timestamp: new Date().toISOString()
};
// Set meter value start
this.energyActiveImportStartMeterValue = 0;
this.meterValueIntervalSecs = 60;
// eslint-disable-next-line no-useless-escape
this.transactionStartSignedData = '<?xml version=\"1.0\" encoding=\"UTF-8\" ?><signedMeterValue> <publicKey encoding=\"base64\">8Y5UzWD+TZeMKBDkKLpHhwzSfGsnCvo00ndCXv/LVRD5pAVtRZEA49bqpr/DY3KL</publicKey> <meterValueSignature encoding=\"base64\">wQdZJR1CLRe+QhS3C+kHpkfVL4hqPhc8YIt/+4uHBBb9N6JNygltdEhYufTfaM++AJ8=</meterValueSignature> <signatureMethod>ECDSA192SHA256</signatureMethod> <encodingMethod>EDL</encodingMethod> <encodedMeterValue encoding=\"base64\">CQFFTUgAAH+eoQxVP10I4Zf9ACcAAAABAAERAP8e/5KqWwEAAAAAAJ9sYQoCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtVP10AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=</encodedMeterValue></signedMeterValue>';
// eslint-disable-next-line no-useless-escape
this.transactionEndSignedData = '<?xml version=\"1.0\" encoding=\"UTF-8\" ?><signedMeterValue> <publicKey encoding=\"base64\">8Y5UzWD+TZeMKBDkKLpHhwzSfGsnCvo00ndCXv/LVRD5pAVtRZEA49bqpr/DY3KL</publicKey> <meterValueSignature encoding=\"base64\">GChPf/f+0Rw6DDWI0mujec6dOMDqm5cuCLXdEVV6MRua6OVqcHNP85q7K70tRPJKAJ8=</meterValueSignature> <signatureMethod>ECDSA192SHA256</signatureMethod> <encodingMethod>EDL</encodingMethod> <encodedMeterValue encoding=\"base64\">CQFFTUgAAH+eodYDQF0IrEb+ACgAAAABAAERAP8e/8OtYQEAAAAAAJ9sYQoCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtVP10AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=</encodedMeterValue></signedMeterValue>';
// Energy Import Meter Value (14 values)
this.energyActiveImportMeterValues = Array.from({ length: 12 }, () => faker.datatype.number({
min: 200, max: 500
})).concat([0, 0]);
// SoC Meter Value (14 values)
this.socMeterValues = Array.from({ length: 8 }, () => faker.datatype.number({
min: 10, max: 90
})).concat([8, 8, 98, 99, 100, 100]).sort((a, b) => (a - b));
// Voltage (14 values)
this.voltageMeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 220, max: 240
}));
this.voltageL1MeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 220, max: 240
}));
this.voltageL2MeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 220, max: 240
}));
this.voltageL3MeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 220, max: 240
}));
// Amperage (14 values)
this.amperageL1MeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 16, max: 32
}));
this.amperageL2MeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 16, max: 32
}));
this.amperageL3MeterValues = Array.from({ length: 14 }, () => faker.datatype.number({
min: 16, max: 32
}));
this.amperageMeterValues = [];
for (let i = 0; i < this.amperageL1MeterValues.length; i++) {
this.amperageMeterValues.push(this.amperageL1MeterValues[i] + this.amperageL2MeterValues[i] + this.amperageL3MeterValues[i]);
}
// Power Import (14 values)
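    // Expected instant power for each sample is the product of the sampled
    // voltage and amperage, computed for the total and for each phase.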
this.powerImportMeterValues = [];
for (let i = 0; i < this.amperageMeterValues.length; i++) {
this.powerImportMeterValues.push(
this.amperageMeterValues[i] * this.voltageMeterValues[i]);
}
this.powerImportL1MeterValues = [];
for (let i = 0; i < this.amperageL1MeterValues.length; i++) {
this.powerImportL1MeterValues.push(
this.amperageL1MeterValues[i] * this.voltageL1MeterValues[i]);
}
this.powerImportL2MeterValues = [];
for (let i = 0; i < this.amperageL2MeterValues.length; i++) {
this.powerImportL2MeterValues.push(
this.amperageL2MeterValues[i] * this.voltageL2MeterValues[i]);
}
this.powerImportL3MeterValues = [];
for (let i = 0; i < this.amperageL3MeterValues.length; i++) {
this.powerImportL3MeterValues.push(
this.amperageL3MeterValues[i] * this.voltageL3MeterValues[i]);
}
// Total Inactivity (14 values)
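    // Every interval with zero consumption adds one full meter value interval
    // to the cumulated inactivity expected at that index.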
this.totalInactivities = [];
let lastInactivity = 0;
for (let i = 0; i < this.energyActiveImportMeterValues.length; i++) {
lastInactivity += (this.energyActiveImportMeterValues[i] === 0 ? this.meterValueIntervalSecs : 0);
this.totalInactivities.push(lastInactivity);
}
// Meter Values params
this.transactionStartTime = moment().subtract(this.energyActiveImportMeterValues.length * this.meterValueIntervalSecs + 1, 'seconds').toDate();
this.transactionTotalConsumptionWh = this.energyActiveImportMeterValues.reduce((sum, meterValue) => sum + meterValue);
this.energyActiveImportEndMeterValue = this.energyActiveImportStartMeterValue + this.transactionTotalConsumptionWh;
this.transactionTotalInactivitySecs = this.energyActiveImportMeterValues.reduce(
(sum, meterValue) => (meterValue === 0 ? sum + this.meterValueIntervalSecs : sum), 0);
// Tags
this.validTag = faker.random.alphaNumeric(20).toString();
this.invalidTag = faker.random.alphaNumeric(21).toString();
this.numberTag = faker.datatype.number(10000);
if (this.createAnyUser) {
this.anyUser = await this.createUser(Factory.user.build());
if (!this.createdUsers) {
this.createdUsers = [];
}
this.createdUsers.push(this.anyUser);
if (!this.createdTags) {
this.createdTags = [];
}
this.anyTag = (await this.createTag(Factory.tag.build({ id: this.validTag, userID: this.anyUser.id }))).data;
this.createdTags.push(this.anyTag);
this.anyTag = (await this.createTag(Factory.tag.build({ id: this.invalidTag, userID: this.anyUser.id }))).data;
this.createdTags.push(this.anyTag);
this.anyTag = (await this.createTag(Factory.tag.build({ id: this.numberTag.toString(), userID: this.anyUser.id }))).data;
this.createdTags.push(this.anyTag);
}
}
public async after() {
if (this.currentPricingSetting) {
await this.centralUserService.settingApi.update(this.currentPricingSetting);
}
if (this.createdUsers && Array.isArray(this.createdUsers)) {
for (const user of this.createdUsers) {
await this.centralUserService.deleteEntity(
this.centralUserService.userApi, user);
}
}
if (this.createdTags && Array.isArray(this.createdTags)) {
for (const tag of this.createdTags) {
await this.centralUserService.userApi.deleteTag(tag.id);
}
}
}
public async testConnectorStatus() {
let response = await this.chargingStationContext.setConnectorStatus(this.chargingStationConnector1);
expect(response).to.eql({});
response = await this.chargingStationContext.setConnectorStatus(this.chargingStationConnector2);
expect(response).to.eql({});
// Warning: connector status is always 'Unavailable' if too much time has passed since last seen!
response = await this.chargingStationContext.sendHeartbeat();
// Now we can test the connector status!
const foundChargingStation = await this.chargingStationContext.readChargingStation();
expect(foundChargingStation.status).to.equal(StatusCodes.OK);
expect(foundChargingStation.data.id).is.eql(this.chargingStationContext.getChargingStation().id);
// Check
expect(foundChargingStation.data.connectors).to.not.be.null;
expect(foundChargingStation.data.connectors[0]).to.include({
status: this.chargingStationConnector1.status,
errorCode: this.chargingStationConnector1.errorCode
});
expect(foundChargingStation.data.connectors[1]).to.include({
status: this.chargingStationConnector2.status,
errorCode: this.chargingStationConnector2.errorCode
});
}
public async testChangeConnectorStatus() {
// Set it to Occupied
this.chargingStationConnector1.status = ChargePointStatus.OCCUPIED;
this.chargingStationConnector1.timestamp = new Date().toISOString();
// Update
let response = await this.chargingStationContext.setConnectorStatus(this.chargingStationConnector1);
// Check
expect(response).to.eql({});
    // To be sure, send a heartbeat
response = await this.chargingStationContext.sendHeartbeat();
// Check the connectors
const foundChargingStation = await this.chargingStationContext.readChargingStation();
expect(foundChargingStation.status).to.equal(StatusCodes.OK);
expect(foundChargingStation.data.id).is.eql(this.chargingStationContext.getChargingStation().id);
// Check Connector 1
expect(foundChargingStation.data.connectors[0]).to.include({
status: this.chargingStationConnector1.status,
errorCode: this.chargingStationConnector1.errorCode
});
    // Connector 2 should still be ChargePointStatus.AVAILABLE
expect(foundChargingStation.data.connectors[1]).to.include({
status: this.chargingStationConnector2.status,
errorCode: this.chargingStationConnector2.errorCode
});
// Reset Status of Connector 1
this.chargingStationConnector1.status = ChargePointStatus.AVAILABLE;
this.chargingStationConnector1.timestamp = new Date().toISOString();
// Update
response = await this.chargingStationContext.setConnectorStatus(this.chargingStationConnector1);
// Check
expect(response).to.eql({});
}
public async testHeartbeat() {
// Update Status of Connector 1
const response = await this.chargingStationContext.sendHeartbeat();
// Check
expect(response).to.have.property('currentTime');
}
public async testClientIP() {
// Read charging station
const response = await this.chargingStationContext.readChargingStation();
// Check the presence of the IP
expect(response.data).to.have.property('currentIPAddress');
expect(response.data.currentIPAddress).to.not.be.empty;
}
public async testDataTransfer() {
// Check
const response = await this.chargingStationContext.transferData({
'vendorId': 'Schneider Electric',
'messageId': 'Detection loop',
'data': '{\\"connectorId\\":2,\\"name\\":\\"Vehicle\\",\\"state\\":\\"0\\",\\"timestamp\\":\\"2018-08-08T10:21:11Z:\\"}',
});
// Check
expect(response).to.have.property('status');
expect(response.status).to.equal(OCPPStatus.ACCEPTED);
}
public async testChargingStationRegistrationWithInvalidToken() {
try {
await this.chargingStationContext.sendBootNotification();
fail('BootNotification should fail');
} catch (error) {
expect(error).to.be.not.null;
}
}
public async testChargingStationRegistrationWithInvalidIdentifier() {
try {
await this.chargingStationContext.sendBootNotification();
fail('BootNotification should fail');
} catch (error) {
expect(error).to.be.not.null;
}
}
public async testAuthorizeUsers() {
// Asserts that the start user is authorized.
await this.testAuthorize(this.transactionStartUser.tags[0].id, OCPPStatus.ACCEPTED);
// Asserts that the stop user is authorized.
await this.testAuthorize(this.transactionStopUser.tags[0].id, OCPPStatus.ACCEPTED);
// Asserts that the user with a too long tag is not authorized.
await this.testAuthorize('ThisIsATooTooTooLongTag', OCPPAuthorizationStatus.INVALID);
}
public async testStartTransaction(validTransaction = true) {
// Start a new Transaction
const startTransactionResponse = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.transactionStartUser.tags[0].id,
this.energyActiveImportStartMeterValue,
this.transactionStartTime
);
if (validTransaction) {
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(startTransactionResponse).to.be.transactionValid;
const transactionId = startTransactionResponse.transactionId;
await this.validateStartedTransaction(
startTransactionResponse,
this.chargingStationConnector1,
this.energyActiveImportStartMeterValue,
this.transactionStartTime);
this.newTransaction = (await this.centralUserService.transactionApi.readById(transactionId)).data;
expect(this.newTransaction).to.not.be.null;
const chargingStationResponse = await this.chargingStationContext.readChargingStation(this.transactionStartUserService);
expect(chargingStationResponse.status).eq(StatusCodes.OK);
expect(chargingStationResponse.data).not.null;
const connector = chargingStationResponse.data.connectors[this.chargingStationConnector1.connectorId - 1];
expect(connector).not.null;
expect(connector.currentTransactionID).eq(transactionId);
expect(connector.currentTransactionDate).eq(this.transactionStartTime.toISOString());
expect(connector.currentTagID).eq(this.transactionStartUser.tags[0].id);
} else {
this.newTransaction = null;
expect(startTransactionResponse).to.be.transactionStatus(OCPPAuthorizationStatus.INVALID);
}
}
public async testStartSecondTransaction(withSoC = false) {
// Check on current transaction
expect(this.newTransaction).to.not.be.null;
// Set
const transactionId = this.newTransaction.id;
this.transactionStartTime = moment().subtract(1, 'h').toDate();
// Clear old one
this.newTransaction = null;
// Start the 2nd Transaction
const startTransactionResponse = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.transactionStartUser.tags[0].id,
this.energyActiveImportStartMeterValue,
this.transactionStartTime
);
const secondTransactionId = startTransactionResponse.transactionId;
await this.validateStartedTransaction(
startTransactionResponse,
this.chargingStationConnector1,
this.energyActiveImportStartMeterValue,
this.transactionStartTime);
// Check if the Transaction exists
this.newTransaction = (await this.centralUserService.transactionApi.readById(secondTransactionId)).data;
// Check
expect(this.newTransaction).to.not.be.null;
expect(this.newTransaction.id).to.not.equal(transactionId);
}
public async testRemoteStartTransactionWithNoBadge() {
const response = await this.centralUserService.chargingStationApi.remoteStartTransaction({
'chargingStationID': this.chargingStationContext.getChargingStation().id,
'args': {
'connectorId': this.chargingStationContext.getChargingStation().connectors[0].connectorId
}
});
expect(response.status).to.equal(570);
}
public async testRemoteStartTransactionWithExternalUser() {
const response = await this.centralUserService.chargingStationApi.remoteStartTransaction({
'chargingStationID': this.chargingStationContext.getChargingStation().id,
'args': {
'visualTagID': this.transactionStartUser.tags[0].visualID,
'connectorId': this.chargingStationContext.getChargingStation().connectors[0].connectorId,
'userID': this.transactionStartUser.id
}
});
expect(response.status).to.equal(StatusCodes.INTERNAL_SERVER_ERROR);
}
public async testRemoteStartTransactionWithUnassignedChargingStation() {
const response = await this.centralUserService.chargingStationApi.remoteStartTransaction({
'chargingStationID': this.chargingStationContext.getChargingStation().id,
'args': {
'visualTagID': this.transactionStartUser.tags[0].visualID,
'connectorId': this.chargingStationContext.getChargingStation().connectors[0].connectorId,
'userID': this.transactionStartUser.id
}
});
expect(response.status).to.equal(StatusCodes.INTERNAL_SERVER_ERROR);
}
public async testSendMeterValues(withSoC = false, withSignedData = false, withOnlyEndSignedData = false) {
// Check on Transaction
expect(this.newTransaction).to.not.be.null;
// Current Time matches Transaction one
this.transactionCurrentTime = moment(this.newTransaction.timestamp).toDate();
// Start Meter Value matches Transaction one
let currentEnergyActiveImportMeterValue = this.energyActiveImportStartMeterValue;
// ------------------------------------------------------------------
// Send Transaction.Begin
// ------------------------------------------------------------------
let meterValueResponse = await this.chargingStationContext.sendBeginMeterValue(
this.newTransaction.connectorId,
this.newTransaction.id,
this.transactionCurrentTime,
{
energyActiveImportMeterValue: this.energyActiveImportStartMeterValue,
socMeterValue: withSoC ? this.socMeterValues[0] : 0,
powerImportMeterValue: this.powerImportMeterValues[0],
voltageMeterValue: this.voltageMeterValues[0],
voltageL1MeterValue: this.voltageL1MeterValues[0],
voltageL2MeterValue: this.voltageL2MeterValues[0],
voltageL3MeterValue: this.voltageL3MeterValues[0],
amperageMeterValue: this.amperageMeterValues[0],
amperageL1MeterValue: this.amperageL1MeterValues[0],
amperageL2MeterValue: this.amperageL2MeterValues[0],
amperageL3MeterValue: this.amperageL3MeterValues[0],
signedDataStartMeterValue: (withSignedData && !withOnlyEndSignedData) ? this.transactionStartSignedData : null,
}
);
if (meterValueResponse) {
expect(meterValueResponse).to.eql({});
}
// Check Transaction
let transactionValidation = await this.basicTransactionValidation(this.newTransaction.id,
this.newTransaction.connectorId, this.newTransaction.meterStart, this.newTransaction.timestamp);
// ------------------------------------------------------------------
// Send Meter Values (except the last one which is used in Stop Transaction)
// ------------------------------------------------------------------
let currentCumulatedPrice = 0;
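    // Accumulate the expected price interval by interval, applying the
    // configured simple price per kWh to each interval's consumption.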
for (let index = 0; index <= this.energyActiveImportMeterValues.length - 2; index++) {
// Set new meter value
currentCumulatedPrice = Utils.createDecimal(currentCumulatedPrice).plus(
Utils.computeSimplePrice(this.pricekWh, this.energyActiveImportMeterValues[index])).toNumber();
if (index === this.energyActiveImportMeterValues.length - 2) {
this.totalPrice = currentCumulatedPrice;
}
currentEnergyActiveImportMeterValue += this.energyActiveImportMeterValues[index];
// Add time
this.transactionCurrentTime = moment(this.transactionCurrentTime).add(this.meterValueIntervalSecs, 's').toDate();
// Send consumption meter value
meterValueResponse = await this.chargingStationContext.sendConsumptionMeterValue(
this.newTransaction.connectorId,
this.newTransaction.id,
this.transactionCurrentTime,
{
energyActiveImportMeterValue: currentEnergyActiveImportMeterValue,
powerImportMeterValue: this.powerImportMeterValues[index],
powerImportL1MeterValue: this.powerImportL1MeterValues[index],
powerImportL2MeterValue: this.powerImportL2MeterValues[index],
powerImportL3MeterValue: this.powerImportL3MeterValues[index],
voltageMeterValue: this.voltageMeterValues[index],
voltageL1MeterValue: this.voltageL1MeterValues[index],
voltageL2MeterValue: this.voltageL2MeterValues[index],
voltageL3MeterValue: this.voltageL3MeterValues[index],
amperageMeterValue: this.amperageMeterValues[index],
amperageL1MeterValue: this.amperageL1MeterValues[index],
amperageL2MeterValue: this.amperageL2MeterValues[index],
amperageL3MeterValue: this.amperageL3MeterValues[index],
socMeterValue: withSoC ? this.socMeterValues[index] : 0,
}
);
expect(meterValueResponse).to.eql({});
// Check the Consumption
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_15) {
transactionValidation = await this.basicTransactionValidation(this.newTransaction.id,
this.newTransaction.connectorId, this.newTransaction.meterStart, this.newTransaction.timestamp);
expect(transactionValidation.data).to.deep.include({
currentInstantWatts: this.energyActiveImportMeterValues[index] * (3600 / this.meterValueIntervalSecs),
currentTotalConsumptionWh: (currentEnergyActiveImportMeterValue - this.energyActiveImportStartMeterValue),
currentTotalDurationSecs: this.meterValueIntervalSecs * (index + 1),
currentTotalInactivitySecs: this.totalInactivities[index],
currentCumulatedPrice: currentCumulatedPrice,
currentInactivityStatus: Utils.getInactivityStatusLevel(this.chargingStationContext.getChargingStation(),
this.newTransaction.connectorId, this.totalInactivities[index]),
});
} else {
transactionValidation = await this.basicTransactionValidation(this.newTransaction.id,
this.newTransaction.connectorId, this.newTransaction.meterStart, this.newTransaction.timestamp);
expect(transactionValidation.data).to.deep.include({
currentInstantWatts: this.powerImportMeterValues[index],
currentTotalConsumptionWh: (currentEnergyActiveImportMeterValue - this.energyActiveImportStartMeterValue),
currentTotalDurationSecs: this.meterValueIntervalSecs * (index + 1),
currentTotalInactivitySecs: this.totalInactivities[index],
currentCumulatedPrice: currentCumulatedPrice,
currentInactivityStatus: Utils.getInactivityStatusLevel(this.chargingStationContext.getChargingStation(),
this.newTransaction.connectorId, this.totalInactivities[index]),
});
}
if (withSoC) {
expect(transactionValidation.data).to.deep.include({
currentStateOfCharge: this.socMeterValues[index]
});
} else {
expect(transactionValidation.data).to.deep.include({
stateOfCharge: this.newTransaction.stateOfCharge
});
}
}
// ------------------------------------------------------------------
// Send Transaction.End
// ------------------------------------------------------------------
meterValueResponse = await this.chargingStationContext.sendEndMeterValue(
this.newTransaction.connectorId,
this.newTransaction.id,
moment(this.transactionCurrentTime).toDate(),
{
energyActiveImportMeterValue: this.energyActiveImportEndMeterValue,
powerImportMeterValue: this.powerImportMeterValues[this.powerImportMeterValues.length - 1],
voltageMeterValue: this.voltageMeterValues[this.voltageMeterValues.length - 1],
voltageL1MeterValue: this.voltageL1MeterValues[this.voltageL1MeterValues.length - 1],
voltageL2MeterValue: this.voltageL2MeterValues[this.voltageL2MeterValues.length - 1],
voltageL3MeterValue: this.voltageL3MeterValues[this.voltageL3MeterValues.length - 1],
amperageMeterValue: this.amperageMeterValues[this.amperageMeterValues.length - 1],
amperageL1MeterValue: this.amperageL1MeterValues[this.amperageL1MeterValues.length - 1],
amperageL2MeterValue: this.amperageL2MeterValues[this.amperageL2MeterValues.length - 1],
amperageL3MeterValue: this.amperageL3MeterValues[this.amperageL3MeterValues.length - 1],
socMeterValue: withSoC ? this.socMeterValues[this.socMeterValues.length - 1] : 0,
signedDataStartMeterValue: withSignedData ? this.transactionStartSignedData : null,
signedDataStopMeterValue: withSignedData ? this.transactionEndSignedData : null,
}
);
if (meterValueResponse) {
expect(meterValueResponse).to.eql({});
}
// Check the Transaction End
transactionValidation = await this.basicTransactionValidation(this.newTransaction.id,
this.newTransaction.connectorId, this.newTransaction.meterStart, this.newTransaction.timestamp);
if (withSoC) {
expect(transactionValidation.data).to.deep.include({
currentStateOfCharge: this.socMeterValues[this.socMeterValues.length - 1]
});
} else {
expect(transactionValidation.data).to.deep.include({
stateOfCharge: this.newTransaction.stateOfCharge
});
}
}
public async testStopTransaction(withSoC = false, withSignedData = false) {
// Check on Transaction
expect(this.newTransaction).to.not.be.null;
expect(this.transactionCurrentTime).to.not.be.null;
// Set end time
this.transactionCurrentTime = moment(this.transactionCurrentTime).add(this.meterValueIntervalSecs, 's').toDate();
// Stop the Transaction
const stopTransactionResponse = await this.chargingStationContext.stopTransaction(this.newTransaction.id,
this.transactionStopUser.tags[0].id, this.energyActiveImportEndMeterValue, this.transactionCurrentTime);
// Check
expect(stopTransactionResponse).to.have.property('idTagInfo');
expect(stopTransactionResponse.idTagInfo.status).to.equal(OCPPStatus.ACCEPTED);
// Set the connector to Available
this.chargingStationConnector1.status = ChargePointStatus.AVAILABLE;
this.chargingStationConnector1.timestamp = this.transactionCurrentTime.toISOString();
// Update
const statusResponse = await this.chargingStationContext.setConnectorStatus(this.chargingStationConnector1);
expect(statusResponse).to.eql({});
// Check the Transaction
const transactionValidation = await this.basicTransactionValidation(this.newTransaction.id,
this.newTransaction.connectorId, this.newTransaction.meterStart, this.newTransaction.timestamp);
const totalTransactionPrice = Utils.computeSimplePrice(this.pricekWh, this.transactionTotalConsumptionWh);
expect(this.totalPrice).equal(totalTransactionPrice);
expect(transactionValidation.data).to.deep['containSubset']({
'signedData': (withSignedData ? this.transactionStartSignedData : ''),
'stop': {
'meterStop': this.energyActiveImportEndMeterValue,
'totalConsumptionWh': this.transactionTotalConsumptionWh,
'totalInactivitySecs': this.transactionTotalInactivitySecs,
'inactivityStatus': InactivityStatus.INFO,
'totalDurationSecs': moment.duration(moment(this.transactionCurrentTime).diff(this.newTransaction.timestamp)).asSeconds(),
'price': this.totalPrice,
'priceUnit': 'EUR',
'pricingSource': PricingSettingsType.SIMPLE,
'roundedPrice': Utils.truncTo(this.totalPrice, 2),
'tagID': this.transactionStopUser.tags[0].id,
'timestamp': this.transactionCurrentTime.toISOString(),
'signedData': (withSignedData ? this.transactionEndSignedData : ''),
'stateOfCharge': (withSoC ? this.socMeterValues[this.socMeterValues.length - 1] : 0),
'user': {
'id': this.transactionStopUser.id,
'name': this.transactionStopUser.name,
'firstName': this.transactionStopUser.firstName
}
}
});
}
public async testTransactionMetrics(withSoC = false, checkNewMeterValues = false) {
// Check on Transaction
expect(this.newTransaction).to.not.be.null;
const response = await this.centralUserService.transactionApi.readAllConsumption({ TransactionId: this.newTransaction.id });
expect(response.status).to.equal(StatusCodes.OK);
const totalTransactionPrice = Utils.computeSimplePrice(this.pricekWh, this.transactionTotalConsumptionWh);
expect(this.totalPrice).equal(totalTransactionPrice);
// Check Headers
expect(response.data).to.deep['containSubset']({
'chargeBoxID': this.newTransaction.chargeBoxID,
'connectorId': this.newTransaction.connectorId,
'stop': {
'price': this.totalPrice,
'pricingSource': 'simple',
'roundedPrice': Utils.truncTo(this.totalPrice, 2),
'tagID': this.transactionStopUser.tags[0].id,
'totalConsumptionWh': this.transactionTotalConsumptionWh,
'totalInactivitySecs': this.transactionTotalInactivitySecs,
'inactivityStatus': InactivityStatus.INFO,
'stateOfCharge': (withSoC ? this.socMeterValues[this.socMeterValues.length - 1] : 0),
'user': {
'id': this.transactionStopUser.id,
'name': this.transactionStopUser.name,
'firstName': this.transactionStopUser.firstName
}
},
'id': this.newTransaction.id,
'user': {
'id': this.transactionStartUser.id,
'name': this.transactionStartUser.name,
'firstName': this.transactionStartUser.firstName
}
});
// Init
const transactionCurrentTime = moment(this.newTransaction.timestamp);
let transactionCumulatedConsumption = this.energyActiveImportStartMeterValue;
// Check Consumption
for (let i = 0; i < response.data.values.length - 1; i++) {
// Get the value
const value = response.data.values[i];
// Sum
transactionCumulatedConsumption += this.energyActiveImportMeterValues[i];
// Check
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_15) {
const instantWatts = this.energyActiveImportMeterValues[i] * (3600 / this.meterValueIntervalSecs);
expect(value).to.include({
'startedAt': transactionCurrentTime.toISOString(),
'instantAmps': Utils.convertWattToAmp(this.chargingStationContext.getChargingStation(),
null, this.newTransaction.connectorId, instantWatts),
'instantWatts': instantWatts,
'cumulatedConsumptionWh': transactionCumulatedConsumption,
'cumulatedConsumptionAmps': Utils.convertWattToAmp(this.chargingStationContext.getChargingStation(),
null, this.newTransaction.connectorId, transactionCumulatedConsumption)
});
if (withSoC) {
expect(value).to.include({
'stateOfCharge': this.socMeterValues[i]
});
}
} else {
expect(value).to.include({
'startedAt': transactionCurrentTime.toISOString(),
'instantVolts': checkNewMeterValues ? this.voltageMeterValues[i] : 0,
'instantVoltsL1': checkNewMeterValues ? this.voltageL1MeterValues[i] : 0,
'instantVoltsL2': checkNewMeterValues ? this.voltageL2MeterValues[i] : 0,
'instantVoltsL3': checkNewMeterValues ? this.voltageL3MeterValues[i] : 0,
'instantAmps': checkNewMeterValues ? this.amperageMeterValues[i] :
Utils.convertWattToAmp(this.chargingStationContext.getChargingStation(),
null, this.newTransaction.connectorId, this.powerImportMeterValues[i]),
'instantAmpsL1': checkNewMeterValues ? this.amperageL1MeterValues[i] : 0,
'instantAmpsL2': checkNewMeterValues ? this.amperageL2MeterValues[i] : 0,
'instantAmpsL3': checkNewMeterValues ? this.amperageL3MeterValues[i] : 0,
'instantWatts': this.powerImportMeterValues[i],
'instantWattsL1': checkNewMeterValues ? this.voltageL1MeterValues[i] * this.amperageL1MeterValues[i] : 0,
'instantWattsL2': checkNewMeterValues ? this.voltageL2MeterValues[i] * this.amperageL2MeterValues[i] : 0,
'instantWattsL3': checkNewMeterValues ? this.voltageL3MeterValues[i] * this.amperageL3MeterValues[i] : 0,
'cumulatedConsumptionWh': transactionCumulatedConsumption,
'cumulatedConsumptionAmps': Utils.convertWattToAmp(this.chargingStationContext.getChargingStation(),
null, this.newTransaction.connectorId, transactionCumulatedConsumption)
});
}
// Add time
transactionCurrentTime.add(this.meterValueIntervalSecs, 's');
}
}
public async testDeleteTransaction(noAuthorization = false) {
// Delete the created entity
expect(this.newTransaction).to.not.be.null;
let response = await this.transactionStartUserService.transactionApi.delete(this.newTransaction.id);
if (noAuthorization) {
expect(response.status).to.equal(StatusCodes.FORBIDDEN);
// Transaction must be deleted by Admin user
response = await this.centralUserService.transactionApi.delete(this.newTransaction.id);
}
// Remove from transactions to be deleted
this.chargingStationContext.removeTransaction(this.newTransaction.id);
expect(response.status).to.equal(StatusCodes.OK);
expect(response.data).to.have.property('status');
expect(response.data.status).to.be.eql('Success');
this.newTransaction = null;
}
public async testAuthorizeTagAsInteger() {
await this.testAuthorize(this.numberTag, OCPPStatus.ACCEPTED);
await this.testAuthorize(this.numberTag.toString(), OCPPStatus.ACCEPTED);
}
public async testAuthorizeInvalidTag() {
await this.testAuthorize(this.invalidTag, OCPPAuthorizationStatus.INVALID);
await this.testAuthorize('', OCPPAuthorizationStatus.INVALID);
await this.testAuthorize(null, OCPPAuthorizationStatus.INVALID);
}
public async testStartTransactionWithConnectorIdAsString() {
const response = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.numberTag.toString(),
0,
this.transactionStartTime
);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(response).to.be.transactionValid;
}
public async testStartTransactionWithMeterStartGreaterZero() {
const response = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.numberTag.toString(),
faker.datatype.number(100000),
this.transactionStartTime
);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(response).to.be.transactionValid;
}
public async testStartTransactionWithInvalidTag() {
let response = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.invalidTag,
0,
this.transactionStartTime
);
expect(response).to.be.transactionStatus(OCPPAuthorizationStatus.INVALID);
response = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
'',
0,
this.transactionStartTime
);
expect(response).to.be.transactionStatus(OCPPAuthorizationStatus.INVALID);
response = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
null,
0,
this.transactionStartTime
);
expect(response).to.be.transactionStatus(OCPPAuthorizationStatus.INVALID);
}
public async testStopTransactionWithoutTransactionData() {
const startTransactionResponse = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.numberTag.toString(),
this.energyActiveImportStartMeterValue,
this.transactionStartTime
);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(startTransactionResponse).to.be.transactionValid;
const transactionId = startTransactionResponse.transactionId;
this.transactionCurrentTime = moment().toDate();
const stopValue = this.energyActiveImportStartMeterValue + faker.datatype.number(100000);
const stopTransactionResponse = await this.chargingStationContext.stopTransaction(
transactionId, this.numberTag.toString(), stopValue, this.transactionCurrentTime);
expect(stopTransactionResponse).to.have.property('idTagInfo');
expect(stopTransactionResponse.idTagInfo.status).to.equal(OCPPStatus.ACCEPTED);
}
public async testStopTransactionWithTransactionData() {
const startTransactionResponse = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.numberTag.toString(),
this.energyActiveImportStartMeterValue,
this.transactionStartTime
);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(startTransactionResponse).to.be.transactionValid;
const transactionId = startTransactionResponse.transactionId;
this.transactionCurrentTime = moment().toDate();
const stopValue = this.energyActiveImportStartMeterValue + faker.datatype.number(100000);
let transactionData: OCPPMeterValue[] | OCPP15TransactionData;
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_16) {
transactionData = [
{
'timestamp': this.transactionStartTime.toISOString(),
'sampledValue': [
{
'value': this.energyActiveImportStartMeterValue.toString(),
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_BEGIN,
}
]
},
{
'timestamp': this.transactionCurrentTime.toISOString(),
'sampledValue': [
{
'value': stopValue.toString(),
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_END,
}
]
}
];
// OCPP 1.5
} else {
transactionData = {
'values': [
{
'timestamp': this.transactionStartTime.toISOString(),
'value': {
'$attributes': {
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_BEGIN,
},
'$value': this.energyActiveImportStartMeterValue.toString(),
}
},
{
'timestamp': this.transactionCurrentTime.toISOString(),
'value': {
'$attributes': {
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_END,
},
'$value': stopValue.toString()
}
}
]
};
}
const stopTransactionResponse = await this.chargingStationContext.stopTransaction(transactionId, this.numberTag.toString(), stopValue, this.transactionCurrentTime, transactionData);
expect(stopTransactionResponse).to.have.property('idTagInfo');
expect(stopTransactionResponse.idTagInfo.status).to.equal(OCPPStatus.ACCEPTED);
}
public async testStopTransactionWithInvalidTransactionData() {
const startTransactionResponse = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.numberTag.toString(),
this.energyActiveImportStartMeterValue,
this.transactionStartTime
);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(startTransactionResponse).to.be.transactionValid;
const transactionId = startTransactionResponse.transactionId;
this.transactionCurrentTime = moment().toDate();
const stopValue = this.energyActiveImportStartMeterValue + faker.datatype.number(100000);
let transactionData: OCPPMeterValue[] | OCPP15TransactionData;
// Provide TransactionData for wrong OCPP Version
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_15) {
transactionData = [
{
'timestamp': this.transactionStartTime.toISOString(),
'sampledValue': [
{
'value': this.energyActiveImportStartMeterValue.toString(),
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_BEGIN,
}
]
},
{
'timestamp': this.transactionCurrentTime.toISOString(),
'sampledValue': [
{
'value': stopValue.toString(),
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_END,
}
]
}
];
// OCPP 1.5
} else {
transactionData = {
'values': [
{
'timestamp': this.transactionStartTime.toISOString(),
'value': {
'$attributes': {
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_BEGIN,
},
'$value': this.energyActiveImportStartMeterValue.toString(),
}
},
{
'timestamp': this.transactionCurrentTime.toISOString(),
'value': {
'$attributes': {
...Constants.OCPP_ENERGY_ACTIVE_IMPORT_REGISTER_ATTRIBUTE,
'context': OCPPReadingContext.TRANSACTION_END,
},
'$value': stopValue.toString()
}
}
]
};
}
let stopTransactionResponse = await this.chargingStationContext.stopTransaction(transactionId, this.numberTag.toString(), stopValue, this.transactionCurrentTime, transactionData);
expect(stopTransactionResponse).to.have.property('idTagInfo');
expect(stopTransactionResponse.idTagInfo.status).to.equal(OCPPAuthorizationStatus.INVALID);
// Now stop the transaction without Transaction Data
stopTransactionResponse = await this.chargingStationContext.stopTransaction(transactionId, this.numberTag.toString(), stopValue, this.transactionCurrentTime);
expect(stopTransactionResponse).to.have.property('idTagInfo');
expect(stopTransactionResponse.idTagInfo.status).to.equal(OCPPStatus.ACCEPTED);
}
public async testRetrieveLastRebootDate() {
const bootNotification = await this.chargingStationContext.sendBootNotification();
expect(bootNotification).to.not.be.null;
expect(bootNotification.status).to.eql(OCPPStatus.ACCEPTED);
expect(bootNotification).to.have.property('currentTime');
let chargingStationResponse = await this.chargingStationContext.readChargingStation();
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_16) {
expect(bootNotification.currentTime).to.equal(chargingStationResponse.data.lastReboot);
} else {
expect((bootNotification.currentTime as unknown as Date).toISOString()).to.equal(chargingStationResponse.data.lastReboot);
}
const bootNotification2 = await this.chargingStationContext.sendBootNotification();
chargingStationResponse = await this.chargingStationContext.readChargingStation();
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_16) {
expect(bootNotification2.currentTime).to.equal(chargingStationResponse.data.lastReboot);
} else {
expect((bootNotification2.currentTime as unknown as Date).toISOString()).to.equal(chargingStationResponse.data.lastReboot);
}
expect(bootNotification.currentTime).to.not.equal(bootNotification2.currentTime);
if (this.chargingStationContext.getChargingStation().ocppVersion === OCPPVersion.VERSION_16) {
expect(new Date(bootNotification.currentTime)).to.beforeTime(new Date(bootNotification2.currentTime));
} else {
expect(bootNotification.currentTime).to.beforeTime(new Date(bootNotification2.currentTime));
}
// Boot notification empties the connectors
// Send status notifications
for (const connector of this.chargingStationContext.getChargingStation().connectors) {
// Send async on purpose
void this.chargingStationContext.setConnectorStatus({
connectorId: connector.connectorId,
status: ChargePointStatus.AVAILABLE,
errorCode: ChargePointErrorCode.NO_ERROR,
timestamp: new Date().toISOString()
});
}
// Wait for the status notifications to be processed
await Utils.sleep(2000);
// Check Connectors are recreated
chargingStationResponse = await this.chargingStationContext.readChargingStation();
expect(chargingStationResponse.data.connectors.length).to.equal(this.chargingStationContext.getChargingStation().connectors.length);
// Check they are all available
for (const connector of chargingStationResponse.data.connectors) {
expect(connector.status).to.eql(ChargePointStatus.AVAILABLE);
}
}
public async testTransactionIgnoringClockMeterValues() {
const meterStart = 0;
let meterValue = meterStart;
const currentTime = moment();
const startTransactionResponse = await this.chargingStationContext.startTransaction(
this.chargingStationConnector1.connectorId,
this.numberTag.toString(),
meterValue,
currentTime.toDate()
);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(startTransactionResponse).to.be.transactionValid;
const transactionId = startTransactionResponse.transactionId;
meterValue += 300;
let meterValueResponse = await this.chargingStationContext.sendConsumptionMeterValue(
this.chargingStationConnector1.connectorId,
transactionId,
currentTime.add(1, 'minute').clone().toDate(),
{ energyActiveImportMeterValue: meterValue }
);
expect(meterValueResponse).to.eql({});
meterValue += 300;
meterValueResponse = await this.chargingStationContext.sendConsumptionMeterValue(
this.chargingStationConnector1.connectorId,
transactionId,
currentTime.add(1, 'minute').clone().toDate(),
{ energyActiveImportMeterValue: meterValue }
);
expect(meterValueResponse).to.eql({});
meterValue += 300;
meterValueResponse = await this.chargingStationContext.sendConsumptionMeterValue(
this.chargingStationConnector1.connectorId,
transactionId,
currentTime.add(1, 'minute').clone().toDate(),
{ energyActiveImportMeterValue: meterValue }
);
expect(meterValueResponse).to.eql({});
meterValueResponse = await this.chargingStationContext.sendClockMeterValue(
this.chargingStationConnector1.connectorId,
transactionId,
currentTime.clone().toDate(),
0
);
expect(meterValueResponse).to.eql({});
meterValue += 300;
meterValueResponse = await this.chargingStationContext.sendConsumptionMeterValue(
this.chargingStationConnector1.connectorId,
transactionId,
currentTime.add(1, 'minute').clone().toDate(),
{ energyActiveImportMeterValue: meterValue }
);
expect(meterValueResponse).to.eql({});
const stopTransactionResponse = await this.chargingStationContext.stopTransaction(
transactionId,
this.numberTag.toString(),
meterValue, currentTime.add(1, 'minute').clone().toDate()
);
expect(stopTransactionResponse).to.have.property('idTagInfo');
expect(stopTransactionResponse.idTagInfo.status).to.equal(OCPPStatus.ACCEPTED);
const transaction = await this.centralUserService.transactionApi.readById(transactionId);
expect(transaction.status).to.equal(StatusCodes.OK);
expect(transaction.data).to.deep['containSubset']({
id: transactionId,
meterStart: meterStart,
stop: {
totalConsumptionWh: meterValue - meterStart,
totalInactivitySecs: 60,
inactivityStatus: InactivityStatus.INFO
}
});
}
private async createUser(user = Factory.user.build()) {
const createdUser = await this.centralUserService.createEntity(this.centralUserService.userApi, user);
return createdUser;
}
private async createTag(tag: Tag) {
const createdTag = await this.centralUserService.userApi.createTag(tag);
return createdTag;
}
private async testAuthorize(tagId, expectedStatus) {
const response = await this.chargingStationContext.authorize(tagId);
// Check
expect(response).to.have.property('idTagInfo');
expect(response.idTagInfo.status).to.equal(expectedStatus);
}
private async validateStartedTransaction(response, chargingStationConnector, startMeterValue, startTime) {
expect(response).to.have.property('idTagInfo');
expect(response.idTagInfo.status).to.equal(OCPPStatus.ACCEPTED);
expect(response).to.have.property('transactionId');
expect(response.transactionId).to.not.equal(0);
const transactionId = response.transactionId;
// Update connector status
chargingStationConnector.status = ChargePointStatus.OCCUPIED;
chargingStationConnector.timestamp = new Date().toISOString();
const statusNotificationResponse = await this.chargingStationContext.setConnectorStatus(chargingStationConnector);
expect(statusNotificationResponse).to.eql({});
const basicTransactionValidation = await this.basicTransactionValidation(transactionId, chargingStationConnector.connectorId, startMeterValue, startTime.toISOString());
expect(basicTransactionValidation.data).to.deep.include({
currentInstantWatts: 0,
currentCumulatedPrice: 0,
currentStateOfCharge: 0,
currentTotalConsumptionWh: 0,
currentTotalInactivitySecs: 0,
currentInactivityStatus: InactivityStatus.INFO,
price: 0,
roundedPrice: 0,
});
}
private async basicTransactionValidation(transactionId: number, connectorId: number, meterStart: number, timestamp: Date) {
const transactionResponse = await this.centralUserService.transactionApi.readById(transactionId);
expect(transactionResponse.status).to.equal(StatusCodes.OK);
expect(transactionResponse.data).to.deep['containSubset']({
'id': transactionId,
'timestamp': timestamp,
'chargeBoxID': this.chargingStationContext.getChargingStation().id,
'connectorId': connectorId,
'tagID': this.transactionStartUser.tags[0].id,
'meterStart': meterStart,
'userID': this.transactionStartUser.id,
'siteAreaID': this.chargingStationContext.getChargingStation().siteAreaID,
'siteID': this.chargingStationContext.getChargingStation().siteID,
'user': {
'id': this.transactionStartUser.id,
'name': this.transactionStartUser.name,
'firstName': this.transactionStartUser.firstName
}
});
return transactionResponse;
}
}
| OCPPCommonTests |
utils.go | /*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"context"
"encoding/base64"
"errors"
"fmt"
"path"
"strings"
"github.com/google/go-github/v35/github"
"github.com/hashicorp/go-multierror"
"github.com/xanzy/go-gitlab"
"go.uber.org/zap"
"helm.sh/helm/v3/pkg/releaseutil"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/koderover/zadig/pkg/microservice/aslan/config"
commonmodels "github.com/koderover/zadig/pkg/microservice/aslan/core/common/repository/models"
commonrepo "github.com/koderover/zadig/pkg/microservice/aslan/core/common/repository/mongodb"
"github.com/koderover/zadig/pkg/microservice/aslan/core/common/service/codehub"
"github.com/koderover/zadig/pkg/setting"
"github.com/koderover/zadig/pkg/shared/codehost"
"github.com/koderover/zadig/pkg/shared/poetry"
e "github.com/koderover/zadig/pkg/tool/errors"
githubtool "github.com/koderover/zadig/pkg/tool/git/github"
gitlabtool "github.com/koderover/zadig/pkg/tool/git/gitlab"
"github.com/koderover/zadig/pkg/tool/ilyshin"
"github.com/koderover/zadig/pkg/tool/kube/serializer"
"github.com/koderover/zadig/pkg/tool/log"
"github.com/koderover/zadig/pkg/util"
)
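// syncContent fetches the raw YAML manifests for a service from its source repository
// (only the CodeHub source is handled here) and fills args.KubeYamls and args.Yaml.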
func syncContent(args *commonmodels.Service, logger *zap.SugaredLogger) error {
address, _, repo, branch, path, pathType, err := GetOwnerRepoBranchPath(args.SrcPath)
if err != nil {
logger.Errorf("Failed to parse url %s, err: %s", args.SrcPath, err)
return fmt.Errorf("url parse failure, err: %s", err)
}
var yamls []string
switch args.Source {
case setting.SourceFromCodeHub:
client, err := getCodehubClientByAddress(address)
if err != nil {
logger.Errorf("Failed to get codehub client, error: %s", err)
return err
}
repoUUID, err := client.GetRepoUUID(repo)
if err != nil {
logger.Errorf("Failed to get repoUUID, error: %s", err)
return err
}
yamls, err = client.GetYAMLContents(repoUUID, branch, path, pathType == "tree", true)
if err != nil {
logger.Errorf("Failed to get yamls, error: %s", err)
return err
}
}
args.KubeYamls = yamls
args.Yaml = util.CombineManifests(yamls)
return nil
}
// fillServiceTmpl updates the service template parameters
func fillServiceTmpl(userName string, args *commonmodels.Service, log *zap.SugaredLogger) error {
if args == nil {
return errors.New("service template arg is null")
}
if len(args.ServiceName) == 0 {
return errors.New("service name is empty")
}
if !config.ServiceNameRegex.MatchString(args.ServiceName) {
return fmt.Errorf("service name must match %s", config.ServiceNameRegexString)
}
if args.Type == setting.K8SDeployType {
if args.Containers == nil {
args.Containers = make([]*commonmodels.Container, 0)
}
// Source is GitLab: sync the configuration from GitLab and set KubeYamls.
if args.Source == setting.SourceFromGitlab {
// Set args.Commit
if err := syncLatestCommit(args); err != nil {
log.Errorf("Sync change log from gitlab failed, error: %v", err)
return err
}
// Sync the YAML files in the specified directory at the commit given in args from GitLab
// Set args.Yaml & args.KubeYamls
if err := syncContentFromGitlab(userName, args); err != nil {
log.Errorf("Sync content from gitlab failed, error: %v", err)
return err
}
} else if args.Source == setting.SourceFromIlyshin {
// Set args.Commit
if err := syncIlyshinLatestCommit(args, log); err != nil {
log.Errorf("Sync change log from ilyshin failed, error: %v", err)
return err
}
// Sync the YAML files in the specified directory at the commit given in args from ilyshin
// Set args.Yaml & args.KubeYamls
if err := syncContentFromIlyshin(userName, args); err != nil {
log.Errorf("Sync content from ilyshin failed, error: %v", err)
return err
}
} else if args.Source == setting.SourceFromGithub {
err := syncContentFromGithub(args, log)
if err != nil {
log.Errorf("Sync content from github failed, error: %v", err)
return err
}
} else if args.Source == setting.SourceFromCodeHub {
err := syncContent(args, log)
if err != nil {
log.Errorf("Sync content from codehub failed, error: %v", err)
return err
}
} else {
// Split the all-in-one YAML file
// Replace the line separators
args.Yaml = util.ReplaceWrapLine(args.Yaml)
// The separator is \n---\n
args.KubeYamls = SplitYaml(args.Yaml)
}
// Walk args.KubeYamls and collect the image and name of every container in Deployments and StatefulSets
if err := setCurrentContainerImages(args); err != nil {
return err
}
log.Infof("find %d containers in service %s", len(args.Containers), args.ServiceName)
}
// Set the new revision number
serviceTemplate := fmt.Sprintf(setting.ServiceTemplateCounterName, args.ServiceName, args.ProductName)
rev, err := commonrepo.NewCounterColl().GetNextSeq(serviceTemplate)
if err != nil {
return fmt.Errorf("get next service template revision error: %v", err)
}
args.Revision = rev
return nil
}
func syncLatestCommit(service *commonmodels.Service) error {
if service.SrcPath == "" {
return fmt.Errorf("url不能是空的")
} | return fmt.Errorf("url 必须包含 owner/repo/tree/branch/path,具体请参考 Placeholder 提示")
}
client, err := getGitlabClientByAddress(address)
if err != nil {
return err
}
commit, err := GitlabGetLatestCommit(client, owner, repo, branch, path)
if err != nil {
return err
}
service.Commit = &commonmodels.Commit{
SHA: commit.ID,
Message: commit.Message,
}
return nil
}
func syncIlyshinLatestCommit(service *commonmodels.Service, log *zap.SugaredLogger) error {
if service.SrcPath == "" {
return fmt.Errorf("url不能是空的")
}
address, owner, repo, branch, path, _, err := GetOwnerRepoBranchPath(service.SrcPath)
if err != nil {
return fmt.Errorf("url 必须包含 owner/repo/tree/branch/path,具体请参考 Placeholder 提示")
}
client, err := getIlyshinClientByAddress(address)
if err != nil {
return err
}
commit, err := client.GetLatestCommit(owner, repo, branch, path, log)
if err != nil {
return err
}
service.Commit = &commonmodels.Commit{
SHA: commit.ID,
Message: commit.Message,
}
return nil
}
func syncCodehubLatestCommit(service *commonmodels.Service) error {
if service.SrcPath == "" {
return fmt.Errorf("url不能是空的")
}
address, owner, repo, branch, _, _, err := GetOwnerRepoBranchPath(service.SrcPath)
if err != nil {
return fmt.Errorf("url 必须包含 owner/repo/tree/branch/path,具体请参考 Placeholder 提示")
}
client, err := getCodehubClientByAddress(address)
if err != nil {
return err
}
id, message, err := CodehubGetLatestCommit(client, owner, repo, branch)
if err != nil {
return err
}
service.Commit = &commonmodels.Commit{
SHA: id,
Message: message,
}
return nil
}
func getCodehubClientByAddress(address string) (*codehub.Client, error) {
opt := &codehost.Option{
Address: address,
CodeHostType: codehost.CodeHubProvider,
}
codehost, err := codehost.GetCodeHostInfo(opt)
if err != nil {
log.Error(err)
return nil, e.ErrCodehostListProjects.AddDesc("git client is nil")
}
client := codehub.NewClient(codehost.AccessKey, codehost.SecretKey, codehost.Region)
return client, nil
}
func getIlyshinClientByAddress(address string) (*ilyshin.Client, error) {
opt := &codehost.Option{
Address: address,
CodeHostType: codehost.IlyshinProvider,
}
codehost, err := codehost.GetCodeHostInfo(opt)
if err != nil {
log.Error(err)
return nil, e.ErrCodehostListProjects.AddDesc("git client is nil")
}
client := ilyshin.NewClient(codehost.Address, codehost.AccessToken)
return client, nil
}
func getGitlabClientByAddress(address string) (*gitlabtool.Client, error) {
opt := &codehost.Option{
Address: address,
CodeHostType: codehost.GitLabProvider,
}
codehost, err := codehost.GetCodeHostInfo(opt)
if err != nil {
log.Error(err)
return nil, e.ErrCodehostListProjects.AddDesc("git client is nil")
}
client, err := gitlabtool.NewClient(codehost.Address, codehost.AccessToken)
if err != nil {
log.Error(err)
return nil, e.ErrCodehostListProjects.AddDesc(err.Error())
}
return client, nil
}
func GitlabGetLatestCommit(client *gitlabtool.Client, owner, repo string, ref, path string) (*gitlab.Commit, error) {
commit, err := client.GetLatestRepositoryCommit(owner, repo, path, ref)
if err != nil {
return nil, fmt.Errorf("failed to get lastest commit with project %s/%s, ref: %s, path:%s, error: %v",
owner, repo, ref, path, err)
}
return commit, nil
}
func CodehubGetLatestCommit(client *codehub.Client, owner, repo string, branch string) (string, string, error) {
commit, err := client.GetLatestRepositoryCommit(owner, repo, branch)
if err != nil {
return "", "", fmt.Errorf("failed to get lastest commit with project %s/%s, ref: %s, error: %s",
owner, repo, branch, err)
}
return commit.ID, commit.Message, nil
}
// GitlabGetRawFiles ...
// projectID: identity of project, can be retrieved from s.GitlabGetProjectID(owner, repo)
// ref: branch (e.g. master) or commit (commit id) or tag
// path: file path of raw files, only retrieve leaf node(blob type == file), no recursive get
func GitlabGetRawFiles(client *gitlabtool.Client, owner, repo, ref, path, pathType string) (files []string, err error) {
files = make([]string, 0)
var errs *multierror.Error
if pathType == "tree" {
nodes, err := client.ListTree(owner, repo, path, ref, false, nil)
if err != nil {
return files, err
}
for _, node := range nodes {
// if node type is "tree", it is a directory, skip it for now
if node.Type == "tree" {
continue
}
fileName := strings.ToLower(node.Name)
if !strings.HasSuffix(fileName, ".yaml") && !strings.HasSuffix(fileName, ".yml") {
continue
}
// if node type is "blob", it is a file
// Path is filepath of a node
content, err := client.GetRawFile(owner, repo, ref, node.Path)
if err != nil {
errs = multierror.Append(errs, err)
}
contentStr := string(content)
contentStr = util.ReplaceWrapLine(contentStr)
files = append(files, contentStr)
}
return files, errs.ErrorOrNil()
}
content, err := client.GetFileContent(owner, repo, ref, path)
if err != nil {
return files, err
}
files = append(files, string(content))
return files, errs.ErrorOrNil()
}
// syncContentFromGitlab ...
// sync content with commit, args.Commit should not be nil
func syncContentFromGitlab(userName string, args *commonmodels.Service) error {
if args.Commit == nil {
return nil
}
address, owner, repo, branch, path, pathType, err := GetOwnerRepoBranchPath(args.SrcPath)
if err != nil {
return fmt.Errorf("url format failed")
}
client, err := getGitlabClientByAddress(address)
if err != nil {
return err
}
files, err := GitlabGetRawFiles(client, owner, repo, branch, path, pathType)
if err != nil {
return err
}
if userName != setting.WebhookTaskCreator {
if len(files) == 0 {
return fmt.Errorf("没有检索到yml,yaml类型文件,请检查目录是否正确")
}
}
// The KubeYamls field is dynamically synced.
// Set args.KubeYamls from the content synced from GitLab
args.KubeYamls = files
// Join the files and set args.Yaml
args.Yaml = joinYamls(files)
return nil
}
func joinYamls(files []string) string {
return strings.Join(files, setting.YamlFileSeperator)
}
// IlyshinGetRawFiles ...
func IlyshinGetRawFiles(client *ilyshin.Client, owner, repo, ref, path, pathType string) (files []string, err error) {
files = make([]string, 0)
var errs *multierror.Error
if pathType == "tree" {
nodes, err := client.ListTree(owner, repo, ref, path)
if err != nil {
return files, err
}
for _, node := range nodes {
// if node type is "tree", it is a directory, skip it for now
if node.Type == "tree" {
continue
}
fileName := strings.ToLower(node.Name)
if !strings.HasSuffix(fileName, ".yaml") && !strings.HasSuffix(fileName, ".yml") {
continue
}
// if node type is "blob", it is a file
// Path is filepath of a node
content, err := client.GetRawFile(owner, repo, ref, node.Path)
if err != nil {
errs = multierror.Append(errs, err)
}
contentStr := string(content)
contentStr = util.ReplaceWrapLine(contentStr)
files = append(files, contentStr)
}
return files, errs.ErrorOrNil()
}
fileInfo, err := client.GetFile(owner, repo, ref, path)
if err != nil {
return files, err
}
decodedFile, err := base64.StdEncoding.DecodeString(fileInfo.Content)
if err != nil {
return files, err
}
files = append(files, string(decodedFile))
return files, errs.ErrorOrNil()
}
// syncContentFromIlyshin ...
// sync content with commit, args.Commit should not be nil
func syncContentFromIlyshin(userName string, args *commonmodels.Service) error {
if args.Commit == nil {
return nil
}
address, owner, repo, branch, path, pathType, err := GetOwnerRepoBranchPath(args.SrcPath)
if err != nil {
return fmt.Errorf("url format failed")
}
client, err := getIlyshinClientByAddress(address)
if err != nil {
return err
}
files, err := IlyshinGetRawFiles(client, owner, repo, branch, path, pathType)
if err != nil {
return err
}
if userName != setting.WebhookTaskCreator {
if len(files) == 0 {
return fmt.Errorf("没有检索到yml,yaml类型文件,请检查目录是否正确")
}
}
// The KubeYamls field is dynamically synced.
// Set args.KubeYamls from the content synced from ilyshin
args.KubeYamls = files
// Join the files and set args.Yaml
args.Yaml = joinYamls(files)
return nil
}
func syncContentFromGithub(args *commonmodels.Service, log *zap.SugaredLogger) error {
// Fetch the file content according to the filepath configured in the pipeline
address, owner, repo, branch, path, _, err := GetOwnerRepoBranchPath(args.SrcPath)
if err != nil {
log.Errorf("GetOwnerRepoBranchPath failed, srcPath:%s, err:%v", args.SrcPath, err)
return errors.New("invalid url " + args.SrcPath)
}
ch, err := codehost.GetCodeHostInfo(
&codehost.Option{CodeHostType: poetry.GitHubProvider, Address: address, Namespace: owner})
if err != nil {
log.Errorf("GetCodeHostInfo failed, srcPath:%s, err:%v", args.SrcPath, err)
return err
}
gc := githubtool.NewClient(&githubtool.Config{AccessToken: ch.AccessToken, Proxy: config.ProxyHTTPSAddr()})
fileContent, directoryContent, err := gc.GetContents(context.TODO(), owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch})
if fileContent != nil {
svcContent, _ := fileContent.GetContent()
splitYaml := SplitYaml(svcContent)
args.KubeYamls = splitYaml
args.Yaml = svcContent
} else {
var files []string
for _, f := range directoryContent {
// Skip directories
if *f.Type != "file" {
continue
}
fileName := strings.ToLower(*f.Path)
if !strings.HasSuffix(fileName, ".yaml") && !strings.HasSuffix(fileName, ".yml") {
continue
}
file, err := syncSingleFileFromGithub(owner, repo, branch, *f.Path, ch.AccessToken)
if err != nil {
log.Errorf("syncSingleFileFromGithub failed, path: %s, err: %v", *f.Path, err)
continue
}
files = append(files, file)
}
args.KubeYamls = files
args.Yaml = joinYamls(files)
}
return nil
}
func SplitYaml(yaml string) []string {
return strings.Split(yaml, setting.YamlFileSeperator)
}
func syncSingleFileFromGithub(owner, repo, branch, path, token string) (string, error) {
gc := githubtool.NewClient(&githubtool.Config{AccessToken: token, Proxy: config.ProxyHTTPSAddr()})
fileContent, _, err := gc.GetContents(context.TODO(), owner, repo, path, &github.RepositoryContentGetOptions{Ref: branch})
if fileContent != nil {
return fileContent.GetContent()
}
return "", err
}
// setCurrentContainerImages extracts the image and name of every current container from the kube YAML
// Supports Deployment, StatefulSet and Job
func setCurrentContainerImages(args *commonmodels.Service) error {
srvContainers := make([]*commonmodels.Container, 0)
for _, data := range args.KubeYamls {
manifests := releaseutil.SplitManifests(data)
for _, item := range manifests {
// Fill the render variable {{.}} before unmarshalling
item = config.RenderTemplateAlias.ReplaceAllLiteralString(item, "ssssssss")
// replace $Service$ with service name
item = config.ServiceNameAlias.ReplaceAllLiteralString(item, args.ServiceName)
u, err := serializer.NewDecoder().YamlToUnstructured([]byte(item))
if err != nil {
return fmt.Errorf("unmarshal ResourceKind error: %v", err)
}
switch u.GetKind() {
case setting.Deployment, setting.StatefulSet, setting.Job:
cs, err := getContainers(u)
if err != nil {
return fmt.Errorf("GetContainers error: %v", err)
}
srvContainers = append(srvContainers, cs...)
}
}
}
args.Containers = srvContainers
return nil
}
// getContainers finds the image and name of every container in the kube YAML
func getContainers(u *unstructured.Unstructured) ([]*commonmodels.Container, error) {
var containers []*commonmodels.Container
cs, _, _ := unstructured.NestedSlice(u.Object, "spec", "template", "spec", "containers")
for _, c := range cs {
val, ok := c.(map[string]interface{})
if !ok {
continue
}
nameStr, ok := val["name"].(string)
if !ok {
return containers, errors.New("error name value")
}
imageStr, ok := val["image"].(string)
if !ok {
return containers, errors.New("error image value")
}
containers = append(containers, &commonmodels.Container{
Name: nameStr,
Image: imageStr,
})
}
return containers, nil
}
type MatchFolders []string
// ContainsFile checks whether the file matches the hook's match folders; "/" means all files
func ContainsFile(h *commonmodels.GitHook, file string) bool {
return MatchFolders(h.MatchFolders).ContainsFile(file)
}
func (m MatchFolders) ContainsFile(file string) bool {
var excludes []string
var matches []string
for _, match := range m {
if strings.HasPrefix(match, "!") {
excludes = append(excludes, match)
} else {
matches = append(matches, match)
}
}
for _, match := range matches {
if match == "/" || strings.HasPrefix(file, match) {
// Directories or file extensions prefixed with "!" are exclusion filters that stop the pipeline from running
for _, exclude := range excludes {
// If "!" is not followed by any directory or file, ignore it
if len(exclude) <= 2 {
return false
}
eCheck := exclude[1:]
if eCheck == "/" || path.Ext(file) == eCheck || strings.HasPrefix(file, eCheck) || strings.HasSuffix(file, eCheck) {
return false
}
}
return true
}
}
return false
}
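// For example, MatchFolders{"/", "!.md"} matches "src/main.go" but not "docs/README.md",
// because "/" matches every file and "!.md" excludes files with the ".md" extension.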
func MatchChanges(m commonmodels.MainHookRepo, files []string) bool {
mf := MatchFolders(m.MatchFolders)
for _, file := range files {
if matches := mf.ContainsFile(file); matches {
return true
}
}
return false
}
func EventConfigured(m commonmodels.MainHookRepo, event config.HookEventType) bool {
for _, ev := range m.Events {
if ev == event {
return true
}
}
return false
} |
address, owner, repo, branch, path, _, err := GetOwnerRepoBranchPath(service.SrcPath)
if err != nil { |