app.module.ts | import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule,ReactiveFormsModule } from "@angular/forms";
import { HttpClientModule } from "@angular/common/http";
import { RouterModule, Routes } from '@angular/router';
import { AppRoutingModule } from './app-routing.module';
import { AppComponent } from './app.component';
import { LoginComponent } from "./auth/login/login.component";
import { SignupComponent } from "./auth/signup/signup.component";
import { UserModule } from "./user/user.module";
import { AdminComponent } from './admin/admin.component';
import { PermissionsComponent } from './admin/permissions/permissions.component';
import { CompanyoverviewComponent } from './admin/companyoverview/companyoverview.component';
import { TaskpanelComponent } from './admin/taskpanel/taskpanel.component';
import * as $ from 'jquery';
@NgModule({
declarations: [
AppComponent,
LoginComponent,
SignupComponent,
AdminComponent,
PermissionsComponent,
CompanyoverviewComponent,
TaskpanelComponent
],
imports: [
BrowserModule,
HttpClientModule,
AppRoutingModule,
FormsModule,
ReactiveFormsModule,
UserModule
],
providers: [],
bootstrap: [AppComponent]
})
export class AppModule { }
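For context, here is the stock Angular entry point that would bootstrap this module (conventionally src/main.ts; this is standard boilerplate, not code from the row above):

import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
import { AppModule } from './app/app.module';

platformBrowserDynamic().bootstrapModule(AppModule)
  .catch(err => console.error(err));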
server.go | package server
import (
"fmt"
"github.com/apache/thrift/lib/go/thrift"
"github.com/chuan-yu/go-thrift-mock/processor"
)
type ExpectedReturn struct {
Err error
Response thrift.TStruct
}
type MockServer struct {
host string
Server *thrift.TSimpleServer
processor *processor.MockProcessor
protocolFactory *thrift.TBinaryProtocolFactory
transportFactory *thrift.TTransportFactory
}
func MustNewMockServer(host string) *MockServer {
protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
transportFactory := thrift.NewTTransportFactory()
transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
transport, err := thrift.NewTServerSocket(host)
if err != nil {
panic("failed to create a MockServer instance: " + err.Error())
}
p := processor.NewMockProcessor()
server := thrift.NewTSimpleServer4(p, transport, transportFactory, protocolFactory)
return &MockServer{
host: host,
processor: p,
protocolFactory: protocolFactory,
transportFactory: &transportFactory,
Server: server,
}
}
func (s *MockServer) Start() (err error) {
fmt.Printf("starting the simple server... on %s \n", s.host)
return s.Server.Serve()
}
func (s *MockServer) Stop() {
s.Server.Stop()
}
func (s *MockServer) SetExpectedReturn(methodName string, expected ExpectedReturn) {
r := processor.NewMockResult(methodName, expected.Response)
processFunc := processor.MockProcessorFunction{
MethodName: methodName,
Result: r,
Err: expected.Err,
}
s.processor.AddToProcessorMap(methodName, &processFunc)
}
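A minimal usage sketch for the mock server above, written as a Go test. The host, method name, and error value are illustrative assumptions, not part of the package:

package server_test

import (
	"errors"
	"testing"

	"github.com/chuan-yu/go-thrift-mock/server"
)

func TestMockedError(t *testing.T) {
	s := server.MustNewMockServer("localhost:9090") // hypothetical address
	// Make calls to "getUser" fail; Response could instead carry a generated thrift.TStruct.
	s.SetExpectedReturn("getUser", server.ExpectedReturn{Err: errors.New("boom")})
	go func() { _ = s.Start() }() // Serve() blocks, so run it in a goroutine
	defer s.Stop()
	// ...dial localhost:9090 with a generated thrift client and assert on the error...
}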
games.go | package pokeapi
import (
"fmt"
"github.com/sgmccullough/pokeapi-go/structs"
)
// Generation returns a single generation (by name or ID).
func Generation(id string) (result structs.Generation, err error) {
err = do(fmt.Sprintf("generation/%s", id), &result)
return result, err
}
// Pokedex returns a single Pokedex (by name or ID).
func Pokedex(id string) (result structs.Pokedex, err error) {
err = do(fmt.Sprintf("pokedex/%s", id), &result)
return result, err
}
// Version returns a single version (by name or ID).
func Version(id string) (result structs.Version, err error) {
err = do(fmt.Sprintf("version/%s", id), &result)
return result, err
}
// VersionGroup returns a single version group (by name or ID).
func VersionGroup(id string) (result structs.VersionGroup, err error) {
err = do(fmt.Sprintf("version-group/%s", id), &result)
return result, err
}
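A quick call sketch for the accessors above; the ID string is arbitrary, and the Name field is assumed from the PokeAPI schema rather than shown in this file:

package main

import (
	"fmt"
	"log"

	pokeapi "github.com/sgmccullough/pokeapi-go"
)

func main() {
	gen, err := pokeapi.Generation("1") // accepts a name or numeric ID as a string
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(gen.Name) // e.g. "generation-i"
}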
asset_test.go | package html
import "testing"
func TestAsset_minifiedData(t *testing.T) {
type fields struct {
Type string
Path string
Data string
}
tests := []struct {
name string
fields fields
want string
wantErr bool
}{
{
name: "multi-line tag",
fields: fields{
Type: AssetTypes.HTML,
Path: "foo.html",
Data: "<link\n rel=\"stylesheet\"\n href=\"src/foo.css\"\n>\n",
},
want: "data:text/html;charset=utf-8,%3Clink%20rel=%22stylesheet%22%20href=%22src%2ffoo.css%22%20%3E%20",
},
{
name: "multi-line tag no spaces",
fields: fields{
Type: AssetTypes.HTML,
Path: "foo.html",
Data: "<link\nrel=\"stylesheet\"\nhref=\"src/foo.css\"\n>\n",
},
want: "data:text/html;charset=utf-8,%3Clink%20rel=%22stylesheet%22%20href=%22src%2ffoo.css%22%20%3E%20",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
a := &Asset{
Type: tt.fields.Type,
Path: tt.fields.Path,
Data: tt.fields.Data,
}
got, err := a.minifiedData()
if (err != nil) != tt.wantErr {
t.Errorf("Asset.minifiedData() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("Asset.minifiedData() = %v, want %v", got, tt.want)
}
})
}
}
AuthController.js | const User = require('../models/User');
const Token = require('../models/Token');
const bcrypt = require('bcryptjs');
const jwt = require('jsonwebtoken');
const generateToken = (params = {}) => {
return jwt.sign(params, process.env.API_KEY, {
expiresIn: 86400
});
}
class AuthController {
async login(req, res) {
try {
const { email, password } = req.body;
const user = await User.findOne({ where: { email },
include: [ { association: 'role' } ] });
if (!user) return res.status(404).json([{ message: 'User with email not found' }]);
if(!bcrypt.compareSync(password, user.password)){
return res.status(400).json([{ message: 'Invalid password' }]);
}
user.password = '';
return res.json({ user, token: generateToken({ id: user.id })});
} catch (error) {
console.log(error);
return res.status(500).json([{ message: 'Server error' }]);
}
}
async register(req, res) {
try{
const { email } = req.body;
const exists = await User.findOne({ where: { email } });
if (exists) return res.status(400).json([{ message: 'User already exists' }]);
delete req.body.password_confirmation;
let salt = bcrypt.genSaltSync(10);
req.body.password = bcrypt.hashSync(req.body.password, salt);
let user = await User.create(req.body);
user = await User.findByPk(user.id, {
include: [ { association: 'role' } ]
});
user.password = undefined;
return res.status(201).json({ user, token: generateToken({ id: user.id })});
} catch (error){
console.log(error);
return res.status(500).json([{ message: 'Server error' }]);
}
}
async logout(req, res) {
try {
const token = req.headers.authorization;
const data = { token };
await Token.create(data);
return res.json([{ message: 'Logout success' }]);
} catch (error) {
console.log(error);
return res.status(500).json([{ message: 'Server error' }]);
}
}
}
module.exports = AuthController;
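A hedged sketch of wiring this controller into an Express router; the route paths and file layout are assumptions, not part of the file above:

const express = require('express');
const AuthController = require('./controllers/AuthController'); // hypothetical path

const router = express.Router();
const auth = new AuthController();

router.post('/login', (req, res) => auth.login(req, res));
router.post('/register', (req, res) => auth.register(req, res));
router.post('/logout', (req, res) => auth.logout(req, res));

module.exports = router;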
xctest-specs.js | import chai from 'chai';
import { parseXCTestStdout } from '../../../lib/commands/xctest';
chai.should();
describe('session commands', function () {
const xctestLogsSuccess = `XCTesterAppUITests - XCTesterAppUITests.XCTesterAppUITests/testExample | Passed: True | Crashed: False | Duration: 1.485 | Failure message: | Location :0
XCTesterAppUITests - XCTesterAppUITests.XCTesterAppUITests/testLaunchPerformance | Passed: True | Crashed: False | Duration: 14.297 | Failure message: | Location :0
`.trim();
describe('xctest', function () {
it('should parse successful test logs', function () {
const results = parseXCTestStdout(xctestLogsSuccess);
results.length.should.equal(2);
results[0].should.eql({
testName: 'XCTesterAppUITests - XCTesterAppUITests.XCTesterAppUITests/testExample',
passed: true,
crashed: false,
duration: 1.485,
failureMessage: null,
location: 0,
});
results[1].should.eql({
testName: 'XCTesterAppUITests - XCTesterAppUITests.XCTesterAppUITests/testLaunchPerformance',
passed: true,
crashed: false,
duration: 14.297,
failureMessage: null,
location: 0,
});
});
});
});
app.component.ts | import { Component } from '@angular/core';
// noinspection TsLint
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css']
})
export class AppComponent {
constructor() {
this.title = 'Template Reference Variable In Angular 2/4/5';
this.title2 = 'This Is Data App Component';
}
// noinspection TsLint
title:string;
title2:string;
colors = ['red', 'green', 'blue', 'cyan', 'pink', 'yellow', 'aqua', 'orange', 'yellowgreen', 'aquamarine', 'brown', 'forestgreen'];
applyMultipleClasses(flag:string) {
let multipleClass;
// noinspection TsLint
if(flag == 'done') {
multipleClass = {
'one': true,
'two': true
}
}
else {
multipleClass = {
'three': false,
'four': true
}
}
return multipleClass;
}
}
kexec_linux.go | // Copyright 2015-2018 the u-root Authors. All rights reserved
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// kexec executes a new kernel over the running kernel (u-root).
//
// Synopsis:
// kexec [--initrd=FILE] [--cmdline=STRING] [-l] [-e] [KERNELIMAGE]
//
// Description:
// Loads a kernel for later execution.
//
// Options:
// --cmdline=STRING or -c=STRING: Set the kernel command line
// --reuse-cmdline: Use the kernel command line from the running system
// -i=FILE or --initrd=FILE: Use file as the kernel's initial ramdisk
// -l or --load: Load the new kernel into the current kernel
// -e or --exec: Execute a currently loaded kernel
package main
import (
"io"
"log"
"os"
flag "github.com/spf13/pflag"
"github.com/u-root/u-root/pkg/boot"
"github.com/u-root/u-root/pkg/boot/kexec"
"github.com/u-root/u-root/pkg/boot/multiboot"
"github.com/u-root/u-root/pkg/cmdline"
"github.com/u-root/u-root/pkg/uio"
)
type options struct {
cmdline string
reuseCmdline bool
initramfs string
load bool
exec bool
debug bool
modules []string
}
func registerFlags() *options {
o := &options{}
flag.StringVarP(&o.cmdline, "cmdline", "c", "", "Append to the kernel command line")
flag.StringVar(&o.cmdline, "append", "", "Append to the kernel command line")
flag.BoolVar(&o.reuseCmdline, "reuse-cmdline", false, "Use the kernel command line from running system")
flag.StringVarP(&o.initramfs, "initrd", "i", "", "Use file as the kernel's initial ramdisk")
flag.StringVar(&o.initramfs, "initramfs", "", "Use file as the kernel's initial ramdisk")
flag.BoolVarP(&o.load, "load", "l", false, "Load the new kernel into the current kernel")
flag.BoolVarP(&o.exec, "exec", "e", false, "Execute a currently loaded kernel")
flag.BoolVarP(&o.debug, "debug", "d", false, "Print debug info")
flag.StringArrayVar(&o.modules, "module", nil, `Load multiboot module with command line args (e.g --module="mod arg1")`)
return o
}
func main() {
opts := registerFlags()
flag.Parse()
if (!opts.exec && flag.NArg() == 0) || flag.NArg() > 1 {
flag.PrintDefaults()
log.Fatalf("usage: kexec [flags] kernelname OR kexec -e")
}
if opts.cmdline != "" && opts.reuseCmdline {
flag.PrintDefaults()
log.Fatalf("--reuse-cmdline and other command line options are mutually exclusive")
}
if !opts.load && !opts.exec {
opts.load = true
opts.exec = true
}
newCmdline := opts.cmdline
if opts.reuseCmdline {
procCmdLine := cmdline.NewCmdLine()
if procCmdLine.Err != nil {
log.Fatal("Couldn't read /proc/cmdline")
} else {
newCmdline = procCmdLine.Raw
}
}
if opts.load {
kernelpath := flag.Arg(0)
mbkernel, err := os.Open(kernelpath)
if err != nil {
log.Fatal(err)
}
defer mbkernel.Close()
var image boot.OSImage
if err := multiboot.Probe(mbkernel); err == nil {
image = &boot.MultibootImage{
Modules: multiboot.LazyOpenModules(opts.modules),
Kernel: mbkernel,
Cmdline: newCmdline,
}
} else {
var i io.ReaderAt
if opts.initramfs != "" {
i = uio.NewLazyFile(opts.initramfs)
}
image = &boot.LinuxImage{
Kernel: uio.NewLazyFile(kernelpath),
Initrd: i,
Cmdline: newCmdline,
}
}
if err := image.Load(opts.debug); err != nil {
log.Fatal(err)
}
}
if opts.exec {
if err := kexec.Reboot(); err != nil {
log.Fatalf("%v", err)
}
}
latextools.py | # -*- coding: utf-8 -*-
"""Tools for handling LaTeX."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO, open
from base64 import encodestring
import os
import tempfile
import shutil
import subprocess
from IPython.utils.process import find_cmd, FindCmdError
from IPython.config import get_config
from IPython.config.configurable import SingletonConfigurable
from IPython.utils.traitlets import List, Bool, Unicode
from IPython.utils.py3compat import cast_unicode, cast_unicode_py2 as u
class LaTeXTool(SingletonConfigurable):
"""An object to store configuration of the LaTeX tool."""
def _config_default(self):
return get_config()
backends = List(
Unicode, ["matplotlib", "dvipng"],
help="Preferred backend to draw LaTeX math equations. "
"Backends in the list are checked one by one and the first "
"usable one is used. Note that `matplotlib` backend "
"is usable only for inline style equations. To draw "
"display style equations, `dvipng` backend must be specified. ",
# It is a List instead of Enum, to make configuration more
# flexible. For example, to use matplotlib mainly but dvipng
# for display style, the default ["matplotlib", "dvipng"] can
# be used. To NOT use dvipng so that other repr such as
# unicode pretty printing is used, you can use ["matplotlib"].
config=True)
use_breqn = Bool(
True,
help="Use breqn.sty to automatically break long equations. "
"This configuration takes effect only for dvipng backend.",
config=True)
packages = List(
['amsmath', 'amsthm', 'amssymb', 'bm'],
help="A list of packages to use for dvipng backend. "
"'breqn' will be automatically appended when use_breqn=True.",
config=True)
preamble = Unicode(
help="Additional preamble to use when generating LaTeX source "
"for dvipng backend.",
config=True)
def latex_to_png(s, encode=False, backend=None, wrap=False):
"""Render a LaTeX string to PNG.
Parameters
----------
s : text
The raw string containing valid inline LaTeX.
encode : bool, optional
Should the PNG data base64 encoded to make it JSON'able.
backend : {matplotlib, dvipng}
Backend for producing PNG data.
wrap : bool
If true, Automatically wrap `s` as a LaTeX equation.
None is returned when the backend cannot be used.
"""
s = cast_unicode(s)
allowed_backends = LaTeXTool.instance().backends
if backend is None:
backend = allowed_backends[0]
if backend not in allowed_backends:
return None
if backend == 'matplotlib':
f = latex_to_png_mpl
elif backend == 'dvipng':
f = latex_to_png_dvipng
else:
raise ValueError('No such backend {0}'.format(backend))
bin_data = f(s, wrap)
if encode and bin_data:
bin_data = encodestring(bin_data)
return bin_data
def latex_to_png_mpl(s, wrap):
try:
from matplotlib import mathtext
except ImportError:
return None
# mpl mathtext doesn't support display math, force inline
s = s.replace('$$', '$')
if wrap:
s = u'${0}$'.format(s)
mt = mathtext.MathTextParser('bitmap')
f = BytesIO()
mt.to_png(f, s, fontsize=12)
return f.getvalue()
def latex_to_png_dvipng(s, wrap):
try:
find_cmd('latex')
find_cmd('dvipng')
except FindCmdError:
return None
try:
workdir = tempfile.mkdtemp()
tmpfile = os.path.join(workdir, "tmp.tex")
dvifile = os.path.join(workdir, "tmp.dvi")
outfile = os.path.join(workdir, "tmp.png")
with open(tmpfile, "w", encoding='utf8') as f:
f.writelines(genelatex(s, wrap))
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(
["latex", "-halt-on-error", "-interaction", "batchmode", tmpfile],
cwd=workdir, stdout=devnull, stderr=devnull)
subprocess.check_call(
["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
"-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
stdout=devnull, stderr=devnull)
with open(outfile, "rb") as f:
return f.read()
finally:
shutil.rmtree(workdir)
def kpsewhich(filename):
"""Invoke kpsewhich command with an argument `filename`."""
try:
find_cmd("kpsewhich")
proc = subprocess.Popen(
["kpsewhich", filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return stdout.strip().decode('utf8', 'replace')
except FindCmdError:
pass
def genelatex(body, wrap):
"""Generate LaTeX document for dvipng backend."""
lt = LaTeXTool.instance()
breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
yield u(r'\documentclass{article}')
packages = lt.packages
if breqn:
packages = packages + ['breqn']
for pack in packages:
yield u(r'\usepackage{{{0}}}'.format(pack))
yield u(r'\pagestyle{empty}')
if lt.preamble:
yield lt.preamble
yield u(r'\begin{document}')
if breqn:
yield u(r'\begin{dmath*}')
yield body
yield u(r'\end{dmath*}')
elif wrap:
yield u'$${0}$$'.format(body)
else:
yield body
yield u(r'\end{document}')
_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
"""Render LaTeX to HTML with embedded PNG data using data URIs.
Parameters
----------
s : str
The raw string containing valid inline LateX.
alt : str
The alt text to use for the HTML.
"""
base64_data = latex_to_png(s, encode=True).decode('ascii')
if base64_data:
return _data_uri_template_png % (base64_data, alt)
| """Generate LaTeX document for dvipng backend."""
lt = LaTeXTool.instance()
breqn = wrap and lt.use_breqn and kpsewhich("breqn.sty")
yield u(r'\documentclass{article}')
packages = lt.packages
if breqn:
packages = packages + ['breqn']
for pack in packages:
yield u(r'\usepackage{{{0}}}'.format(pack))
yield u(r'\pagestyle{empty}')
if lt.preamble:
yield lt.preamble
yield u(r'\begin{document}')
if breqn:
yield u(r'\begin{dmath*}')
yield body
yield u(r'\end{dmath*}')
elif wrap:
yield u'$${0}$$'.format(body)
else:
yield body
yield u'\end{document}' |
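A short usage sketch for the helpers above; the equation is arbitrary, and None is returned when neither backend is usable:

png_bytes = latex_to_png(r'\frac{a}{b}', wrap=True)  # tries matplotlib, then dvipng
if png_bytes is not None:
    with open('equation.png', 'wb') as f:
        f.write(png_bytes)
html = latex_to_html(r'\frac{a}{b}')  # same PNG, wrapped in a base64 <img> data URI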
ecdsa_signature.rs | // Copyright 2020 Parity Technologies
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Signature based on ECDSA, algorithm's description: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm
use super::{public_to_address, Address, Error, Message, Public, Secret, ZeroesAllowedMessage, SECP256K1};
use ethereum_types::{H256, H520};
use rustc_hex::{FromHex, ToHex};
use secp256k1::key::{PublicKey, SecretKey};
use secp256k1::{
recovery::{RecoverableSignature, RecoveryId},
Error as SecpError, Message as SecpMessage,
};
use std::cmp::PartialEq;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};
use std::str::FromStr;
/// Signature encoded as RSV components
#[repr(C)]
pub struct Signature([u8; 65]);
impl Signature {
/// Get a slice into the 'r' portion of the data.
pub fn r(&self) -> &[u8] {
&self.0[0..32]
}
/// Get a slice into the 's' portion of the data.
pub fn s(&self) -> &[u8] {
&self.0[32..64]
}
/// Get the recovery byte.
pub fn v(&self) -> u8 {
self.0[64]
}
/// Encode the signature into RSV array (V altered to be in "Electrum" notation).
pub fn into_electrum(mut self) -> [u8; 65] {
self.0[64] += 27;
self.0
}
/// Parse bytes as a signature encoded as RSV (V in "Electrum" notation).
/// May return empty (invalid) signature if given data has invalid length.
pub fn from_electrum(data: &[u8]) -> Self {
if data.len() != 65 || data[64] < 27 {
// fallback to empty (invalid) signature
return Signature::default();
}
let mut sig = [0u8; 65];
sig.copy_from_slice(data);
sig[64] -= 27;
Signature(sig)
}
/// Create a signature object from the RSV triple.
pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Self {
let mut sig = [0u8; 65];
sig[0..32].copy_from_slice(r.as_ref());
sig[32..64].copy_from_slice(s.as_ref());
sig[64] = v;
Signature(sig)
}
/// Check if this is a "low" signature (that s part of the signature is in range
/// 0x1 and 0x7FFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF 5D576E73 57A4501D DFE92F46 681B20A0 (inclusive)).
/// This condition may be required by some verification algorithms
pub fn is_low_s(&self) -> bool {
const LOW_SIG_THRESHOLD: H256 = H256([
0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x5D, 0x57,
0x6E, 0x73, 0x57, 0xA4, 0x50, 0x1D, 0xDF, 0xE9, 0x2F, 0x46, 0x68, 0x1B, 0x20, 0xA0,
]);
H256::from_slice(self.s()) <= LOW_SIG_THRESHOLD
}
/// Check if each component of the signature is in valid range.
/// r is in range 0x1 and 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 (inclusive)
/// s is in range 0x1 and fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 (inclusive)
/// v is 0 or 1
/// Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1;
/// used here as the upper bound for a valid (r, s, v) tuple
pub fn is_valid(&self) -> bool {
const UPPER_BOUND: H256 = H256([
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xba, 0xae,
0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41,
]);
const ONE: H256 = H256([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
]);
let r = H256::from_slice(self.r());
let s = H256::from_slice(self.s());
self.v() <= 1 && r < UPPER_BOUND && r >= ONE && s < UPPER_BOUND && s >= ONE
}
}
// manual implementation because large arrays don't have trait impls by default.
// TODO[grbIzl] remove when integer generics exist
impl PartialEq for Signature {
fn eq(&self, other: &Self) -> bool {
&self.0[..] == &other.0[..]
}
}
// manual implementation required in Rust 1.13+, see `std::cmp::AssertParamIsEq`.
impl Eq for Signature {}
// also manual for the same reason, but the pretty printing might be useful.
impl fmt::Debug for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.debug_struct("Signature")
.field("r", &self.0[0..32].to_hex::<String>())
.field("s", &self.0[32..64].to_hex::<String>())
.field("v", &self.0[64..65].to_hex::<String>())
.finish()
}
}
impl fmt::Display for Signature {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "{}", self.to_hex::<String>())
}
}
impl FromStr for Signature {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.from_hex::<Vec<u8>>() {
Ok(ref hex) if hex.len() == 65 => {
let mut data = [0; 65];
data.copy_from_slice(&hex[0..65]);
Ok(Signature(data))
}
_ => Err(Error::InvalidSignature),
}
}
}
impl Default for Signature {
fn default() -> Self {
Signature([0; 65])
}
}
impl Hash for Signature {
fn hash<H: Hasher>(&self, state: &mut H) {
H520::from(self.0).hash(state);
}
}
impl Clone for Signature {
fn clone(&self) -> Self {
Signature(self.0.clone())
}
}
impl From<[u8; 65]> for Signature {
fn from(s: [u8; 65]) -> Self {
Signature(s)
}
}
impl Into<[u8; 65]> for Signature {
fn into(self) -> [u8; 65] {
self.0
}
}
impl From<Signature> for H520 {
fn from(s: Signature) -> Self {
H520::from(s.0)
}
}
impl From<H520> for Signature {
fn from(bytes: H520) -> Self {
Signature(bytes.into())
}
}
impl Deref for Signature {
type Target = [u8; 65];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Signature {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Signs message with the given secret key.
/// Returns the corresponding signature.
pub fn sign(secret: &Secret, message: &Message) -> Result<Signature, Error> {
let context = &SECP256K1;
let sec = SecretKey::from_slice(secret.as_ref())?;
let s = context.sign_recoverable(&SecpMessage::from_slice(&message[..])?, &sec);
let (rec_id, data) = s.serialize_compact();
let mut data_arr = [0; 65];
// no need to check if s is low, it always is
data_arr[0..64].copy_from_slice(&data[0..64]);
data_arr[64] = rec_id.to_i32() as u8;
Ok(Signature(data_arr))
}
/// Performs verification of the signature for the given message with corresponding public key
pub fn verify_public(public: &Public, signature: &Signature, message: &Message) -> Result<bool, Error> {
let context = &SECP256K1;
let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?;
let sig = rsig.to_standard();
let pdata: [u8; 65] = {
let mut temp = [4u8; 65];
temp[1..65].copy_from_slice(public.as_bytes());
temp
};
let publ = PublicKey::from_slice(&pdata)?;
match context.verify(&SecpMessage::from_slice(&message[..])?, &sig, &publ) {
Ok(_) => Ok(true),
Err(SecpError::IncorrectSignature) => Ok(false),
Err(x) => Err(Error::from(x)),
}
}
/// Checks if the address corresponds to the public key from the signature for the message
pub fn verify_address(address: &Address, signature: &Signature, message: &Message) -> Result<bool, Error> {
let public = recover(signature, message)?;
let recovered_address = public_to_address(&public);
Ok(address == &recovered_address)
}
/// Recovers the public key from the signature for the message
pub fn recover(signature: &Signature, message: &Message) -> Result<Public, Error> {
let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?;
let pubkey = &SECP256K1.recover(&SecpMessage::from_slice(&message[..])?, &rsig)?;
let serialized = pubkey.serialize_uncompressed();
let mut public = Public::default();
public.as_bytes_mut().copy_from_slice(&serialized[1..65]);
Ok(public)
}
/// Recovers the public key from the signature for the given message.
/// This version of `recover()` allows for all-zero messages, which is necessary
/// for ethereum but is otherwise highly discouraged. Use with caution.
pub fn recover_allowing_all_zero_message(
signature: &Signature,
message: ZeroesAllowedMessage,
) -> Result<Public, Error> {
let rsig = RecoverableSignature::from_compact(&signature[0..64], RecoveryId::from_i32(signature[64] as i32)?)?;
let pubkey = &SECP256K1.recover(&message.into(), &rsig)?;
let serialized = pubkey.serialize_uncompressed();
let mut public = Public::zero();
public.as_bytes_mut().copy_from_slice(&serialized[1..65]);
Ok(public)
}
#[cfg(test)]
mod tests {
use super::super::{Generator, Message, Random, SECP256K1};
use super::{
recover, recover_allowing_all_zero_message, sign, verify_address, verify_public, Secret, Signature,
ZeroesAllowedMessage,
};
use secp256k1::SecretKey;
use std::str::FromStr;
// Copy of `sign()` that allows signing all-zero Messages.
// Note: this is for *tests* only. DO NOT USE UNLESS YOU NEED IT.
fn sign_zero_message(secret: &Secret) -> Signature {
let context = &SECP256K1;
let sec = SecretKey::from_slice(secret.as_ref()).unwrap();
// force an all-zero message into a secp `Message` bypassing the validity check.
let zero_msg = ZeroesAllowedMessage(Message::zero());
let s = context.sign_recoverable(&zero_msg.into(), &sec);
let (rec_id, data) = s.serialize_compact();
let mut data_arr = [0; 65];
// no need to check if s is low, it always is
data_arr[0..64].copy_from_slice(&data[0..64]);
data_arr[64] = rec_id.to_i32() as u8;
Signature(data_arr)
}
#[test]
fn vrs_conversion() {
// given
let keypair = Random.generate();
let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message");
// when
let vrs = signature.clone().into_electrum();
let from_vrs = Signature::from_electrum(&vrs);
// then
assert_eq!(signature, from_vrs);
}
#[test]
fn signature_to_and_from_str() {
let keypair = Random.generate();
let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message");
let string = format!("{}", signature);
let deserialized = Signature::from_str(&string).unwrap();
assert_eq!(signature, deserialized);
}
#[test]
fn sign_and_recover_public() {
let keypair = Random.generate();
let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
let signature = sign(keypair.secret(), &message).unwrap();
assert_eq!(keypair.public(), &recover(&signature, &message).unwrap());
}
#[test]
fn sign_and_recover_public_fails_with_zeroed_messages() {
let keypair = Random.generate();
let signature = sign_zero_message(keypair.secret());
let zero_message = Message::zero();
assert!(&recover(&signature, &zero_message).is_err());
}
#[test]
fn recover_allowing_all_zero_message_can_recover_from_all_zero_messages() {
let keypair = Random.generate();
let signature = sign_zero_message(keypair.secret());
let zero_message = ZeroesAllowedMessage(Message::zero());
assert_eq!(keypair.public(), &recover_allowing_all_zero_message(&signature, zero_message).unwrap())
}
#[test]
fn sign_and_verify_public() {
let keypair = Random.generate();
let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message");
assert!(verify_public(keypair.public(), &signature, &message).unwrap());
}
#[test]
fn sign_and_verify_address() {
let keypair = Random.generate();
let message = Message::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
let signature = sign(keypair.secret(), &message).expect("can sign a non-zero message");
assert!(verify_address(&keypair.address(), &signature, &message).unwrap());
}
}
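A small sketch exercising the RSV helpers above, in the same style as the module's own tests; the r and s values are arbitrary test constants:

#[test]
fn rsv_demo() {
    use ethereum_types::H256;
    use std::str::FromStr;
    let r = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
    let s = H256::from_str("0000000000000000000000000000000000000000000000000000000000000002").unwrap();
    let sig = Signature::from_rsv(&r, &s, 0);
    assert!(sig.is_valid() && sig.is_low_s()); // r, s in range and s below the half-order threshold
    let electrum = sig.into_electrum();        // v shifted into "Electrum" notation
    assert_eq!(electrum[64], 27);
}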
types.py | # -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2005-2009 Johan Dahlin <[email protected]>
#
# types.py: base types for introspected items.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from __future__ import absolute_import
import sys
import warnings
import re
from ._constants import TYPE_INVALID
from .docstring import generate_doc_string
from ._gi import \
InterfaceInfo, \
ObjectInfo, \
StructInfo, \
VFuncInfo, \
register_interface_info, \
hook_up_vfunc_implementation, \
GInterface
from . import _gi
StructInfo, GInterface # pyflakes
from . import _propertyhelper as propertyhelper
from . import _signalhelper as signalhelper
if (3, 0) <= sys.version_info < (3, 3):
# callable not available for python 3.0 thru 3.2
def callable(obj):
return hasattr(obj, '__call__')
def snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class MetaClassHelper(object):
def _setup_methods(cls):
for method_info in cls.__info__.get_methods():
setattr(cls, method_info.__name__, method_info)
def _setup_class_methods(cls):
info = cls.__info__
class_struct = info.get_class_struct()
if class_struct is None:
return
for method_info in class_struct.get_methods():
name = method_info.__name__
# Don't mask regular methods or base class methods with TypeClass methods.
if not hasattr(cls, name):
setattr(cls, name, classmethod(method_info))
def _setup_fields(cls):
for field_info in cls.__info__.get_fields():
name = field_info.get_name().replace('-', '_')
setattr(cls, name, property(field_info.get_value, field_info.set_value))
def _setup_constants(cls):
for constant_info in cls.__info__.get_constants():
name = constant_info.get_name()
value = constant_info.get_value()
setattr(cls, name, value)
def _setup_vfuncs(cls):
for vfunc_name, py_vfunc in cls.__dict__.items():
if not vfunc_name.startswith("do_") or not callable(py_vfunc):
continue
skip_ambiguity_check = False
# If a method name starts with "do_" assume it is a vfunc, and search
# in the base classes for a method with the same name to override.
# Recursion is necessary as overridden methods in the most immediate parent
# classes may shadow vfuncs from classes higher in the hierarchy.
vfunc_info = None
for base in cls.__mro__:
method = getattr(base, vfunc_name, None)
if method is not None and isinstance(method, VFuncInfo):
vfunc_info = method
break
if not hasattr(base, '__info__') or not hasattr(base.__info__, 'get_vfuncs'):
continue
base_name = snake_case(base.__info__.get_type_name())
for v in base.__info__.get_vfuncs():
if vfunc_name == 'do_%s_%s' % (base_name, v.get_name()):
vfunc_info = v
skip_ambiguity_check = True
break
if vfunc_info:
break
# If we did not find a matching method name in the bases, we might
# be overriding an interface virtual method. Since interfaces do not
# provide implementations, there will be no method attribute installed
# on the object. Instead we have to search through
# InterfaceInfo.get_vfuncs(). Note that the infos returned by
# get_vfuncs() use the C vfunc name (ie. there is no "do_" prefix).
if vfunc_info is None:
vfunc_info = find_vfunc_info_in_interface(cls.__bases__, vfunc_name[len("do_"):])
if vfunc_info is not None:
# Check to see if there are vfuncs with the same name in the bases.
# We have no way of specifying which one we are supposed to override.
if not skip_ambiguity_check:
ambiguous_base = find_vfunc_conflict_in_bases(vfunc_info, cls.__bases__)
if ambiguous_base is not None:
base_info = vfunc_info.get_container()
raise TypeError('Method %s() on class %s.%s is ambiguous '
'with methods in base classes %s.%s and %s.%s' %
(vfunc_name,
cls.__info__.get_namespace(),
cls.__info__.get_name(),
base_info.get_namespace(),
base_info.get_name(),
ambiguous_base.__info__.get_namespace(),
ambiguous_base.__info__.get_name()
))
hook_up_vfunc_implementation(vfunc_info, cls.__gtype__,
py_vfunc)
def _setup_native_vfuncs(cls):
# Only InterfaceInfo and ObjectInfo have the get_vfuncs() method.
# We skip InterfaceInfo because interfaces have no implementations for vfuncs.
# Also check if __info__ in __dict__, not hasattr('__info__', ...)
# because we do not want to accidentally retrieve __info__ from a base class.
class_info = cls.__dict__.get('__info__')
if class_info is None or not isinstance(class_info, ObjectInfo):
return
# Special case skipping of vfuncs for GObject.Object because they will break
# the static bindings which will try to use them.
if cls.__module__ == 'gi.repository.GObject' and cls.__name__ == 'Object':
return
for vfunc_info in class_info.get_vfuncs():
name = 'do_%s' % vfunc_info.__name__
setattr(cls, name, vfunc_info)
def find_vfunc_info_in_interface(bases, vfunc_name):
for base in bases:
# All wrapped interfaces inherit from GInterface.
# This can be seen in IntrospectionModule.__getattr__() in module.py.
# We do not need to search regular classes here, only wrapped interfaces.
# We also skip GInterface, because it is not wrapped and has no __info__ attr.
# Skip bases without __info__ (static _gi.GObject)
if base is GInterface or\
not issubclass(base, GInterface) or\
not hasattr(base, '__info__'):
continue
# Only look at this class's vfuncs if it is an interface.
if isinstance(base.__info__, InterfaceInfo):
for vfunc in base.__info__.get_vfuncs():
if vfunc.get_name() == vfunc_name:
return vfunc
# Recurse into the parent classes
vfunc = find_vfunc_info_in_interface(base.__bases__, vfunc_name)
if vfunc is not None:
return vfunc
return None
def find_vfunc_conflict_in_bases(vfunc, bases):
for klass in bases:
if not hasattr(klass, '__info__') or \
not hasattr(klass.__info__, 'get_vfuncs'):
continue
vfuncs = klass.__info__.get_vfuncs()
vfunc_name = vfunc.get_name()
for v in vfuncs:
if v.get_name() == vfunc_name and v != vfunc:
return klass
aklass = find_vfunc_conflict_in_bases(vfunc, klass.__bases__)
if aklass is not None:
return aklass
return None
class _GObjectMetaBase(type):
"""Metaclass for automatically registering GObject classes."""
def __init__(cls, name, bases, dict_):
type.__init__(cls, name, bases, dict_)
propertyhelper.install_properties(cls)
signalhelper.install_signals(cls)
cls._type_register(cls.__dict__)
def _type_register(cls, namespace):
# don't register the class if already registered
if '__gtype__' in namespace:
return
# Do not register a new GType for the overrides, as this would sort of
# defeat the purpose of overrides...
if cls.__module__.startswith('gi.overrides.'):
return
_gi.type_register(cls, namespace.get('__gtype_name__'))
_gi._install_metaclass(_GObjectMetaBase)
class GObjectMeta(_GObjectMetaBase, MetaClassHelper):
"""Meta class used for GI GObject based types."""
def __init__(cls, name, bases, dict_):
super(GObjectMeta, cls).__init__(name, bases, dict_)
is_gi_defined = False
if cls.__module__ == 'gi.repository.' + cls.__info__.get_namespace():
is_gi_defined = True
is_python_defined = False
if not is_gi_defined and cls.__module__ != GObjectMeta.__module__:
is_python_defined = True
if is_python_defined:
cls._setup_vfuncs()
elif is_gi_defined:
if isinstance(cls.__info__, ObjectInfo):
cls._setup_class_methods()
cls._setup_methods()
cls._setup_constants()
cls._setup_native_vfuncs()
if isinstance(cls.__info__, ObjectInfo):
cls._setup_fields()
elif isinstance(cls.__info__, InterfaceInfo):
register_interface_info(cls.__info__.get_g_type())
def mro(cls):
return mro(cls)
@property
def __doc__(cls):
"""Meta class property which shows up on any class using this meta-class."""
if cls == GObjectMeta:
return ''
doc = cls.__dict__.get('__doc__', None)
if doc is not None:
return doc
# For repository classes, dynamically generate a doc string if it wasn't overridden.
if cls.__module__.startswith(('gi.repository.', 'gi.overrides')):
return generate_doc_string(cls.__info__)
return None
def mro(C):
"""Compute the class precedence list (mro) according to C3, with GObject
interface considerations.
We override Python's MRO calculation to account for the fact that
GObject classes are not affected by the diamond problem:
http://en.wikipedia.org/wiki/Diamond_problem
Based on http://www.python.org/download/releases/2.3/mro/
"""
# TODO: If this turns out being too slow, consider using generators
bases = []
bases_of_subclasses = [[C]]
if C.__bases__:
for base in C.__bases__:
# Python causes MRO's to be calculated starting with the lowest
# base class and working towards the descendant, storing the result
# in __mro__ at each point. Therefore at this point we know that
# we already have our base class MRO's available to us, there is
# no need for us to (re)calculate them.
if hasattr(base, '__mro__'):
bases_of_subclasses += [list(base.__mro__)]
else:
warnings.warn('Mixin class %s is an old style class, please '
'update this to derive from "object".' % base,
RuntimeWarning)
# For old-style classes (Python2 only), the MRO is not
# easily accessible. As we do need it here, we calculate
# it via recursion, according to the C3 algorithm. Using C3
# for old style classes deviates from Python's own behaviour,
# but visible effects here would be a corner case triggered by
# questionable design.
bases_of_subclasses += [mro(base)]
bases_of_subclasses += [list(C.__bases__)]
while bases_of_subclasses:
for subclass_bases in bases_of_subclasses:
candidate = subclass_bases[0]
not_head = [s for s in bases_of_subclasses if candidate in s[1:]]
if not_head and GInterface not in candidate.__bases__:
candidate = None # conflict, reject candidate
else:
break
if candidate is None:
raise TypeError('Cannot create a consistent method resolution '
'order (MRO)')
bases.append(candidate)
for subclass_bases in bases_of_subclasses[:]: # remove candidate
if subclass_bases and subclass_bases[0] == candidate:
del subclass_bases[0]
if not subclass_bases:
bases_of_subclasses.remove(subclass_bases)
return bases
def nothing(*args, **kwargs):
pass
class StructMeta(type, MetaClassHelper):
"""Meta class used for GI Struct based types."""
def __init__(cls, name, bases, dict_):
super(StructMeta, cls).__init__(name, bases, dict_)
# Avoid touching anything else than the base class.
g_type = cls.__info__.get_g_type()
if g_type != TYPE_INVALID and g_type.pytype is not None:
return
cls._setup_fields()
cls._setup_methods()
for method_info in cls.__info__.get_methods():
if method_info.is_constructor() and \
method_info.__name__ == 'new' and \
(not method_info.get_arguments() or
cls.__info__.get_size() == 0):
cls.__new__ = staticmethod(method_info)
# Boxed will raise an exception
# if arguments are given to __init__
cls.__init__ = nothing
break
@property
def __doc__(cls):
if cls == StructMeta:
return ''
return generate_doc_string(cls.__info__)
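For intuition, the ordering that mro() computes matches Python's own C3 linearization on a plain diamond; a standalone example with no GObject involvement:

class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass

# C3 puts D first, then bases left-to-right, then the shared ancestor once.
print([k.__name__ for k in D.__mro__])  # ['D', 'B', 'C', 'A', 'object']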
csc.js | module.exports = function (math) {
var util = require('../../util/index'),
BigNumber = require('bignumber.js'),
Complex = require('../../type/Complex'),
Unit = require('../../type/Unit'),
collection = require('../../type/collection'),
isNumber = util.number.isNumber,
isBoolean = util['boolean'].isBoolean,
isComplex = Complex.isComplex,
isUnit = Unit.isUnit,
isCollection = collection.isCollection;
/**
* Calculate the cosecant of a value, csc(x) = 1/sin(x)
*
* csc(x)
*
* For matrices, the function is evaluated element wise.
*
* @param {Number | Boolean | Complex | Unit | Array | Matrix} x
* @return {Number | Complex | Array | Matrix} res
*/
math.csc = function csc(x) {
if (arguments.length != 1) {
throw new math.error.ArgumentsError('csc', arguments.length, 1);
}
if (isNumber(x)) {
return 1 / Math.sin(x);
}
if (isComplex(x)) {
// csc(z) = 1/sin(z) = (2i) / (exp(iz) - exp(-iz))
var den = 0.25 * (Math.exp(-2.0 * x.im) + Math.exp(2.0 * x.im)) -
0.5 * Math.cos(2.0 * x.re);
return new Complex (
0.5 * Math.sin(x.re) * (Math.exp(-x.im) + Math.exp(x.im)) / den,
0.5 * Math.cos(x.re) * (Math.exp(-x.im) - Math.exp(x.im)) / den
);
}
if (isUnit(x)) {
if (!x.hasBase(Unit.BASE_UNITS.ANGLE)) {
throw new TypeError ('Unit in function csc is no angle');
}
return 1 / Math.sin(x.value);
}
if (isCollection(x)) {
return collection.deepMap(x, csc);
}
if (isBoolean(x)) {
return csc(+x);
}
if (x instanceof BigNumber) {
// TODO: implement BigNumber support
// downgrade to Number
return csc(util.number.toNumber(x));
}
throw new math.error.UnsupportedTypeError('csc', x);
};
};
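A usage sketch for the function documented above, assuming the library is loaded in the usual way; the inputs are arbitrary:

var math = require('mathjs');

math.csc(Math.PI / 2);        // 1, since sin(pi/2) = 1
math.csc(math.complex(1, 1)); // complex cosecant via the formula in the comment
math.csc([0.5, 1, 1.5]);      // evaluated element-wise over the collection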
SGNN_EBM_models.py | import torch
from torch import nn
import torch.nn.functional as F
from torch_scatter import scatter_add
class NCE_C_Parameter(torch.nn.Module):
def __init__(self, N):
super(NCE_C_Parameter, self).__init__()
self.NCE_C = nn.Parameter(torch.zeros(N, requires_grad=True))
class GNN_EBM_Layer_01(torch.nn.Module):
def __init__(self, input_dim, output_dim):
super(GNN_EBM_Layer_01, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_layer = torch.nn.Linear(input_dim, output_dim)
self.node_layer = torch.nn.Linear(input_dim, output_dim)
self.mlp = torch.nn.Linear(input_dim, output_dim)
def node_message_passing(self, x, x_2nd_agg, edge):
T = x.size()[1]
node_in, node_out = edge[0], edge[1] # M, M
update = (scatter_add(x_2nd_agg, node_out, dim=1, dim_size=T) +
scatter_add(x_2nd_agg, node_in, dim=1, dim_size=T)) / 2 # B, T, d
x = x + update # B, T, d
return x
def forward(self, x_1st, x_2nd, edge):
'''
:param x_1st: (B, T, 2, d)
:param x_2nd: (B, M, 4, d)
:param edge: (M, 2)
:return: (B, T, 2, d_out)
'''
aggregate_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)
node_i_indice = torch.LongTensor([0, 0, 1, 1]).to(x_1st.device)
node_j_indice = torch.LongTensor([0, 1, 0, 1]).to(x_1st.device)
x_1st_neg = x_1st[:, :, 0, :] # B, T, d
x_1st_pos = x_1st[:, :, 1, :] # B, T, d
x_2nd_agg = scatter_add(x_2nd, aggregate_indice, dim=2) # B, T, 2, d
x_2nd_neg = x_2nd_agg[:, :, 0, :] # B, M, d
x_2nd_pos = x_2nd_agg[:, :, 1, :] # B, M, d
x_neg = self.node_message_passing(x_1st_neg, x_2nd_neg, edge) # B, T, d
x_pos = self.node_message_passing(x_1st_pos, x_2nd_pos, edge) # B, T, d
x = torch.stack([x_neg, x_pos], dim=2) # B, T, 2, d
x = self.node_layer(x) # B, T, 2, d
edge_i = torch.index_select(x_1st, 1, edge[0]) # B, M, 2, dim
edge_i = torch.index_select(edge_i, 2, node_i_indice) # B, M, 4, dim
edge_j = torch.index_select(x_1st, 1, edge[1]) # B, M, 2, dim
edge_j = torch.index_select(edge_j, 2, node_j_indice) # B, M, 4, dim
edge = x_2nd + self.mlp(edge_i + edge_j) # B, M, 4, d
edge = self.edge_layer(edge)
return x, edge
class GNN_Energy_Model_1st_Order_01(torch.nn.Module):
def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):
super(GNN_Energy_Model_1st_Order_01, self).__init__()
self.ebm_GNN_dim = ebm_GNN_dim
self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
self.dropout = dropout
self.output_dim = output_dim
self.concat = concat
hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
self.hidden_layers = torch.nn.ModuleList()
for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
if self.concat:
hidden_dim_sum = sum(hidden_layers_dim)
else:
hidden_dim_sum = ebm_GNN_dim
self.node_readout = torch.nn.Sequential(
torch.nn.Linear(2 * hidden_dim_sum, 2 * hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim_sum, output_dim)
)
return
def forward(self, x_1st, x_2nd, edge):
'''
:param x_1st: B,T,2,dim
:param x_2nd: B,M,4,dim
:param edge: 2,M
:return: B,T,1
'''
B, T = x_1st.size()[:2]
h_node_list = [x_1st]
x_node, x_edge = x_1st, x_2nd
for i in range(self.ebm_GNN_layer_num):
x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
if i < self.ebm_GNN_layer_num - 1:
x_node = F.relu(x_node)
# x_edge = F.relu(x_edge)
x_node = F.dropout(x_node, self.dropout, training=self.training)
# x_edge = F.dropout(x_edge, self.dropout, training=self.training)
h_node_list.append(x_node)
if self.concat:
h = torch.cat(h_node_list, dim=3).view(B, T, -1) # B, T, 2*layer_num*d
else:
h = x_node.view(B, T, -1) # B, T, 2*d
h = self.node_readout(h) # B, T, 1
return h
class GNN_Energy_Model_1st_Order_02(torch.nn.Module):
def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, output_dim, dropout=0, concat=False):
super(GNN_Energy_Model_1st_Order_02, self).__init__()
self.ebm_GNN_dim = ebm_GNN_dim
self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
self.dropout = dropout
self.output_dim = output_dim
self.concat = concat
hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
self.hidden_layers = torch.nn.ModuleList()
for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
if self.concat:
hidden_dim_sum = sum(hidden_layers_dim)
else:
hidden_dim_sum = ebm_GNN_dim
self.node_readout = torch.nn.Linear(2 * hidden_dim_sum, output_dim)
return
def forward(self, x_1st, x_2nd, edge):
'''
:param x_1st: B,T,2,dim
:param x_2nd: B,M,4,dim
:param edge: 2,M
:return: B,T,1
'''
B, T = x_1st.size()[:2]
h_node_list = [x_1st]
x_node, x_edge = x_1st, x_2nd
for i in range(self.ebm_GNN_layer_num):
x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
if i < self.ebm_GNN_layer_num - 1:
x_node = F.relu(x_node)
# x_edge = F.relu(x_edge)
x_node = F.dropout(x_node, self.dropout, training=self.training)
# x_edge = F.dropout(x_edge, self.dropout, training=self.training)
h_node_list.append(x_node)
if self.concat:
h = torch.cat(h_node_list, dim=3).view(B, T, -1) # B, T, 2*layer_num*d
else:
h = x_node.view(B, T, -1) # B, T, 2*d
h = self.node_readout(h) # B, T, 1
return h
class GNN_Energy_Model_2nd_Order_01(torch.nn.Module):
def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
super(GNN_Energy_Model_2nd_Order_01, self).__init__()
self.ebm_GNN_dim = ebm_GNN_dim
self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
self.dropout = dropout
self.concat = concat
hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
self.hidden_layers = torch.nn.ModuleList()
for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
if self.concat:
hidden_dim_sum = sum(hidden_layers_dim)
else:
hidden_dim_sum = ebm_GNN_dim
self.node_readout = torch.nn.Sequential(
torch.nn.Linear(hidden_dim_sum, 2 * hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim_sum, 1)
)
self.edge_readout = torch.nn.Sequential(
torch.nn.Linear(hidden_dim_sum, 2 * hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim_sum, 1)
)
return
def forward(self, x_1st, x_2nd, edge):
'''
:param x_1st: B,T,2,dim
:param x_2nd: B,M,4,dim
:param edge: 2,M
:return: (B,T,2), (B,M,4)
'''
B, T = x_1st.size()[:2]
M = edge.size()[1]
h_node_list = [x_1st]
h_edge_list = [x_2nd]
x_node, x_edge = x_1st, x_2nd
for i in range(self.ebm_GNN_layer_num):
x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
if i < self.ebm_GNN_layer_num - 1:
x_node = F.relu(x_node)
# x_edge = F.relu(x_edge)
x_node = F.dropout(x_node, self.dropout, training=self.training)
# x_edge = F.dropout(x_edge, self.dropout, training=self.training)
h_node_list.append(x_node)
h_edge_list.append(x_edge)
if self.concat:
h_node = torch.cat(h_node_list, dim=3) # B, T, 2, layer_num*d
h_edge = torch.cat(h_edge_list, dim=3) # B, M, 4, layer_num*d
else:
h_node = x_node # B, T, 2, d
h_edge = x_edge # B, M, 4, d
h_node = self.node_readout(h_node) # B, T, 2, 1
h_edge = self.edge_readout(h_edge) # B, M, 4, 1
h_node = h_node.squeeze(3) # B, T, 2
h_edge = h_edge.squeeze(3) # B, M, 4
return h_node, h_edge
class GNN_Energy_Model_2nd_Order_02(torch.nn.Module):
def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
super(GNN_Energy_Model_2nd_Order_02, self).__init__()
self.ebm_GNN_dim = ebm_GNN_dim
self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
self.dropout = dropout
self.concat = concat
hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
self.hidden_layers = torch.nn.ModuleList()
for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
if self.concat:
hidden_dim_sum = sum(hidden_layers_dim)
else:
hidden_dim_sum = ebm_GNN_dim
self.node_readout = torch.nn.Sequential(
torch.nn.Linear(2 * hidden_dim_sum, 2 * hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim_sum, 2)
)
self.edge_readout = torch.nn.Sequential(
torch.nn.Linear(4 * hidden_dim_sum, 2 * hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(2 * hidden_dim_sum, hidden_dim_sum),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim_sum, 4)
)
return
def forward(self, x_1st, x_2nd, edge):
'''
:param x_1st: B,T,2,dim
:param x_2nd: B,M,4,dim
:param edge: 2,M
:return: (B,T,2), (B,M,4)
'''
B, T = x_1st.size()[:2]
M = x_2nd.size()[1]
h_node_list = [x_1st]
h_edge_list = [x_2nd]
x_node, x_edge = x_1st, x_2nd
for i in range(self.ebm_GNN_layer_num):
x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
if i < self.ebm_GNN_layer_num - 1:
x_node = F.relu(x_node)
# x_edge = F.relu(x_edge)
x_node = F.dropout(x_node, self.dropout, training=self.training)
# x_edge = F.dropout(x_edge, self.dropout, training=self.training)
h_node_list.append(x_node)
h_edge_list.append(x_edge)
if self.concat:
h_node = torch.cat(h_node_list, dim=3).view(B, T, -1) # B, T, 2*layer_num*d
h_edge = torch.cat(h_edge_list, dim=3).view(B, M, -1) # B, M, 4*layer_num*d
else:
h_node = x_node.view(B, T, -1) # B, T, 2*d
h_edge = x_edge.view(B, M, -1) # B, M, 4*d
h_node = self.node_readout(h_node) # B, T, 2
h_edge = self.edge_readout(h_edge) # B, M, 4
return h_node, h_edge
class GNN_Energy_Model_2nd_Order_03(torch.nn.Module):
def __init__(self, ebm_GNN_dim, ebm_GNN_layer_num, dropout=0, concat=False):
super(GNN_Energy_Model_2nd_Order_03, self).__init__()
self.ebm_GNN_dim = ebm_GNN_dim
self.ebm_GNN_layer_num = ebm_GNN_layer_num - 1
self.dropout = dropout
self.concat = concat
hidden_layers_dim = [ebm_GNN_dim] * ebm_GNN_layer_num
self.hidden_layers = torch.nn.ModuleList()
for in_, out_ in zip(hidden_layers_dim[:-1], hidden_layers_dim[1:]):
self.hidden_layers.append(GNN_EBM_Layer_01(in_, out_))
if self.concat:
hidden_dim_sum = sum(hidden_layers_dim)
else:
hidden_dim_sum = ebm_GNN_dim
self.node_readout = nn.Linear(2 * hidden_dim_sum, 2)
self.edge_readout = nn.Linear(4 * hidden_dim_sum, 4)
return
def forward(self, x_1st, x_2nd, edge):
'''
:param x_1st: B,T,2,dim
:param x_2nd: B,M,4,dim
:param edge: 2,M
:return: (B,T,2), (B,M,4)
'''
B, T = x_1st.size()[:2]
M = edge.size()[1]
h_node_list = [x_1st]
h_edge_list = [x_2nd]
x_node, x_edge = x_1st, x_2nd
for i in range(self.ebm_GNN_layer_num):
x_node, x_edge = self.hidden_layers[i](x_node, x_edge, edge)
if i < self.ebm_GNN_layer_num - 1:
x_node = F.relu(x_node)
# x_edge = F.relu(x_edge)
x_node = F.dropout(x_node, self.dropout, training=self.training)
# x_edge = F.dropout(x_edge, self.dropout, training=self.training)
h_node_list.append(x_node)
h_edge_list.append(x_edge)
if self.concat:
h_node = torch.cat(h_node_list, dim=3) # B, T, 2, layer_num*d
h_edge = torch.cat(h_edge_list, dim=3) # B, M, 4, layer_num*d
else:
h_node = x_node # B, T, 2, d
h_edge = x_edge # B, M, 4, d
h_node = h_node.view(B, T, -1) # B, T, 2*d
h_edge = h_edge.view(B, M, -1) # B, M, 4*d
h_node = self.node_readout(h_node) # B, T, 2
h_edge = self.edge_readout(h_edge) # B, M, 4
return h_node, h_edge
# class GATNet(torch.nn.Module):
# def __init__(self, embedding_dim=10, hidden_dim=10, num_head=8):
# super(GATNet, self).__init__()
# self.conv1 = GATConv(embedding_dim, hidden_dim, heads=num_head, dropout=0.6)
# self.conv2 = GATConv(hidden_dim * num_head, hidden_dim, heads=1, concat=False, dropout=0.6)
# def forward(self, data):
# x = data.x
# x = F.dropout(x, p=0.6, training=self.training)
# x = F.elu(self.conv1(x, data.edge_index))
# x = F.dropout(x, p=0.6, training=self.training)
# x = self.conv2(x, data.edge_index)
# return x
# class MLP(nn.Sequential):
# def __init__(self, input_dim, output_dim, hidden_dims=[1024, 512], dropout=0.1, use_batch_norm=False):
# super(MLP, self).__init__()
# self.input_dim = input_dim
# self.output_dim = output_dim
# self.hidden_dims = hidden_dims
# self.use_batch_norm = use_batch_norm
# self.dropout = nn.Dropout(0.1)
# self.layer_size = len(self.hidden_dims) + 1
# dims = [self.input_dim] + self.hidden_dims + [self.output_dim]
# self.predictor = nn.ModuleList([nn.Linear(dims[i], dims[i + 1]) for i in range(self.layer_size)])
# if use_batch_norm:
# self.batch_norms = nn.ModuleList([nn.BatchNorm1d(dims[i + 1]) for i in range(self.layer_size)])
# for m in self.modules():
# if isinstance(m, nn.Linear):
# nn.init.xavier_uniform_(m.weight.data)
# if m.bias is not None:
# m.bias.data.fill_(0.0)
# def norm(self):
# with torch.no_grad():
# norm = 0
# for m in self.modules():
# if isinstance(m, nn.Linear):
# norm += torch.norm(m.weight.data).item()
# return norm
# def forward(self, v):
# '''
# : params x: (batch_size, *, input_dim)
# : output : (batch_size, *, output_dim)
# '''
# B, t, _ = v.size()
# v = v.flatten(0, -2)
# # print('input norm: %.5f' % (torch.norm(v).item()))
# for i, l in enumerate(self.predictor):
# v = l(v)
# if i != self.layer_size - 1:
# if self.use_batch_norm:
# v = self.batch_norms[i](v)
# v = F.relu(v)
# v = self.dropout(v)
# # print('layer %d norm: %.5f' % (i, torch.norm(v).item()))
# v = v.reshape(B, t, -1)
# return v
# class GradKnowledgeGraphModel(nn.Module):
# def __init__(self, num_tasks, args):
# super(GradKnowledgeGraphModel, self).__init__()
# self.num_tasks = num_tasks
# self.weights = nn.Parameter(torch.ones(self.num_tasks, 1), requires_grad=True)
# self.register_parameter('grad_KG', self.weights)
# self.softmax = nn.Softmax(dim=0)
# self.normalize_method = args.grad_KG_normalize_method
# def forward(self, task_repr):
# # ########## This won't train ##########
# # task_repr = task_repr * self.weights.data
# task_repr = task_repr * self.weights
# return task_repr
# def renormalize(self):
# if self.normalize_method == 'sum':
# ########## TODO: there might be negatives after backward ##########
# normalize_coeff = self.num_tasks / self.weights.data.sum()
# self.weights.data *= normalize_coeff
# elif self.normalize_method == 'softmax':
# self.weights.data = self.softmax(self.weights.data) * self.num_tasks
# return
# def reset_param(self):
# self.weights.data.fill_(1)
# return
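A shape-check sketch for GNN_EBM_Layer_01, with tensor sizes taken from the forward() docstrings (B batches, T tasks, M task-pair edges, d hidden); all numbers are arbitrary, and it is meant to run alongside the module above:

import torch

B, T, M, d = 2, 5, 7, 16
layer = GNN_EBM_Layer_01(d, d)
x_1st = torch.randn(B, T, 2, d)     # node features: one vector per task per binary state
x_2nd = torch.randn(B, M, 4, d)     # edge features: one vector per edge per joint state
edge = torch.randint(0, T, (2, M))  # endpoints of each task-task edge
x_node, x_edge = layer(x_1st, x_2nd, edge)
assert x_node.shape == (B, T, 2, d) and x_edge.shape == (B, M, 4, d)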
faas_cli.rs | use huber_common::model::package::{Package, PackageManagement, PackageSource, PackageTargetType};
#[allow(dead_code)]
pub fn release() -> Package {
Package {
name: "faas-cli".to_string(),
source: PackageSource::Github {
owner: "openfaas".to_string(),
repo: "faas-cli".to_string(),
},
detail: None,
targets: vec![
PackageTargetType::LinuxAmd64(PackageManagement {
artifact_templates: vec!["{version}/faas-cli".to_string()],
executable_templates: None,
executable_mappings: Some(hashmap! {
"faas-cli".to_string() => "faas".to_string()
}),
install_commands: None,
uninstall_commands: None,
upgrade_commands: None,
tag_version_regex_template: None,
scan_dirs: None,
}),
PackageTargetType::LinuxArm64(PackageManagement {
artifact_templates: vec!["{version}/faas-cli-arm64".to_string()],
executable_templates: None,
executable_mappings: Some(hashmap! {
"faas-cli".to_string() => "faas".to_string()
}),
install_commands: None,
uninstall_commands: None,
upgrade_commands: None,
tag_version_regex_template: None,
scan_dirs: None,
}),
PackageTargetType::MacOS(PackageManagement {
artifact_templates: vec!["{version}/faas-cli-darwin".to_string()],
executable_templates: None,
executable_mappings: Some(hashmap! {
"faas-cli".to_string() => "faas".to_string()
}),
install_commands: None,
uninstall_commands: None,
upgrade_commands: None,
tag_version_regex_template: None,
scan_dirs: None,
}),
PackageTargetType::Windows(PackageManagement {
artifact_templates: vec!["{version}/faas-cli.exe".to_string()],
executable_templates: None,
executable_mappings: Some(hashmap! {
"faas-cli".to_string() => "faas".to_string()
}),
install_commands: None,
uninstall_commands: None,
upgrade_commands: None,
tag_version_regex_template: None,
scan_dirs: None,
}),
],
version: None,
description: None,
release_kind: None,
}
}
| release |
shootout-fannkuch-redux.rs | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor | // products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::{cmp, mem};
use std::thread;
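// Rotate the slice left by one place: x[0] ends up at the back.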
fn rotate(x: &mut [i32]) {
let mut prev = x[0];
for place in x.iter_mut().rev() {
prev = mem::replace(place, prev)
}
}
fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
for i in 1..perm.len() {
rotate(&mut perm[..i + 1]);
let count_i = &mut count[i];
if *count_i >= i as i32 {
*count_i = 0;
} else {
*count_i += 1;
break
}
}
}
#[derive(Clone, Copy)]
struct P {
p: [i32; 16],
}
#[derive(Clone, Copy)]
struct Perm {
cnt: [i32; 16],
fact: [u32; 16],
n: u32,
permcount: u32,
perm: P,
}
impl Perm {
fn new(n: u32) -> Perm {
let mut fact = [1; 16];
for i in 1..n as usize + 1 {
fact[i] = fact[i - 1] * i as u32;
}
Perm {
cnt: [0; 16],
fact: fact,
n: n,
permcount: 0,
perm: P { p: [0; 16] }
}
}
fn get(&mut self, mut idx: i32) -> P {
let mut pp = [0u8; 16];
self.permcount = idx as u32;
for (i, place) in self.perm.p.iter_mut().enumerate() {
*place = i as i32 + 1;
}
for i in (1..self.n as usize).rev() {
let d = idx / self.fact[i] as i32;
self.cnt[i] = d;
idx %= self.fact[i] as i32;
for (place, val) in pp.iter_mut().zip(self.perm.p[..i+1].iter()) {
*place = (*val) as u8
}
let d = d as usize;
for j in 0..i + 1 {
self.perm.p[j] = (if j + d <= i { pp[j + d] } else { pp[j + d - i - 1] }) as i32;
}
}
self.perm
}
fn count(&self) -> u32 { self.permcount }
fn max(&self) -> u32 { self.fact[self.n as usize] }
fn next(&mut self) -> P {
next_permutation(&mut self.perm.p, &mut self.cnt);
self.permcount += 1;
self.perm
}
}
fn reverse(tperm: &mut [i32], k: usize) {
tperm[..k].reverse()
}
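// Scans the permutations with indices in [n, max): each one is repeatedly
// "flipped" (reverse the first p[0] elements) until 1 reaches the front,
// accumulating an alternating-sign checksum and the maximum flip count.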
fn work(mut perm: Perm, n: usize, max: usize) -> (i32, i32) {
let mut checksum = 0;
let mut maxflips = 0;
let mut p = perm.get(n as i32);
while perm.count() < max as u32 {
let mut flips = 0;
while p.p[0] != 1 {
let k = p.p[0] as usize;
reverse(&mut p.p, k);
flips += 1;
}
checksum += if perm.count() % 2 == 0 {flips} else {-flips};
maxflips = cmp::max(maxflips, flips);
p = perm.next();
}
(checksum, maxflips)
}
fn fannkuch(n: i32) -> (i32, i32) {
let perm = Perm::new(n as u32);
let n_threads = 4;
let mut futures = vec![];
let k = perm.max() / n_threads;
for i in 0..n_threads {
let j = i * k;
let max = cmp::min(j + k, perm.max());
futures.push(thread::spawn(move || {
work(perm, j as usize, max as usize)
}))
}
let mut checksum = 0;
let mut maxflips = 0;
for fut in futures {
let (cs, mf) = fut.join().unwrap();
checksum += cs;
maxflips = cmp::max(maxflips, mf);
}
(checksum, maxflips)
}
fn main() {
let n = std::env::args()
.nth(1)
.and_then(|arg| arg.parse().ok())
.unwrap_or(2i32);
let (checksum, maxflips) = fannkuch(n);
println!("{}\nPfannkuchen({}) = {}", checksum, n, maxflips);
} | // the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote |
resultsProcessor.go | package main
import (
"encoding/json"
"io/ioutil"
)
type checkRange struct {
Filename string `json:"filename"`
StartLine int `json:"start_line"`
EndLine int `json:"end_line"`
}
type result struct {
RuleID string `json:"long_id"`
RuleDescription string `json:"rule_description"`
RuleProvider string `json:"rule_provider"`
Links []string `json:"links"`
Range *checkRange `json:"location"`
Description string `json:"description"`
RangeAnnotation string `json:"-"`
Severity string `json:"severity"`
}
const resultsFile = "results.json"
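// Reads resultsFile from the working directory and unmarshals its top-level
// results array into a slice of result values.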
func | () ([]result, error) {
results := struct{ Results []result }{}
file, err := ioutil.ReadFile(resultsFile)
if err != nil {
return nil, err
}
err = json.Unmarshal(file, &results)
if err != nil {
return nil, err
}
return results.Results, nil
}
| loadResultsFile |
pipe_test.go | package pipe_test
import (
"errors"
"io"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"golang.org/x/sync/errgroup"
"github.com/bmclab-git/v2ray-core/v5/common"
"github.com/bmclab-git/v2ray-core/v5/common/buf"
. "github.com/bmclab-git/v2ray-core/v5/transport/pipe"
)
func TestPipeReadWrite(t *testing.T) {
pReader, pWriter := New(WithSizeLimit(1024))
b := buf.New()
b.WriteString("abcd")
common.Must(pWriter.WriteMultiBuffer(buf.MultiBuffer{b}))
b2 := buf.New()
b2.WriteString("efg")
common.Must(pWriter.WriteMultiBuffer(buf.MultiBuffer{b2}))
rb, err := pReader.ReadMultiBuffer()
common.Must(err)
if r := cmp.Diff(rb.String(), "abcdefg"); r != "" {
t.Error(r)
}
}
func TestPipeInterrupt(t *testing.T) {
pReader, pWriter := New(WithSizeLimit(1024))
payload := []byte{'a', 'b', 'c', 'd'}
b := buf.New()
b.Write(payload)
common.Must(pWriter.WriteMultiBuffer(buf.MultiBuffer{b}))
pWriter.Interrupt()
rb, err := pReader.ReadMultiBuffer()
if err != io.ErrClosedPipe {
t.Fatal("expect io.ErrClosePipe, but got ", err)
}
if !rb.IsEmpty() {
t.Fatal("expect empty buffer, but got ", rb.Len())
}
}
func TestPipeClose(t *testing.T) {
pReader, pWriter := New(WithSizeLimit(1024))
payload := []byte{'a', 'b', 'c', 'd'}
b := buf.New()
common.Must2(b.Write(payload))
common.Must(pWriter.WriteMultiBuffer(buf.MultiBuffer{b}))
common.Must(pWriter.Close())
rb, err := pReader.ReadMultiBuffer()
common.Must(err)
if rb.String() != string(payload) {
t.Fatal("expect content ", string(payload), " but actually ", rb.String())
}
rb, err = pReader.ReadMultiBuffer()
if err != io.EOF {
t.Fatal("expected EOF, but got ", err)
}
if !rb.IsEmpty() {
t.Fatal("expect empty buffer, but got ", rb.String())
}
}
func TestPipeLimitZero(t *testing.T) {
pReader, pWriter := New(WithSizeLimit(0))
bb := buf.New()
common.Must2(bb.Write([]byte{'a', 'b'}))
common.Must(pWriter.WriteMultiBuffer(buf.MultiBuffer{bb}))
var errg errgroup.Group
errg.Go(func() error {
b := buf.New()
b.Write([]byte{'c', 'd'})
return pWriter.WriteMultiBuffer(buf.MultiBuffer{b})
})
errg.Go(func() error {
time.Sleep(time.Second)
var container buf.MultiBufferContainer
if err := buf.Copy(pReader, &container); err != nil {
return err
}
if r := cmp.Diff(container.String(), "abcd"); r != "" {
return errors.New(r)
}
return nil
})
errg.Go(func() error {
time.Sleep(time.Second * 2)
return pWriter.Close()
})
if err := errg.Wait(); err != nil {
t.Error(err)
}
}
func TestPipeWriteMultiThread(t *testing.T) {
pReader, pWriter := New(WithSizeLimit(0))
var errg errgroup.Group
for i := 0; i < 10; i++ {
errg.Go(func() error {
b := buf.New()
b.WriteString("abcd")
return pWriter.WriteMultiBuffer(buf.MultiBuffer{b})
})
}
time.Sleep(time.Millisecond * 100)
pWriter.Close()
errg.Wait()
b, err := pReader.ReadMultiBuffer()
common.Must(err)
if r := cmp.Diff(b[0].Bytes(), []byte{'a', 'b', 'c', 'd'}); r != "" {
t.Error(r)
}
}
func TestInterfaces(t *testing.T) {
_ = (buf.Reader)(new(Reader))
_ = (buf.TimeoutReader)(new(Reader))
_ = (common.Interruptible)(new(Reader))
_ = (common.Interruptible)(new(Writer))
_ = (common.Closable)(new(Writer))
}
func | (b *testing.B) {
reader, writer := New(WithoutSizeLimit())
a := buf.New()
a.Extend(buf.Size)
c := buf.MultiBuffer{a}
b.ResetTimer()
for i := 0; i < b.N; i++ {
common.Must(writer.WriteMultiBuffer(c))
d, err := reader.ReadMultiBuffer()
common.Must(err)
c = d
}
}
| BenchmarkPipeReadWrite |
__init__.py | from je_editor.ui.ui_event.text_process import * | ||
testing.d.ts | /**
* @license Angular v11.0.4
* (c) 2010-2020 Google LLC. https://angular.io/
* License: MIT
*/
import { PlatformRef } from '@angular/core';
import { StaticProvider } from '@angular/core';
/**
* Platform for testing
*
* @publicApi
*/
import * as ɵngcc0 from '@angular/core';
import * as ɵngcc1 from '@angular/platform-browser/animations';
import * as ɵngcc2 from '@angular/platform-browser-dynamic/testing';
export declare const platformServerTesting: (extraProviders?: StaticProvider[] | undefined) => PlatformRef;
/**
* NgModule for testing.
*
* @publicApi
*/
export declare class Ser |
static ɵmod: ɵngcc0.ɵɵNgModuleDefWithMeta<ServerTestingModule, never, [typeof ɵngcc1.NoopAnimationsModule], [typeof ɵngcc2.BrowserDynamicTestingModule]>;
static ɵinj: ɵngcc0.ɵɵInjectorDef<ServerTestingModule>;
}
export { }
//# sourceMappingURL=testing.d.ts.map | verTestingModule {
|
metrics.rs | use futures::future::BoxFuture;
use lazy_static::lazy_static;
use prometheus::{self, register_histogram_vec, HistogramVec};
use regex::Regex;
use std::{future::Future, time::Instant};
use crate::sandbox::{self, Channel, CompileTarget, CrateType, Edition, Mode};
lazy_static! {
pub(crate) static ref REQUESTS: HistogramVec = register_histogram_vec!(
"playground_request_duration_seconds",
"Number of requests made",
Labels::LABELS,
vec![0.1, 1.0, 2.5, 5.0, 10.0, 15.0]
)
.unwrap();
}
#[derive(Debug, Copy, Clone, strum::IntoStaticStr)]
pub(crate) enum Endpoint {
Compile,
Execute,
Format,
Miri,
Clippy,
MacroExpansion,
MetaCrates,
MetaVersionStable,
MetaVersionBeta,
MetaVersionNightly,
MetaVersionRustfmt,
MetaVersionClippy,
MetaVersionMiri,
Evaluate,
}
#[derive(Debug, Copy, Clone, strum::IntoStaticStr)]
pub(crate) enum Outcome {
Success,
ErrorServer,
ErrorTimeoutSoft,
ErrorTimeoutHard,
ErrorUserCode,
}
#[derive(Debug, Copy, Clone)]
pub(crate) struct Labels {
endpoint: Endpoint,
outcome: Outcome,
target: Option<CompileTarget>,
channel: Option<Channel>,
mode: Option<Mode>,
edition: Option<Option<Edition>>,
crate_type: Option<CrateType>,
tests: Option<bool>,
backtrace: Option<bool>,
}
impl Labels {
const COUNT: usize = 9;
const LABELS: &'static [&'static str; Self::COUNT] = &[
"endpoint",
"outcome",
"target",
"channel",
"mode",
"edition",
"crate_type",
"tests",
"backtrace",
];
fn as_values(&self) -> [&'static str; Self::COUNT] {
let Self {
endpoint,
outcome,
target,
channel,
mode,
edition,
crate_type,
tests,
backtrace,
} = *self;
fn b(v: Option<bool>) -> &'static str {
v.map_or("", |v| if v { "true" } else { "false" })
}
let target = target.map_or("", Into::into);
let channel = channel.map_or("", Into::into);
let mode = mode.map_or("", Into::into);
let edition = match edition {
None => "",
Some(None) => "Unspecified",
Some(Some(v)) => v.into(),
};
let crate_type = crate_type.map_or("", Into::into);
let tests = b(tests);
let backtrace = b(backtrace);
[
endpoint.into(),
outcome.into(),
target,
channel,
mode,
edition,
crate_type,
tests,
backtrace,
]
}
}
pub(crate) trait GenerateLabels {
fn generate_labels(&self, outcome: Outcome) -> Labels;
}
impl<T> GenerateLabels for &'_ T
where
T: GenerateLabels,
{
fn generate_labels(&self, outcome: Outcome) -> Labels {
T::generate_labels(self, outcome)
}
}
impl GenerateLabels for sandbox::CompileRequest {
fn generate_labels(&self, outcome: Outcome) -> Labels {
let Self {
target,
channel,
crate_type,
mode,
edition,
tests,
backtrace,
code: _,
} = *self;
Labels {
endpoint: Endpoint::Compile,
outcome,
target: Some(target),
channel: Some(channel),
mode: Some(mode),
edition: Some(edition),
crate_type: Some(crate_type),
tests: Some(tests),
backtrace: Some(backtrace),
}
}
}
impl GenerateLabels for sandbox::ExecuteRequest {
fn generate_labels(&self, outcome: Outcome) -> Labels {
let Self {
channel,
mode,
edition,
crate_type,
tests,
backtrace,
code: _,
} = *self;
Labels {
endpoint: Endpoint::Execute,
outcome,
target: None,
channel: Some(channel),
mode: Some(mode),
edition: Some(edition),
crate_type: Some(crate_type),
tests: Some(tests),
backtrace: Some(backtrace),
}
}
}
impl GenerateLabels for sandbox::FormatRequest {
fn generate_labels(&self, outcome: Outcome) -> Labels {
let Self { edition, code: _ } = *self;
Labels {
endpoint: Endpoint::Format,
outcome,
target: None,
channel: None,
mode: None,
edition: Some(edition),
crate_type: None,
tests: None,
backtrace: None,
}
}
}
impl GenerateLabels for sandbox::ClippyRequest {
fn generate_labels(&self, outcome: Outcome) -> Labels {
let Self {
code: _,
edition,
crate_type,
} = *self;
Labels {
endpoint: Endpoint::Clippy,
outcome,
target: None,
channel: None,
mode: None,
edition: Some(edition),
crate_type: Some(crate_type),
tests: None,
backtrace: None,
}
}
}
impl GenerateLabels for sandbox::MiriRequest {
fn generate_labels(&self, outcome: Outcome) -> Labels {
let Self { code: _, edition } = *self;
Labels {
endpoint: Endpoint::Miri,
outcome,
target: None,
channel: None,
mode: None,
edition: Some(edition),
crate_type: None,
tests: None,
backtrace: None,
}
}
}
impl GenerateLabels for sandbox::MacroExpansionRequest {
fn generate_labels(&self, outcome: Outcome) -> Labels {
let Self { code: _, edition } = *self;
Labels {
endpoint: Endpoint::MacroExpansion,
outcome,
target: None,
channel: None,
mode: None,
edition: Some(edition),
crate_type: None,
tests: None,
backtrace: None,
}
}
}
pub(crate) trait SuccessDetails: Sized {
fn success_details(&self) -> Outcome;
fn for_sandbox_result(r: &Result<Self, sandbox::Error>) -> Outcome {
use sandbox::Error::*;
match r {
Ok(v) => v.success_details(),
Err(CompilerExecutionTimedOut { .. }) => Outcome::ErrorTimeoutHard,
Err(_) => Outcome::ErrorServer,
}
}
}
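// A failed run whose final stderr line matches the entrypoint's
// "Killed ... timeout" message is treated as a soft timeout; any other
// failure is attributed to user code.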
fn common_success_details(success: bool, stderr: &str) -> Outcome {
lazy_static! {
// Memory allocation failures are "Aborted"
static ref SOFT_TIMEOUT_REGEX: Regex = Regex::new("entrypoint.sh.*Killed.*timeout").unwrap();
}
match success {
true => Outcome::Success,
false => {
if stderr
.lines()
.next_back()
.map_or(false, |l| SOFT_TIMEOUT_REGEX.is_match(l))
{
Outcome::ErrorTimeoutSoft
} else {
Outcome::ErrorUserCode
}
}
}
}
impl SuccessDetails for sandbox::CompileResponse {
fn success_details(&self) -> Outcome {
common_success_details(self.success, &self.stderr)
}
}
impl SuccessDetails for sandbox::ExecuteResponse {
fn success_details(&self) -> Outcome {
common_success_details(self.success, &self.stderr)
}
}
impl SuccessDetails for sandbox::FormatResponse {
fn success_details(&self) -> Outcome {
common_success_details(self.success, &self.stderr)
}
}
impl SuccessDetails for sandbox::ClippyResponse {
fn success_details(&self) -> Outcome {
common_success_details(self.success, &self.stderr)
}
}
impl SuccessDetails for sandbox::MiriResponse {
fn success_details(&self) -> Outcome {
common_success_details(self.success, &self.stderr)
}
}
impl SuccessDetails for sandbox::MacroExpansionResponse {
fn success_details(&self) -> Outcome {
common_success_details(self.success, &self.stderr)
}
}
impl SuccessDetails for Vec<sandbox::CrateInformation> {
fn success_details(&self) -> Outcome {
Outcome::Success
}
}
impl SuccessDetails for sandbox::Version {
fn success_details(&self) -> Outcome {
Outcome::Success
}
}
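/// Times `body(&request)`, derives outcome labels from the request and
/// response, and records the elapsed seconds in the `REQUESTS` histogram.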
pub(crate) async fn | <Req, B, Resp>(request: Req, body: B) -> sandbox::Result<Resp>
where
Req: GenerateLabels,
for<'req> B: FnOnce(&'req Req) -> BoxFuture<'req, sandbox::Result<Resp>>,
Resp: SuccessDetails,
{
track_metric_common_async(request, body, |_| {}).await
}
pub(crate) async fn track_metric_force_endpoint_async<Req, B, Resp>(
request: Req,
endpoint: Endpoint,
body: B,
) -> sandbox::Result<Resp>
where
Req: GenerateLabels,
for<'req> B: FnOnce(&'req Req) -> BoxFuture<'req, sandbox::Result<Resp>>,
Resp: SuccessDetails,
{
track_metric_common_async(request, body, |labels| labels.endpoint = endpoint).await
}
async fn track_metric_common_async<Req, B, Resp, F>(
request: Req,
body: B,
f: F,
) -> sandbox::Result<Resp>
where
Req: GenerateLabels,
for<'req> B: FnOnce(&'req Req) -> BoxFuture<'req, sandbox::Result<Resp>>,
Resp: SuccessDetails,
F: FnOnce(&mut Labels),
{
let start = Instant::now();
let response = body(&request).await;
let elapsed = start.elapsed();
let outcome = SuccessDetails::for_sandbox_result(&response);
let mut labels = request.generate_labels(outcome);
f(&mut labels);
let values = &labels.as_values();
let histogram = REQUESTS.with_label_values(values);
histogram.observe(elapsed.as_secs_f64());
response
}
pub(crate) async fn track_metric_no_request_async<B, Fut, Resp>(
endpoint: Endpoint,
body: B,
) -> crate::Result<Resp>
where
B: FnOnce() -> Fut,
Fut: Future<Output = crate::Result<Resp>>,
{
let start = Instant::now();
let response = body().await;
let elapsed = start.elapsed();
let outcome = if response.is_ok() {
Outcome::Success
} else {
Outcome::ErrorServer
};
let labels = Labels {
endpoint,
outcome,
target: None,
channel: None,
mode: None,
edition: None,
crate_type: None,
tests: None,
backtrace: None,
};
let values = &labels.as_values();
let histogram = REQUESTS.with_label_values(values);
histogram.observe(elapsed.as_secs_f64());
response
}
| track_metric_async |
config_model.py | import re
import os
from typing import Optional, Union, List, Dict
from os.path import expandvars
from itertools import chain
from pathlib import Path
from pydantic import (
BaseModel,
SecretStr,
BaseSettings,
PositiveInt,
FilePath,
Field,
validator,
root_validator,
)
from . import consts
__all__ = [
"AppConfig",
"Credential",
"InventorySpec",
"OSNameSpec",
"LinterSpec",
"GitSpec",
"JumphostSpec",
]
_var_re = re.compile(
r"\${(?P<bname>[a-z0-9_]+)}" r"|" r"\$(?P<name>[^{][a-z_0-9]+)", flags=re.IGNORECASE
)
class NoExtraBaseModel(BaseModel):
class Config:
extra = "forbid"
class EnvExpand(str):
"""
When a string value contains a reference to an environment variable, use
this type to expand the contents of the variable using os.path.expandvars.
For example like:
password = "$MY_PASSWORD"
foo_password = "${MY_PASSWORD}_foo"
will be expanded, given MY_PASSWORD is set to 'boo!' in the environment:
password -> "boo!"
foo_password -> "boo!_foo"
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if found_vars := list(filter(len, chain.from_iterable(_var_re.findall(v)))):
for var in found_vars:
if (var_val := os.getenv(var)) is None:
raise ValueError(f'Environment variable "{var}" missing.')
if not len(var_val):
raise ValueError(f'Environment variable "{var}" empty.')
return expandvars(v)
return v
class EnvSecretStr(EnvExpand, SecretStr):
@classmethod
def validate(cls, v):
return SecretStr.validate(EnvExpand.validate(v))
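# Example (hypothetical value): with MY_PASSWORD="boo!" set in the
# environment, EnvExpand.validate("${MY_PASSWORD}_foo") returns "boo!_foo",
# and EnvSecretStr wraps the expanded string in a SecretStr.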
class Credential(NoExtraBaseModel):
|
class DefaultCredential(Credential, BaseSettings):
username: EnvExpand = Field(..., env="NETCFGBU_DEFAULT_USERNAME")
password: EnvSecretStr = Field(..., env="NETCFGBU_DEFAULT_PASSWORD")
class Defaults(NoExtraBaseModel, BaseSettings):
configs_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_CONFIGSDIR", "PWD"))
plugins_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_PLUGINSDIR", "PWD"))
inventory: EnvExpand = Field(..., env="NETCFGBU_INVENTORY")
credentials: DefaultCredential
@validator("inventory")
def _inventory_provided(cls, value): # noqa
if not len(value):
raise ValueError("inventory empty value not allowed")
return value
@validator("configs_dir")
def _configs_dir(cls, value): # noqa
return Path(value).absolute()
@validator("plugins_dir")
def _plugins_dir(cls, value): # noqa
if value == os.getenv("PWD") and "/plugins" not in value:
value = value + "/plugins"
return Path(value).absolute()
class FilePathEnvExpand(FilePath):
""" A FilePath field whose value can interpolated from env vars """
@classmethod
def __get_validators__(cls):
yield from EnvExpand.__get_validators__()
yield from FilePath.__get_validators__()
class GitSpec(NoExtraBaseModel):
name: Optional[str]
repo: EnvExpand
email: Optional[str]
username: Optional[EnvExpand]
password: Optional[EnvExpand]
token: Optional[EnvSecretStr]
deploy_key: Optional[FilePathEnvExpand]
deploy_passphrase: Optional[EnvSecretStr]
@validator("repo")
def validate_repo(cls, repo): # noqa
expected = ("https:", "git@")
if not repo.startswith(expected):
raise ValueError(
f"Bad repo URL [{repo}]: expected to start with {expected}."
)
return repo
@root_validator
def ensure_proper_auth(cls, values):
req = ("token", "deploy_key", "password")
auth_vals = list(filter(None, (values.get(auth) for auth in req)))
auth_c = len(auth_vals)
if not auth_c:
raise ValueError(
f'Missing one of required auth method fields: {"|".join(req)}'
)
if auth_c > 1:
raise ValueError(f'Only one of {"|".join(req)} allowed')
if values.get("deploy_passphrase") and not values.get("deploy_key"):
raise ValueError("deploy_key required when using deploy_passphrase")
return values
class OSNameSpec(NoExtraBaseModel):
credentials: Optional[List[Credential]]
pre_get_config: Optional[Union[str, List[str]]]
get_config: Optional[str]
connection: Optional[str]
linter: Optional[str]
timeout: PositiveInt = Field(consts.DEFAULT_GETCONFIG_TIMEOUT)
ssh_configs: Optional[Dict]
prompt_pattern: Optional[str]
class LinterSpec(NoExtraBaseModel):
config_starts_after: Optional[str]
config_ends_at: Optional[str]
class InventorySpec(NoExtraBaseModel):
name: Optional[str]
script: EnvExpand
@validator("script")
def validate_script(cls, script_exec): # noqa
script_bin, *script_vargs = script_exec.split()
if not os.path.isfile(script_bin):
raise ValueError(f"File not found: {script_bin}")
if not os.access(script_bin, os.X_OK):
raise ValueError(f"{script_bin} is not executable")
return script_exec
class JumphostSpec(NoExtraBaseModel):
proxy: str
name: Optional[str]
include: Optional[List[str]]
exclude: Optional[List[str]]
timeout: PositiveInt = Field(consts.DEFAULT_LOGIN_TIMEOUT)
@validator("name", always=True)
def _default_name(cls, value, values): # noqa
return values["proxy"] if not value else value
class AppConfig(NoExtraBaseModel):
defaults: Defaults
credentials: Optional[List[Credential]]
linters: Optional[Dict[str, LinterSpec]]
os_name: Optional[Dict[str, OSNameSpec]]
inventory: Optional[List[InventorySpec]]
logging: Optional[Dict]
ssh_configs: Optional[Dict]
git: Optional[List[GitSpec]]
jumphost: Optional[List[JumphostSpec]]
@validator("os_name")
def _linters(cls, v, values): # noqa
linters = values.get("linters") or {}
for os_name, os_spec in v.items():
if os_spec.linter and os_spec.linter not in linters:
raise ValueError(
f'OS spec "{os_name}" using undefined linter "{os_spec.linter}"'
)
return v
| username: EnvExpand
password: EnvSecretStr |
core.rs | #![allow(missing_docs)]
pub mod engine_state;
pub mod execution; | pub mod resolvers;
pub mod runtime;
pub mod runtime_context;
pub(crate) mod tracking_copy;
pub use tracking_copy::{validate_balance_proof, validate_query_proof, ValidationError};
pub const ADDRESS_LENGTH: usize = 32;
pub type Address = [u8; ADDRESS_LENGTH]; | |
chimera.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
from __future__ import absolute_import
import numpy as np
import numpy.random
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.decorators import graph_argument
from dimod.vartypes import SPIN
__all__ = ['chimera_anticluster']
@graph_argument('subgraph', allow_None=True)
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
cls=BinaryQuadraticModel, subgraph=None, seed=None):
"""Generate an anticluster problem on a Chimera lattice.
An anticluster problem has weak interactions within a tile and strong | m (int):
Number of rows in the Chimera lattice.
n (int, optional, default=m):
Number of columns in the Chimera lattice.
t (int, optional, default=t):
Size of the shore within each Chimera tile.
multiplier (number, optional, default=3.0):
Strength of the intertile edges.
cls (type, optional):
Binary quadratic model class to build from. Default is
:class:`.BinaryQuadraticModel`.
subgraph (int/tuple[nodes, edges]/list[edge]/:obj:`~networkx.Graph`):
A subgraph of a Chimera(m, n, t) graph to build the anticluster
problem on.
seed (int, optional, default=None):
Random seed.
Returns:
:obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.
"""
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
m = int(m)
if n is None:
n = m
else:
n = int(n)
t = int(t)
ldata = np.zeros(m*n*t*2) # number of nodes
if m and n and t:
inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))
if m > 1 or n > 1:
outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))
else:
outrow = outcol = tuple()
qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))
qdata[len(inrow):] *= multiplier
irow = inrow + outrow
icol = incol + outcol
else:
irow = icol = qdata = tuple()
bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)
if subgraph is not None:
nodes, edges = subgraph
subbqm = cls.empty(SPIN)
try:
subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
except KeyError:
msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
try:
subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
except KeyError:
msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
bqm = subbqm
return bqm
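# Example usage (a sketch; the couplings are random, so values depend on seed):
#
#     bqm = chimera_anticluster(2, t=4, multiplier=3.0, seed=42)
#     assert len(bqm) == 2 * 2 * 2 * 4  # one variable per Chimera(2, 2, 4) node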
def _iter_chimera_tile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
# tile edges
for edge in ((k0, k1)
for i in range(0, ni, hoff)
for j in range(i, mi, voff)
for k0 in range(j, j + t)
for k1 in range(j + t, j + 2 * t)):
yield edge
def _iter_chimera_intertile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
# horizontal edges
for edge in ((k, k + hoff)
for i in range(t, 2 * t)
for j in range(i, ni - hoff, hoff)
for k in range(j, mi, voff)):
yield edge
# vertical edges
for edge in ((k, k + voff)
for i in range(t)
for j in range(i, ni, hoff)
for k in range(j, mi - voff, voff)):
yield edge | interactions between tiles.
Args: |
config.go | package run
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"regexp"
"strings"
"github.com/BurntSushi/toml"
"github.com/influxdata/influxdb/logger"
"github.com/influxdata/influxdb/monitor"
"github.com/influxdata/influxdb/monitor/diagnostics"
"github.com/influxdata/influxdb/pkg/tlsconfig"
"github.com/influxdata/influxdb/services/collectd"
"github.com/influxdata/influxdb/services/continuous_querier"
"github.com/influxdata/influxdb/services/graphite"
"github.com/influxdata/influxdb/services/httpd"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/services/opentsdb"
"github.com/influxdata/influxdb/services/precreator"
"github.com/influxdata/influxdb/services/retention"
"github.com/influxdata/influxdb/services/subscriber"
"github.com/influxdata/influxdb/services/udp"
itoml "github.com/influxdata/influxdb/toml"
"github.com/influxdata/influxdb/tsdb"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"github.com/angopher/chronus/coordinator"
"github.com/angopher/chronus/services/controller"
"github.com/angopher/chronus/services/hh"
)
const (
// DefaultBindAddress is the default address for various RPC services.
DefaultBindAddress = "127.0.0.1:8088"
)
// Config represents the configuration format for the influxd binary.
type Config struct {
Meta *meta.Config `toml:"meta"`
Data tsdb.Config `toml:"data"`
Coordinator coordinator.Config `toml:"coordinator"`
Retention retention.Config `toml:"retention"`
Precreator precreator.Config `toml:"shard-precreation"`
Monitor monitor.Config `toml:"monitor"`
Subscriber subscriber.Config `toml:"subscriber"`
HTTPD httpd.Config `toml:"http"`
Logging logger.Config `toml:"logging"`
GraphiteInputs []graphite.Config `toml:"graphite"`
CollectdInputs []collectd.Config `toml:"collectd"`
OpenTSDBInputs []opentsdb.Config `toml:"opentsdb"`
UDPInputs []udp.Config `toml:"udp"`
ContinuousQuery continuous_querier.Config `toml:"continuous_queries"`
HintedHandoff hh.Config `toml:"hinted-handoff"`
Controller controller.Config `toml:"controller"`
// Server reporting
ReportingDisabled bool `toml:"reporting-disabled"`
// BindAddress is the address that all TCP services use (Raft, Snapshot, Cluster, etc.)
BindAddress string `toml:"bind-address"`
// TLS provides configuration options for all https endpoints.
TLS tlsconfig.Config `toml:"tls"`
}
// NewConfig returns an instance of Config with reasonable defaults.
func NewConfig() *Config {
c := &Config{}
c.Meta = meta.NewConfig()
c.Data = tsdb.NewConfig()
c.Coordinator = coordinator.NewConfig()
c.Precreator = precreator.NewConfig()
c.Monitor = monitor.NewConfig()
c.Subscriber = subscriber.NewConfig()
c.HTTPD = httpd.NewConfig()
c.Logging = logger.NewConfig()
c.HintedHandoff = hh.NewConfig()
c.Controller = controller.NewConfig()
c.GraphiteInputs = []graphite.Config{graphite.NewConfig()}
c.CollectdInputs = []collectd.Config{collectd.NewConfig()}
c.OpenTSDBInputs = []opentsdb.Config{opentsdb.NewConfig()}
c.UDPInputs = []udp.Config{udp.NewConfig()}
c.ContinuousQuery = continuous_querier.NewConfig()
c.Retention = retention.NewConfig()
c.BindAddress = DefaultBindAddress
return c
}
// NewDemoConfig returns the config that runs when no config is specified.
func NewDemoConfig() (*Config, error) |
// FromTomlFile loads the config from a TOML file.
func (c *Config) FromTomlFile(fpath string) error {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return err
}
// Handle any potential Byte-Order-Marks that may be in the config file.
// This is for Windows compatibility only.
// See https://github.com/influxdata/telegraf/issues/1378 and
// https://github.com/influxdata/influxdb/issues/8965.
bom := unicode.BOMOverride(transform.Nop)
bs, _, err = transform.Bytes(bom, bs)
if err != nil {
return err
}
return c.FromToml(string(bs))
}
// FromToml loads the config from TOML.
func (c *Config) FromToml(input string) error {
// Replace deprecated [cluster] with [coordinator]
re := regexp.MustCompile(`(?m)^\s*\[cluster\]`)
input = re.ReplaceAllStringFunc(input, func(in string) string {
in = strings.TrimSpace(in)
out := "[coordinator]"
log.Printf("deprecated config option %s replaced with %s; %s will not be supported in a future release\n", in, out, in)
return out
})
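// e.g. a legacy config whose section header reads "[cluster]" is decoded
// as if it read "[coordinator]", after logging the deprecation notice.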
_, err := toml.Decode(input, c)
return err
}
// Validate returns an error if the config is invalid.
func (c *Config) Validate() error {
if err := c.Meta.Validate(); err != nil {
return err
}
if err := c.Data.Validate(); err != nil {
return err
}
if err := c.Monitor.Validate(); err != nil {
return err
}
if err := c.ContinuousQuery.Validate(); err != nil {
return err
}
if err := c.Retention.Validate(); err != nil {
return err
}
if err := c.Precreator.Validate(); err != nil {
return err
}
if err := c.Subscriber.Validate(); err != nil {
return err
}
for _, graphite := range c.GraphiteInputs {
if err := graphite.Validate(); err != nil {
return fmt.Errorf("invalid graphite config: %v", err)
}
}
for _, collectd := range c.CollectdInputs {
if err := collectd.Validate(); err != nil {
return fmt.Errorf("invalid collectd config: %v", err)
}
}
if err := c.TLS.Validate(); err != nil {
return err
}
return nil
}
// ApplyEnvOverrides apply the environment configuration on top of the config.
func (c *Config) ApplyEnvOverrides(getenv func(string) string) error {
return itoml.ApplyEnvOverrides(getenv, "INFLUXDB", c)
}
// Diagnostics returns a diagnostics representation of Config.
func (c *Config) Diagnostics() (*diagnostics.Diagnostics, error) {
return diagnostics.RowFromMap(map[string]interface{}{
"reporting-disabled": c.ReportingDisabled,
"bind-address": c.BindAddress,
}), nil
}
func (c *Config) diagnosticsClients() map[string]diagnostics.Client {
// Config settings that are always present.
m := map[string]diagnostics.Client{
"config": c,
"config-data": c.Data,
"config-meta": c.Meta,
"config-coordinator": c.Coordinator,
"config-retention": c.Retention,
"config-precreator": c.Precreator,
"config-monitor": c.Monitor,
"config-subscriber": c.Subscriber,
"config-httpd": c.HTTPD,
"config-cqs": c.ContinuousQuery,
}
// Config settings that can be repeated and can be disabled.
if g := graphite.Configs(c.GraphiteInputs); g.Enabled() {
m["config-graphite"] = g
}
if cc := collectd.Configs(c.CollectdInputs); cc.Enabled() {
m["config-collectd"] = cc
}
if t := opentsdb.Configs(c.OpenTSDBInputs); t.Enabled() {
m["config-opentsdb"] = t
}
if u := udp.Configs(c.UDPInputs); u.Enabled() {
m["config-udp"] = u
}
return m
}
// registerDiagnostics registers the config settings with the Monitor.
func (c *Config) registerDiagnostics(m *monitor.Monitor) {
for name, dc := range c.diagnosticsClients() {
m.RegisterDiagnosticsClient(name, dc)
}
}
// deregisterDiagnostics deregisters the config settings from the Monitor.
func (c *Config) deregisterDiagnostics(m *monitor.Monitor) {
for name := range c.diagnosticsClients() {
m.DeregisterDiagnosticsClient(name)
}
}
| {
c := NewConfig()
var homeDir string
// By default, store meta and data files in the current user's home directory
u, err := user.Current()
if err == nil {
homeDir = u.HomeDir
} else if os.Getenv("HOME") != "" {
homeDir = os.Getenv("HOME")
} else {
return nil, fmt.Errorf("failed to determine current user for storage")
}
c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta")
c.Data.Dir = filepath.Join(homeDir, ".influxdb/data")
c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")
return c, nil
} |
plugin_test.go | // Copyright 2018 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package logs
import (
"compress/gzip"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"os"
"reflect"
"testing"
"time"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/metrics"
"github.com/open-policy-agent/opa/plugins"
"github.com/open-policy-agent/opa/server"
"github.com/open-policy-agent/opa/storage/inmem"
"github.com/open-policy-agent/opa/version"
)
func TestMain(m *testing.M) {
// call flag.Parse() here if TestMain uses flags
setVersion("XY.Z")
os.Exit(m.Run())
}
type testPlugin struct {
events []EventV1
}
func (p *testPlugin) Start(context.Context) error {
return nil
}
func (p *testPlugin) Stop(context.Context) {
}
func (p *testPlugin) Reconfigure(context.Context, interface{}) {
}
func (p *testPlugin) Log(_ context.Context, event EventV1) {
p.events = append(p.events, event)
}
func TestPluginCustomBackend(t *testing.T) {
ctx := context.Background()
manager, _ := plugins.New(nil, "test-instance-id", inmem.New())
backend := &testPlugin{}
manager.Register("test_plugin", backend)
config, err := ParseConfig([]byte(`{"plugin": "test_plugin"}`), nil, []string{"test_plugin"})
if err != nil {
t.Fatal(err)
}
plugin := New(config, manager)
plugin.Log(ctx, &server.Info{Revision: "A"})
plugin.Log(ctx, &server.Info{Revision: "B"})
if len(backend.events) != 2 || backend.events[0].Revision != "A" || backend.events[1].Revision != "B" {
t.Fatal("Unexpected events:", backend.events)
}
}
func TestPluginErrorNoResult(t *testing.T) {
ctx := context.Background()
manager, _ := plugins.New(nil, "test-instance-id", inmem.New())
backend := &testPlugin{}
manager.Register("test_plugin", backend)
config, err := ParseConfig([]byte(`{"plugin": "test_plugin"}`), nil, []string{"test_plugin"})
if err != nil {
t.Fatal(err)
}
plugin := New(config, manager)
plugin.Log(ctx, &server.Info{Error: fmt.Errorf("some error")})
plugin.Log(ctx, &server.Info{Error: ast.Errors{&ast.Error{
Code: "some_error",
}}})
if len(backend.events) != 2 || backend.events[0].Error == nil || backend.events[1].Error == nil {
t.Fatal("Unexpected events:", backend.events)
}
}
func TestPluginQueriesAndPaths(t *testing.T) {
ctx := context.Background()
manager, _ := plugins.New(nil, "test-instance-id", inmem.New())
backend := &testPlugin{}
manager.Register("test_plugin", backend)
config, err := ParseConfig([]byte(`{"plugin": "test_plugin"}`), nil, []string{"test_plugin"})
if err != nil {
t.Fatal(err)
}
plugin := New(config, manager)
plugin.Log(ctx, &server.Info{Path: "data.foo"})
plugin.Log(ctx, &server.Info{Path: "data.foo.bar"})
plugin.Log(ctx, &server.Info{Query: "a = data.foo"})
exp := []struct {
query string
path string
}{
// TODO(tsandall): we need to fix how POST /v1/data (and
// friends) are represented here. Currently we can't tell the
// difference between /v1/data and /v1/data/data. The decision
// log event paths should be slash prefixed to avoid ambiguity.
// {path: "data"},
{path: "foo"},
{path: "foo/bar"},
{query: "a = data.foo"},
}
if len(exp) != len(backend.events) {
t.Fatalf("Expected %d events but got %v", len(exp), len(backend.events))
}
for i, e := range exp {
if e.query != backend.events[i].Query || e.path != backend.events[i].Path {
t.Fatalf("Unexpected event %d, want %v but got %v", i, e, backend.events[i])
}
}
}
func TestPluginStartSameInput(t *testing.T) {
ctx := context.Background()
fixture := newTestFixture(t)
defer fixture.server.stop()
fixture.server.ch = make(chan []EventV1, 4)
var result interface{} = false
ts, err := time.Parse(time.RFC3339Nano, "2018-01-01T12:00:00.123456Z")
if err != nil {
panic(err)
}
testMetrics := getWellKnownMetrics()
var input interface{} = map[string]interface{}{"method": "GET"}
for i := 0; i < 400; i++ {
fixture.plugin.Log(ctx, &server.Info{
Revision: fmt.Sprint(i),
DecisionID: fmt.Sprint(i),
Path: "data.tda.bar",
Input: &input,
Results: &result,
RemoteAddr: "test",
Timestamp: ts,
Metrics: testMetrics,
})
}
_, err = fixture.plugin.oneShot(ctx)
if err != nil {
t.Fatal(err)
}
chunk1 := <-fixture.server.ch
chunk2 := <-fixture.server.ch
chunk3 := <-fixture.server.ch
chunk4 := <-fixture.server.ch
expLen1 := 122
expLen2 := 121
expLen3 := 121
expLen4 := 36
if len(chunk1) != expLen1 || len(chunk2) != expLen2 || len(chunk3) != expLen3 || len(chunk4) != expLen4 {
t.Fatalf("Expected chunk lens %v, %v, %v and %v but got: %v, %v, %v and %v", expLen1, expLen2, expLen3, expLen4, len(chunk1), len(chunk2), len(chunk3), len(chunk4))
}
var expInput interface{} = map[string]interface{}{"method": "GET"}
msAsFloat64 := map[string]interface{}{}
for k, v := range testMetrics.All() {
msAsFloat64[k] = float64(v.(uint64))
}
exp := EventV1{
Labels: map[string]string{
"id": "test-instance-id",
"app": "example-app",
},
Revision: "399",
DecisionID: "399",
Path: "tda/bar",
Input: &expInput,
Result: &result,
RequestedBy: "test",
Timestamp: ts,
Version: getVersion(),
Metrics: msAsFloat64,
}
if !reflect.DeepEqual(chunk4[expLen4-1], exp) {
t.Fatalf("Expected %+v but got %+v", exp, chunk4[expLen4-1])
}
}
func | (t *testing.T) {
ctx := context.Background()
fixture := newTestFixture(t)
defer fixture.server.stop()
fixture.server.ch = make(chan []EventV1, 4)
var result interface{} = false
ts, err := time.Parse(time.RFC3339Nano, "2018-01-01T12:00:00.123456Z")
if err != nil {
panic(err)
}
var input interface{}
for i := 0; i < 400; i++ {
input = map[string]interface{}{"method": getValueForMethod(i), "path": getValueForPath(i), "user": getValueForUser(i)}
fixture.plugin.Log(ctx, &server.Info{
Revision: fmt.Sprint(i),
DecisionID: fmt.Sprint(i),
Path: "data.foo.bar",
Input: &input,
Results: &result,
RemoteAddr: "test",
Timestamp: ts,
})
}
_, err = fixture.plugin.oneShot(ctx)
if err != nil {
t.Fatal(err)
}
chunk1 := <-fixture.server.ch
chunk2 := <-fixture.server.ch
chunk3 := <-fixture.server.ch
chunk4 := <-fixture.server.ch
expLen1 := 124
expLen2 := 123
expLen3 := 123
expLen4 := 30
if len(chunk1) != expLen1 || len(chunk2) != expLen2 || len(chunk3) != expLen3 || len(chunk4) != expLen4 {
t.Fatalf("Expected chunk lens %v, %v, %v and %v but got: %v, %v, %v and %v", expLen1, expLen2, expLen3, expLen4, len(chunk1), len(chunk2), len(chunk3), len(chunk4))
}
var expInput interface{} = input
exp := EventV1{
Labels: map[string]string{
"id": "test-instance-id",
"app": "example-app",
},
Revision: "399",
DecisionID: "399",
Path: "foo/bar",
Input: &expInput,
Result: &result,
RequestedBy: "test",
Timestamp: ts,
Version: getVersion(),
}
if !reflect.DeepEqual(chunk4[expLen4-1], exp) {
t.Fatalf("Expected %+v but got %+v", exp, chunk4[expLen4-1])
}
}
func TestPluginStartChangingInputKeysAndValues(t *testing.T) {
ctx := context.Background()
fixture := newTestFixture(t)
defer fixture.server.stop()
fixture.server.ch = make(chan []EventV1, 5)
var result interface{} = false
ts, err := time.Parse(time.RFC3339Nano, "2018-01-01T12:00:00.123456Z")
if err != nil {
panic(err)
}
var input interface{}
for i := 0; i < 250; i++ {
input = generateInputMap(i)
fixture.plugin.Log(ctx, &server.Info{
Revision: fmt.Sprint(i),
DecisionID: fmt.Sprint(i),
Path: "data.foo.bar",
Input: &input,
Results: &result,
RemoteAddr: "test",
Timestamp: ts,
})
}
_, err = fixture.plugin.oneShot(ctx)
if err != nil {
t.Fatal(err)
}
<-fixture.server.ch
chunk2 := <-fixture.server.ch
var expInput interface{} = input
exp := EventV1{
Labels: map[string]string{
"id": "test-instance-id",
"app": "example-app",
},
Revision: "249",
DecisionID: "249",
Path: "foo/bar",
Input: &expInput,
Result: &result,
RequestedBy: "test",
Timestamp: ts,
Version: getVersion(),
}
if !reflect.DeepEqual(chunk2[len(chunk2)-1], exp) {
t.Fatalf("Expected %+v but got %+v", exp, chunk2[len(chunk2)-1])
}
}
func TestPluginRequeue(t *testing.T) {
ctx := context.Background()
fixture := newTestFixture(t)
defer fixture.server.stop()
fixture.server.ch = make(chan []EventV1, 1)
var input interface{} = map[string]interface{}{"method": "GET"}
var result1 interface{} = false
fixture.plugin.Log(ctx, &server.Info{
DecisionID: "abc",
Path: "data.foo.bar",
Input: &input,
Results: &result1,
RemoteAddr: "test",
Timestamp: time.Now().UTC(),
})
fixture.server.expCode = 500
_, err := fixture.plugin.oneShot(ctx)
if err == nil {
t.Fatal("Expected error")
}
events1 := <-fixture.server.ch
fixture.server.expCode = 200
_, err = fixture.plugin.oneShot(ctx)
if err != nil {
t.Fatal(err)
}
events2 := <-fixture.server.ch
if !reflect.DeepEqual(events1, events2) {
t.Fatalf("Expected %v but got: %v", events1, events2)
}
uploaded, err := fixture.plugin.oneShot(ctx)
if uploaded || err != nil {
t.Fatalf("Unexpected error or upload, err: %v", err)
}
}
func TestPluginReconfigure(t *testing.T) {
ctx := context.Background()
fixture := newTestFixture(t)
defer fixture.server.stop()
if err := fixture.plugin.Start(ctx); err != nil {
t.Fatal(err)
}
minDelay := 2
maxDelay := 3
pluginConfig := []byte(fmt.Sprintf(`{
"service": "example",
"reporting": {
"min_delay_seconds": %v,
"max_delay_seconds": %v
}
}`, minDelay, maxDelay))
config, _ := ParseConfig(pluginConfig, fixture.manager.Services(), nil)
fixture.plugin.Reconfigure(ctx, config)
fixture.plugin.Stop(ctx)
actualMin := time.Duration(*fixture.plugin.config.Reporting.MinDelaySeconds) / time.Nanosecond
expectedMin := time.Duration(minDelay) * time.Second
if actualMin != expectedMin {
t.Fatalf("Expected minimum polling interval: %v but got %v", expectedMin, actualMin)
}
actualMax := time.Duration(*fixture.plugin.config.Reporting.MaxDelaySeconds) / time.Nanosecond
expectedMax := time.Duration(maxDelay) * time.Second
if actualMax != expectedMax {
t.Fatalf("Expected maximum polling interval: %v but got %v", expectedMax, actualMax)
}
}
type testFixture struct {
manager *plugins.Manager
plugin *Plugin
server *testServer
}
func newTestFixture(t *testing.T) testFixture {
ts := testServer{
t: t,
expCode: 200,
}
ts.start()
managerConfig := []byte(fmt.Sprintf(`{
"labels": {
"app": "example-app"
},
"services": [
{
"name": "example",
"url": %q,
"credentials": {
"bearer": {
"scheme": "Bearer",
"token": "secret"
}
}
}
]}`, ts.server.URL))
manager, err := plugins.New(managerConfig, "test-instance-id", inmem.New())
if err != nil {
t.Fatal(err)
}
pluginConfig := []byte(`{
"service": "example"
}`)
config, _ := ParseConfig(pluginConfig, manager.Services(), nil)
p := New(config, manager)
return testFixture{
manager: manager,
plugin: p,
server: &ts,
}
}
type testServer struct {
t *testing.T
expCode int
server *httptest.Server
ch chan []EventV1
}
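// handle decodes one gzipped JSON batch of events, forwards it on t.ch and
// responds with the configured t.expCode status.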
func (t *testServer) handle(w http.ResponseWriter, r *http.Request) {
gr, err := gzip.NewReader(r.Body)
if err != nil {
t.t.Fatal(err)
}
var events []EventV1
if err := json.NewDecoder(gr).Decode(&events); err != nil {
t.t.Fatal(err)
}
if err := gr.Close(); err != nil {
t.t.Fatal(err)
}
t.t.Logf("decision log test server received %d events", len(events))
t.ch <- events
w.WriteHeader(t.expCode)
}
func (t *testServer) start() {
t.server = httptest.NewServer(http.HandlerFunc(t.handle))
}
func (t *testServer) stop() {
t.server.Close()
}
func getValueForMethod(idx int) string {
methods := []string{"GET", "POST", "PUT", "DELETE", "PATCH"}
return methods[idx%len(methods)]
}
func getValueForPath(idx int) string {
paths := []string{"/blah1", "/blah2", "/blah3", "/blah4"}
return paths[idx%len(paths)]
}
func getValueForUser(idx int) string {
users := []string{"Alice", "Bob", "Charlie", "David", "Ed"}
return users[idx%len(users)]
}
func generateInputMap(idx int) map[string]interface{} {
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
result := make(map[string]interface{})
for i := 0; i < 20; i++ {
n := idx % len(letters)
key := string(letters[n])
result[key] = fmt.Sprint(idx)
}
return result
}
func setVersion(opaVersion string) {
if version.Version == "" {
version.Version = opaVersion
}
}
func getVersion() string {
return version.Version
}
func getWellKnownMetrics() metrics.Metrics {
m := metrics.New()
m.Counter("test_counter").Incr()
return m
}
| TestPluginStartChangingInputValues |
segment_tree.py | from types import SimpleNamespace
class Segment_Tree:
'''
A Class used to get partial sum of an array and update data
...
Attributes
----------
array : list
a list which to make a segment tree
Methods
-------
init(tree, start, end, node)
make segment tree from the array. don't call this method directly.
sum(left, right, node=1, start=0, end=-1)
return the sum of the array over [left, right] (inclusive).
update(index, diff, node=1, start=0, end=-1)
add diff to the value at the given index of the array.
'''
def __init__(self, array):
'''
Parameters
----------
array : list
the array that you want to make tree
'''
self.array = array
self.tree = [SimpleNamespace(value=0, lazy=0) for _ in range(len(self.array) * 4)]
self.init(self.tree, 0, len(self.array)-1, 1)
self.last_index = len(array)-1
def init(self, tree, start, end, node):
'''
Don't Call This Method Directly
'''
if start == end:
tree[node].value = self.array[start]
return tree[node].value
mid = (start + end) // 2
tree[node].value = self.init(tree, start, mid, node * 2) + self.init(tree, mid + 1, end, node * 2 + 1)
return tree[node].value
def | (self, left, right, node=1, start=0, end=-1):
'''
Parameters
----------
left : int
start index of the part [left, left+1, left+2 .. right-2, right-1, right]
right : int
end index of the part [left, left+1, left+2 .. right-2, right-1, right]
Returns
-------
int
a sum of the part of the array. sum([left, left+1, left+2 .. right-2, right-1, right])
'''
if end == -1:
end = self.last_index
if left > end or right < start:
return 0
if left <= start and end <= right:
return self.tree[node].value
return self.sum(left, right, node*2, start, (start+end)//2) + self.sum(left, right, node*2+1, (start+end)//2+1, end)
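# lazy_sum/update_range below implement lazy propagation: a pending range
# update is stored on a node and pushed to its children only when a later
# query or update visits them, so pair lazy_sum (not sum) with update_range.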
def lazy_sum(self, left, right, node=1, start=0, end=-1):
if end == -1:
end = self.last_index
if self.tree[node].lazy != 0:
self.tree[node].value += (end-start+1)*self.tree[node].lazy
if start != end:
self.tree[node*2].lazy += self.tree[node].lazy
self.tree[node*2+1].lazy += self.tree[node].lazy
self.tree[node].lazy = 0
if right < start or end < left:
return 0
if left <= start and end <= right:
return self.tree[node].value
return self.lazy_sum(left, right, node*2, start, (start+end)//2) + self.lazy_sum(left, right, node*2+1, (start+end)//2+1, end)
def update(self, index, diff, node=1, start=0, end=-1):
'''
Parameters
----------
index: int
the index of the array whose value you want to update
diff: int
the amount to add. if you want to change 4 to 10, set diff to 6
'''
if end == -1:
end = self.last_index
if not(start <= index <= end):
return
self.tree[node].value += diff
if start != end:
self.update(index, diff, node*2, start, (start+end)//2)
self.update(index, diff, node*2+1, (start+end)//2+1, end)
def update_range(self, diff, left, right, node=1, start=0, end=-1):
if end == -1:
end = self.last_index
if self.tree[node].lazy != 0:
self.tree[node].value += (end-start+1)*self.tree[node].lazy
if start != end:
self.tree[node*2].lazy += self.tree[node].lazy
self.tree[node*2+1].lazy += self.tree[node].lazy
self.tree[node].lazy = 0
if right < start or end < left:
return
if left <= start and end <= right:
self.tree[node].value += (end-start+1)*diff
if start != end:
self.tree[node*2].lazy += diff
self.tree[node*2+1].lazy += diff
return
self.update_range(diff, left, right, node*2, start, (start+end)//2)
self.update_range(diff, left, right, node*2+1, (start+end)//2+1, end)
self.tree[node].value = self.tree[node*2].value + self.tree[node*2+1].value
# init segment tree of an array from index start to end.
# return index of minimum value of array in range from start to end.
def init_segment_min(array, tree, node, start, end):
if start == end:
tree[node] = start
return tree[node]
mid = (start + end) // 2
left = init_segment_min(array, tree, node * 2, start, mid)
right = init_segment_min(array, tree, node * 2 + 1, mid + 1, end)
if array[left] < array[right]:
tree[node] = left
else:
tree[node] = right
return tree[node]
def find_min(array, tree, node, start, end, left, right):
if left > end or right < start:
return -1
if left <= start and end <= right:
return tree[node]
left_index = find_min(array, tree, node*2, start, (start+end)//2, left, right)
right_index = find_min(array, tree, node*2+1, (start+end)//2+1, end, left, right)
if left_index == -1 and right_index == -1:
return -1
elif left_index == -1:
return right_index
elif right_index == -1:
return left_index
else:
if array[left_index] < array[right_index]:
return left_index
return right_index
if __name__ == '__main__':
a = [3, 5, 6, 7, 2, 9, 4, 5, 2, 8, 1, 5]
# tree = [0 for _ in range(len(a) * 4)]
# tree2 = [0 for _ in range(len(a) * 4)]
# init_segment_sum(a, tree, 0, 11)
# print('a: {}'.format(a))
# print('segment tree(sum): {}'.format(tree))
# print('partial sum of (3~9): {}'.format(segment_sum(tree, 1, 0, 11, 3, 9)))
# print('update a[3] to 8')
# update(tree, 1, 0, 11, 3, 1)
# print('segment tree(sum): {}'.format(tree))
# print('partial sum of (3~9): {}'.format(segment_sum(tree, 1, 0, 11, 3, 9)))
segment = Segment_Tree(a)
print(segment.sum(3, 9))
segment.update(3, 1)
print(segment.sum(3, 9))
a = [1,2,3,4,5,6,7,8,9,10]
segment = Segment_Tree(a)
print(segment.lazy_sum(0, 9))
segment.update_range(10, 0, 4)
print(segment.lazy_sum(0, 9)) | sum |
libs.py | # coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import contextlib
bn_regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0)
name_scope = ""
@contextlib.contextmanager
def scope(name):
global name_scope
bk = name_scope
name_scope = name_scope + name + '/'
yield
name_scope = bk
def max_pool(input, kernel, stride, padding):
data = fluid.layers.pool2d(
input,
pool_size=kernel,
pool_type='max',
pool_stride=stride,
pool_padding=padding)
return data
def avg_pool(input, kernel, stride, padding=0):
data = fluid.layers.pool2d(
input,
pool_size=kernel,
pool_type='avg',
pool_stride=stride,
pool_padding=padding)
return data
def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None):
N, C, H, W = input.shape
if C % G != 0:
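# search outward from G (G±1, G±2, ...) for the nearest group count
# that evenly divides the channel count C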
for d in range(10):
for t in [d, -d]:
if G + t <= 0: continue
if C % (G + t) == 0:
G = G + t
break
if C % G == 0:
break
assert C % G == 0, "group count must evenly divide the channel count"
x = fluid.layers.group_norm(
input,
groups=G,
param_attr=param_attr,
bias_attr=bias_attr,
name=name_scope + 'group_norm')
return x
def bn(*args,
norm_type='bn',
eps=1e-5,
bn_momentum=0.99,
group_norm=32,
**kargs):
if norm_type == 'bn':
with scope('BatchNorm'):
return fluid.layers.batch_norm(
*args,
epsilon=eps,
momentum=bn_momentum,
param_attr=fluid.ParamAttr(
name=name_scope + 'gamma', regularizer=bn_regularizer),
bias_attr=fluid.ParamAttr(
name=name_scope + 'beta', regularizer=bn_regularizer),
moving_mean_name=name_scope + 'moving_mean',
moving_variance_name=name_scope + 'moving_variance',
**kargs)
elif norm_type == 'gn':
with scope('GroupNorm'):
return group_norm(
args[0],
group_norm,
eps=eps,
param_attr=fluid.ParamAttr(
name=name_scope + 'gamma', regularizer=bn_regularizer),
bias_attr=fluid.ParamAttr(
name=name_scope + 'beta', regularizer=bn_regularizer))
else:
raise Exception("Unsupport norm type:" + norm_type)
def bn_relu(data, norm_type='bn', eps=1e-5):
return fluid.layers.relu(bn(data, norm_type=norm_type, eps=eps))
def relu(data):
return fluid.layers.relu(data)
def conv(*args, **kargs):
kargs['param_attr'] = name_scope + 'weights'
if 'bias_attr' in kargs and kargs['bias_attr']:
kargs['bias_attr'] = fluid.ParamAttr(
name=name_scope + 'biases',
regularizer=None,
initializer=fluid.initializer.ConstantInitializer(value=0.0))
else:
kargs['bias_attr'] = False
return fluid.layers.conv2d(*args, **kargs)
def deconv(*args, **kargs):
kargs['param_attr'] = name_scope + 'weights'
if 'bias_attr' in kargs and kargs['bias_attr']:
kargs['bias_attr'] = name_scope + 'biases'
else:
kargs['bias_attr'] = False
return fluid.layers.conv2d_transpose(*args, **kargs)
def separate_conv(input,
channel,
stride,
filter,
dilation=1,
act=None,
eps=1e-5):
param_attr = fluid.ParamAttr(
name=name_scope + 'weights',
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=0.0),
initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.33))
with scope('depthwise'):
input = conv(
input,
input.shape[1],
filter,
stride,
groups=input.shape[1],
padding=(filter // 2) * dilation,
dilation=dilation,
use_cudnn=False,
param_attr=param_attr)
input = bn(input, eps=eps)
if act: input = act(input)
param_attr = fluid.ParamAttr(
name=name_scope + 'weights',
regularizer=None,
initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06))
with scope('pointwise'):
input = conv(
input, channel, 1, 1, groups=1, padding=0, param_attr=param_attr)
input = bn(input, eps=eps)
if act: input = act(input)
return input
def conv_bn_layer(input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
if_act=True,
name=None,
use_cudnn=True):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=fluid.ParamAttr(name=name + '_weights'),
bias_attr=False)
bn_name = name + '_bn'
bn = fluid.layers.batch_norm(
input=conv,
param_attr=fluid.ParamAttr(name=bn_name + "_scale"),
bias_attr=fluid.ParamAttr(name=bn_name + "_offset"),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
if if_act:
return fluid.layers.relu6(bn)
else:
return bn
def sigmoid_to_softmax(input):
"""
one channel to two channel
"""
logit = fluid.layers.sigmoid(input)
logit_back = 1 - logit
logit = fluid.layers.concat([logit_back, logit], axis=1)
return logit
behaviour.rs | /*
Copyright 2021 JFrog Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use crate::network::artifact_protocol::{ArtifactExchangeCodec, ArtifactRequest, ArtifactResponse};
use crate::network::idle_metric_protocol::{
IdleMetricExchangeCodec, IdleMetricRequest, IdleMetricResponse,
};
use libp2p::identify::{Identify, IdentifyEvent};
use libp2p::kad::record::store::MemoryStore;
use libp2p::kad::{Kademlia, KademliaEvent};
use libp2p::request_response::{RequestResponse, RequestResponseEvent};
use libp2p::NetworkBehaviour;
/// Defines the [`NetworkBehaviour`] to be used in the libp2p
/// Swarm. The PyrsiaNetworkBehaviour consists of the following
/// behaviours:
///
/// * [`Identify`]
/// * [`Kademlia`]
/// * [`RequestResponse`] for exchanging artifacts
/// * [`RequestResponse`] for exchanging idle metrics
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "PyrsiaNetworkEvent")]
pub struct PyrsiaNetworkBehaviour {
pub identify: Identify,
pub kademlia: Kademlia<MemoryStore>,
pub request_response: RequestResponse<ArtifactExchangeCodec>,
pub idle_metric_request_response: RequestResponse<IdleMetricExchangeCodec>,
}
/// Each event in the `PyrsiaNetworkBehaviour` is wrapped in a
/// `PyrsiaNetworkEvent`.
#[derive(Debug)]
pub enum PyrsiaNetworkEvent {
Identify(IdentifyEvent),
Kademlia(KademliaEvent),
RequestResponse(RequestResponseEvent<ArtifactRequest, ArtifactResponse>),
IdleMetricRequestResponse(RequestResponseEvent<IdleMetricRequest, IdleMetricResponse>),
}
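// Illustrative (hypothetical event loop): thanks to the From impls below, a
// swarm event can be dispatched with a single match on the aggregate enum.
//
// match event {
//     PyrsiaNetworkEvent::Identify(e) => { /* peer identification */ }
//     PyrsiaNetworkEvent::Kademlia(e) => { /* DHT bookkeeping */ }
//     PyrsiaNetworkEvent::RequestResponse(e) => { /* artifact exchange */ }
//     PyrsiaNetworkEvent::IdleMetricRequestResponse(e) => { /* idle metrics */ }
// }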
impl From<IdentifyEvent> for PyrsiaNetworkEvent {
fn from(event: IdentifyEvent) -> Self {
PyrsiaNetworkEvent::Identify(event)
}
}
impl From<KademliaEvent> for PyrsiaNetworkEvent {
fn from(event: KademliaEvent) -> Self {
PyrsiaNetworkEvent::Kademlia(event)
}
}
impl From<RequestResponseEvent<ArtifactRequest, ArtifactResponse>> for PyrsiaNetworkEvent {
fn from(event: RequestResponseEvent<ArtifactRequest, ArtifactResponse>) -> Self {
PyrsiaNetworkEvent::RequestResponse(event)
}
}
impl From<RequestResponseEvent<IdleMetricRequest, IdleMetricResponse>> for PyrsiaNetworkEvent {
fn from(event: RequestResponseEvent<IdleMetricRequest, IdleMetricResponse>) -> Self {
PyrsiaNetworkEvent::IdleMetricRequestResponse(event)
}
}
kernelService.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import type { nbformat } from '@jupyterlab/coreutils';
import type { Kernel } from '@jupyterlab/services';
import { inject, injectable } from 'inversify';
import * as path from 'path';
import * as uuid from 'uuid/v4';
import { CancellationToken, CancellationTokenSource } from 'vscode';
import { IPythonExtensionChecker } from '../../../api/types';
import { Cancellation, wrapCancellationTokens } from '../../../common/cancellation';
import { PYTHON_LANGUAGE, PYTHON_WARNINGS } from '../../../common/constants';
import '../../../common/extensions';
import { traceDecorators, traceError, traceInfo, traceVerbose, traceWarning } from '../../../common/logger';
import { IFileSystem } from '../../../common/platform/types';
import { IPythonExecutionFactory } from '../../../common/process/types';
import { ReadWrite } from '../../../common/types';
import { sleep } from '../../../common/utils/async';
import { noop } from '../../../common/utils/misc';
import { IEnvironmentActivationService } from '../../../interpreter/activation/types';
import { IInterpreterService } from '../../../interpreter/contracts';
import { PythonEnvironment } from '../../../pythonEnvironments/info';
import { captureTelemetry, sendTelemetryEvent } from '../../../telemetry';
import { getRealPath } from '../../common';
import { Telemetry } from '../../constants';
import { reportAction } from '../../progress/decorator';
import { ReportableAction } from '../../progress/types';
import {
IJupyterKernelSpec,
IJupyterSessionManager,
IJupyterSubCommandExecutionService,
IKernelDependencyService,
KernelInterpreterDependencyResponse
} from '../../types';
import { cleanEnvironment, detectDefaultKernelName } from './helpers';
import { JupyterKernelSpec } from './jupyterKernelSpec';
import { LiveKernelModel } from './types';
// eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-require-imports
const NamedRegexp = require('named-js-regexp') as typeof import('named-js-regexp');
/**
* Helper to ensure we can differentiate between two types in union types, keeping typing information.
 * (basically avoiding the need to cast using `as`).
* We cannot use `xx in` as jupyter uses `JSONObject` which is too broad and captures anything and everything.
*
* @param {(nbformat.IKernelspecMetadata | PythonEnvironment)} item
* @returns {item is PythonEnvironment}
*/
function isInterpreter(item: nbformat.IKernelspecMetadata | PythonEnvironment): item is PythonEnvironment {
// Interpreters will not have a `display_name` property, but have `path` and `type` properties.
return !!(item as PythonEnvironment).path && !(item as nbformat.IKernelspecMetadata).display_name;
}
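// Illustrative narrowing (hypothetical `item` value): the guard lets the
// compiler pick the correct branch of the union without an `as` cast.
//
// if (isInterpreter(item)) {
//     usePath(item.path); // PythonEnvironment branch
// } else {
//     useName(item.display_name); // nbformat.IKernelspecMetadata branch
// }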
/**
* Responsible for kernel management and the like.
*
* @export
* @class KernelService
*/
@injectable()
export class KernelService {
constructor(
@inject(IJupyterSubCommandExecutionService)
private readonly jupyterInterpreterExecService: IJupyterSubCommandExecutionService,
@inject(IPythonExecutionFactory) private readonly execFactory: IPythonExecutionFactory,
@inject(IInterpreterService) private readonly interpreterService: IInterpreterService,
@inject(IKernelDependencyService) private readonly kernelDependencyService: IKernelDependencyService,
@inject(IFileSystem) private readonly fs: IFileSystem,
@inject(IEnvironmentActivationService) private readonly activationHelper: IEnvironmentActivationService,
@inject(IPythonExtensionChecker) private readonly extensionChecker: IPythonExtensionChecker
) {}
/**
* Finds a kernel spec from a given session or jupyter process that matches a given spec.
*
* @param {nbformat.IKernelspecMetadata} kernelSpec The kernelspec (criteria) to be used when searching for a kernel.
* @param {IJupyterSessionManager} [sessionManager] If not provided search against the jupyter process.
* @param {CancellationToken} [cancelToken]
* @returns {(Promise<IJupyterKernelSpec | undefined>)}
* @memberof KernelService
*/
public async findMatchingKernelSpec(
kernelSpec: nbformat.IKernelspecMetadata,
sessionManager?: IJupyterSessionManager,
cancelToken?: CancellationToken
): Promise<IJupyterKernelSpec | undefined>;
/**
* Finds a kernel spec from a given session or jupyter process that matches a given interpreter.
*
* @param {PythonEnvironment} interpreter The interpreter (criteria) to be used when searching for a kernel.
* @param {(IJupyterSessionManager | undefined)} sessionManager If not provided search against the jupyter process.
* @param {CancellationToken} [cancelToken]
* @returns {(Promise<IJupyterKernelSpec | undefined>)}
* @memberof KernelService
*/
public async findMatchingKernelSpec(
interpreter: PythonEnvironment,
sessionManager?: IJupyterSessionManager | undefined,
cancelToken?: CancellationToken
): Promise<IJupyterKernelSpec | undefined>;
public async findMatchingKernelSpec(
option: nbformat.IKernelspecMetadata | PythonEnvironment,
sessionManager: IJupyterSessionManager | undefined,
cancelToken?: CancellationToken
): Promise<IJupyterKernelSpec | undefined> {
const specs = await this.getKernelSpecs(sessionManager, cancelToken);
if (isInterpreter(option)) {
return specs.find((item) => {
if (item.language?.toLowerCase() !== PYTHON_LANGUAGE.toLowerCase()) {
return false;
}
return (
this.fs.areLocalPathsSame(item.argv[0], option.path) ||
this.fs.areLocalPathsSame(item.metadata?.interpreter?.path || '', option.path)
);
});
} else {
return specs.find((item) => item.display_name === option.display_name && item.name === option.name);
}
}
/**
* Given a kernel, this will find an interpreter that matches the kernel spec.
 * Note: When we create our own kernels on behalf of the user, the metadata contains the interpreter information.
*
* @param {IJupyterKernelSpec} kernelSpec
* @param {CancellationToken} [cancelToken]
* @returns {(Promise<PythonEnvironment | undefined>)}
* @memberof KernelService
*/
// eslint-disable-next-line complexity
@traceDecorators.verbose('Find matching interpreter for a given kernel spec')
public async findMatchingInterpreter(
kernelSpec: IJupyterKernelSpec | LiveKernelModel,
cancelToken?: CancellationToken
): Promise<PythonEnvironment | undefined> {
// If we know for a fact that the kernel spec is a Non-Python kernel, then return nothing.
if (kernelSpec?.language && kernelSpec.language !== PYTHON_LANGUAGE) {
return;
}
if (!this.extensionChecker.isPythonExtensionInstalled) {
return;
}
const activeInterpreterPromise = this.interpreterService.getActiveInterpreter(undefined);
const allInterpretersPromise = this.interpreterService.getInterpreters(undefined);
// Ensure we handle errors if any (this is required to ensure we do not exit this function without using this promise).
// If promise is rejected and we do not use it, then ignore errors.
activeInterpreterPromise.ignoreErrors();
// Ensure we handle errors if any (this is required to ensure we do not exit this function without using this promise).
// If promise is rejected and we do not use it, then ignore errors.
allInterpretersPromise.ignoreErrors();
// 1. Check if current interpreter has the same path
const interpreterPath = kernelSpec.metadata?.interpreter?.path || kernelSpec.interpreterPath;
if (interpreterPath) {
const interpreter = await this.interpreterService.getInterpreterDetails(interpreterPath);
if (interpreter) {
traceInfo(
`Found matching interpreter based on interpreter or interpreterPath in metadata, for the kernel ${kernelSpec.name}, ${kernelSpec.display_name}, ${interpreterPath}`
);
return interpreter;
}
traceError(
`KernelSpec has interpreter information, however a matching interpreter could not be found for ${interpreterPath}`
);
}
// 2. Check if we have a fully qualified path in `argv`
const pathInArgv =
Array.isArray(kernelSpec.argv) && kernelSpec.argv.length > 0 ? kernelSpec.argv[0] : undefined;
if (pathInArgv && path.basename(pathInArgv) !== pathInArgv) {
const interpreter = await this.interpreterService.getInterpreterDetails(pathInArgv).catch((ex) => {
traceError(
`Failed to get interpreter information for python defined in kernel ${kernelSpec.name}, ${
kernelSpec.display_name
} with argv: ${(kernelSpec.argv || [])?.join(',')}`,
ex
);
return;
});
if (interpreter) {
traceInfo(
`Found matching interpreter based on argv in metadata, for the kernel ${kernelSpec.name}, ${kernelSpec.display_name}, ${pathInArgv}`
);
return interpreter;
}
traceError(
`KernelSpec has path information, however a matching interpreter could not be found for ${kernelSpec.metadata?.interpreter?.path}`
);
}
if (Cancellation.isCanceled(cancelToken)) {
return;
}
// 3. Check if current interpreter has the same display name
const activeInterpreter = await activeInterpreterPromise;
// If the display name matches the active interpreter then use that.
if (kernelSpec.display_name === activeInterpreter?.displayName) {
return activeInterpreter;
}
// Check if kernel is `Python2` or `Python3` or a similar generic kernel.
const match = detectDefaultKernelName(kernelSpec.name);
if (match && match.groups()) {
// 4. Look for interpreter with same major version
const majorVersion = parseInt(match.groups()!.version, 10) || 0;
// If the major versions match, that's sufficient.
if (!majorVersion || (activeInterpreter?.version && activeInterpreter.version.major === majorVersion)) {
traceInfo(
`Using current interpreter for kernel ${kernelSpec.name}, ${kernelSpec.display_name}, (interpreter is ${activeInterpreter?.displayName} # ${activeInterpreter?.path})`
);
return activeInterpreter;
}
// Find an interpreter that matches the major version.
const allInterpreters = await allInterpretersPromise;
const found = allInterpreters.find((item) => item.version?.major === majorVersion);
// If we cannot find a matching one, then use the current interpreter.
if (found) {
traceVerbose(
`Using interpreter ${found.path} for the kernel ${kernelSpec.name}, ${kernelSpec.display_name}`
);
return found;
}
traceWarning(
`Unable to find an interpreter that matches the kernel ${kernelSpec.name}, ${kernelSpec.display_name}, some features might not work , (interpreter is ${activeInterpreter?.displayName} # ${activeInterpreter?.path}).`
);
return activeInterpreter;
} else {
// 5. Look for interpreter with same display name across all interpreters.
// If the display name matches the active interpreter then use that.
// Look in all of our interpreters if we have something that matches this.
const allInterpreters = await allInterpretersPromise;
if (Cancellation.isCanceled(cancelToken)) {
return;
}
const found = allInterpreters.find((item) => item.displayName === kernelSpec.display_name);
if (found) {
traceVerbose(
`Found an interpreter that has the same display name as kernelspec ${kernelSpec.display_name}, matches interpreter ${found.displayName} # ${found.path}`
);
return found;
} else {
traceWarning(
`Unable to determine version of Python interpreter to use for kernel ${kernelSpec.name}, ${kernelSpec.display_name}, some features might not work , (interpreter is ${activeInterpreter?.displayName} # ${activeInterpreter?.path}).`
);
return activeInterpreter;
}
}
}
public async searchAndRegisterKernel(
interpreter: PythonEnvironment,
disableUI?: boolean,
cancelToken?: CancellationToken
): Promise<IJupyterKernelSpec | undefined> {
// If a kernelspec already exists for this, then use that.
const found = await this.findMatchingKernelSpec(interpreter, undefined, cancelToken);
if (found) {
sendTelemetryEvent(Telemetry.UseExistingKernel);
// Make sure the kernel is up to date with the current environment before
// we return it.
await this.updateKernelEnvironment(interpreter, found, cancelToken);
return found;
}
return this.registerKernel(interpreter, disableUI, cancelToken);
}
/**
* Registers an interpreter as a kernel.
* The assumption is that `ipykernel` has been installed in the interpreter.
* Kernel created will have following characteristics:
* - display_name = Display name of the interpreter.
* - metadata.interperter = Interpreter information (useful in finding a kernel that matches a given interpreter)
* - env = Will have environment variables of the activated environment.
*
* @param {PythonEnvironment} interpreter
* @param {boolean} [disableUI]
* @param {CancellationToken} [cancelToken]
* @returns {Promise<IJupyterKernelSpec>}
* @memberof KernelService
*/
// eslint-disable-next-line
// eslint-disable-next-line complexity
@captureTelemetry(Telemetry.RegisterInterpreterAsKernel, undefined, true)
@traceDecorators.error('Failed to register an interpreter as a kernel')
@reportAction(ReportableAction.KernelsRegisterKernel)
// eslint-disable-next-line
public async registerKernel(
interpreter: PythonEnvironment,
disableUI?: boolean,
cancelToken?: CancellationToken
): Promise<IJupyterKernelSpec | undefined> {
if (!interpreter.displayName) {
throw new Error('Interpreter does not have a display name');
}
const execServicePromise = this.execFactory.createActivatedEnvironment({
interpreter,
allowEnvironmentFetchExceptions: true,
bypassCondaExecution: true
});
// Swallow errors if we get out of here and not resolve this.
execServicePromise.ignoreErrors();
const name = this.generateKernelNameForInterpreter(interpreter);
// If ipykernel is not installed, prompt to install it.
if (!(await this.kernelDependencyService.areDependenciesInstalled(interpreter, cancelToken)) && !disableUI) {
// If we wish to wait for installation to complete, we must provide a cancel token.
const token = new CancellationTokenSource();
const response = await this.kernelDependencyService.installMissingDependencies(
interpreter,
wrapCancellationTokens(cancelToken, token.token)
);
if (response !== KernelInterpreterDependencyResponse.ok) {
traceWarning(
`Prompted to install ipykernel, however ipykernel not installed in the interpreter ${interpreter.path}. Response ${response}`
);
return;
}
}
if (Cancellation.isCanceled(cancelToken)) {
return;
}
const execService = await execServicePromise;
const output = await execService.execModule(
'ipykernel',
['install', '--user', '--name', name, '--display-name', interpreter.displayName],
{
throwOnStdErr: false,
encoding: 'utf8',
token: cancelToken
}
);
if (Cancellation.isCanceled(cancelToken)) {
return;
}
let kernel = await this.findMatchingKernelSpec(
{ display_name: interpreter.displayName, name },
undefined,
cancelToken
);
// Wait for at least 5s. We know launching a python (conda env) process on windows can sometimes take around 4s.
for (let counter = 0; counter < 10; counter += 1) {
if (Cancellation.isCanceled(cancelToken)) {
return;
}
if (kernel) {
break;
}
traceWarning('Waiting for 500ms for registered kernel to get detected');
// Wait for jupyter server to get updated with the new kernel information.
await sleep(500);
kernel = await this.findMatchingKernelSpec(
{ display_name: interpreter.displayName, name },
undefined,
cancelToken
);
}
if (!kernel) {
// Possible user doesn't have kernelspec installed.
kernel = await this.getKernelSpecFromStdOut(await execService.getExecutablePath(), output.stdout).catch(
(ex) => {
traceError('Failed to get kernelspec from stdout', ex);
return undefined;
}
);
}
if (!kernel) {
const error = `Kernel not created with the name ${name}, display_name ${interpreter.displayName}. Output is ${output.stdout}`;
throw new Error(error);
}
if (!(kernel instanceof JupyterKernelSpec)) {
const error = `Kernel not registered locally, created with the name ${name}, display_name ${interpreter.displayName}. Output is ${output.stdout}`;
throw new Error(error);
}
if (!kernel.specFile) {
const error = `kernel.json not created with the name ${name}, display_name ${interpreter.displayName}. Output is ${output.stdout}`;
throw new Error(error);
}
// Update the json with our environment.
await this.updateKernelEnvironment(interpreter, kernel, cancelToken, true);
sendTelemetryEvent(Telemetry.RegisterAndUseInterpreterAsKernel);
traceInfo(
`Kernel successfully registered for ${interpreter.path} with the name=${name} and spec can be found here ${kernel.specFile}`
);
return kernel;
}
public async updateKernelEnvironment(
interpreter: PythonEnvironment | undefined,
kernel: IJupyterKernelSpec,
cancelToken?: CancellationToken,
forceWrite?: boolean
) {
const specedKernel = kernel as JupyterKernelSpec;
if (specedKernel.specFile) {
let specModel: ReadWrite<Kernel.ISpecModel> = JSON.parse(
await this.fs.readLocalFile(specedKernel.specFile)
);
let shouldUpdate = false;
// Make sure the specmodel has an interpreter or already in the metadata or we
// may overwrite a kernel created by the user
if (interpreter && (specModel.metadata?.interpreter || forceWrite)) {
// Ensure we use a fully qualified path to the python interpreter in `argv`.
if (specModel.argv[0].toLowerCase() === 'conda') {
// If conda is the first word, its possible its a conda activation command.
traceInfo(`Spec argv[0], not updated as it is using conda.`);
} else {
traceInfo(`Spec argv[0] updated from '${specModel.argv[0]}' to '${interpreter.path}'`);
specModel.argv[0] = interpreter.path;
}
// Get the activated environment variables (as a work around for `conda run` and similar).
// This ensures the code runs within the context of an activated environment.
specModel.env = await this.activationHelper
.getActivatedEnvironmentVariables(undefined, interpreter, true)
.catch(noop)
// eslint-disable-next-line @typescript-eslint/no-explicit-any
.then((env) => (env || {}) as any);
if (Cancellation.isCanceled(cancelToken)) {
return;
}
// Special case, modify the PYTHONWARNINGS env to the global value.
// otherwise it's forced to 'ignore' because activated variables are cached.
if (specModel.env && process.env[PYTHON_WARNINGS]) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
specModel.env[PYTHON_WARNINGS] = process.env[PYTHON_WARNINGS] as any;
} else if (specModel.env && specModel.env[PYTHON_WARNINGS]) {
delete specModel.env[PYTHON_WARNINGS];
}
// Ensure we update the metadata to include interpreter stuff as well (we'll use this to search kernels that match an interpreter).
// We'll need information such as interpreter type, display name, path, etc...
// Its just a JSON file, and the information is small, hence might as well store everything.
specModel.metadata = specModel.metadata || {};
// eslint-disable-next-line @typescript-eslint/no-explicit-any
specModel.metadata.interpreter = interpreter as any;
// Indicate we need to write
shouldUpdate = true;
}
// Scrub the environment of the specmodel to make sure it has allowed values (they all must be strings)
// See this issue here: https://github.com/microsoft/vscode-python/issues/11749
if (specModel.env) {
specModel = cleanEnvironment(specModel);
shouldUpdate = true;
}
// Update the kernel.json with our new stuff.
if (shouldUpdate) {
await this.fs.writeLocalFile(specedKernel.specFile, JSON.stringify(specModel, undefined, 2));
}
// Always update the metadata for the original kernel.
specedKernel.metadata = specModel.metadata;
}
}
/**
* Gets a list of all kernel specs.
*
* @param {IJupyterSessionManager} [sessionManager]
* @param {CancellationToken} [cancelToken]
* @returns {Promise<IJupyterKernelSpec[]>}
* @memberof KernelService
*/
@reportAction(ReportableAction.KernelsGetKernelSpecs)
public async getKernelSpecs(
sessionManager?: IJupyterSessionManager,
cancelToken?: CancellationToken
): Promise<IJupyterKernelSpec[]> {
const enumerator = sessionManager
? sessionManager.getKernelSpecs()
: this.jupyterInterpreterExecService.getKernelSpecs(cancelToken);
if (Cancellation.isCanceled(cancelToken)) {
return [];
}
traceInfo('Enumerating kernel specs...');
const specs: IJupyterKernelSpec[] = await enumerator;
const result = specs.filter((item) => !!item);
traceInfo(`Found ${result.length} kernelspecs`);
// Send telemetry on this enumeration.
const anyPython = result.find((k) => k.language === 'python') !== undefined;
sendTelemetryEvent(Telemetry.KernelEnumeration, undefined, {
count: result.length,
isPython: anyPython,
source: sessionManager ? 'connection' : 'cli'
});
return result;
}
/**
* Not all characters are allowed in a kernel name.
* This method will generate a name for a kernel based on display name and path.
 * Algorithm = <display name stripped of invalid characters> + <uuid>
*
* @private
* @param {PythonEnvironment} interpreter
* @memberof KernelService
*/
private generateKernelNameForInterpreter(interpreter: PythonEnvironment): string {
// Never change this logic, this is used in other places to determine the format of names we have generated.
return `${interpreter.displayName || ''}${uuid()}`.replace(/[^A-Za-z0-9]/g, '').toLowerCase();
}
/**
* Will scrape kernelspec info from the output when a new kernel is created.
*
* @private
* @param {string} output
* @returns {JupyterKernelSpec}
* @memberof KernelService
*/
@traceDecorators.error('Failed to parse kernel creation stdout')
private async getKernelSpecFromStdOut(pythonPath: string, output: string): Promise<JupyterKernelSpec | undefined> {
if (!output) {
return;
}
// Output should be of the form
// `Installed kernelspec <kernelname> in <path>`
const regEx = NamedRegexp('Installed\\skernelspec\\s(?<name>\\w*)\\sin\\s(?<path>.*)', 'g');
const match = regEx.exec(output);
if (!match || !match.groups()) {
return;
}
type RegExGroup = { name: string; path: string };
const groups = match.groups() as RegExGroup | undefined;
if (!groups || !groups.name || !groups.path) {
traceError('Kernel Output not parsed', output);
throw new Error('Unable to parse output to get the kernel info');
}
const specFile = await getRealPath(
this.fs,
this.execFactory,
pythonPath,
path.join(groups.path, 'kernel.json')
);
if (!specFile) {
throw new Error('KernelSpec file not found');
}
const kernelModel = JSON.parse(await this.fs.readLocalFile(specFile));
kernelModel.name = groups.name;
return new JupyterKernelSpec(kernelModel as Kernel.ISpecModel, specFile);
}
}
USocket.py | from socket import socket, AF_INET, SOCK_DGRAM, inet_aton, inet_ntoa
import time
sockets = {}
network = ('127.0.0.1', 12345)
def bytes_to_addr(bytes):
return inet_ntoa(bytes[:4]), int.from_bytes(bytes[4:8], 'big')
def addr_to_bytes(addr):
return inet_aton(addr[0]) + addr[1].to_bytes(4, 'big')
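# Example round-trip (illustrative): the 8-byte header packs (ip, port) and
# bytes_to_addr() recovers it.
#
# header = addr_to_bytes(('127.0.0.1', 8080))  # 4 IP bytes + 4 big-endian port bytes
# assert bytes_to_addr(header) == ('127.0.0.1', 8080)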
def get_sendto(id, rate=None):
if rate:
def sendto(data: bytes, addr):
time.sleep(len(data) / rate)
sockets[id].sendto(addr_to_bytes(addr) + data, network)
return sendto
else:
def sendto(data: bytes, addr):
sockets[id].sendto(addr_to_bytes(addr) + data, network)
return sendto
class UnreliableSocket:
def __init__(self, rate=None):
assert rate is None or rate > 0, 'Rate should be positive or None.'
sockets[id(self)] = socket(AF_INET, SOCK_DGRAM)
self.sendto = get_sendto(id(self), rate)
def bind(self, address: (str, int)):
sockets[id(self)].bind(address)
def recvfrom(self, bufsize) -> bytes:
data, frm = sockets[id(self)].recvfrom(bufsize)
addr = bytes_to_addr(data[:8])
if frm == network:
return data[8:], addr
else:
return self.recvfrom(bufsize)
def settimeout(self, value):
sockets[id(self)].settimeout(value)
def gettimeout(self):
return sockets[id(self)].gettimeout()
def setblocking(self, flag):
sockets[id(self)].setblocking(flag)
def getblocking(self):
return sockets[id(self)].getblocking()
def getsockname(self):
return sockets[id(self)].getsockname()
def close(self):
sockets[id(self)].close()
getCode.js | /**
* Gets current code in editor.
*
* @category External
*
* @returns {string}
*/
getCode = function () {
let obj = arguments.callee;
while (!(obj.arguments[0] instanceof Processing)) {
obj = obj.caller;
if (obj.arguments[0] instanceof MouseEvent || obj.arguments[0] instanceof KeyboardEvent) return console.warn('`getCode` is not supported in Processing event functions.');
}
return obj.caller.arguments[0];
};
sensitivities.py | """
Sensitivities of *DSLR* Cameras
===============================
Defines the sensitivities of *DSLR* cameras.
Each *DSLR* camera data is in the form of a *dict* of
:class:`colour.characterisation.RGB_CameraSensitivities` classes as follows::
{
'name': RGB_CameraSensitivities,
...,
'name': RGB_CameraSensitivities
}
The following *DSLR* cameras are available:
- Nikon 5100 (NPL)
- Sigma SDMerill (NPL)
References
----------
- :cite:`Darrodi2015a` : Darrodi, M. M., Finlayson, G., Goodman, T., &
Mackiewicz, M. (2015). Reference data set for camera spectral sensitivity
estimation. Journal of the Optical Society of America A, 32(3), 381.
doi:10.1364/JOSAA.32.000381
"""
from __future__ import annotations
from functools import partial
from colour.characterisation import RGB_CameraSensitivities
from colour.hints import Dict
from colour.utilities import LazyCaseInsensitiveMapping
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"DATA_CAMERA_SENSITIVITIES_DSLR",
"MSDS_CAMERA_SENSITIVITIES_DSLR",
]
DATA_CAMERA_SENSITIVITIES_DSLR: Dict = {
"Nikon 5100 (NPL)": {
380.0: (
0.00156384299336578000,
0.00011500000000000000,
0.00180956039402335990,
),
385.0: (
0.00189691771384825000,
0.00152114360178015000,
0.00048982814544150399,
),
390.0: (
0.00000000000000000000,
0.00057430499183558695,
0.00087943069176996504,
),
395.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00000000000000000000,
),
400.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00153246068848051000,
),
405.0: (
0.00071776703300973298,
0.00119722386224553000,
0.00569805602282062030,
),
410.0: (
0.00292397466563330000,
0.00133571498448177000,
0.01660828769874150200,
),
415.0: (
0.01293626801713740000,
0.01319431696052810100,
0.07879120559214590500,
),
420.0: (
0.04959786481566520000,
0.06497102451249539600,
0.36171350364994898000,
),
425.0: (
0.07607250435970400200,
0.11510308718828900000,
0.65970462106512295000,
),
430.0: (
0.07658892708274399300,
0.13706582547087201000,
0.75534360010359503000,
),
435.0: (
0.06833381956036009600,
0.15242852584030600000,
0.81045312707380701000,
),
440.0: (
0.06131816189646559900,
0.16864005450745301000,
0.87494523362472998000,
),
445.0: (
0.05473314457789760200,
0.18329934605049600000,
0.92671273991178704000,
),
450.0: (
0.04886204743702320100,
0.19603263456229600000,
0.96314088025989897000,
),
455.0: (
0.04284591974257399800,
0.21733653278361301000,
0.98065048133510302000,
),
460.0: (
0.04022845332691499900,
0.25424357380995000000,
1.00000000000000000000,
),
465.0: (
0.04340795992263239700,
0.30864811930649899000,
0.99640467488711104000,
),
470.0: (
0.04762021431177430200,
0.37346871184252001000,
0.98896988650084305000,
),
475.0: (
0.05077188480559390000,
0.42915806139893697000,
0.95660139953157997000,
),
480.0: (
0.05280329597225499900,
0.45965432432137399000,
0.90495886986980800000,
),
485.0: (
0.05257122025495090300,
0.47106435446394301000,
0.83940927710351598000,
),
490.0: (
0.04789463902845950100,
0.48885616444524799000,
0.75146259578963404000,
),
495.0: (
0.04823994170483859900,
0.53715178104087602000,
0.66010202032260801000,
),
500.0: (
0.05022924089718029700,
0.61649118695883898000,
0.56706879193613802000,
),
505.0: (
0.05507649735001429700,
0.70700638759968903000,
0.47935094782603899000,
),
510.0: (
0.06370211901178619900,
0.80096424601366301000,
0.39406273870351299000,
),
515.0: (
0.08038951305895999900,
0.88137256686267296000,
0.31427061879449603000,
),
520.0: (
0.10038750399831201000,
0.93887792119838498000,
0.24981663439426000000,
),
525.0: (
0.11861314902313400000,
0.98446559576523596000,
0.20182351924718100000,
),
530.0: (
0.12360875120338000000,
1.00000000000000000000,
0.16163395085177601000,
),
535.0: (
0.10306249932787701000,
0.99084026557129701000,
0.13516143147333401000,
),
540.0: (
0.07634108360672720000,
0.96154626462922099000,
0.10998875716043301000,
),
545.0: (
0.05278086364640900000,
0.92814388346877297000,
0.08639435407789379500,
),
550.0: (
0.04118873831058649700,
0.88910231592076505000,
0.06525313059219839400,
),
555.0: (
0.03904385351931050100,
0.83494222924161199000,
0.04785595345227559900,
),
560.0: (
0.04254429440089119900,
0.77631807500187500000,
0.03413932303860940000,
),
565.0: (
0.06021313241068020100,
0.70731424532056497000,
0.02401990976851929900,
),
570.0: (
0.11179621705066800000,
0.63579620249170998000,
0.01976793598476750100,
),
575.0: (
0.26967059703276203000,
0.56551528450380395000,
0.01634844781073010000,
),
580.0: (
0.56450337990639099000,
0.49275517253522499000,
0.01381733937020259900,
),
585.0: (
0.85360126947261405000,
0.42475654159075799000,
0.01195294647966710000,
),
590.0: (
0.98103242181506201000,
0.35178931226078303000,
0.01000909395820090100,
),
595.0: (
1.00000000000000000000,
0.27817849879541801000,
0.00758776308929657970,
),
600.0: (
0.96307105371259005000,
0.21167353249961901000,
0.00645584463521649970,
),
605.0: (
0.90552061898043101000,
0.15671644549433000000,
0.00522978285684488030,
),
610.0: (
0.83427841652645296000,
0.11803962073050200000,
0.00365998459503786990,
),
615.0: (
0.76798733762510296000,
0.08885249534231440300,
0.00395538505488667040,
),
620.0: (
0.70366798041157996000,
0.07010184404853669900,
0.00396835221654468030,
),
625.0: (
0.63916484476123703000,
0.05690899470893220200,
0.00349138004486036990,
),
630.0: (
0.57081292173776299000,
0.04729879101895839700,
0.00404302103181797010,
),
635.0: (
0.49581796193158800000,
0.04119589002556579800,
0.00418929985295813000,
),
640.0: (
0.43833913452368101000,
0.03525207084991220000,
0.00554676856500057980,
),
645.0: (
0.38896992260406899000,
0.03069313144532450100,
0.00546423323547744030,
),
650.0: (
0.34295621205484700000,
0.02680396295683950100,
0.00597382847392098970,
),
655.0: (
0.29278541836293998000,
0.02352430119871520100,
0.00630906774763779000,
),
660.0: (
0.23770718073119301000,
0.02034633252474659900,
0.00610412697742267980,
),
665.0: (
0.16491386803178501000,
0.01545848325340879900,
0.00483655792375416000,
),
670.0: (
0.09128771706377150600,
0.00944075104617158980,
0.00302664794586984980,
),
675.0: (
0.04205615047283590300,
0.00508102204063505970,
0.00172169700987674990,
),
680.0: (
0.02058267877678380100,
0.00291019166901752010,
0.00078065128657817595,
),
685.0: (
0.01028680596369610000,
0.00162657557793382010,
0.00056963070848184102,
),
690.0: (
0.00540759846247261970,
0.00092251569139627796,
0.00027523296133938200,
),
695.0: (
0.00272409261591003000,
0.00049743349969026901,
0.00029672137857068598,
),
700.0: (
0.00127834798711079000,
0.00041215940263165701,
0.00024951192304202899,
),
705.0: (
0.00078123118374132301,
0.00031692634104666300,
8.5000000000000006e-05,
),
710.0: (
0.00047981421940270001,
0.00025621496960251102,
0.00041916895092770603,
),
715.0: (
0.00049133356428571098,
0.00000000000000000000,
0.00015331743444139899,
),
720.0: (
0.00017414897796340199,
0.00024353518865341200,
1.8300000000000001e-05,
),
725.0: (
0.00012017462571764001,
6.0200000000000000e-05,
0.00000000000000000000,
),
730.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00033869381945204901,
),
735.0: (
6.1199999999999997e-05,
0.00000000000000000000,
0.00000000000000000000,
),
740.0: (
0.00000000000000000000,
0.00000000000000000000,
0.00000000000000000000,
),
745.0: (
0.00000000000000000000,
1.7099999999999999e-05,
0.00016527828734010200,
),
750.0: (
0.00031099754946016501,
5.2099999999999999e-05,
0.00017755262214537101,
),
755.0: (
0.00000000000000000000,
8.8499999999999996e-05,
0.00000000000000000000,
),
760.0: (
0.00000000000000000000,
0.00000000000000000000,
2.4300000000000001e-05,
),
765.0: (
0.00000000000000000000,
0.00000000000000000000,
6.1799999999999998e-05,
),
770.0: (
8.5599999999999994e-05,
0.00013799999999999999,
0.00026260703183506501,
),
775.0: (
0.00013831372865247499,
0.0001786501727059410,
0.00028050537004191899,
),
780.0: (
3.6199999999999999e-05,
4.2500000000000003e-05,
0.00000000000000000000,
),
},
"Sigma SDMerill (NPL)": {
400.0: (
0.00562107440608700020,
0.00632809751263116970,
0.16215942413307899000,
),
410.0: (
0.00650335624511722000,
0.00976180459591275040,
0.28549837804628603000,
),
420.0: (
0.07407911289140040000,
0.02527177008261050100,
0.39690431060902098000,
),
430.0: (
0.04302295946292879900,
0.08375118585311219800,
0.50831024317175599000,
),
440.0: (
0.03450952562247010200,
0.14370381974360999000,
0.62211847246948804000,
),
450.0: (
0.01889156723434350100,
0.18361168930882199000,
0.73742136245769496000,
),
460.0: (
0.00731107699680200000,
0.40909478009952999000,
0.94538036670138004000,
),
470.0: (
0.04549915123096019700,
0.51595564086176404000,
0.96441494770280400000,
),
480.0: (
0.05676752921111680200,
0.60120664662705503000,
1.00000000000000000000,
),
490.0: (
0.13419592065917799000,
0.67031679980136305000,
0.98598021188452500000,
),
500.0: (
0.16475268997837600000,
0.75258747153475802000,
0.98340266357529005000,
),
510.0: (
0.21712641978639199000,
0.84381384368944201000,
0.96969219567072595000,
),
520.0: (
0.30648343835824399000,
0.90151724558812696000,
0.94280817402079797000,
),
530.0: (
0.34984579614888500000,
0.91975030668767699000,
0.89664279918070899000,
),
540.0: (
0.44374258133259298000,
0.96799429052157804000,
0.88444590220041897000,
),
550.0: (
0.44488860528126301000,
0.95725231064041105000,
0.86791899071597101000,
),
560.0: (
0.47897575674702603000,
0.95204791860047400000,
0.83375679584908402000,
),
570.0: (
0.50950291481073895000,
0.97628014458399803000,
0.83204140240572999000,
),
580.0: (
0.59262909378530504000,
0.97258624388955806000,
0.80054956384778198000,
),
590.0: (
0.67383327560697603000,
1.00000000000000000000,
0.78289512474646505000,
),
600.0: (
0.71403771488106504000,
0.96948452757777404000,
0.73946953007191796000,
),
610.0: (
0.86000761311495100000,
0.95441319124850699000,
0.66718640174985699000,
),
620.0: (
0.89810302849565204000,
0.93335435890921303000,
0.62043627806816704000,
),
630.0: (
1.00000000000000000000,
0.92571406833636205000,
0.61116087876956704000,
),
640.0: (
0.99494213311245205000,
0.88486439541503403000,
0.55173556195710605000,
),
650.0: (
0.92085127736137995000,
0.76165184741615699000,
0.46538831744516401000,
),
660.0: (
0.18143311631425299000,
0.14052437057150499000,
0.07961907836720690000,
),
670.0: (
0.00630978795372749960,
0.00414367215817645990,
0.00059244446107236802,
),
680.0: (
0.00528874383171553000,
0.00183198958165669010,
0.00468563680483140980,
),
},
}
MSDS_CAMERA_SENSITIVITIES_DSLR = LazyCaseInsensitiveMapping(
{
"Nikon 5100 (NPL)": partial(
RGB_CameraSensitivities,
DATA_CAMERA_SENSITIVITIES_DSLR["Nikon 5100 (NPL)"],
name="Nikon 5100 (NPL)",
),
"Sigma SDMerill (NPL)": partial(
RGB_CameraSensitivities,
DATA_CAMERA_SENSITIVITIES_DSLR["Sigma SDMerill (NPL)"],
name="Sigma SDMerill (NPL)",
),
}
)
"""
Multi-spectral distributions of *DSLR* camera sensitivities.
References
----------
:cite:`Darrodi2015a`
""" | 2.4300000000000001e-05,
), |
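# Illustrative access (lazy construction is an assumption about
# LazyCaseInsensitiveMapping: the partial above is resolved on first lookup):
#
# msds = MSDS_CAMERA_SENSITIVITIES_DSLR["Nikon 5100 (NPL)"]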
log_decorator.py | # -*- coding: UTF-8 -*-
from common_utils.new_log import NewLog
class LogDecorator:
log = NewLog(__name__)
logger = log.get_log()
def __call__(self, func):
def wrapper(*args, **kw):
self.logger.debug("call method %s ===============" % func.__name__)
self.logger.debug("method [%s] input args: [%s], kw: [%s]" % (func.__name__, args, kw))
result = func(*args, **kw)
self.logger.debug("method [%s] response: [%s]" % (func.__name__, result))
return result
return wrapper
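# Illustrative usage (hypothetical function): an instance of the class is the
# decorator, so it is applied with parentheses.
#
# @LogDecorator()
# def add(a, b):
#     return a + b
#
# add(1, 2)  # DEBUG-logs the call, its args/kwargs, and the result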
|
test_views.py | import shutil
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.cache import cache
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django import forms
from ..models import Post, Group, Follow
User = get_user_model()
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
class PaginatorTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='NoName')
cls.user_client = Client()
cls.user_client.force_login(cls.user)
cls.count_posts_on_page = 10
cls.count_posts = 15
cls.rest_posts = cls.count_posts % cls.count_posts_on_page
cls.group = Group.objects.create(
title='Тестовый заголовок',
slug='test-slug',
description='Описание...'
)
posts = (
Post(
text='Тестовый текст поста.',
author=cls.user,
group=cls.group
) for i in range(cls.count_posts)
)
Post.objects.bulk_create(posts, cls.count_posts)
def test_paginator_pages(self):
pages_paginator = [
reverse(
'posts:index'
),
reverse(
'posts:group_list',
kwargs={'slug': PaginatorTest.group.slug}
),
reverse(
'posts:profile',
kwargs={'username': PaginatorTest.user}
)
]
for page in pages_paginator:
with self.subTest(page=page):
response = PaginatorTest.user_client.get(page)
self.assertEqual(
len(response.context['page_obj']), (
PaginatorTest.count_posts_on_page
)
)
response = PaginatorTest.user_client.get(
page + '?page=2'
)
self.assertEqual(
len(response.context['page_obj']),
PaginatorTest.rest_posts
)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class PostPagesTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='NoName')
cls.user_client = Client()
cls.user_client.force_login(cls.user)
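# Minimal valid 2x1 GIF used as an in-memory image fixture for upload tests.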
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
image = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
cls.group = Group.objects.create(
title='Тестовый заголовок',
slug='test-slug',
description='Описание...'
)
cls.post = Post.objects.create(
text='Тестовый текст поста.',
image=image,
author=cls.user,
group=cls.group
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
def test_pages_uses_correct_template(self):
templates_pages_names = {
reverse('posts:index'): 'posts/index.html',
reverse(
'posts:group_list',
kwargs={'slug': PostPagesTests.group.slug}
): 'posts/group_list.html',
reverse(
'posts:profile',
kwargs={'username': PostPagesTests.user}
): 'posts/profile.html',
reverse(
'posts:post_detail',
kwargs={'post_id': PostPagesTests.post.id}
): 'posts/post_detail.html',
reverse('posts:post_create'): 'posts/create_post.html',
reverse(
'posts:post_edit',
kwargs={'post_id': PostPagesTests.post.id}
): 'posts/create_post.html'
}
for reverse_name, template in templates_pages_names.items():
with self.subTest(reverse_name=reverse_name):
response = PostPagesTests.user_client.get(reverse_name)
self.assertTemplateUsed(response, template)
def check_post_context(self, post):
self.assertEqual(post.id, PostPagesTests.post.id)
self.assertEqual(post.author, PostPagesTests.post.author)
self.assertEqual(post.text, PostPagesTests.post.text)
self.assertEqual(post.image, PostPagesTests.post.image)
self.assertEqual(post.group, PostPagesTests.post.group)
def test_index_page_context(self):
response = PostPagesTests.user_client.get(reverse('posts:index'))
self.check_post_context(response.context['page_obj'][0])
def test_group_list_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:group_list',
kwargs={'slug': PostPagesTests.group.slug}
)
)
self.check_post_context(response.context['page_obj'][0])
self.assertEqual(
PostPagesTests.group,
response.context['group']
)
def test_new_group_list_none(self):
group = Group.objects.create(
title='Тестовый заголовок',
slug='test-slug-new',
description='Описание...'
)
response = PostPagesTests.user_client.get(
reverse(
'posts:group_list',
kwargs={'slug': group.slug}
)
)
self.assertEqual(len(response.context['page_obj']), 0)
def test_profile_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:profile',
kwargs={'username': PostPagesTests.user}
)
)
self.check_post_context(response.context['page_obj'][0])
self.assertEqual(
PostPagesTests.user,
response.context['author']
)
self.assertIsNotNone(response.context['following'])
def test_post_detail_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:post_detail',
kwargs={'post_id': PostPagesTests.post.id}
)
)
self.check_post_context(response.context['post'])
form_fields = {'text': forms.fields.CharField}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_fields = response.context['form'].fields.get(value)
self.assertIsInstance(form_fields, expected)
self.assertIsNotNone(response.context['comments'])
def test_edit_post_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:post_edit',
kwargs={'post_id': PostPagesTests.post.id}
)
)
self.assertIsNotNone(response.context['form'])
self.assertIsNotNone(response.context['is_edit'])
self.assertTrue(response.context['is_edit'])
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_fields = response.context['form'].fields.get(value)
self.assertIsInstance(form_fields, expected)
def test_create_post_page_context(self):
response = PostPagesTests.user_client.get(
reverse('posts:post_create')
)
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_fields = response.context['form'].fields.get(value)
self.assertIsInstance(form_fields, expected)
def test_cache_index(self):
post = Post.objects.create(
text='Тестовый текст поста.',
author=PostPagesTests.user,
)
response = PostPagesTests.user_client.get(
reverse('posts:index')
)
page = response.content
post.delete()
response = PostPagesTests.user_client.get(
reverse('posts:index')
)
self.assertEqual(page, response.content)
cache.clear()
response = PostPagesTests.user_client.get(
reverse('posts:index')
)
self.assertNotEqual(page, response.content)
class FollowTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='NoName')
cls.user_client = Client()
cls.user_client.force_login(cls.user)
cls.author = User.objects.create_user(username='Author')
cls.author_client = Client()
cls.author_client.force_login(cls.author)
def test_following_auth(self):
# FIXME: create the follow relation through the ORM rather than via the view
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
# FIXME: should assert that the DB gained exactly one Follow object
follow = Follow.objects.last()
self.assertEqual(follow.user, FollowTests.user)
self.assertEqual(follow.author, FollowTests.author)
def test_unfollow_auth(self):
# FIXME: create the follow relation through the ORM rather than via the view
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
follows_count = Follow.objects.count()
FollowTests.user_client.get(
reverse(
'posts:profile_unfollow',
kwargs={'username': FollowTests.author}
)
)
self.assertEqual(Follow.objects.count(), follows_count - 1)
def test_new_post_follow(self):
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
post = Post.objects.create(
text='Тестовый текст поста.',
author=FollowTests.author,
)
response = FollowTests.user_client.get(
reverse(
'posts:follow_index'
)
)
self.assertEqual(
post.id, response.context['page_obj'][0].id
)
def test_new_post_unfollow(self):
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
Post.objects.create(
text='Тестовый текст поста.',
author=FollowTests.author,
)
user = User.objects.create_user(username='NameNo')
user_client = Client()
user_client.force_login(user)
response = user_client.get(
reverse(
'posts:follow_index'
)
)
self.assertEqual(len(response.context['page_obj']), 0)
milvus.go | package controllers
import (
"context"
"fmt"
"github.com/milvus-io/milvus-operator/apis/milvus.io/v1alpha1"
"github.com/milvus-io/milvus-operator/pkg/helm"
"github.com/milvus-io/milvus-operator/pkg/util"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func IsSetDefaultDone(mc *v1alpha1.Milvus) bool {
return mc.Status.Status != ""
}
func (r *MilvusReconciler) Finalize(ctx context.Context, mil v1alpha1.Milvus) error {
deletingReleases := map[string]bool{}
if mil.Spec.Dep.Etcd.InCluster.DeletionPolicy == v1alpha1.DeletionPolicyDelete {
deletingReleases[mil.Name+"-etcd"] = mil.Spec.Dep.Etcd.InCluster.PVCDeletion
}
if mil.Spec.Dep.Storage.InCluster.DeletionPolicy == v1alpha1.DeletionPolicyDelete {
deletingReleases[mil.Name+"-minio"] = mil.Spec.Dep.Storage.InCluster.PVCDeletion
}
if len(deletingReleases) > 0 {
cfg := r.helmReconciler.NewHelmCfg(mil.Namespace)
errs := []error{}
for releaseName, deletePVC := range deletingReleases {
if err := helm.Uninstall(cfg, releaseName); err != nil {
errs = append(errs, err)
continue
}
if deletePVC {
pvcList := &corev1.PersistentVolumeClaimList{}
if err := r.List(ctx, pvcList, &client.ListOptions{
Namespace: mil.Namespace,
LabelSelector: labels.SelectorFromSet(map[string]string{
AppLabelInstance: releaseName,
}),
}); err != nil {
errs = append(errs, err)
continue
}
for _, pvc := range pvcList.Items {
if err := r.Delete(ctx, &pvc); err != nil {
errs = append(errs, err)
} else {
r.logger.Info("pvc deleted", "name", pvc.Name, "namespace", pvc.Namespace)
}
}
}
}
if len(errs) > 0 {
return errors.Errorf(util.JoinErrors(errs))
}
}
return nil
}
func (r *MilvusReconciler) SetDefault(ctx context.Context, mc *v1alpha1.Milvus) error {
if !mc.Spec.Dep.Etcd.External && len(mc.Spec.Dep.Etcd.Endpoints) == 0 {
mc.Spec.Dep.Etcd.Endpoints = []string{fmt.Sprintf("%s-etcd.%s:2379", mc.Name, mc.Namespace)}
}
if !mc.Spec.Dep.Storage.External && len(mc.Spec.Dep.Storage.Endpoint) == 0 {
mc.Spec.Dep.Storage.Endpoint = fmt.Sprintf("%s-minio.%s:9000", mc.Name, mc.Namespace)
}
return nil
}
// SetDefaultStatus update status if default not set; return true if updated, return false if not, return err if update failed
func (r *MilvusReconciler) SetDefaultStatus(ctx context.Context, mc *v1alpha1.Milvus) (bool, error) {
if mc.Status.Status == "" {
mc.Status.Status = v1alpha1.StatusCreating
err := r.Client.Status().Update(ctx, mc)
if err != nil {
return false, errors.Wrapf(err, "set mc default status[%s/%s] failed", mc.Namespace, mc.Name)
}
return true, nil
}
return false, nil
}
func (r *MilvusReconciler) ReconcileAll(ctx context.Context, mil v1alpha1.Milvus) error {
milvusReconcilers := []Func{
r.ReconcileEtcd,
r.ReconcileMinio,
r.ReconcileMilvus,
}
err := defaultGroupRunner.Run(milvusReconcilers, ctx, mil)
return errors.Wrap(err, "reconcile milvus")
}
func (r *MilvusReconciler) ReconcileMilvus(ctx context.Context, mil v1alpha1.Milvus) error {
if !IsDependencyReady(mil.Status.Conditions, false) {
return nil
}
if err := r.ReconcileConfigMaps(ctx, mil); err != nil {
return errors.Wrap(err, "configmap")
}
milvusComsReconcilers := []Func{
r.ReconcileDeployments,
r.ReconcileServices,
r.ReconcilePodMonitor,
}
err := defaultGroupRunner.Run(milvusComsReconcilers, ctx, mil)
return errors.Wrap(err, "reconcile components")
}
settings.py | """
Django settings for my_project project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*wi&v$_lj_y9m_4^i583hb+*zdmm&mx_=c$_v*j9lk*tyaiiwj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'my_apps.user_auth',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
registry.py | import inspect
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
def __repr__(self):
format_str = self.__class__.__name__ + '(name={}, items={})'.format(
self._name, list(self._module_dict.keys()))
return format_str
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
return self._module_dict.get(key, None)
def _register_module(self, module_class):
"""Register a module.
Args:
module (:obj:`nn.Module`): Module to be registered.
"""
if not inspect.isclass(module_class):
raise TypeError('module must be a class, but got {}'.format(
type(module_class)))
module_name = module_class.__name__
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
self._module_dict[module_name] = module_class
def register_module(self, cls):
self._register_module(cls)
return cls
def build_from_cfg(cfg, registry, default_args=None):
"""Build a module from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
registry (:obj:`Registry`): The registry to search the type from.
default_args (dict, optional): Default initialization arguments.
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and 'TYPE' in cfg
assert isinstance(default_args, dict) or default_args is None
args = cfg.copy()
obj_type = args.pop('TYPE')
if isinstance(obj_type, str):
obj_cls = registry.get(obj_type)
if obj_cls is None:
raise KeyError('{} is not in the {} registry'.format(
obj_type, registry.name))
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError('type must be a str or valid type, but got {}'.format(
type(obj_type)))
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
return obj_cls(**args)
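# Illustrative usage (hypothetical registry and model class):
#
# MODELS = Registry('models')
#
# @MODELS.register_module
# class ResNet(object):
#     def __init__(self, depth):
#         self.depth = depth
#
# model = build_from_cfg({'TYPE': 'ResNet', 'depth': 50}, MODELS)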
socket.rs | use super::{RawDevice, Type};
use crate::buffer::Buffer;
use crate::ethernet::{MacAddr, ADDR_LEN};
use crate::util::*;
use ifstructs::ifreq;
use libc::{self, pollfd, ETH_P_ALL, POLLIN};
use nix::{
errno::{errno, Errno},
sys::socket::{bind, socket, AddressFamily, LinkAddr, SockAddr, SockFlag, SockType},
unistd,
};
use std::convert::TryInto;
use std::error::Error;
use std::sync::Arc;
use std::thread::JoinHandle;
ioctl_readwrite_bad!(get_iface_index, 0x8933, ifreq);
ioctl_readwrite_bad!(get_iface_flags, libc::SIOCGIFFLAGS, ifreq);
ioctl_readwrite_bad!(get_hwaddr, libc::SIOCGIFHWADDR, ifreq);
#[derive(Debug)]
pub struct Device {
fd: i32,
name: String,
}
impl Device {
pub fn open(name: &str) -> Result<Arc<dyn RawDevice + Sync + Send>, Box<dyn Error>> {
let device = Device {
fd: socket(
AddressFamily::Packet,
SockType::Raw,
SockFlag::empty(),
Some(unsafe { ::std::mem::transmute(libc::ETH_P_ALL) }),
)?,
name: name.to_string(),
};
if device.fd == -1 {
device.close()?;
return Err(RuntimeError::new("socket failed".to_string()));
}
let mut ifr = ifreq::from_name(name)?;
if let Err(err) = unsafe { get_iface_index(device.fd, &mut ifr) } {
device.close()?;
return Err(Box::new(err));
// return Err(Box::new(RuntimeError::new("ioctl [SIOCGIFINDEX]".to_string())));
}
let socket_addr = SockAddr::Link(LinkAddr(libc::sockaddr_ll {
sll_family: libc::AF_PACKET.try_into().unwrap(),
sll_protocol: htons(ETH_P_ALL.try_into().unwrap()),
sll_ifindex: unsafe { ifr.ifr_ifru.ifr_ifindex },
sll_hatype: 0,
sll_pkttype: 0,
sll_halen: 0,
sll_addr: [0; 8],
}));
if let Err(err) = bind(device.fd, &socket_addr) {
device.close()?;
return Err(Box::new(err));
}
if let Err(err) = unsafe { get_iface_flags(device.fd, &mut ifr) } {
device.close()?;
return Err(Box::new(err));
}
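        // Enable promiscuous mode so the interface delivers every frame on
        // the wire, not only those addressed to its own MAC.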
unsafe {
ifr.ifr_ifru.ifr_flags = ifr.ifr_ifru.ifr_flags | (libc::IFF_PROMISC as i16);
}
        if let Err(err) = unsafe { set_iface_flags(device.fd, &mut ifr) } {
device.close()?;
return Err(Box::new(err));
}
Ok(Arc::new(device))
}
}
impl RawDevice for Device {
fn type_(&self) -> Type {
Type::Socket
}
fn name(&self) -> &String {
&self.name
}
fn addr(&self) -> Result<MacAddr, Box<dyn Error>> {
let fd = socket(
AddressFamily::Inet,
SockType::Datagram,
SockFlag::empty(),
None,
)?;
let mut ifr = ifreq::from_name(self.name.as_str())?;
ifr.ifr_ifru.ifr_addr.sa_family = libc::AF_INET.try_into().unwrap();
if let Err(err) = unsafe { get_hwaddr(fd, &mut ifr) } {
unistd::close(fd)?;
Err(Box::new(err))
} else {
let addr = unsafe { ifr.ifr_ifru.ifr_hwaddr.sa_data };
let addr =
unsafe { &*(addr.as_ptr() as *const [i8; ADDR_LEN] as *const [u8; ADDR_LEN]) };
unsafe {
libc::close(fd);
}
Ok(MacAddr(*addr))
}
}
fn close(&self) -> Result<(), Box<dyn Error>> {
if self.fd != -1 {
unistd::close(self.fd)? // TODO
}
// free device
Ok(())
}
fn rx(
&self,
callback: Box<dyn FnOnce(Buffer) -> Result<Option<JoinHandle<()>>, Box<dyn Error>>>,
timeout: i32,
) -> Result<Option<JoinHandle<()>>, Box<dyn Error>> |
fn tx(&self, buf: Buffer) -> Result<(), Box<dyn Error>> {
let buf = buf.to_vec();
unsafe { libc::write(self.fd, buf.as_ptr() as *const libc::c_void, buf.len()) };
Ok(())
}
}
| {
let mut pfd = pollfd {
fd: self.fd,
events: POLLIN,
revents: 0,
};
match unsafe { libc::poll(&mut pfd, 1, timeout) } {
0 => return Ok(None), // timeout
-1 => {
if errno() != Errno::EINTR as i32 {
return Err(RuntimeError::new("poll error".to_string()));
} else {
return Ok(None);
}
}
_ => (),
}
let mut buf = vec![];
buf.resize(2048, 0);
let len: usize = match unsafe {
libc::read(self.fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len())
} {
0 => return Ok(None), // timeout
-1 => return Err(RuntimeError::new("read error".to_string())),
len => len,
}
.try_into()
.unwrap();
buf.resize(len, 0);
callback(Buffer::from_vec(buf))
} |
inst_business.go | /*
* Tencent is pleased to support the open source community by making 蓝鲸 available.
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package operation
import (
"context"
"strings"
"configcenter/src/apimachinery"
"configcenter/src/common"
"configcenter/src/common/blog"
"configcenter/src/common/condition"
"configcenter/src/common/mapstr"
"configcenter/src/common/metadata"
"configcenter/src/common/util"
"configcenter/src/scene_server/topo_server/core/inst"
"configcenter/src/scene_server/topo_server/core/model"
"configcenter/src/scene_server/topo_server/core/types"
)
// BusinessOperationInterface business operation methods
type BusinessOperationInterface interface {
CreateBusiness(params types.ContextParams, obj model.Object, data mapstr.MapStr) (inst.Inst, error)
DeleteBusiness(params types.ContextParams, obj model.Object, bizID int64) error
FindBusiness(params types.ContextParams, obj model.Object, fields []string, cond condition.Condition) (count int, results []inst.Inst, err error)
GetInternalModule(params types.ContextParams, obj model.Object, bizID int64) (count int, result *metadata.InnterAppTopo, err error)
UpdateBusiness(params types.ContextParams, data mapstr.MapStr, obj model.Object, bizID int64) error
SetProxy(set SetOperationInterface, module ModuleOperationInterface, inst InstOperationInterface, obj ObjectOperationInterface)
}
// NewBusinessOperation create a business instance
func NewB | ent apimachinery.ClientSetInterface) BusinessOperationInterface {
return &business{
clientSet: client,
}
}
type business struct {
clientSet apimachinery.ClientSetInterface
inst InstOperationInterface
set SetOperationInterface
module ModuleOperationInterface
obj ObjectOperationInterface
}
func (b *business) SetProxy(set SetOperationInterface, module ModuleOperationInterface, inst InstOperationInterface, obj ObjectOperationInterface) {
b.inst = inst
b.set = set
b.module = module
b.obj = obj
}
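// CreateBusiness creates the business instance together with its default
// resource set and the built-in resource/fault modules.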
func (b *business) CreateBusiness(params types.ContextParams, obj model.Object, data mapstr.MapStr) (inst.Inst, error) {
defaulFieldVal, err := data.Int64(common.BKDefaultField)
if nil != err {
blog.Errorf("[operation-biz] failed to create business, error info is did not set the default field, %s", err.Error())
return nil, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
if defaulFieldVal == int64(common.DefaultAppFlag) && params.SupplierAccount != common.BKDefaultOwnerID {
// this is a new supplier owner and prepare to create a new business.
asstQuery := map[string]interface{}{
common.BKOwnerIDField: common.BKDefaultOwnerID,
}
defaultOwnerHeader := util.CopyHeader(params.Header)
defaultOwnerHeader.Set(common.BKHTTPOwnerID, common.BKDefaultOwnerID)
asstRsp, err := b.clientSet.ObjectController().Meta().SelectObjectAssociations(context.Background(), defaultOwnerHeader, asstQuery)
if nil != err {
blog.Errorf("[operation-biz] failed to get default assts, error info is %s", err.Error())
return nil, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
if !asstRsp.Result {
return nil, params.Err.Error(asstRsp.Code)
}
expectAssts := asstRsp.Data
blog.Infof("copy asst for %s, %+v", params.SupplierAccount, expectAssts)
existAsstRsp, err := b.clientSet.ObjectController().Meta().SelectObjectAssociations(context.Background(), params.Header, asstQuery)
if nil != err {
blog.Errorf("[operation-biz] failed to get default assts, error info is %s", err.Error())
return nil, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
if !existAsstRsp.Result {
return nil, params.Err.Error(existAsstRsp.Code)
}
existAssts := existAsstRsp.Data
expectLoop:
for _, asst := range expectAssts {
asst.OwnerID = params.SupplierAccount
for _, existAsst := range existAssts {
if existAsst.ObjectID == asst.ObjectID &&
existAsst.AsstObjID == asst.AsstObjID &&
existAsst.AsstKindID == asst.AsstKindID {
continue expectLoop
}
}
createAsstRsp, err := b.clientSet.ObjectController().Meta().CreateObjectAssociation(context.Background(), params.Header, &asst)
if nil != err {
blog.Errorf("[operation-biz] failed to copy default assts, error info is %s", err.Error())
return nil, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
if !createAsstRsp.Result {
return nil, params.Err.Error(createAsstRsp.Code)
}
}
}
data.Set(common.BKOwnerIDField, params.SupplierAccount)
data.Set(common.BKSupplierIDField, common.BKDefaultSupplierID)
bizInst, err := b.inst.CreateInst(params, obj, data)
if nil != err {
blog.Errorf("[opeartion-biz] failed to create business, error info is %s", err.Error())
return bizInst, err
}
bizID, err := bizInst.GetInstID()
if nil != err {
blog.Errorf("[operation-biz] failed to create business, error info is %s", err.Error())
return bizInst, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
// create set
objSet, err := b.obj.FindSingleObject(params, common.BKInnerObjIDSet)
if nil != err {
blog.Errorf("failed to search the set, %s", err.Error())
return nil, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
setData := mapstr.New()
setData.Set(common.BKAppIDField, bizID)
setData.Set(common.BKInstParentStr, bizID)
setData.Set(common.BKSetNameField, common.DefaultResSetName)
setData.Set(common.BKDefaultField, common.DefaultResSetFlag)
setData.Set(common.BKOwnerIDField, params.SupplierAccount)
setInst, err := b.set.CreateSet(params, objSet, bizID, setData)
if nil != err {
blog.Errorf("[operation-biz] failed to create business, error info is %s", err.Error())
return bizInst, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
setID, err := setInst.GetInstID()
if nil != err {
blog.Errorf("[operation-biz] failed to create business, error info is %s", err.Error())
return bizInst, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
// create module
objModule, err := b.obj.FindSingleObject(params, common.BKInnerObjIDModule)
if nil != err {
blog.Errorf("failed to search the set, %s", err.Error())
return nil, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
moduleData := mapstr.New()
moduleData.Set(common.BKSetIDField, setID)
moduleData.Set(common.BKInstParentStr, setID)
moduleData.Set(common.BKAppIDField, bizID)
moduleData.Set(common.BKModuleNameField, common.DefaultResModuleName)
moduleData.Set(common.BKDefaultField, common.DefaultResModuleFlag)
moduleData.Set(common.BKOwnerIDField, params.SupplierAccount)
_, err = b.module.CreateModule(params, objModule, bizID, setID, moduleData)
if nil != err {
blog.Errorf("[operation-biz] failed to create business, error info is %s", err.Error())
return bizInst, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
// create fault module
faultModuleData := mapstr.New()
faultModuleData.Set(common.BKSetIDField, setID)
faultModuleData.Set(common.BKInstParentStr, setID)
faultModuleData.Set(common.BKAppIDField, bizID)
faultModuleData.Set(common.BKModuleNameField, common.DefaultFaultModuleName)
faultModuleData.Set(common.BKDefaultField, common.DefaultFaultModuleFlag)
faultModuleData.Set(common.BKOwnerIDField, params.SupplierAccount)
_, err = b.module.CreateModule(params, objModule, bizID, setID, faultModuleData)
if nil != err {
blog.Errorf("[operation-biz] failed to create business, error info is %s", err.Error())
return bizInst, params.Err.New(common.CCErrTopoAppCreateFailed, err.Error())
}
return bizInst, nil
}
func (b *business) DeleteBusiness(params types.ContextParams, obj model.Object, bizID int64) error {
setObj, err := b.obj.FindSingleObject(params, common.BKInnerObjIDSet)
if nil != err {
blog.Errorf("failed to search the set, %s", err.Error())
return err
}
bizObj, err := b.obj.FindSingleObject(params, common.BKInnerObjIDApp)
if nil != err {
blog.Errorf("failed to search the set, %s", err.Error())
return err
}
if err = b.set.DeleteSet(params, setObj, bizID, nil); nil != err {
blog.Errorf("[operation-biz] failed to delete the set, error info is %s", err.Error())
return params.Err.New(common.CCErrTopoAppDeleteFailed, err.Error())
}
innerCond := condition.CreateCondition()
innerCond.Field(common.BKOwnerIDField).Eq(params.SupplierAccount)
innerCond.Field(common.BKAppIDField).Eq(bizID)
return b.inst.DeleteInst(params, bizObj, innerCond, true)
}
func (b *business) FindBusiness(params types.ContextParams, obj model.Object, fields []string, cond condition.Condition) (count int, results []inst.Inst, err error) {
query := &metadata.QueryInput{}
cond.Field(common.BKDefaultField).Eq(0)
query.Condition = cond.ToMapStr()
query.Limit = int(cond.GetLimit())
query.Fields = strings.Join(fields, ",")
query.Sort = cond.GetSort()
query.Start = int(cond.GetStart())
return b.inst.FindInst(params, obj, query, false)
}
func (b *business) GetInternalModule(params types.ContextParams, obj model.Object, bizID int64) (count int, result *metadata.InnterAppTopo, err error) {
// search the sets
cond := condition.CreateCondition()
cond.Field(common.BKAppIDField).Eq(bizID)
cond.Field(common.BKDefaultField).Eq(common.DefaultResModuleFlag)
setObj, err := b.obj.FindSingleObject(params, common.BKInnerObjIDSet)
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
querySet := &metadata.QueryInput{}
querySet.Condition = cond.ToMapStr()
_, sets, err := b.set.FindSet(params, setObj, querySet)
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
// search modules
cond.Field(common.BKDefaultField).In([]int{
common.DefaultResModuleFlag,
common.DefaultFaultModuleFlag,
})
moduleObj, err := b.obj.FindSingleObject(params, common.BKInnerObjIDModule)
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
queryModule := &metadata.QueryInput{}
queryModule.Condition = cond.ToMapStr()
_, modules, err := b.module.FindModule(params, moduleObj, queryModule)
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
// construct result
result = &metadata.InnterAppTopo{}
for _, set := range sets {
id, err := set.GetInstID()
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
name, err := set.GetInstName()
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
result.SetID = id
result.SetName = name
break // should be only one set
}
for _, module := range modules {
id, err := module.GetInstID()
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
name, err := module.GetInstName()
if nil != err {
return 0, nil, params.Err.New(common.CCErrTopoAppSearchFailed, err.Error())
}
result.Module = append(result.Module, metadata.InnerModule{
ModuleID: id,
ModuleName: name,
})
}
return 0, result, nil
}
func (b *business) UpdateBusiness(params types.ContextParams, data mapstr.MapStr, obj model.Object, bizID int64) error {
innerCond := condition.CreateCondition()
innerCond.Field(common.BKOwnerIDField).Eq(params.SupplierAccount)
innerCond.Field(common.BKAppIDField).Eq(bizID)
return b.inst.UpdateInst(params, data, obj, innerCond, bizID)
}
| usinessOperation(cli |
Dockerfile.py | # Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import textwrap
applianceSelf = os.environ['TOIL_APPLIANCE_SELF']
sdistName = os.environ['_TOIL_SDIST_NAME']
dependencies = ' '.join(['libffi-dev', # For client side encryption for extras with PyNACL
'python3.6',
'python3.6-dev',
'python-dev', # For installing Python packages with native code
'python-pip', # Bootstrap pip, but needs upgrading, see below
'python3-pip',
'libcurl4-openssl-dev',
'libssl-dev',
'wget',
'curl',
'openssh-server',
'mesos=1.0.1-2.0.94.ubuntu1604',
"nodejs", # CWL support for javascript expressions
'rsync',
'screen',
'build-essential', # We need a build environment to build Singularity 3.
'uuid-dev',
'libgpgme11-dev',
'libseccomp-dev',
'pkg-config',
'squashfs-tools',
'cryptsetup',
'git'])
def heredoc(s):
s = textwrap.dedent(s).format(**globals())
return s[1:] if s.startswith('\n') else s
motd = heredoc('''
This is the Toil appliance. You can run your Toil script directly on the appliance.
Run toil <workflow>.py --help to see all options for running your workflow.
For more information see http://toil.readthedocs.io/en/latest/
Copyright (C) 2015-2018 Regents of the University of California
Version: {applianceSelf}
''')
# Prepare motd to be echoed in the Dockerfile using a RUN statement that uses printf
motd = ''.join(l + '\\n\\\n' for l in motd.splitlines())
print(heredoc('''
FROM ubuntu:16.04
RUN apt-get -y update --fix-missing && apt-get -y upgrade && apt-get -y install apt-transport-https ca-certificates software-properties-common && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN echo "deb http://repos.mesosphere.io/ubuntu/ xenial main" \
> /etc/apt/sources.list.d/mesosphere.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \
&& echo "deb http://deb.nodesource.com/node_6.x xenial main" \
> /etc/apt/sources.list.d/nodesource.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280
RUN add-apt-repository -y ppa:deadsnakes/ppa
RUN apt-get -y update --fix-missing && \
DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \
DEBIAN_FRONTEND=noninteractive apt-get -y install {dependencies} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz && \
tar xvf go1.13.3.linux-amd64.tar.gz && \
mv go/bin/* /usr/bin/ && \
mv go /usr/local/
RUN mkdir -p $(go env GOPATH)/src/github.com/sylabs && \
cd $(go env GOPATH)/src/github.com/sylabs && \
git clone https://github.com/sylabs/singularity.git && \
cd singularity && \
git checkout v3.4.2 && \
./mconfig && \
cd ./builddir && \
make -j4 && \
make install
RUN mkdir /root/.ssh && \
chmod 700 /root/.ssh
ADD waitForKey.sh /usr/bin/waitForKey.sh
ADD customDockerInit.sh /usr/bin/customDockerInit.sh
RUN chmod 777 /usr/bin/waitForKey.sh && chmod 777 /usr/bin/customDockerInit.sh
# The stock pip is too old and can't install from sdist with extras
RUN pip install --upgrade pip==9.0.1
# Default setuptools is too old
RUN pip install --upgrade setuptools==36.5.0
# Include virtualenv, as it is still the recommended way to deploy pipelines
RUN pip install --upgrade virtualenv==15.0.3
# Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)
RUN virtualenv --never-download /home/s3am \
&& /home/s3am/bin/pip install s3am==2.0 \
&& ln -s /home/s3am/bin/s3am /usr/local/bin/
# Install statically linked version of docker client
RUN curl https://download.docker.com/linux/static/stable/x86_64/docker-18.06.1-ce.tgz \
| tar -xvzf - --transform='s,[^/]*/,,g' -C /usr/local/bin/ \
&& chmod u+x /usr/local/bin/docker
# Fix for Mesos interface dependency missing on ubuntu |
# Fix for https://issues.apache.org/jira/browse/MESOS-3793
ENV MESOS_LAUNCHER=posix
# Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)
ENV TERM linux
# Run bash instead of sh inside of screen
ENV SHELL /bin/bash
RUN echo "defshell -bash" > ~/.screenrc
# An appliance may need to start more appliances, e.g. when the leader appliance launches the
# worker appliance on a worker node. To support this, we embed a self-reference into the image:
ENV TOIL_APPLIANCE_SELF {applianceSelf}
RUN mkdir /var/lib/toil
ENV TOIL_WORKDIR /var/lib/toil
# This component changes most frequently and keeping it last maximizes Docker cache hits.
COPY {sdistName} .
RUN pip install {sdistName}[all]
RUN rm {sdistName}
# We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect
# that the running appliance just gives you a shell. To start the Mesos master or slave
    # daemons, the user should override the entrypoint via --entrypoint.
RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \
&& printf '{motd}' > /etc/motd
''')) | RUN pip install protobuf==3.0.0 |
hyper.rs | #[macro_use]
extern crate lazy_static;
use hyper::{
header::CONTENT_TYPE,
service::{make_service_fn, service_fn},
Body, Request, Response, Server,
};
use opentelemetry::{
global,
metrics::{BoundCounter, BoundValueRecorder},
KeyValue,
};
use opentelemetry_prometheus::PrometheusExporter;
use prometheus::{Encoder, TextEncoder};
use std::convert::Infallible;
use std::sync::Arc;
use std::time::SystemTime;
lazy_static! {
static ref HANDLER_ALL: [KeyValue; 1] = [KeyValue::new("handler", "all")];
}
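/// Serves each request by encoding the current Prometheus metrics into the
/// response body while recording request count, response size, and latency.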
async fn serve_req(
_req: Request<Body>,
state: Arc<AppState>,
) -> Result<Response<Body>, hyper::Error> {
let request_start = SystemTime::now();
let mut buffer = vec![];
let encoder = TextEncoder::new();
let metric_families = state.exporter.registry().gather();
encoder.encode(&metric_families, &mut buffer).unwrap();
state.http_counter.add(1);
state.http_body_gauge.record(buffer.len() as u64);
let response = Response::builder()
.status(200)
.header(CONTENT_TYPE, encoder.format_type())
.body(Body::from(buffer))
.unwrap();
state
.http_req_histogram
.record(request_start.elapsed().map_or(0.0, |d| d.as_secs_f64()));
Ok(response)
}
struct AppState {
exporter: PrometheusExporter,
http_counter: BoundCounter<'static, u64>,
http_body_gauge: BoundValueRecorder<'static, u64>,
http_req_histogram: BoundValueRecorder<'static, f64>,
}
#[tokio::main]
pub async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> | {
let exporter = opentelemetry_prometheus::exporter().init();
let meter = global::meter("ex.com/hyper");
let state = Arc::new(AppState {
exporter,
http_counter: meter
.u64_counter("example.http_requests_total")
.with_description("Total number of HTTP requests made.")
.init()
.bind(HANDLER_ALL.as_ref()),
http_body_gauge: meter
.u64_value_recorder("example.http_response_size_bytes")
.with_description("The HTTP response sizes in bytes.")
.init()
.bind(HANDLER_ALL.as_ref()),
http_req_histogram: meter
.f64_value_recorder("example.http_request_duration_seconds")
.with_description("The HTTP request latencies in seconds.")
.init()
.bind(HANDLER_ALL.as_ref()),
});
// For every connection, we must make a `Service` to handle all
// incoming HTTP requests on said connection.
let make_svc = make_service_fn(move |_conn| {
let state = state.clone();
// This is the `Service` that will handle the connection.
// `service_fn` is a helper to convert a function that
// returns a Response into a `Service`.
async move { Ok::<_, Infallible>(service_fn(move |req| serve_req(req, state.clone()))) }
});
let addr = ([127, 0, 0, 1], 3000).into();
let server = Server::bind(&addr).serve(make_svc);
println!("Listening on http://{}", addr);
server.await?;
Ok(())
} |
|
make.go | /*
Copyright (c) 2017 Alexander Klauer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package queue
import(
"errors"
"fmt"
"reflect"
)
// Make creates a new queue.
// The argument qptr must be a pointer to an instance of
// a structure satisfying the constraints documented in GenericQueue.
// The parameter config can be used to specify the characteristics
// of the queue.
// A nil argument is permissible.
// In this case, the default configuration is used.
// On success, nil is returned.
// On error, an appropriate error is returned.
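// An illustrative call site (hypothetical names; the field shapes follow the
// tag contract enforced below):
//
//	var q struct {
//		Enqueue func(int)          `queue:"enqueue"`
//		Dequeue func() (int, bool) `queue:"dequeue"`
//	}
//	if err := Make(&q, nil); err != nil {
//		// handle error
//	}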
func Make( qptr interface{}, config *Config ) error | {
// Tag IDs
const(
queue = "queue"
enqueue = "enqueue"
dequeue = "dequeue"
)
// Get config
if config == nil {
config = DefaultConfig()
}
if !config.IsValid() {
return errors.New( "Invalid queue configuration" )
}
factory := config.factory()
if factory == nil {
return errors.New( "This queue configuration has not been implemented yet" )
}
factory.prepare()
defer factory.reset()
// Extract function pointers
qptrValue := reflect.ValueOf( qptr )
if qptrValue.Kind() != reflect.Ptr {
return errors.New( "The argument qptr must be a pointer" )
}
qValue := qptrValue.Elem()
qType := qValue.Type()
if qType.Kind() != reflect.Struct {
return errors.New( "The argument qptr must be a pointer to a structure" )
}
haveEnqueue := false
haveDequeue := false
var elementType reflect.Type = nil
for i := 0; i != qType.NumField(); i++ {
field := qType.Field( i )
tagstring := field.Tag.Get( queue )
switch tagstring {
case enqueue:
if field.Type.Kind() != reflect.Func {
return fmt.Errorf( "Field '%s' must be a function", field.Name )
}
if field.Type.NumIn() != 1 {
return fmt.Errorf( "Function '%s' must take exactly one argument", field.Name )
}
if field.Type.NumOut() != 0 {
return fmt.Errorf( "Function '%s' must not return anything", field.Name )
}
if elementType == nil {
elementType = field.Type.In( 0 )
} else {
if elementType != field.Type.In( 0 ) {
return fmt.Errorf( "Argument to function '%s' has wrong type '%s', expected '%s'", field.Name, field.Type.In( 0 ).Name(), elementType.Name() )
}
}
qValue.Field( i ).Set( factory.makeEnqueue( field.Type ) )
haveEnqueue = true
case dequeue:
if field.Type.Kind() != reflect.Func {
return fmt.Errorf( "Field '%s' must be a function", field.Name )
}
if field.Type.NumIn() != 0 {
return fmt.Errorf( "Function '%s' must not take any arguments", field.Name )
}
if field.Type.NumOut() != 2 {
return fmt.Errorf( "Function '%s' must return exactly two values", field.Name )
}
if elementType == nil {
elementType = field.Type.Out( 0 )
} else {
if elementType != field.Type.Out( 0 ) {
return fmt.Errorf( "First return value of function '%s' has wrong type '%s', expected '%s'", field.Name, field.Type.Out( 0 ).Name(), elementType.Name() )
}
}
if field.Type.Out( 1 ).Kind() != reflect.Bool {
return fmt.Errorf( "Second return value of function '%s' must have type bool", field.Name )
}
qValue.Field( i ).Set( factory.makeDequeue( field.Type ) )
haveDequeue = true
default:
continue
}
}
if !haveEnqueue || !haveDequeue {
return errors.New( "Passed structure must have enqueue and dequeue tags" )
}
factory.commit()
return nil
} |
|
session.rs | //! # Session
//!
//! `session` is the module providing the type that represent an I/O session.
use base::Result;
use base::Checkable;
use base::Serializable;
use base::Sizable;
use base::Datable;
use util::Timestamp;
use io::Permission;
/// A type representing an I/O session.
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Default, Hash, Serialize, Deserialize)]
pub struct | <P>
where P: Datable
{
/// Id of the session.
pub id: u64,
/// Permission guaranteed by the session.
pub permission: Permission,
/// Expiration time of the session.
pub expires_at: Timestamp,
/// Payload of the session.
pub payload: P,
}
impl<P> Session<P>
where P: Datable
{
/// Creates a new `Session` from its components.
pub fn new(id: u64, permission: &Permission, expires_at: &Timestamp, payload: &P) -> Result<Self> {
permission.check()?;
payload.check()?;
let session = Session {
id: id,
permission: permission.to_owned(),
expires_at: expires_at.to_owned(),
payload: payload.to_owned(),
};
Ok(session)
}
/// Returns if the `Session` has already expired.
pub fn is_expired(&self) -> Result<bool> {
let now = Timestamp::now()?;
Ok(self.expires_at <= now)
}
}
impl<P> Checkable for Session<P>
where P: Datable
{
fn check(&self) -> Result<()> {
self.permission.check()?;
self.payload.check()
}
}
impl<P> Sizable for Session<P>
where P: Datable
{
fn size(&self) -> u64 {
self.id.size() +
self.permission.size() +
self.expires_at.size() +
self.payload.size()
}
}
impl<P> Serializable for Session<P>
where P: Datable + Serializable
{}
impl<P> Datable for Session<P>
where P: Datable
{} | Session |
07_mad_lib_game.py | # Primer juego... | print("Las violetas son Azules")
print("Y yo te amo a ti")
# Mad Libs
# enter random words: adjectives, verbs, nouns.
print("Ahora te toca a vos")
print("")
color = input("Ingrese un color: ")
sustantivo_plular = input("Ingrese un sustantivo en plural: ")
celebridad = input("Ingrese el nombre de una celebridad: ")
print("Las rosas son " + color)
print( sustantivo_plular + " son Azules")
print("Y yo te amo a ti " + celebridad ) |
print("Mi poesia:")
print("Las rosas son Rojas") |
Step.skeleton.tsx | import React from 'react';
import styled from 'styled-components';
import Skeleton from '../../../Skeleton';
import tokens from '../../../../tokens';
import { StepProps } from '../Step';
const StepSkeletonWrapper = styled.span.attrs({ className: 'step--skeleton' })`
position: relative;
&:before { | right: 0;
bottom: 0;
background: ${({ theme }) => theme.colors?.backgroundColor};
border-radius: ${tokens.radii.rectRadius};
}
&,
.skeleton {
height: ${tokens.space.l};
width: 11.5rem;
}
`;
const StepSkeleton = (props: StepProps) => (
<StepSkeletonWrapper {...props}>
<Skeleton />
</StepSkeletonWrapper>
);
export default React.memo(StepSkeleton); | position: absolute;
content: '';
top: 0;
left: 0; |
list-network-sources-request.ts | /**
*
*
* OpenAPI spec version: 20160918
*
*
* NOTE: This class is auto generated by OracleSDKGenerator.
* Do not edit the class manually.
*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
*/ |
import * as model from "../model";
import common = require("oci-common");
export interface ListNetworkSourcesRequest {
/**
* The OCID of the compartment (remember that the tenancy is simply the root compartment).
*
*/
"compartmentId": string;
/**
* The value of the `opc-next-page` response header from the previous \"List\" call.
*
*/
"page"?: string;
/**
* The maximum number of items to return in a paginated \"List\" call.
*
*/
"limit"?: number;
} | |
lsp_notification_dispatch.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use lsp_types::notification::Notification;
use crate::lsp_runtime_error::LSPRuntimeResult;
pub(crate) struct | <'state, TState> {
notification: lsp_server::Notification,
state: &'state mut TState,
}
impl<'state, TState> LSPNotificationDispatch<'state, TState> {
pub fn new(notification: lsp_server::Notification, state: &'state mut TState) -> Self {
LSPNotificationDispatch {
notification,
state,
}
}
    /// Calls handler if the LSPNotificationDispatch's notification's method matches
/// the method of TNotification. Returns a Result which will be Ok if the handler
/// was not called, or Err if the handler was called.
/// Thus, multiple calls to `on_notification_sync(...)?` can be chained. Doing so will
/// cause LSPNotificationDispatch to execute the first matching handler, if any.
pub fn on_notification_sync<TNotification: Notification>(
self,
handler: fn(&mut TState, TNotification::Params) -> LSPRuntimeResult<()>,
) -> Result<Self, ()> {
if self.notification.method == TNotification::METHOD {
let params = extract_notification_params::<TNotification>(self.notification);
// TODO propagate these errors
let _response = handler(self.state, params);
return Err(());
}
Ok(self)
}
pub fn notification(self) -> lsp_server::Notification {
self.notification
}
}
fn extract_notification_params<N>(notification: lsp_server::Notification) -> N::Params
where
N: Notification,
{
notification
.extract(N::METHOD)
.expect("extract_notification_params: could not extract notification param")
}
#[cfg(test)]
mod test {
use lsp_types::{
notification::{LogMessage, Notification, TelemetryEvent},
LogMessageParams, MessageType,
};
use crate::lsp_runtime_error::LSPRuntimeResult;
use super::LSPNotificationDispatch;
#[test]
fn calls_first_matching_notification_handler() {
let mut state: i32 = 0;
let dispatch = LSPNotificationDispatch::new(
lsp_server::Notification {
method: "window/logMessage".to_string(),
params: serde_json::to_value(LogMessageParams {
typ: MessageType::Error,
message: "Use Relay!".to_string(),
})
.unwrap(),
},
&mut state,
);
let dispatch = || -> Result<(), ()> {
dispatch
.on_notification_sync::<TelemetryEvent>(telemetry_handler)?
.on_notification_sync::<LogMessage>(log_message_handler)?;
Ok(())
};
let result = dispatch();
assert!(result.is_err());
assert_eq!(state, 2);
}
fn telemetry_handler(
state: &mut i32,
_params: <TelemetryEvent as Notification>::Params,
) -> LSPRuntimeResult<()> {
*state = 1;
Ok(())
}
fn log_message_handler(
state: &mut i32,
_params: <LogMessage as Notification>::Params,
) -> LSPRuntimeResult<()> {
*state = 2;
Ok(())
}
}
| LSPNotificationDispatch |
UploadResource.py | from os import write as os_write, close as os_close, O_WRONLY as os_O_WRONLY, O_CREAT as os_O_CREAT, open as os_open, remove as os_remove
from twisted.web import resource, http
class UploadResource(resource.Resource):
FILENAME = "/tmp/autotimer_backup.tar"
def __init__(self, session):
self.session = session
resource.Resource.__init__(self)
def render_POST(self, req):
req.setResponseCode(http.OK)
req.setHeader('Content-type', 'application/xhtml+xml;')
req.setHeader('charset', 'UTF-8')
data = req.args['file'][0]
if not data:
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>False</e2state>
<e2statetext>Filesize was 0, not uploaded</e2statetext>
</e2simplexmlresult>\n"""
return result
fd = os_open(self.FILENAME, os_O_WRONLY | os_O_CREAT)
if fd: | try:
				os_remove(self.FILENAME)
			except OSError as oe:
pass
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>False</e2state>
<e2statetext>Error writing to disk, not uploaded</e2statetext>
</e2simplexmlresult>\n"""
else:
result = """<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n
<e2simplexmlresult>\n
<e2state>True</e2state>
<e2statetext>%s</e2statetext>
</e2simplexmlresult>\n""" % self.FILENAME
return result | cnt = os_write(fd, data)
os_close(fd)
if cnt <= 0: |
index.tsx | export {default as useIntersectionObserver} from './use-intersection-observer'; |
||
state_processor.go | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/harmony-one/harmony/block"
consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/ctxerror"
"github.com/harmony-one/harmony/internal/params"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/shard"
staking "github.com/harmony-one/harmony/staking/types"
)
// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
// StateProcessor implements Processor.
type StateProcessor struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus_engine.Engine // Consensus engine used for block rewards
}
// NewStateProcessor initialises a new StateProcessor.
func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus_engine.Engine) *StateProcessor {
return &StateProcessor{
config: config,
bc: bc,
engine: engine,
}
}
// Process processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb and applying any rewards to both
// the processor (coinbase) and any included uncles.
//
// Process returns the receipts and logs accumulated during the process and
// returns the amount of gas that was used in the process. If any of the
// transactions failed to execute due to insufficient gas it will return an error.
func (p *StateProcessor) Process(block *types.Block, statedb *state.DB, cfg vm.Config) (
types.Receipts, types.CXReceipts, []*types.Log, uint64, *big.Int, error,
) {
var (
receipts types.Receipts
outcxs types.CXReceipts
incxs = block.IncomingReceipts()
usedGas = new(uint64)
header = block.Header()
coinbase = block.Header().Coinbase()
allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
statedb.Prepare(tx.Hash(), block.Hash(), i)
receipt, cxReceipt, _, err := ApplyTransaction(p.config, p.bc, &coinbase, gp, statedb, header, tx, usedGas, cfg)
if err != nil {
return nil, nil, nil, 0, nil, err
}
receipts = append(receipts, receipt)
if cxReceipt != nil {
outcxs = append(outcxs, cxReceipt)
}
allLogs = append(allLogs, receipt.Logs...)
}
// Iterate over staking transactions
L := len(block.Transactions())
for i, tx := range block.StakingTransactions() {
statedb.Prepare(tx.Hash(), block.Hash(), i+L)
receipt, _, err :=
ApplyStakingTransaction(p.config, p.bc, &coinbase, gp, statedb, header, tx, usedGas, cfg)
if err != nil {
return nil, nil, nil, 0, nil, err
}
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
// incomingReceipts should always be processed after transactions (to be consistent with the block proposal)
for _, cx := range block.IncomingReceipts() {
err := ApplyIncomingReceipt(p.config, statedb, header, cx)
if err != nil {
return nil, nil, nil, 0, nil, ctxerror.New("cannot apply incoming receipts").WithCause(err)
}
}
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
_, payout, err := p.engine.Finalize(p.bc, header, statedb, block.Transactions(), receipts, outcxs, incxs, block.StakingTransactions())
if err != nil {
return nil, nil, nil, 0, nil, ctxerror.New("cannot finalize block").WithCause(err)
}
return receipts, outcxs, allLogs, *usedGas, payout, nil
}
// getTransactionType classifies the transaction relative to this block's shard;
// it returns InvalidTx when the transaction cannot be processed here.
func getTransactionType(config *params.ChainConfig, header *block.Header, tx *types.Transaction) types.TransactionType {
if header.ShardID() == tx.ShardID() && (!config.AcceptsCrossTx(header.Epoch()) || tx.ShardID() == tx.ToShardID()) {
return types.SameShardTx
}
numShards := shard.Schedule.InstanceForEpoch(header.Epoch()).NumShards()
// Assuming here all the shards are consecutive from 0 to n-1, n is total number of shards
if tx.ShardID() != tx.ToShardID() && header.ShardID() == tx.ShardID() && tx.ToShardID() < numShards {
return types.SubtractionOnly
}
return types.InvalidTx
}
// ApplyTransaction attempts to apply a transaction to the given state database
// and uses the input parameters for its environment. It returns the receipt
// for the transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB, header *block.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, *types.CXReceipt, uint64, error) {
txType := getTransactionType(config, header, tx)
if txType == types.InvalidTx {
return nil, nil, 0, fmt.Errorf("Invalid Transaction Type")
}
if txType != types.SameShardTx && !config.AcceptsCrossTx(header.Epoch()) {
return nil, nil, 0, fmt.Errorf(
"cannot handle cross-shard transaction until after epoch %v (now %v)",
config.CrossTxEpoch, header.Epoch())
}
msg, err := tx.AsMessage(types.MakeSigner(config, header.Epoch()))
	// skip signer err for addition-only tx
if err != nil {
return nil, nil, 0, err
}
// Create a new context to be used in the EVM environment
context := NewEVMContext(msg, header, bc, author)
context.TxType = txType
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, statedb, config, cfg)
// Apply the transaction to the current state (included in the env)
_, gas, failed, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
return nil, nil, 0, err
}
// Update the state with pending changes
var root []byte
if config.IsS3(header.Epoch()) {
statedb.Finalise(true)
} else {
root = statedb.IntermediateRoot(config.IsS3(header.Epoch())).Bytes()
}
*usedGas += gas
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
// based on the eip phase, we're passing whether the root touch-delete accounts.
receipt := types.NewReceipt(root, failed, *usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = gas
// if the transaction created a contract, store the creation address in the receipt.
if msg.To() == nil {
receipt.ContractAddress = crypto.CreateAddress(vmenv.Context.Origin, tx.Nonce())
}
// Set the receipt logs and create a bloom for filtering
//receipt.Logs = statedb.GetLogs(tx.Hash())
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
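	// For a cross-shard (subtraction-only) transaction, also emit a CXReceipt
	// so the destination shard can later credit the recipient.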
var cxReceipt *types.CXReceipt
if txType == types.SubtractionOnly {
cxReceipt = &types.CXReceipt{tx.Hash(), msg.From(), msg.To(), tx.ShardID(), tx.ToShardID(), msg.Value()}
} else {
cxReceipt = nil
}
return receipt, cxReceipt, gas, err
}
// ApplyStakingTransaction attempts to apply a staking transaction to the given state database
// and uses the input parameters for its environment. It returns the receipt
// for the staking transaction, gas used and an error if the transaction failed,
// indicating the block was invalid.
// staking transaction will use the code field in the account to store the staking information
func ApplyStakingTransaction(
config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.DB,
header *block.Header, tx *staking.StakingTransaction, usedGas *uint64, cfg vm.Config) (receipt *types.Receipt, gas uint64, err error) {
msg, err := StakingToMessage(tx, header.Number())
if err != nil {
return nil, 0, err
}
// Create a new context to be used in the EVM environment
context := NewEVMContext(msg, header, bc, author)
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, statedb, config, cfg)
// Apply the transaction to the current state (included in the env)
gas, err = ApplyStakingMessage(vmenv, msg, gp, bc)
utils.Logger().Info().Msgf("ApplyStakingMessage: usedGas: %v, err: %v, stakingTxn:", gas, err)
// even there is error, we charge it
if err != nil {
return nil, gas, err
}
// Update the state with pending changes
var root []byte
if config.IsS3(header.Epoch()) {
statedb.Finalise(true)
} else {
root = statedb.IntermediateRoot(config.IsS3(header.Epoch())).Bytes()
}
*usedGas += gas
receipt = types.NewReceipt(root, false, *usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = gas
return receipt, gas, nil
}
// ApplyIncomingReceipt will add amount into ToAddress in the receipt
func ApplyIncomingReceipt(config *params.ChainConfig, db *state.DB, header *block.Header, cxp *types.CXReceiptsProof) error {
if cxp == nil {
return nil
}
for _, cx := range cxp.Receipts {
		if cx == nil || cx.To == nil { // should not happen
return ctxerror.New("ApplyIncomingReceipts: Invalid incomingReceipt!", "receipt", cx)
}
utils.Logger().Info().Msgf("ApplyIncomingReceipts: ADDING BALANCE %d", cx.Amount)
if !db.Exist(*cx.To) {
db.CreateAccount(*cx.To)
}
db.AddBalance(*cx.To, cx.Amount)
db.IntermediateRoot(config.IsS3(header.Epoch()))
}
return nil
}
// StakingToMessage returns the staking transaction as a core.Message.
// requires a signer to derive the sender.
// put it here to avoid cyclic import
func StakingToMessage(tx *staking.StakingTransaction, blockNum *big.Int) (types.Message, error) | {
payload, err := tx.RLPEncodeStakeMsg()
if err != nil {
return types.Message{}, err
}
from, err := tx.SenderAddress()
if err != nil {
return types.Message{}, err
}
msg := types.NewStakingMessage(from, tx.Nonce(), tx.Gas(), tx.Price(), payload, blockNum)
stkType := tx.StakingType()
if _, ok := types.StakingTypeMap[stkType]; !ok {
return types.Message{}, staking.ErrInvalidStakingKind
}
msg.SetType(types.StakingTypeMap[stkType])
return msg, nil
} |
|
kubernetes.go | package kubernetes
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"strings"
"get.porter.sh/porter/pkg/context"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/xeipuuv/gojsonschema"
)
const (
defaultKubernetesClientVersion string = "v1.15.5"
)
type Mixin struct {
*context.Context
KubernetesClientVersion string
}
func New() *Mixin |
func (m *Mixin) getCommandFile(commandFile string, w io.Writer) ([]byte, error) {
if commandFile == "" {
reader := bufio.NewReader(m.In)
return ioutil.ReadAll(reader)
}
return ioutil.ReadFile(commandFile)
}
func (m *Mixin) getPayloadData() ([]byte, error) {
reader := bufio.NewReader(m.In)
data, err := ioutil.ReadAll(reader)
if err != nil {
errors.Wrap(err, "could not read payload from STDIN")
}
return data, nil
}
func (m *Mixin) ValidatePayload(b []byte) error {
// Load the step as a go dump
s := make(map[string]interface{})
err := yaml.Unmarshal(b, &s)
if err != nil {
return errors.Wrap(err, "could not marshal payload as yaml")
}
manifestLoader := gojsonschema.NewGoLoader(s)
// Load the step schema
schema := m.GetSchema()
schemaLoader := gojsonschema.NewStringLoader(schema)
validator, err := gojsonschema.NewSchema(schemaLoader)
if err != nil {
return errors.Wrap(err, "unable to compile the mixin step schema")
}
// Validate the manifest against the schema
result, err := validator.Validate(manifestLoader)
if err != nil {
return errors.Wrap(err, "unable to validate the mixin step schema")
}
if !result.Valid() {
errs := make([]string, 0, len(result.Errors()))
for _, err := range result.Errors() {
errs = append(errs, err.String())
}
return errors.New(strings.Join(errs, "\n\t* "))
}
return nil
}
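// getOutput shells out to kubectl, building a command such as
// "kubectl get pod mypod -o=jsonpath={.status.phase} --namespace=default"
// (illustrative values).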
func (m *Mixin) getOutput(resourceType, resourceName, namespace, jsonPath string) ([]byte, error) {
args := []string{"get", resourceType, resourceName}
args = append(args, fmt.Sprintf("-o=jsonpath=%s", jsonPath))
if namespace != "" {
args = append(args, fmt.Sprintf("--namespace=%s", namespace))
}
cmd := m.NewCommand("kubectl", args...)
cmd.Stderr = m.Err
prettyCmd := fmt.Sprintf("%s%s", cmd.Dir, strings.Join(cmd.Args, " "))
if m.Debug {
fmt.Fprintln(m.Err, prettyCmd)
}
out, err := cmd.Output()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("couldn't run command %s", prettyCmd))
}
return out, nil
}
func (m *Mixin) handleOutputs(outputs []KubernetesOutput) error {
//Now get the outputs
for _, output := range outputs {
bytes, err := m.getOutput(
output.ResourceType,
output.ResourceName,
output.Namespace,
output.JSONPath,
)
if err != nil {
return err
}
err = m.Context.WriteMixinOutputToFile(output.Name, bytes)
if err != nil {
return err
}
}
return nil
}
| {
return &Mixin{
Context: context.New(),
KubernetesClientVersion: defaultKubernetesClientVersion,
}
} |
snapshot.go | /*
Copyright The KubeDB Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
api "kubedb.dev/apimachinery/apis/kubedb/v1alpha1"
cs "kubedb.dev/apimachinery/client/clientset/versioned/typed/kubedb/v1alpha1"
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
kerr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
kutil "kmodules.xyz/client-go"
)
func CreateOrPatchSnapshot(c cs.KubedbV1alpha1Interface, meta metav1.ObjectMeta, transform func(*api.Snapshot) *api.Snapshot) (*api.Snapshot, kutil.VerbType, error) {
cur, err := c.Snapshots(meta.Namespace).Get(meta.Name, metav1.GetOptions{})
if kerr.IsNotFound(err) {
glog.V(3).Infof("Creating Snapshot %s/%s.", meta.Namespace, meta.Name)
out, err := c.Snapshots(meta.Namespace).Create(transform(&api.Snapshot{
TypeMeta: metav1.TypeMeta{
Kind: "Snapshot",
APIVersion: api.SchemeGroupVersion.String(),
},
ObjectMeta: meta,
}))
return out, kutil.VerbCreated, err
} else if err != nil {
return nil, kutil.VerbUnchanged, err
}
return PatchSnapshot(c, cur, transform)
}
func | (c cs.KubedbV1alpha1Interface, cur *api.Snapshot, transform func(*api.Snapshot) *api.Snapshot) (*api.Snapshot, kutil.VerbType, error) {
return PatchSnapshotObject(c, cur, transform(cur.DeepCopy()))
}
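// PatchSnapshotObject computes a JSON merge patch between cur and mod and
// applies it only when the resulting patch is non-empty.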
func PatchSnapshotObject(c cs.KubedbV1alpha1Interface, cur, mod *api.Snapshot) (*api.Snapshot, kutil.VerbType, error) {
curJson, err := json.Marshal(cur)
if err != nil {
return nil, kutil.VerbUnchanged, err
}
modJson, err := json.Marshal(mod)
if err != nil {
return nil, kutil.VerbUnchanged, err
}
patch, err := jsonpatch.CreateMergePatch(curJson, modJson)
if err != nil {
return nil, kutil.VerbUnchanged, err
}
if len(patch) == 0 || string(patch) == "{}" {
return cur, kutil.VerbUnchanged, nil
}
glog.V(3).Infof("Patching Snapshot %s/%s with %s.", cur.Namespace, cur.Name, string(patch))
out, err := c.Snapshots(cur.Namespace).Patch(cur.Name, types.MergePatchType, patch)
return out, kutil.VerbPatched, err
}
func TryUpdateSnapshot(c cs.KubedbV1alpha1Interface, meta metav1.ObjectMeta, transform func(*api.Snapshot) *api.Snapshot) (result *api.Snapshot, err error) {
attempt := 0
err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) {
attempt++
cur, e2 := c.Snapshots(meta.Namespace).Get(meta.Name, metav1.GetOptions{})
if kerr.IsNotFound(e2) {
return false, e2
} else if e2 == nil {
result, e2 = c.Snapshots(cur.Namespace).Update(transform(cur.DeepCopy()))
return e2 == nil, nil
}
glog.Errorf("Attempt %d failed to update Snapshot %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
if err != nil {
err = fmt.Errorf("failed to update Snapshot %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
return
}
func WaitUntilSnapshotCompletion(c cs.KubedbV1alpha1Interface, meta metav1.ObjectMeta) (result *api.Snapshot, err error) {
err = wait.PollImmediate(kutil.RetryInterval, kutil.ReadinessTimeout, func() (bool, error) {
result, err = c.Snapshots(meta.Namespace).Get(meta.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if result.Status.CompletionTime != nil {
return true, nil
}
return false, nil
})
return
}
func UpdateSnapshotStatus(
c cs.KubedbV1alpha1Interface,
in *api.Snapshot,
transform func(*api.SnapshotStatus) *api.SnapshotStatus,
) (result *api.Snapshot, err error) {
apply := func(x *api.Snapshot) *api.Snapshot {
return &api.Snapshot{
TypeMeta: x.TypeMeta,
ObjectMeta: x.ObjectMeta,
Spec: x.Spec,
Status: *transform(in.Status.DeepCopy()),
}
}
attempt := 0
cur := in.DeepCopy()
err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) {
attempt++
var e2 error
result, e2 = c.Snapshots(in.Namespace).UpdateStatus(apply(cur))
if kerr.IsConflict(e2) {
latest, e3 := c.Snapshots(in.Namespace).Get(in.Name, metav1.GetOptions{})
switch {
case e3 == nil:
cur = latest
return false, nil
case kutil.IsRequestRetryable(e3):
return false, nil
default:
return false, e3
}
		} else if e2 != nil && !kutil.IsRequestRetryable(e2) {
return false, e2
}
return e2 == nil, nil
})
if err != nil {
err = fmt.Errorf("failed to update status of Snapshot %s/%s after %d attempts due to %v", in.Namespace, in.Name, attempt, err)
}
return
}
func MarkAsFailedSnapshot(
c cs.KubedbV1alpha1Interface,
cur *api.Snapshot,
reason string,
) (*api.Snapshot, error) {
return UpdateSnapshotStatus(c, cur, func(in *api.SnapshotStatus) *api.SnapshotStatus {
t := metav1.Now()
in.CompletionTime = &t
in.Phase = api.SnapshotPhaseFailed
in.Reason = reason
return in
})
}
| PatchSnapshot |
uint.go | package perseus
// InUint returns whether i is in list
func InUint(i uint, list []uint) bool {
for _, b := range list {
if b == i {
return true
}
}
return false
}
// IndexUint returns the position of s in list. If s is not found, return -1.
func IndexUint(s uint, list []uint) int {
for i, b := range list {
if b == s |
}
return -1
}
// ShiftUint returns the first element of slice and other element's slice.
func ShiftUint(slice []uint) (uint, []uint) {
return slice[0], slice[1:]
}
// UnshiftUint add an element to the beginning of a slice.
func UnshiftUint(sep uint, i []uint) []uint {
return append([]uint{sep}, i...)
}
// DeleteUint removes the element at the given index from the slice
func DeleteUint(slice []uint, sep uint) []uint {
return append(slice[:sep], slice[sep+1:]...)
}
// CutUint removes the elements from index i up to (but not including) index j
func CutUint(slice []uint, i, j uint) []uint {
return append(slice[:i], slice[j:]...)
}
// InsertUint insert element to specified position
func InsertUint(slice []uint, element, position uint) []uint {
return append(slice[:position], append([]uint{element}, slice[position:]...)...)
}
// InsertVectorUint insert slice to specified position
func InsertVectorUint(origin, insert []uint, position uint) []uint {
return append(origin[:position], append(insert, origin[position:]...)...)
}
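// Note: the append-based helpers above can write into the backing array of
// their input slices; copy a slice first if its original contents must be
// preserved.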
// PopUint returns the last element of slice and the remaining slice.
func PopUint(slice []uint) (uint, []uint) {
	return slice[len(slice)-1], slice[:len(slice)-1]
}
// ReversedUint returns reversed slice
func ReversedUint(slice []uint) []uint {
for left, right := 0, len(slice)-1; left < right; left, right = left+1, right-1 {
slice[left], slice[right] = slice[right], slice[left]
}
return slice
}
// ExtendUint connect slices together
func ExtendUint(A, B []uint) []uint {
return append(A, B...)
}
func sumUint(values ...uint) uint64 {
var sum uint64
for _, v := range values {
sum += uint64(v)
}
return sum
}
// SumUint calculate summaries of arguments
func SumUint(values ...uint) uint {
return uint(sumUint(values...))
}
// SumUintToUint64 calculates the sum of the arguments as a uint64.
func SumUintToUint64(values ...uint) uint64 {
	return sumUint(values...)
}
| {
return i
} |
11.ts | import { Card } from '../../../interfaces'
import Set from '../Sun & Moon'
const card: Card = {
name: {
en: "Decidueye",
fr: "Archéduc",
es: "Decidueye",
it: "Decidueye",
pt: "Decidueye",
de: "Silvarro"
},
illustrator: "Kouki Saitou",
rarity: "Rare",
category: "Pokemon",
set: Set,
dexId: [
724,
],
hp: 140,
types: [
"Grass",
],
evolveFrom: {
en: "Dartrix",
fr: "Efflèche",
},
stage: "Stage2",
attacks: [
{
cost: [
"Grass",
],
name: {
en: "Leaf Blade",
fr: "Lame-Feuille",
es: "Hoja Aguda",
it: "Fendifoglia",
pt: "Lâmina de Folha",
de: "Laubklinge"
},
effect: {
en: "Flip a coin. If heads, this attack does 30 more damage.",
fr: "Lancez une pièce. Si c’est face, cette attaque inflige 30 dégâts supplémentaires.",
es: "Lanza 1 moneda. Si sale cara, este ataque hace 30 puntos de daño más.", | pt: "Jogue 1 moeda. Se sair cara, este ataque causará 30 pontos de dano a mais.",
de: "Wirf 1 Münze. Bei Kopf fügt diese Attacke 30 Schadenspunkte mehr zu."
},
damage: "30+",
},
{
cost: [
"Grass",
"Colorless",
"Colorless",
],
name: {
en: "Brave Bird",
fr: "Rapace",
es: "Pájaro Osado",
it: "Baldeali",
pt: "Pássaro Bravo",
de: "Sturzflug"
},
effect: {
en: "This Pokémon does 20 damage to itself.",
fr: "Ce Pokémon s’inflige 20 dégâts.",
es: "Este Pokémon se hace 20 puntos de daño a sí mismo.",
it: "Questo Pokémon infligge 20 danni a se stesso.",
pt: "Este Pokémon causa 20 pontos de dano a si mesmo.",
de: "Dieses Pokémon fügt sich selbst 20 Schadenspunkte zu."
},
damage: 120,
},
],
weaknesses: [
{
type: "Fire",
value: "×2"
},
],
retreat: 1,
}
export default card | it: "Lancia una moneta. Se esce testa, questo attacco infligge 30 danni in più.", |
index.js | const {
CONSTANTS,
getDevServerLogLevel,
getDevServerPort, |
const buildDevServer = configuration => ({
clientLogLevel: getDevServerLogLevel(configuration),
compress: true,
historyApiFallback: true,
hot: isDevServerHot(configuration),
noInfo: true,
port: getDevServerPort(configuration),
publicPath: CONSTANTS.PATH_PUBLIC,
});
const devServer = configuration =>
(isDevServerEnabled(configuration) ? buildDevServer(configuration) : {});
module.exports = devServer; | isDevServerEnabled,
isDevServerHot,
} = require('../../config/config'); |
kmsjwt.go | package kmsjwt
import (
kms "cloud.google.com/go/kms/apiv1"
jwtinterface "github.com/wearemojo/mojo-public-go/lib/jwt"
)
var (
_ jwtinterface.Signer = (*KMSJWT)(nil)
_ jwtinterface.Verifier = (*KMSJWT)(nil)
)
type KMSJWT struct {
*Signer
*Verifier
}
func | (client *kms.KeyManagementClient, projectID, env, serviceName string) *KMSJWT {
return &KMSJWT{
Signer: NewSigner(client, projectID, env, serviceName),
Verifier: NewVerifier(client, projectID),
}
}
| New |
test_odefilter_cases.py | """Test-cases for ODE filters."""
import pytest_cases
from probnum import diffeq, randprocs
import probnum.problems.zoo.diffeq as diffeq_zoo
# logistic.rhs is implemented in a backend-agnostic way,
# so it works with both NumPy and JAX
@pytest_cases.case(tags=("numpy", "jax"))
def problem_logistic():
return diffeq_zoo.logistic()
def steprule_constant():
return diffeq.stepsize.ConstantSteps(0.5)
def steprule_adaptive():
return diffeq.stepsize.AdaptiveSteps(firststep=0.5, atol=0.2, rtol=0.2)
def diffusion_constant():
return randprocs.markov.continuous.ConstantDiffusion()
def diffusion_piecewise_constant():
return randprocs.markov.continuous.PiecewiseConstantDiffusion(t0=0.0)
@pytest_cases.case(tags=("numpy",))
def init_non_prob_fit():
return diffeq.odefilter.init_routines.NonProbabilisticFit()
@pytest_cases.case(tags=("numpy",))
def init_non_prob_fit_with_jacobian():
return diffeq.odefilter.init_routines.NonProbabilisticFitWithJacobian()
@pytest_cases.case(tags=("numpy",))
def init_stack():
return diffeq.odefilter.init_routines.Stack()
@pytest_cases.case(tags=("numpy",))
def init_stack_with_jacobian():
return diffeq.odefilter.init_routines.StackWithJacobian()
@pytest_cases.case(tags=("jax",))
def init_forward():
return diffeq.odefilter.init_routines.ForwardMode()
@pytest_cases.case(tags=("jax",))
def init_forward_jvp():
return diffeq.odefilter.init_routines.ForwardModeJVP()
@pytest_cases.case(tags=("jax",))
def init_reverse():
return diffeq.odefilter.init_routines.ReverseMode()
@pytest_cases.case(tags=("jax",))
def init_taylor():
return diffeq.odefilter.init_routines.TaylorMode()
def approx_ek0():
return diffeq.odefilter.approx_strategies.EK0()
def approx_ek1():
| return diffeq.odefilter.approx_strategies.EK1() |
|
url.go | // Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"net/url"
"regexp"
"strings"
)
var (
protocolRe = regexp.MustCompile("^[a-zA-Z_+-]+://")
)
// URLParser represents a git URL parser
type URLParser struct {
}
// Parse parses the git URL
func (p *URLParser) Parse(rawURL string) (u *url.URL, err error) {
rawURL = strings.TrimSpace(rawURL)
if !protocolRe.MatchString(rawURL) {
// convert the weird git ssh url format to a canonical url:
// [email protected]:gitea/tea -> ssh://[email protected]/gitea/tea
if strings.Contains(rawURL, ":") &&
// not a Windows path
!strings.Contains(rawURL, "\\") {
rawURL = "ssh://" + strings.Replace(rawURL, ":", "/", 1)
} else if !strings.Contains(rawURL, "@") &&
strings.Count(rawURL, "/") == 2 {
// match cases like gitea.com/gitea/tea
rawURL = "https://" + rawURL
}
}
u, err = url.Parse(rawURL)
if err != nil {
return
}
if u.Scheme == "git+ssh" {
u.Scheme = "ssh"
}
if strings.HasPrefix(u.Path, "//") {
u.Path = strings.TrimPrefix(u.Path, "/")
}
// .git suffix is optional and breaks normalization
if strings.HasSuffix(u.Path, ".git") {
u.Path = strings.TrimSuffix(u.Path, ".git")
}
return
}
// ParseURL parses a URL string and returns a URL struct
func | (rawURL string) (u *url.URL, err error) {
p := &URLParser{}
return p.Parse(rawURL)
}
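// Illustrative examples (hypothetical inputs):
//
//	ParseURL("[email protected]:gitea/tea")         // -> ssh://[email protected]/gitea/tea
//	ParseURL("https://gitea.com/gitea/tea.git") // -> https://gitea.com/gitea/tea (".git" stripped)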
| ParseURL |
civo.go | package civo
import (
_ "embed"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"time"
"go.uber.org/zap"
"github.com/minectl/pkg/update"
"github.com/civo/civogo"
"github.com/minectl/pkg/automation"
"github.com/minectl/pkg/common"
minctlTemplate "github.com/minectl/pkg/template"
)
type Civo struct {
client *civogo.Client
tmpl *minctlTemplate.Template
}
func NewCivo(APIKey, region string) (*Civo, error) |
func (c *Civo) CreateServer(args automation.ServerArgs) (*automation.RessourceResults, error) {
pubKeyFile, err := ioutil.ReadFile(fmt.Sprintf("%s.pub", args.MinecraftResource.GetSSH()))
if err != nil {
return nil, err
}
sshPubKey, err := c.client.NewSSHKey(fmt.Sprintf("%s-ssh", args.MinecraftResource.GetName()), string(pubKeyFile))
if err != nil {
return nil, err
}
zap.S().Infow("Civo SSH Key created", "id", sshPubKey.ID)
network, err := c.client.GetDefaultNetwork()
if err != nil {
return nil, err
}
zap.S().Infow("Civo get default network created", "network", network)
template, err := c.client.FindDiskImage("ubuntu-focal")
if err != nil {
return nil, err
}
zap.S().Infow("Civo get disk image", "template", template)
config, err := c.client.NewInstanceConfig()
if err != nil {
return nil, err
}
config.TemplateID = template.ID
config.Size = args.MinecraftResource.GetSize()
config.Hostname = args.MinecraftResource.GetName()
config.Region = args.MinecraftResource.GetRegion()
config.SSHKeyID = sshPubKey.ID
config.PublicIPRequired = "create"
config.InitialUser = "root"
config.Tags = []string{common.InstanceTag, args.MinecraftResource.GetEdition()}
script, err := c.tmpl.GetTemplate(args.MinecraftResource, "", minctlTemplate.GetTemplateBashName(args.MinecraftResource.IsProxyServer()))
if err != nil {
return nil, err
}
config.Script = script
instance, err := c.client.CreateInstance(config)
if err != nil {
return nil, err
}
zap.S().Infow("Civo create instance", "instance", instance)
if args.MinecraftResource.GetEdition() == "bedrock" {
firewall, err := c.client.NewFirewall(fmt.Sprintf("%s-fw", args.MinecraftResource.GetName()), network.ID)
if err != nil {
return nil, err
}
_, err = c.client.NewFirewallRule(&civogo.FirewallRuleConfig{
FirewallID: firewall.ID,
Protocol: "udp",
StartPort: "19132",
EndPort: "19133",
Cidr: []string{
"0.0.0.0/0",
},
Label: "Minecraft Bedrock UDP",
})
zap.S().Infow("Civo create firewall", "firewall", firewall)
if err != nil {
return nil, err
}
_, err = c.client.SetInstanceFirewall(instance.ID, firewall.ID)
if err != nil {
return nil, err
}
}
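	// Poll until the instance reports ACTIVE (checking every two seconds).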
stillCreating := true
for stillCreating {
instance, err = c.client.FindInstance(instance.ID)
if err != nil {
return nil, err
}
if instance.Status == "ACTIVE" {
stillCreating = false
time.Sleep(2 * time.Second)
} else {
time.Sleep(2 * time.Second)
}
}
instance, err = c.client.FindInstance(instance.ID)
if err != nil {
return nil, err
}
zap.S().Infow("Civo instance ready", "instance", instance)
region := c.client.Region
if len(instance.Region) > 0 {
region = instance.Region
}
return &automation.RessourceResults{
ID: instance.ID,
Name: instance.Hostname,
Region: region,
PublicIP: instance.PublicIP,
Tags: strings.Join(instance.Tags, ","),
}, err
}
func (c *Civo) DeleteServer(id string, args automation.ServerArgs) error {
_, err := c.client.DeleteInstance(id)
if err != nil {
return err
}
zap.S().Infow("Civo delete instance", "id", id)
pubKeyFile, err := c.client.FindSSHKey(fmt.Sprintf("%s-ssh", args.MinecraftResource.GetName()))
if err != nil {
return err
}
_, err = c.client.DeleteSSHKey(pubKeyFile.ID)
if err != nil {
return err
}
zap.S().Infow("Civo delete ssh key", "pubKeyFile", pubKeyFile)
if args.MinecraftResource.GetEdition() == "bedrock" {
firewall, err := c.client.FindFirewall(fmt.Sprintf("%s-fw", args.MinecraftResource.GetName()))
if err != nil {
return err
}
_, err = c.client.DeleteFirewall(firewall.ID)
if err != nil {
return err
}
zap.S().Infow("Civo delete firewall", "firewall", firewall)
}
return nil
}
func (c *Civo) ListServer() ([]automation.RessourceResults, error) {
var result []automation.RessourceResults
instances, err := c.client.ListAllInstances()
if err != nil {
return nil, err
}
for _, instance := range instances {
if instance.Tags[0] == common.InstanceTag {
for _, tag := range instance.Tags {
if tag == common.InstanceTag {
region := c.client.Region
if len(instance.Region) > 0 {
region = instance.Region
}
result = append(result, automation.RessourceResults{
ID: instance.ID,
PublicIP: instance.PublicIP,
Name: instance.Hostname,
Region: region,
Tags: strings.Join(instance.Tags, ","),
})
}
}
}
}
if len(result) > 0 {
zap.S().Infow("Civo list all instances", "list", result)
} else {
zap.S().Infow("No minectl instances found")
}
return result, nil
}
func (c *Civo) UpdateServer(id string, args automation.ServerArgs) error {
instance, err := c.client.GetInstance(id)
if err != nil {
return err
}
remoteCommand := update.NewRemoteServer(args.MinecraftResource.GetSSH(), instance.PublicIP, "root")
err = remoteCommand.UpdateServer(args.MinecraftResource)
if err != nil {
return err
}
zap.S().Infow("Civo minectl server updated", "instance", instance)
return nil
}
func (c *Civo) UploadPlugin(id string, args automation.ServerArgs, plugin, destination string) error {
instance, err := c.client.GetInstance(id)
if err != nil {
return err
}
remoteCommand := update.NewRemoteServer(args.MinecraftResource.GetSSH(), instance.PublicIP, "root")
err = remoteCommand.TransferFile(plugin, filepath.Join(destination, filepath.Base(plugin)))
if err != nil {
return err
}
_, err = remoteCommand.ExecuteCommand("systemctl restart minecraft.service")
if err != nil {
return err
}
zap.S().Infow("Minecraft plugin uploaded", "plugin", plugin, "instance", instance)
return nil
}
func (c *Civo) GetServer(id string, _ automation.ServerArgs) (*automation.RessourceResults, error) {
instance, err := c.client.GetInstance(id)
if err != nil {
return nil, err
}
region := c.client.Region
if len(instance.Region) > 0 {
region = instance.Region
}
return &automation.RessourceResults{
ID: instance.ID,
Name: instance.Hostname,
Region: region,
PublicIP: instance.PublicIP,
Tags: strings.Join(instance.Tags, ","),
}, err
}
| {
client, err := civogo.NewClient(APIKey, region)
if err != nil {
return nil, err
}
tmpl, err := minctlTemplate.NewTemplateBash()
if err != nil {
return nil, err
}
do := &Civo{
client: client,
tmpl: tmpl,
}
return do, nil
} |
while_loop.rs |
fn while_and_loop() |
fn for_loop() {
for x in 1..11 {
if x == 3 {
continue;
}
println!("x = {}", x);
}
for (pos, value) in (30..40).enumerate() {
println!("{}:{}", pos,value);
}
}
fn main() {
//while_and_loop();
for_loop()
}
| {
let mut x = 1;
while x < 1000 {
x *= 2;
if x == 64 {
continue;
}
println!("x = {}", x);
}
let mut y = 1;
loop {
y *= 2;
println!("y = {}", y);
if y == 1 << 10 {
break;
}
}
} |
ComponentControlsMaximize.js | import PropTypes from 'prop-types'
import React from 'react'
import { Icon, Menu } from 'semantic-ui-react'
import { neverUpdate } from 'docs/app/HOC'
import ComponentControlsToolTip from './ComponentControlsToolTip'
const ComponentControlsMaximize = ({ anchorName }) => (
<ComponentControlsToolTip content='Full Screen'>
<Menu.Item href={`/maximize/${anchorName}`} target='_blank'> | size='large'
/>
</Menu.Item>
</ComponentControlsToolTip>
)
ComponentControlsMaximize.propTypes = {
anchorName: PropTypes.string,
}
export default neverUpdate(ComponentControlsMaximize) | <Icon
color='grey'
fitted
name='window maximize' |
dcache_test.go | package dcache
import (
"testing"
"github.com/heqzha/dcache/core"
)
func | (t *testing.T) {
pool := GetCliPoolInst()
cli, err := pool.GetOrAdd("127.0.0.1:11000")
if err != nil {
t.Error(err)
return
}
res, err := cli.register("test1", "127.0.0.1:11001")
if err != nil {
t.Error(err)
return
}
t.Logf("cli.Register: %t", res.Status)
}
func TestRPCClientPing(t *testing.T) {
pool := GetCliPoolInst()
cli, err := pool.GetOrAdd("127.0.0.1:11000")
if err != nil {
t.Error(err)
return
}
res, err := cli.ping("test", "127.0.0.1:11009")
if err != nil {
t.Error(err)
return
}
sgm := new(core.SGM)
sgm.Init()
sgm.RegisterLocalAddr("test", "127.0.0.1:11009")
sgm.Load(res.SrvGroup)
table, err := sgm.GetTable("default")
if err != nil {
t.Error(err)
return
}
t.Log(table.String())
}
| TestRPCClient |
cache.rs | pub unsafe fn init() | {
} |
|
ToolsBar.tsx | import React from 'react';
import { CogIcon, PlayIcon, ShareIcon, ArchiveIcon, BookOpenIcon } from '@heroicons/react/outline';
import { TemplateIcon } from '@heroicons/react/solid';
import useEditor from 'src/context/hooks/useEditor';
import { DrawerKind, EditorActionKind } from 'src/context/Editor';
import pkg from 'src/../package.json';
interface ToolsBarProps {
compile: () => void;
resizeWorkspace: () => void;
}
const ToolsBar: React.FC<ToolsBarProps> = ({ compile, resizeWorkspace }) => {
const { state, dispatch } = useEditor();
const onMenuSelection = (drawer?: DrawerKind) => {
dispatch({
type: EditorActionKind.UPDATE_DRAWER,
payload: drawer,
});
};
React.useEffect(() => {
if (state.drawer === DrawerKind.Compilation) {
compile();
}
setTimeout(resizeWorkspace, 100);
}, [state.drawer]);
return (
<div className="flex flex-col w-24 border-l border-black dark:border-white">
<div className="flex-1 flex flex-col items-center justify-start pt-5">
<button
onClick={() => onMenuSelection(DrawerKind.Compilation)}
className="w-14 h-14 flex flex-col items-center justify-center hover:text-yellow-400 active:text-yellow-500 font-bold"
>
<PlayIcon className="block" />
<p>Compile</p>
</button>
<div className="border mt-5 mb-5 w-20" />
</div>
<div className="flex flex-col items-center justify-start ">
<div className="border mt-5 mb-5 w-20" />
<button
onClick={() => onMenuSelection(DrawerKind.Templates)}
className="w-14 h-14 flex flex-col items-center justify-center hover:text-yellow-400 active:text-yellow-500 font-bold"
>
<TemplateIcon className="block" />
<p>Templates</p>
</button>
<div className="border mt-5 mb-5 w-20" />
<button
onClick={() => onMenuSelection(DrawerKind.Share)}
className="w-14 h-14 flex flex-col items-center justify-center hover:text-yellow-400 active:text-yellow-500 font-bold"
>
<ShareIcon className="block" />
<p>Share</p>
</button>
<div className="border mt-5 mb-5 w-20" />
<button
onClick={() => onMenuSelection(DrawerKind.Storage)}
className="w-14 h-14 flex flex-col items-center justify-center hover:text-yellow-400 active:text-yellow-500 font-bold"
>
<ArchiveIcon className="block" />
<p>Storage</p>
</button>
<div className="border mt-5 mb-5 w-20" />
<button
onClick={() => onMenuSelection(DrawerKind.Settings)}
className="w-14 h-14 flex flex-col items-center justify-center hover:text-yellow-400 active:text-yellow-500 font-bold"
> | <p>Settings</p>
</button>
<div className="border mt-5 mb-5 w-20" />
<a
href="/docs"
target="_blank"
className="w-14 h-14 flex flex-col items-center justify-center hover:text-yellow-400 active:text-yellow-500 font-bold"
>
<BookOpenIcon className="block" />
<p>Docs</p>
</a>
<div className="border mt-5 mb-2 w-20" />v{pkg.version}
</div>
</div>
);
};
export default ToolsBar; | <CogIcon className="block" /> |
train.py | from data.config import cfg, process_funcs_dict
from data.coco import CocoDataset
from data.loader import build_dataloader
#from modules.solov1 import SOLOV1 as solo
# from modules.solov2 import SOLOV2 as solo
from modules.solov1d import SOLOV1 as solo
import time
import torch
import numpy as np
# Gradient balancing (clip gradient norms)
def clip_grads(params_):
params_ = list(filter(lambda p: p.requires_grad and p.grad is not None, params_))
if len(params_) > 0:
return torch.nn.utils.clip_grad.clip_grad_norm_(params_, max_norm=35, norm_type=2)
# Set a new learning rate on the optimizer
def set_lr(optimizer_, newLr_):
for paramGroup_ in optimizer_.param_groups:
paramGroup_['lr'] = newLr_
# Set requires_grad to False
def gradinator(x_):
x_.requires_grad = False
return x_
# Build the processing pipeline
def build_process_pipeline(pipelin | isinstance(pipelineConfgs_, list)
process_pipelines = []
for pConfig_ in pipelineConfgs_:
assert isinstance(pConfig_, dict) and 'type' in pConfig_
args = pConfig_.copy()
obj_type = args.pop('type')
if isinstance(obj_type, str):
process_pipelines.append(process_funcs_dict[obj_type](**args))
return process_pipelines
# Compute the warmup learning rate
def get_warmup_lr(curIter_, totalIters_, baseLr_, warmupRatio_, warmUpOption='linear'):
if warmUpOption == 'constant':
warmupLr = baseLr_ * warmupRatio_
elif warmUpOption == 'linear':
k = (1 - curIter_ / totalIters_) * (1 - warmupRatio_)
warmupLr = baseLr_ * (1 - k)
elif warmUpOption == 'exp':
k = warmupRatio_**(1 - curIter_ / totalIters_)
warmupLr = baseLr_ * k
return warmupLr
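# Worked example (illustrative numbers): with baseLr_=0.01, warmupRatio_=1/3,
# totalIters_=500 and the default 'linear' option:
#   curIter_=0   -> k = 1.0 * (2/3) = 2/3,  lr = 0.01 * (1/3) ≈ 0.0033
#   curIter_=250 -> k = 0.5 * (2/3) = 1/3,  lr = 0.01 * (2/3) ≈ 0.0067
#   curIter_=500 -> k = 0,                  lr = 0.01 (full base learning rate)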
def train(globalStartEpoch, totalEpoches):
    # build the training pre-processing pipeline
trainTransformsPiplines = build_process_pipeline(cfg.train_pipeline)
print(trainTransformsPiplines)
    # build dataset
casiadata = CocoDataset(ann_file=cfg.dataset.train_info,
pipeline = trainTransformsPiplines,
img_prefix = cfg.dataset.trainimg_prefix,
data_root=cfg.dataset.train_prefix)
torchdataLoader = build_dataloader(casiadata, cfg.imgs_per_gpu, cfg.workers_per_gpu, num_gpus=cfg.num_gpus, shuffle=True)
if cfg.resume_from is None:
model = solo(cfg, pretrained=None, mode='train')
print('cfg.resume_from is None')
else:
model = solo(cfg, pretrained=cfg.resume_from, mode='train')
model = model.cuda()
model = model.train()
lrOri = cfg.optimizer['lr']
lrStages = cfg.lr_config["step"]
lrList = np.full(totalEpoches, lrOri)
for ii in range(len(lrStages)):
lrList[lrStages[ii]:]*=0.1
print("starting epoch: ", globalStartEpoch)
print("lr adapting stages: ", end=' ')
for ii in range(len(lrStages)):
print(cfg.lr_config["step"][ii], end=" ")
print("\ntotal training epoches: ", totalEpoches)
optimizer_config = cfg.optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=optimizer_config['lr'], momentum=optimizer_config['momentum'], weight_decay=optimizer_config['weight_decay'])
batchSize = cfg.imgs_per_gpu * cfg.num_gpus
epochSize = len(casiadata) // batchSize
# nums of trained epoches, idx of epoch to start
pastEpoches = globalStartEpoch
# nums of trained iters, idx of iter to start
pastIters = (globalStartEpoch-1) * epochSize
# nums of left epoches
leftEpoches = totalEpoches - pastEpoches + 1
# nums of left iters
leftIters = leftEpoches * epochSize
print('##### begin train ######')
currentIter = 0
for epoch in range(leftEpoches):
currentEpoch = epoch + pastEpoches
        # Stop training once the configured epoch count is reached
if currentEpoch >= totalEpoches:
print("Current epoch is larger than setting epoch nums, training stop.")
return
        # Accumulators used only for logging
loss_sum = 0.0
loss_ins = 0.0
loss_cate = 0.0
for j, data in enumerate(torchdataLoader):
iterStartTime = time.time()
if cfg.lr_config['warmup'] is not None and pastIters < cfg.lr_config['warmup_iters']:
cur_lr = get_warmup_lr(pastIters, cfg.lr_config['warmup_iters'],
optimizer_config['lr'], cfg.lr_config['warmup_ratio'],
cfg.lr_config['warmup'])
else:
cur_lr = lrList[currentEpoch]
set_lr(optimizer, cur_lr)
imgs = gradinator(data['img'].data[0].cuda())
            img_meta = data['img_metas'].data[0]  # raw metadata for the images
gt_bboxes = []
for bbox in data['gt_bboxes'].data[0]:
bbox = gradinator(bbox.cuda())
gt_bboxes.append(bbox)
gt_masks = data['gt_masks'].data[0] #cpu numpy data
gt_labels = []
for label in data['gt_labels'].data[0]:
label = gradinator(label.cuda())
gt_labels.append(label)
loss = model.forward(img=imgs,
img_meta=img_meta,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks)
losses = loss['loss_ins'] + loss['loss_cate']
loss_sum += losses.cpu().item()
loss_ins += loss['loss_ins'].cpu().item()
loss_cate += loss['loss_cate'].cpu().item()
optimizer.zero_grad()
losses.backward()
if torch.isfinite(losses).item():
                grad_norm = clip_grads(model.parameters())  # gradient balancing
optimizer.step()
            else:
                raise NotImplementedError("loss is not finite; can't backward!")
leftIters -= 1
pastIters += 1
currentIter += 1
showIters = 10
if j%int(showIters) == 0 and j != 0:
iterLastTime = time.time() - iterStartTime
left_seconds = iterLastTime * leftIters
left_minutes = left_seconds / 60.0
left_hours = left_minutes / 60.0
left_days = left_hours // 24
left_hours = left_hours % 24
                out_str = 'epoch:['+str(currentEpoch)+']/['+str(totalEpoches)+'],'  # progress within the current epoch
                out_str = out_str + '['+str(j)+']/'+str(epochSize)+'], left_time: ' + str(left_days)+'days '+format(left_hours,'.2f')+'hours,'
                print(out_str, "loss:", format(loss_sum/showIters,'.4f'), 'loss_ins:', format(loss_ins/showIters,'.4f'), "loss_cate:", format(loss_cate/showIters,'.4f'), "lr:", format(cur_lr,'.8f'))
loss_sum = 0.0
loss_ins = 0.0
loss_cate = 0.0
leftEpoches -= 1
save_name = "./weights/solo1/" + cfg.name + "_epoch_" + str(currentEpoch) + ".pth"
model.save_weights(save_name)
if __name__ == '__main__':
    train(globalStartEpoch=cfg.epoch_iters_start, totalEpoches=cfg.total_epoch)  # set the starting epoch for this training run
| eConfgs_):
assert |
connection.py | import os
import shlex
import getpass
import warnings
import asyncio
import logging
import collections
import atexit
from typing import Optional, Dict, Deque
import tqdm
import asyncssh
from .conf import env, options_to_connect
from .utils import run_in_loop, CommandResult, prepare_environment, split_lines
# disable annoying warnings (we can't fix the problems in 3rd party libs)
warnings.simplefilter("ignore")
log = logging.getLogger(__name__)
# A cache of Connection objects indexed by *name* (not hostname!). We only cache connections created
# with the global run() and sudo() methods. Maybe the tunnels too?
_connections_cache: Dict[str, "Connection"] = {}
def _clean_connections():
# would be better to close them all at once with gather() or similar
for hostname, conn in _connections_cache.items():
if conn.connected:
log.info(f"Cleaning up connection for {hostname}")
conn.disconnect()
atexit.register(_clean_connections)
class Connection:
"""A SSH connection to a remote server.
:param hostname: hostname of the remote server.
:param username: the username used to log into the remote server.
:param port: the optional port for connecting to the remote server (default: 22).
:param private_key: the optional path to a OpenSSH private key.
:param password: the optional password used to authenticate to the remote server.
:param agent_path: the optional path to a OpenSSH agent socket.
:param tunnel: the optional hostname of another server that will be used as tunnel.
:param nickname: the hostname of the server as passed on the command line (could be different
from the real hostname configured in `~/.ssh/config`).
"""
def __init__(
self,
hostname: str,
username: str,
port: int,
private_key=None,
password: Optional[str] = None,
agent_path: Optional[str] = None,
tunnel: Optional[str] = None,
nickname: Optional[str] = None,
):
self.hostname = hostname
self.username = username
self.port = port
self.private_key = private_key
self.password = password
self.agent_path = agent_path
self.tunnel = tunnel
if nickname:
self.nickname = nickname
else:
self.nickname = self.hostname
self._connection: Optional[asyncssh.SSHClientConnection] = None
self._sftp_client: Optional[asyncssh.SFTPClient] = None
async def _read_from(self, stream, writer, maxlen=10, echo=True) -> str:
buf: Deque[str] = collections.deque(maxlen=maxlen)
trail = ""
while True:
data = await stream.read(1024)
if data == "":
break
# everything gets stored in `buf` (within its limits)
buf.append(data)
# handle previously unprinted output, if any
if trail:
data = trail + data
trail = ""
# split lines and keep any non-newline ended data
lines, rest = split_lines(data)
if echo:
for line in lines:
print(f"[{self.nickname}] {line}")
# if the last part of `data` contains the sudo prompt, handle it
if rest.endswith(env.sudo_prompt):
print(f"[{self.nickname}] {rest}")
# we need to handle sudo erroring because the password was wrong
if lines[-1] == "Sorry, try again.":
print("Unsetting env.sudo_password")
env.sudo_password = None
if env.sudo_password is None:
env.sudo_password = getpass.getpass("Need password for sudo: ")
writer.write(f"{env.sudo_password}\n")
else:
if rest:
trail += rest
output = "".join(list(buf))
return output
async def _run(
self,
command: str,
sudo=False,
cd: Optional[str] = None,
pty=False,
environ: Optional[Dict[str, str]] = None,
echo=True,
**kwargs,
) -> CommandResult:
"""Run a shell command on the remote host"""
if self._connection is None:
await self._connect()
original_command = command
if cd:
command = 'cd "{}" && {}'.format(cd, command)
env_command = prepare_environment(environ)
log.debug(f"*{self.nickname}* environment for command: {env_command}")
if sudo:
command = f"{env_command}{command}"
command = f"sudo -S -p {shlex.quote(env.sudo_prompt)} $SHELL -c {shlex.quote(command)}"
else:
command = f"{env_command}{command}"
log.debug(f"*{self.nickname}* final command: {command}")
args = {}
if pty:
args.update({"term_type": env.term_type, "term_size": env.term_size})
async with self._connection.create_process(command, **args) as proc: # type: ignore
stdout, stderr = await asyncio.gather(
self._read_from(proc.stdout, proc.stdin, echo=echo),
self._read_from(proc.stderr, proc.stdin, echo=echo),
)
return CommandResult(
command=original_command,
actual_command=command,
exit_code=proc.exit_status,
stdout=stdout,
# if we use a pty this will be empty
stderr=stderr,
hostname=self.nickname,
sudo=sudo,
)
# use the event loop
def run(self, command, pty=True, cd=None, environ=None, echo=True) -> CommandResult:
"""Execute a command on the remote server.
:param command: the command line string to execute.
        :param pty: whether to request a remote pty.
:param cd: the optional name of the directory where the command will be executed.
:param environ: an optional dictionary containing environment variables to set when
executing the command.
:param echo: set to `False` to hide the output of the command.
"""
print(f"*{self.nickname}* Running: {command}")
kwargs = {"pty": pty, "cd": cd, "environ": environ}
return run_in_loop(self._run(command, **kwargs))
# use the event loop
def | (self, command, pty=True, cd=None, environ=None, echo=True) -> CommandResult:
"""Execute a command with sudo on the remote server.
:param command: the command line string to execute.
        :param pty: whether to request a remote pty.
:param cd: the optional name of the directory where the command will be executed.
:param environ: an optional dictionary containing environment variables to set when
executing the command.
:param echo: set to `False` to hide the output of the command.
"""
print(f"*{self.nickname}* - Sudo: {command}")
kwargs = {"pty": pty, "cd": cd, "sudo": True, "environ": environ}
return run_in_loop(self._run(command, **kwargs))
async def _connect(self):
log.info(f"Connecting to {self.hostname}:{self.port}")
args = {"username": self.username}
if env.use_known_hosts is False:
args["known_hosts"] = None
if self.tunnel:
log.info(f"Connecting to tunnel {self.tunnel}")
tunnel_conn = _get_connection(self.tunnel, use_cache=False)
await tunnel_conn._connect()
args["tunnel"] = tunnel_conn
# we either use the private key OR the agent; loading the private key might fail while the
# agent could still be working.
if self.agent_path:
args["agent_path"] = self.agent_path
elif self.private_key:
args["client_keys"] = [self.private_key]
# this may throw several exceptions:
# asyncssh.misc.HostKeyNotVerifiable: Host key is not trusted
self._connection = await asyncssh.connect(self.hostname, self.port, **args)
# use the event loop
def disconnect(self):
"""Close the SSH connection to the server."""
# Maybe here we should also delete ourself from the connection cache, but we don't know our
# own "nickname"!
if self._connection is not None:
self._connection.close()
run_in_loop(self._connection.wait_closed())
self._connection = None
print("disconnected")
@property
def connected(self) -> bool:
return self._connection is not None
async def get_sftp_client(self) -> asyncssh.SFTPClient:
if self._connection is None:
await self._connect()
if self._sftp_client is None:
self._sftp_client = await self._connection.start_sftp_client() # type: ignore
return self._sftp_client
async def _get(self, remotefile, localfile):
sftp_client = await self.get_sftp_client()
try:
size = await sftp_client.getsize(remotefile)
# from https://asyncssh.readthedocs.io/en/latest/api.html#asyncssh.SFTPClient.get
block_size = 16384
i = size // block_size + 1
if i < 0:
i = 1
bar = tqdm.tqdm(total=i, desc=os.path.basename(remotefile))
def _update_bar(source, dest, cur, tot):
bar.update(1)
await sftp_client.get(
remotefile, localfile, progress_handler=_update_bar, block_size=block_size
)
bar.close()
except (OSError, asyncssh.SFTPError):
raise
# use the event loop
def get(self, remotefile, localfile):
"""Download a file from the remote server.
:param remotefile: the path to the remote file to download.
:param localfile: the local path where to write the downloaded file.
"""
run_in_loop(self._get(remotefile, localfile))
async def _read(self, remotefile) -> bytes:
sftp_client = await self.get_sftp_client()
try:
size = await sftp_client.getsize(remotefile)
bar = tqdm.tqdm(total=size, desc=os.path.basename(remotefile))
fd = await sftp_client.open(remotefile, "rb")
data = []
while True:
# 16384 is the default block size
buf = await fd.read(16384)
if buf == b"":
break
data.append(buf)
bar.update(len(buf))
fd.close()
bar.close()
return b"".join(data)
except (OSError, asyncssh.SFTPError):
raise
# use the event loop
def read(self, remotefile) -> bytes:
"""Read the contents of a remote file.
:param remotefile: the path of the remote file to read.
This is useful when you just want to read the contents of a remote file without downloading
it.
"""
return run_in_loop(self._read(remotefile))
async def _put(self, localfile, remotefile):
sftp_client = await self.get_sftp_client()
try:
size = os.path.getsize(localfile)
# from https://asyncssh.readthedocs.io/en/latest/api.html#asyncssh.SFTPClient.get
block_size = 16384
i = size // block_size + 1
if i < 0:
i = 1
bar = tqdm.tqdm(total=i, desc=os.path.basename(localfile))
def _update_bar(source, dest, cur, tot):
bar.update(1)
await sftp_client.put(
localfile, remotefile, progress_handler=_update_bar, block_size=block_size
)
bar.close()
except (OSError, asyncssh.SFTPError):
raise
# use the event loop
def put(self, localfile, remotefile):
"""Upload a local file to a remote server.
:param localfile: the path of the local file to upload.
:param remotefile: the path where to write the file on the remote server.
"""
run_in_loop(self._put(localfile, remotefile))
async def _file_exists(self, remotefile) -> bool:
sftp_client = await self.get_sftp_client()
return await sftp_client.exists(remotefile)
# use the event loop
def file_exists(self, remotefile) -> bool:
"""Check if a file exists on the remote server.
:param remotefile: the path of the remote file that will be checked.
"""
return run_in_loop(self._file_exists(remotefile))
def _get_connection(name=None, use_cache=True) -> Connection:
"""Get a connection for `name`.
    `name` does not need to be a FQDN; it can be a "nickname" from an SSH configuration file.
"""
global _connections_cache
if name is None and env.host_string is None:
raise RuntimeError("env.host_string is empty!")
if name is None:
name = env.host_string
if use_cache and name in _connections_cache:
conn = _connections_cache[name]
# here we delete stale Connections objects.
if conn.connected:
return conn
del _connections_cache[name]
ssh_options = options_to_connect(name)
args = {}
if "identityfile" in ssh_options:
args["private_key"] = ssh_options["identityfile"]
if "identityagent" in ssh_options:
args["agent_path"] = ssh_options["identityagent"]
# TODO:
# identitiesonly yes
# NOTE: we only cache connections created here, and maybe the tunnels.
    # maybe by default we should not re-use the tunnels, to match SSH's default behavior
c = Connection(
ssh_options["hostname"], ssh_options["user"], ssh_options["port"], nickname=name, **args
)
if use_cache:
_connections_cache[name] = c
return c
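# Minimal usage sketch (hostname and commands are hypothetical; the calls
# mirror the methods defined above):
#
#   conn = _get_connection("web1")             # resolved via SSH config options
#   result = conn.run("uname -a")              # returns a CommandResult
#   conn.sudo("systemctl restart nginx")       # prompts for and caches env.sudo_password
#   conn.put("app.tar.gz", "/tmp/app.tar.gz")  # upload with a progress bar
#   conn.disconnect()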
| sudo |
service.go | // Copyright © 2020, 2021 Attestant Limited.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
api "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/pkg/errors"
"github.com/rs/zerolog"
zerologger "github.com/rs/zerolog/log"
)
// Service is an Ethereum 2 client service.
type Service struct {
// Hold the initialising context to use for streams.
ctx context.Context
base *url.URL
address string
client *http.Client
timeout time.Duration
// Various information from the node that does not change during the
// lifetime of a beacon node.
genesis *api.Genesis
genesisMutex sync.Mutex
spec map[string]interface{}
specMutex sync.Mutex
depositContract *api.DepositContract
depositContractMutex sync.Mutex
forkSchedule []*phase0.Fork
forkScheduleMutex sync.Mutex
nodeVersion string
nodeVersionMutex sync.Mutex
// API support.
supportsV2BeaconBlocks bool
supportsV2BeaconState bool
supportsV2ValidatorBlocks bool
}
// log is a service-wide logger.
var log zerolog.Logger
// New creates a new Ethereum 2 client service, connecting with a standard HTTP.
func New(ctx context.Context, params ...Parameter) (*Service, error) { |
// fetchStaticValues fetches values that never change.
// This caches the values, avoiding future API calls.
func (s *Service) fetchStaticValues(ctx context.Context) error {
if _, err := s.Genesis(ctx); err != nil {
return errors.Wrap(err, "failed to fetch genesis")
}
if _, err := s.Spec(ctx); err != nil {
return errors.Wrap(err, "failed to fetch spec")
}
if _, err := s.DepositContract(ctx); err != nil {
return errors.Wrap(err, "failed to fetch deposit contract")
}
if _, err := s.ForkSchedule(ctx); err != nil {
return errors.Wrap(err, "failed to fetch fork schedule")
}
return nil
}
// checkAPIVersioning checks the versions of some APIs and sets
// internal flags appropriately.
func (s *Service) checkAPIVersioning(ctx context.Context) error {
// Start by setting the API v2 flag for blocks and fetching block 0.
s.supportsV2BeaconBlocks = true
_, err := s.SignedBeaconBlock(ctx, "0")
if err == nil {
// It's good. Assume that other V2 APIs introduced with Altair
// are present.
s.supportsV2BeaconState = true
s.supportsV2ValidatorBlocks = true
} else {
// Assume this is down to the V2 endpoint missing rather than
// some other failure.
s.supportsV2BeaconBlocks = false
}
return nil
}
// Name provides the name of the service.
func (s *Service) Name() string {
return "Standard (HTTP)"
}
// Address provides the address for the connection.
func (s *Service) Address() string {
return s.address
}
// close closes the service, freeing up resources.
func (s *Service) close() {
}
|
parameters, err := parseAndCheckParameters(params...)
if err != nil {
return nil, errors.Wrap(err, "problem with parameters")
}
// Set logging.
log = zerologger.With().Str("service", "client").Str("impl", "http").Logger()
if parameters.logLevel != log.GetLevel() {
log = log.Level(parameters.logLevel)
}
client := &http.Client{
Transport: &http.Transport{
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 64,
MaxIdleConnsPerHost: 64,
IdleConnTimeout: 384 * time.Second,
},
}
address := parameters.address
if !strings.HasPrefix(address, "http") {
address = fmt.Sprintf("http://%s", parameters.address)
}
base, err := url.Parse(address)
if err != nil {
return nil, errors.Wrap(err, "invalid URL")
}
s := &Service{
ctx: ctx,
base: base,
address: parameters.address,
client: client,
timeout: parameters.timeout,
}
// Fetch static values to confirm the connection is good.
if err := s.fetchStaticValues(ctx); err != nil {
return nil, errors.Wrap(err, "failed to confirm node connection")
}
// Handle flags for API versioning.
if err := s.checkAPIVersioning(ctx); err != nil {
return nil, errors.Wrap(err, "failed to check API versioning")
}
// Close the service on context done.
go func(s *Service) {
<-ctx.Done()
log.Trace().Msg("Context done; closing connection")
s.close()
}(s)
return s, nil
}
|
jquery.bxslider.min.js | /**
* BxSlider v4.0 - Fully loaded, responsive content slider
*
* Copyright 2015
*/
jQuery(document).ready(function () {
jQuery('.bxslider').bxSlider({
mode: 'horizontal',
slideMargin: 3,
auto:true
});
});
|
(function(t){var e={},n={mode:"horizontal",slideSelector:"",infiniteLoop:!0,hideControlOnEnd:!1,speed:1000,easing:null,slideMargin:0,startSlide:0,randomStart:!1,captions:!1,ticker:!1,tickerHover:!1,adaptiveHeight:!1,adaptiveHeightSpeed:1000,touchEnabled:!0,swipeThreshold:50,video:!1,useCSS:!0,pager:!0,pagerType:"full",pagerShortSeparator:" / ",pagerSelector:null,buildPager:null,pagerCustom:null,controls:!0,nextText:"Next",prevText:"Prev",nextSelector:null,prevSelector:null,autoControls:!1,startText:"Start",stopText:"Stop",autoControlsCombine:!1,autoControlsSelector:null,auto:!1,pause:4e3,autoStart:!0,autoDirection:"next",autoHover:!1,autoDelay:0,minSlides:1,maxSlides:1,moveSlides:0,slideWidth:0,onSliderLoad:function(){},onSlideBefore:function(){},onSlideAfter:function(){},onSlideNext:function(){},onSlidePrev:function(){}};t.fn.bxSlider=function(s){if(this.length!=0){if(this.length>1)return this.each(function(){t(this).bxSlider(s)}),this;var o={},r=this;e.el=this;var a=function(){o.settings=t.extend({},n,s),o.children=r.children(o.settings.slideSelector),o.settings.randomStart&&(o.settings.startSlide=Math.floor(Math.random()*o.children.length)),o.active={index:o.settings.startSlide},o.carousel=o.settings.minSlides>1||o.settings.maxSlides>1,o.minThreshold=o.settings.minSlides*o.settings.slideWidth+(o.settings.minSlides-1)*o.settings.slideMargin,o.maxThreshold=o.settings.maxSlides*o.settings.slideWidth+(o.settings.maxSlides-1)*o.settings.slideMargin,o.working=!1,o.controls={},o.animProp=o.settings.mode=="vertical"?"top":"left",o.usingCSS=o.settings.useCSS&&o.settings.mode!="fade"&&function(){var t=document.createElement("div"),e=["WebkitPerspective","MozPerspective","OPerspective","msPerspective"];for(var i in e)if(t.style[e[i]]!==void 0)return o.cssPrefix=e[i].replace("Perspective","").toLowerCase(),o.animProp="-"+o.cssPrefix+"-transform",!0;return!1}(),o.settings.mode=="vertical"&&(o.settings.maxSlides=o.settings.minSlides),l()},l=function(){if(r.wrap('<div class="bx-wrapper"><div class="bx-viewport"></div></div>'),o.viewport=r.parent(),o.loader=t('<div class="bx-loading" />'),o.viewport.prepend(o.loader),r.css({width:o.settings.mode=="horizontal"?o.children.length*215+"%":"auto",position:"relative"}),o.usingCSS&&o.settings.easing?r.css("-"+o.cssPrefix+"-transition-timing-function",o.settings.easing):o.settings.easing||(o.settings.easing="swing"),o.viewport.css({width:"100%",overflow:"hidden",position:"relative"}),o.children.css({"float":o.settings.mode=="horizontal"?"left":"none",listStyle:"none"}),o.children.width(c()),o.settings.mode=="horizontal"&&o.settings.slideMargin>0&&o.children.css("marginRight",o.settings.slideMargin),o.settings.mode=="vertical"&&o.settings.slideMargin>0&&o.children.css("marginBottom",o.settings.slideMargin),o.settings.mode=="fade"&&(o.children.css({position:"absolute",zIndex:0,display:"none"}),o.children.eq(o.settings.startSlide).css({zIndex:50,display:"block"})),o.controls.el=t('<div class="bx-controls" />'),o.settings.captions&&b(),o.settings.infiniteLoop&&o.settings.mode!="fade"&&!o.settings.ticker){var 
e=o.settings.mode=="vertical"?o.settings.minSlides:o.settings.maxSlides,i=o.children.slice(0,e).clone().addClass("bx-clone"),n=o.children.slice(-e).clone().addClass("bx-clone");r.append(i).prepend(n)}o.active.last=o.settings.startSlide==h()-1,o.settings.video&&r.fitVids(),o.settings.ticker||(o.settings.pager&&x(),o.settings.controls&&m(),o.settings.auto&&o.settings.autoControls&&S(),(o.settings.controls||o.settings.autoControls||o.settings.pager)&&o.viewport.after(o.controls.el)),r.children().imagesLoaded(function(){o.loader.remove(),p(),o.settings.mode=="vertical"&&(o.settings.adaptiveHeight=!0),o.viewport.height(d()),o.settings.onSliderLoad(o.active.index),o.settings.auto&&o.settings.autoStart&&z(),o.settings.ticker&&q(),o.settings.pager&&A(o.settings.startSlide),o.settings.controls&&M(),o.settings.touchEnabled&&!o.settings.ticker&&D()})},d=function(){var e=0,n=t();if(o.settings.mode=="vertical"||o.settings.adaptiveHeight)if(o.carousel){var s=o.settings.moveSlides==1?o.active.index:o.active.index*u();for(n=o.children.eq(s),i=1;o.settings.maxSlides-1>=i;i++)n=s+i>=o.children.length?n.add(o.children.eq(i-1)):n.add(o.children.eq(s+i))}else n=o.children.eq(o.active.index);else n=o.children;return o.settings.mode=="vertical"?(n.each(function(){e+=t(this).outerHeight()}),o.settings.slideMargin>0&&(e+=o.settings.slideMargin*(o.settings.minSlides-1))):e=Math.max.apply(Math,n.map(function(){return t(this).outerHeight(!1)}).get()),e},c=function(){var t=o.settings.slideWidth,e=o.viewport.width();return o.settings.slideWidth==0?t=e:e>o.maxThreshold?t=(e-o.settings.slideMargin*(o.settings.maxSlides-1))/o.settings.maxSlides:o.minThreshold>e&&(t=(e-o.settings.slideMargin*(o.settings.minSlides-1))/o.settings.minSlides),t},g=function(){var t=1;if(o.settings.mode=="horizontal")if(o.minThreshold>o.viewport.width())t=o.settings.minSlides;else if(o.viewport.width()>o.maxThreshold)t=o.settings.maxSlides;else{var e=o.children.first().width();t=Math.floor(o.viewport.width()/e)}else o.settings.mode=="vertical"&&(t=o.settings.minSlides);return t},h=function(){var t=0;if(o.settings.moveSlides>0)if(o.settings.infiniteLoop)t=o.children.length/u();else{var e=0,i=0;while(o.children.length>e)++t,e=i+g(),i+=g()>=o.settings.moveSlides?o.settings.moveSlides:g()}else t=Math.ceil(o.children.length/g());return t},u=function(){return o.settings.moveSlides>0&&g()>=o.settings.moveSlides?o.settings.moveSlides:g()},p=function(){if(o.active.last){if(o.settings.mode=="horizontal"){var t=o.children.last(),e=t.position();v(-(e.left-(o.viewport.width()-t.width())),"reset",0)}else if(o.settings.mode=="vertical"){var i=o.children.length-o.settings.minSlides,e=o.children.eq(i).position();v(-e.top,"reset",0)}}else{var e=o.children.eq(o.active.index*u()).position();o.active.index==h()-1&&(o.active.last=!0),e!=void 0&&(o.settings.mode=="horizontal"?v(-e.left,"reset",0):o.settings.mode=="vertical"&&v(-e.top,"reset",0))}},v=function(t,e,i,n){if(o.usingCSS){var s=o.settings.mode=="vertical"?"translate3d(0, "+t+"px, 0)":"translate3d("+t+"px, 0, 0)";r.css("-"+o.cssPrefix+"-transition-duration",i/1e3+"s"),e=="slide"?(r.css(o.animProp,s),r.bind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd",function(){r.unbind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd"),k()})):e=="reset"?r.css(o.animProp,s):e=="ticker"&&(r.css("-"+o.cssPrefix+"-transition-timing-function","linear"),r.css(o.animProp,s),r.bind("transitionend webkitTransitionEnd oTransitionEnd MSTransitionEnd",function(){r.unbind("transitionend 
webkitTransitionEnd oTransitionEnd MSTransitionEnd"),v(n.resetValue,"reset",0),L()}))}else{var a={};a[o.animProp]=t,e=="slide"?r.animate(a,i,o.settings.easing,function(){k()}):e=="reset"?r.css(o.animProp,t):e=="ticker"&&r.animate(a,speed,"linear",function(){v(n.resetValue,"reset",0),L()})}},f=function(){var e="";pagerQty=h();for(var i=0;pagerQty>i;i++){var n="";o.settings.buildPager&&t.isFunction(o.settings.buildPager)?(n=o.settings.buildPager(i),o.pagerEl.addClass("bx-custom-pager")):(n=i+1,o.pagerEl.addClass("bx-default-pager")),e+='<div class="bx-pager-item"><a href="" data-slide-index="'+i+'" class="bx-pager-link">'+n+"</a></div>"}o.pagerEl.html(e)},x=function(){o.settings.pagerCustom?o.pagerEl=t(o.settings.pagerCustom):(o.pagerEl=t('<div class="bx-pager" />'),o.settings.pagerSelector?t(o.settings.pagerSelector).html(o.pagerEl):o.controls.el.addClass("bx-has-pager").append(o.pagerEl),f()),o.pagerEl.delegate("a","click",P)},m=function(){o.controls.next=t('<a class="bx-next" href="">'+o.settings.nextText+"</a>"),o.controls.prev=t('<a class="bx-prev" href="">'+o.settings.prevText+"</a>"),o.controls.next.bind("click",w),o.controls.prev.bind("click",T),o.settings.nextSelector&&t(o.settings.nextSelector).append(o.controls.next),o.settings.prevSelector&&t(o.settings.prevSelector).append(o.controls.prev),o.settings.nextSelector||o.settings.prevSelector||(o.controls.directionEl=t('<div class="bx-controls-direction" />'),o.controls.directionEl.append(o.controls.prev).append(o.controls.next),o.controls.el.addClass("bx-has-controls-direction").append(o.controls.directionEl))},S=function(){o.controls.start=t('<div class="bx-controls-auto-item"><a class="bx-start" href="">'+o.settings.startText+"</a></div>"),o.controls.stop=t('<div class="bx-controls-auto-item"><a class="bx-stop" href="">'+o.settings.stopText+"</a></div>"),o.controls.autoEl=t('<div class="bx-controls-auto" />'),o.controls.autoEl.delegate(".bx-start","click",C),o.controls.autoEl.delegate(".bx-stop","click",E),o.settings.autoControlsCombine?o.controls.autoEl.append(o.controls.start):o.controls.autoEl.append(o.controls.start).append(o.controls.stop),o.settings.autoControlsSelector?t(o.settings.autoControlsSelector).html(o.controls.autoEl):o.controls.el.addClass("bx-has-controls-auto").append(o.controls.autoEl),y(o.settings.autoStart?"stop":"start")},b=function(){o.children.each(function(){var e=t(this).find("img:first").attr("title");e!=void 0&&t(this).append('<div class="bx-caption"><span>'+e+"</span></div>")})},w=function(t){o.settings.auto&&r.stopAuto(),r.goToNextSlide(),t.preventDefault()},T=function(t){o.settings.auto&&r.stopAuto(),r.goToPrevSlide(),t.preventDefault()},C=function(t){r.startAuto(),t.preventDefault()},E=function(t){r.stopAuto(),t.preventDefault()},P=function(e){o.settings.auto&&r.stopAuto();var i=t(e.currentTarget),n=parseInt(i.attr("data-slide-index"));n!=o.active.index&&r.goToSlide(n),e.preventDefault()},A=function(t){return o.settings.pagerType=="short"?(o.pagerEl.html(t+1+o.settings.pagerShortSeparator+o.children.length),void 0):(o.pagerEl.find("a").removeClass("active"),o.pagerEl.find("a").eq(t).addClass("active"),void 0)},k=function(){if(o.settings.infiniteLoop){var 
t="";o.active.index==0?t=o.children.eq(0).position():o.active.index==h()-1&&o.carousel?t=o.children.eq((h()-1)*u()).position():o.active.index==o.children.length-1&&(t=o.children.eq(o.children.length-1).position()),o.settings.mode=="horizontal"?v(-t.left,"reset",0):o.settings.mode=="vertical"&&v(-t.top,"reset",0)}o.working=!1,o.settings.onSlideAfter(o.children.eq(o.active.index),o.oldIndex,o.active.index)},y=function(t){o.settings.autoControlsCombine?o.controls.autoEl.html(o.controls[t]):(o.controls.autoEl.find("a").removeClass("active"),o.controls.autoEl.find("a:not(.bx-"+t+")").addClass("active"))},M=function(){!o.settings.infiniteLoop&&o.settings.hideControlOnEnd&&(o.active.index==0?(o.controls.prev.addClass("disabled"),o.controls.next.removeClass("disabled")):o.active.index==h()-1?(o.controls.next.addClass("disabled"),o.controls.prev.removeClass("disabled")):(o.controls.prev.removeClass("disabled"),o.controls.next.removeClass("disabled")))},z=function(){o.settings.autoDelay>0?setTimeout(r.startAuto,o.settings.autoDelay):r.startAuto(),o.settings.autoHover&&r.hover(function(){o.interval&&(r.stopAuto(!0),o.autoPaused=!0)},function(){o.autoPaused&&(r.startAuto(!0),o.autoPaused=null)})},q=function(){var e=0;if(o.settings.autoDirection=="next")r.append(o.children.clone().addClass("bx-clone"));else{r.prepend(o.children.clone().addClass("bx-clone"));var i=o.children.first().position();e=o.settings.mode=="horizontal"?-i.left:-i.top}v(e,"reset",0),o.settings.pager=!1,o.settings.controls=!1,o.settings.autoControls=!1,o.settings.tickerHover&&!o.usingCSS&&o.viewport.hover(function(){r.stop()},function(){var e=0;o.children.each(function(){e+=o.settings.mode=="horizontal"?t(this).outerWidth(!0):t(this).outerHeight(!0)});var i=o.settings.speed/e,n=o.settings.mode=="horizontal"?"left":"top",s=i*(e-Math.abs(parseInt(r.css(n))));L(s)}),L()},L=function(t){speed=t?t:o.settings.speed;var e={left:0,top:0},i={left:0,top:0};o.settings.autoDirection=="next"?e=r.find(".bx-clone").first().position():i=o.children.first().position();var n=o.settings.mode=="horizontal"?-e.left:-e.top,s=o.settings.mode=="horizontal"?-i.left:-i.top,a={resetValue:s};v(n,"ticker",speed,a)},D=function(){o.touch={start:{x:0,y:0},end:{x:0,y:0}},o.viewport.bind("touchstart",H)},H=function(t){if(o.working)t.preventDefault();else{o.touch.originalPos=r.position();var e=t.originalEvent;o.touch.start.x=e.changedTouches[0].pageX,o.touch.start.y=e.changedTouches[0].pageY,o.viewport.bind("touchmove",I),o.viewport.bind("touchend",W)}},I=function(t){if(t.preventDefault(),o.settings.mode!="fade"){var e=t.originalEvent,i=0;if(o.settings.mode=="horizontal"){var n=e.changedTouches[0].pageX-o.touch.start.x;i=o.touch.originalPos.left+n}else{var n=e.changedTouches[0].pageY-o.touch.start.y;i=o.touch.originalPos.top+n}v(i,"reset",0)}},W=function(t){o.viewport.unbind("touchmove",I);var e=t.originalEvent,i=0;if(o.touch.end.x=e.changedTouches[0].pageX,o.touch.end.y=e.changedTouches[0].pageY,o.settings.mode=="fade"){var n=Math.abs(o.touch.start.x-o.touch.end.x);n>=o.settings.swipeThreshold&&(o.touch.start.x>o.touch.end.x?r.goToNextSlide():r.goToPrevSlide(),r.stopAuto())}else{var 
n=0;o.settings.mode=="horizontal"?(n=o.touch.end.x-o.touch.start.x,i=o.touch.originalPos.left):(n=o.touch.end.y-o.touch.start.y,i=o.touch.originalPos.top),!o.settings.infiniteLoop&&(o.active.index==0&&n>0||o.active.last&&0>n)?v(i,"reset",200):Math.abs(n)>=o.settings.swipeThreshold?(0>n?r.goToNextSlide():r.goToPrevSlide(),r.stopAuto()):v(i,"reset",200)}o.viewport.unbind("touchend",W)};r.goToSlide=function(e,i){if(!o.working&&o.active.index!=e)if(o.working=!0,o.oldIndex=o.active.index,o.active.index=0>e?h()-1:e>=h()?0:e,o.settings.onSlideBefore(o.children.eq(o.active.index),o.oldIndex,o.active.index),i=="next"?o.settings.onSlideNext(o.children.eq(o.active.index),o.oldIndex,o.active.index):i=="prev"&&o.settings.onSlidePrev(o.children.eq(o.active.index),o.oldIndex,o.active.index),o.active.last=o.active.index>=h()-1,o.settings.pager&&A(o.active.index),o.settings.controls&&M(),o.settings.mode=="fade")o.settings.adaptiveHeight&&o.viewport.height()!=d()&&o.viewport.animate({height:d()},o.settings.adaptiveHeightSpeed),o.children.filter(":visible").fadeOut(o.settings.speed).css({zIndex:0}),o.children.eq(o.active.index).css("zIndex",51).fadeIn(o.settings.speed,function(){t(this).css("zIndex",50),k()});else{o.settings.adaptiveHeight&&o.viewport.height()!=d()&&o.viewport.animate({height:d()},o.settings.adaptiveHeightSpeed);var n=0,s={left:0,top:0};if(!o.settings.infiniteLoop&&o.carousel&&o.active.last)if(o.settings.mode=="horizontal"){var a=o.children.eq(o.children.length-1);s=a.position(),n=o.viewport.width()-a.width()}else{var l=o.children.length-o.settings.minSlides;s=o.children.eq(l).position()}else if(o.carousel&&o.active.last&&i=="prev"){var c=o.settings.moveSlides==1?o.settings.maxSlides-u():(h()-1)*u()-(o.children.length-o.settings.maxSlides),a=r.children(".bx-clone").eq(c);s=a.position()}else if(i=="next"&&o.active.index==0)s=r.find(".bx-clone").eq(o.settings.maxSlides).position(),o.active.last=!1;else if(e>=0){var g=e*u();s=o.children.eq(g).position()}var p=o.settings.mode=="horizontal"?-(s.left-n):-s.top;v(p,"slide",o.settings.speed)}},r.goToNextSlide=function(){if(o.settings.infiniteLoop||!o.active.last){var t=o.active.index+1;r.goToSlide(t,"next")}},r.goToPrevSlide=function(){if(o.settings.infiniteLoop||o.active.index!=0){var t=o.active.index-1;r.goToSlide(t,"prev")}},r.startAuto=function(t){o.interval||(o.interval=setInterval(function(){o.settings.autoDirection=="next"?r.goToNextSlide():r.goToPrevSlide()},o.settings.pause),o.settings.autoControls&&t!=1&&y("stop"))},r.stopAuto=function(t){o.interval&&(clearInterval(o.interval),o.interval=null,o.settings.autoControls&&t!=1&&y("start"))},r.getCurrentSlide=function(){return o.active.index},r.getSlideCount=function(){return o.children.length};var N=t(window).width(),B=t(window).height();return t(window).resize(function(){var e=t(window).width(),i=t(window).height();(N!=e||B!=i)&&(N=e,B=i,o.children.add(r.find(".bx-clone")).width(c()),o.viewport.css("height",d()),o.active.last&&(o.active.index=h()-1),o.active.index>=h()&&(o.active.last=!0),o.settings.pager&&!o.settings.pagerCustom&&(f(),A(o.active.index)),o.settings.ticker||p())}),a(),this}}})(jQuery),function(t,e){var i="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///ywAAAAAAQABAAACAUwAOw==";t.fn.imagesLoaded=function(n){function s(){var e=t(g),i=t(h);a&&(h.length?a.reject(d,e,i):a.resolve(d)),t.isFunction(n)&&n.call(r,d,e,i)}function 
o(e,n){e.src===i||-1!==t.inArray(e,c)||(c.push(e),n?h.push(e):g.push(e),t.data(e,"imagesLoaded",{isBroken:n,src:e.src}),l&&a.notifyWith(t(e),[n,d,t(g),t(h)]),d.length===c.length&&(setTimeout(s),d.unbind(".imagesLoaded")))}var r=this,a=t.isFunction(t.Deferred)?t.Deferred():0,l=t.isFunction(a.notify),d=r.find("img").add(r.filter("img")),c=[],g=[],h=[];return t.isPlainObject(n)&&t.each(n,function(t,e){"callback"===t?n=e:a&&a[t](e)}),d.length?d.bind("load.imagesLoaded error.imagesLoaded",function(t){o(t.target,"error"===t.type)}).each(function(n,s){var r=s.src,a=t.data(s,"imagesLoaded");a&&a.src===r?o(s,a.isBroken):s.complete&&s.naturalWidth!==e?o(s,0===s.naturalWidth||0===s.naturalHeight):(s.readyState||s.complete)&&(s.src=i,s.src=r)}):s(),a?a.promise(r):r}}(jQuery) |
|
conv_to_str.py | def conv_to_str(o):
| if isinstance(o, str):
        # In Python 3 every str is already Unicode; the old Python 2
        # u'' prefixes no longer apply, so a plain str() call suffices.
        o = str(o)
elif isinstance(o, (list, tuple)):
is_tuple = isinstance(o, tuple)
o = [conv_to_str(i) for i in o]
if is_tuple:
o = tuple(o)
elif isinstance(o, dict):
for k in o:
o[k] = conv_to_str(o[k])
return o |
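# Illustrative example (hypothetical values):
#   conv_to_str({u"key": (u"a", [u"b"])}) -> {"key": ("a", ["b"])}
# Nesting and tuple-ness are preserved while every string becomes str.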
|
state.service.ts | import * as _isNil from 'lodash/isNil';
import * as moment from 'moment';
import { Injectable } from '@angular/core';
import { HelpersService } from './helpers.service';
import {
DaysForecast,
HoursForecast,
Overcast,
State,
TimeOfDay,
WeatherDefinitions,
WeatherTypes,
WindDirections
} from '../../../shared/public-api';
@Injectable({
providedIn: 'root',
})
export class | {
public currentState: State = <State>{};
constructor(private helpersService: HelpersService) { }
public adjustReceivedData(weatherData: State): void {
this.currentState = weatherData;
this.currentState.location = this.setLocation();
this.currentState.currentDate = this.setCurrentDate();
this.currentState.currentTimeString = this.setCurrentTimeString();
this.currentState.currentBackground = this.defineSkyBackground();
this.currentState.moonPhase = this.helpersService.calculateMoonPhase();
}
public setLocation(): string {
return `${this.currentState.locationData.city}, ${this.currentState.locationData.country}`;
}
public setCurrentDate(): string {
return moment().format('D MMM YYYY');
}
public setCurrentTimeString(): string {
return moment().format('HH:mm');
}
public defineSkyBackground(): string {
const currentHour: number = moment.duration(this.currentState.currentTime).hours();
const shouldAdjustCurrentHour: boolean =
this.currentState.dayLength / this.currentState.nightLength >= 1 ||
currentHour === 0 ||
currentHour === 12 ||
currentHour === 24;
let adjustedHour: number;
let adjustedHourFormatted: string;
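    // When the day is at least as long as the night (or the hour is exactly
    // 0, 12 or 24) the current hour is kept; otherwise it is nudged one hour
    // away from noon. Illustrative example: 15:00 on a short day yields the
    // class 'app-sky-gradient-16'.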
adjustedHour = shouldAdjustCurrentHour
? currentHour
: (currentHour < 12)
? (currentHour - 1)
: (currentHour + 1);
adjustedHourFormatted = moment().hour(adjustedHour).format('HH');
return `app-sky-gradient-${adjustedHourFormatted}`;
}
public saveStateToLocalStorage(): void {
if (this.helpersService.isStorageAvailable('localStorage')) {
localStorage.setItem('lastSavedWeatherState', JSON.stringify(this.currentState));
}
}
public getInitialState(): void {
const isStateSavedInLocalStorage: boolean =
this.helpersService.isStorageAvailable('localStorage') &&
!_isNil(localStorage.getItem('lastSavedWeatherState'));
this.currentState = isStateSavedInLocalStorage
? JSON.parse(localStorage.getItem('lastSavedWeatherState'))
: this.setMockedState();
}
public setMockedState(): State {
const currentDate = this.setCurrentDate();
const currentTimeString = this.setCurrentTimeString();
const moonPhase = this.helpersService.calculateMoonPhase();
return <State>{
timeOfDay: TimeOfDay.day,
dayLength: 43200000,
nightLength: 43200000,
currentTime: 43200000,
location: 'Kyiv, Ukraine',
currentTimeString: currentTimeString,
currentDate: currentDate,
currentBackground: `app-sky-gradient-12`,
cloudy: true,
rainy: false,
snowy: false,
foggy: false,
overcast: Overcast.light,
weatherType: WeatherTypes.dayClear,
weatherDefinition: WeatherDefinitions.dayClear,
temperatureCurrent: 20,
temperatureFeelsLike: 25,
humidityCurrent: 10,
uvIndex: 1,
airPressure: 750,
windSpeed: 1,
windDirection: WindDirections.eastSouth,
moonPhase: moonPhase,
hoursForecast: <HoursForecast>[],
daysForecast: <DaysForecast>[]
};
}
}
| StateService |
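
The hour-adjustment rule in defineSkyBackground above is easier to follow with concrete numbers. A minimal Go sketch of the same arithmetic, assuming made-up day/night lengths (the adjustedHour name is invented here):

package main

import "fmt"

// adjustedHour mirrors the defineSkyBackground rule: when days are at
// least as long as nights (or the hour is 0, 12, or 24) keep the hour,
// otherwise nudge morning hours down and evening hours up by one.
func adjustedHour(hour int, dayLen, nightLen float64) int {
	if dayLen/nightLen >= 1 || hour == 0 || hour == 12 || hour == 24 {
		return hour
	}
	if hour < 12 {
		return hour - 1
	}
	return hour + 1
}

func main() {
	// Short winter day: 8h of daylight vs 16h of night.
	for _, h := range []int{6, 12, 18} {
		fmt.Printf("hour %02d -> app-sky-gradient-%02d\n",
			h, adjustedHour(h, 8, 16))
	}
}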
math_test.go | package utils
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestIsZero(t *testing.T) | {
t.Parallel()
testCases := []struct {
value string
isZero bool
errExpected bool
}{
{"0", true, false},
{"0.0", true, false},
{"1", false, false},
{"1.0", false, false},
{"", false, true},
}
for _, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("isZero %s", tc.value), func(t *testing.T) {
t.Parallel()
isZero, err := IsZero(tc.value)
if tc.errExpected {
require.Error(t, err)
} else {
require.NoError(t, err)
}
if tc.isZero {
require.True(t, isZero)
} else {
require.False(t, isZero)
}
})
}
} |
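
IsZero itself is not shown in this file. A plausible implementation consistent with the table above (empty string errors out, "0" and "0.0" count as zero) might look like the sketch below; this is an assumption, not the package's actual code.

package utils

import "strconv"

// IsZero reports whether the decimal string value equals zero.
// An unparseable value (including the empty string) returns an error.
func IsZero(value string) (bool, error) {
	f, err := strconv.ParseFloat(value, 64)
	if err != nil {
		return false, err
	}
	return f == 0, nil
}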
|
config.rs | //use std::convert::From;
use std::fs;
use toml::Value;
use crate::ROOT_PATH;
#[derive(Debug)]
pub struct Config {
pub height: i64,
pub width: i64,
pub ui_scale: i64,
pub vsync: bool,
}
// This will return the config.
// In the meantime, just fake it.
pub fn load_config() -> Config {
let filename = format!("{}/config.toml", ROOT_PATH);
let contents = fs::read_to_string(filename).expect("Something went wrong reading the file");
let value = contents.parse::<Value>().unwrap();
let mut config = Config { | width: 0,
ui_scale: 1,
vsync: false,
};
/* // This ensures the values exist
match value["height"].as_integer() {
Some(e) => {
config.height = e;
}
_ => {}
}*/
config.width = value["width"].as_integer().unwrap();
config.height = value["height"].as_integer().unwrap();
config.ui_scale = value["ui_scale"].as_integer().unwrap();
config.vsync = value["vsync"].as_bool().unwrap();
config
} | height: 0, |
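
The commented-out match above hints at the safer pattern: probe each key and keep a default when it is missing, rather than unwrap-panicking. A Go sketch of that fall-back idea (intOr and the plain map stand in for the TOML layer and are invented for illustration):

package main

import "fmt"

// Config mirrors the Rust struct above.
type Config struct {
	Height  int64
	Width   int64
	UIScale int64
	VSync   bool // left at its zero value (false) for brevity
}

// intOr returns the value stored under key, or def when it is missing.
func intOr(values map[string]int64, key string, def int64) int64 {
	if v, ok := values[key]; ok {
		return v
	}
	return def
}

func main() {
	parsed := map[string]int64{"width": 1280, "height": 720} // ui_scale absent
	cfg := Config{
		Width:   intOr(parsed, "width", 0),
		Height:  intOr(parsed, "height", 0),
		UIScale: intOr(parsed, "ui_scale", 1), // keeps the default
	}
	fmt.Printf("%+v\n", cfg)
}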
JRAreaChartFactory.rs | net.sf.jasperreports.charts.xml.JRAreaChartFactory |
||
merkle.go | package types
import (
"bytes"
"encoding/binary"
"math"
"reflect"
"sort"
"strconv"
"golang.org/x/crypto/blake2b"
)
type Range struct {
Lower uint64 `json:"lower"`
Upper uint64 `json:"upper"`
}
func (r Range) Bytes() []byte {
return append(uint64ToBytes(r.Lower), uint64ToBytes(r.Upper)...)
}
type proofAndRanges struct {
hr []HashRange
p []Proof
}
type SortByProof proofAndRanges
func (a SortByProof) Len() int { return len(a.hr) }
func (a SortByProof) Swap(i, j int) {
a.hr[i], a.hr[j] = a.hr[j], a.hr[i]
a.p[i], a.p[j] = a.p[j], a.p[i]
}
func (a SortByProof) Less(i, j int) bool { return a.hr[i].Range.Upper < a.hr[j].Range.Upper }
// "uint64ToBytes" - convert the uint64 to bytes
func uint64ToBytes(a uint64) (bz []byte) {
bz = make([]byte, 8)
binary.LittleEndian.PutUint64(bz, a)
return
}
// "HashRange" - A structure to represent the merkleHash and the range at an index in the merkle sum tree
type HashRange struct {
Hash []byte `json:"merkleHash"`
Range Range `json:"range"`
}
func (hr HashRange) isValidRange() bool {
if hr.Range.Upper == 0 {
return false
}
if hr.Range.Lower >= hr.Range.Upper {
return false
}
return true
}
// "MerkleProof" - A structure used to verify a leaf of the tree.
type MerkleProof struct {
TargetIndex int `json:"index"`
HashRanges []HashRange `json:"hash_ranges"`
Target HashRange `json:"target_range"`
}
// "Validate" - Verifies the Proof from the leaf/cousin node data, the merkle root, and the Proof object
func (mp MerkleProof) Validate(root HashRange, leaf Proof, totalRelays int64) (isValid bool) {
// ensure root lower is zero
if root.Range.Lower != 0 {
return
}
// check if levels and total relays is valid
numOfLevels, valid := levelsIsValid(len(mp.HashRanges), totalRelays)
if !valid {
return
}
// check to see that target merkleHash is leaf merkleHash
if !bytes.Equal(mp.Target.Hash, merkleHash(leaf.Bytes())) {
return
}
// check to see that target upper == decimal representation of merkleHash
if mp.Target.Range.Upper != sumFromHash(mp.Target.Hash) {
return
}
// execute the for loop for each level
for i := 0; i < numOfLevels; i++ {
// check for valid range
if !mp.Target.isValidRange() {
return
}
// get sibling from mp object
sibling := mp.HashRanges[i]
// check to see if sibling is within a valid range
if !sibling.isValidRange() {
return
}
if mp.TargetIndex%2 == 1 { // odd target index
// target lower should equal sibling upper
if mp.Target.Range.Lower != sibling.Range.Upper {
return
}
// calculate the parent range and store it where the child used to be
mp.Target.Range.Lower = sibling.Range.Lower
// **upper stays the same**
// generate the parent merkleHash and store it where the child used to be
mp.Target.Hash = parentHash(sibling.Hash, mp.Target.Hash, mp.Target.Range)
} else { // even index
// target upper should equal sibling lower
if mp.Target.Range.Upper != sibling.Range.Lower {
return
}
// calculate the parent range and store it where the child used to be
mp.Target.Range.Upper = sibling.Range.Upper
// **lower stays the same**
// generate the parent merkleHash and store it where the child used to be
mp.Target.Hash = parentHash(mp.Target.Hash, sibling.Hash, mp.Target.Range)
}
// half the indices as we are going up one level
mp.TargetIndex /= 2
}
// ensure root == verification for leaf and cousin
return reflect.DeepEqual(root, mp.Target)
}
// "sumFromHash" - get leaf sum from merkleHash
func sumFromHash(hash []byte) uint64 {
hashCopy := make([]byte, len(hash))
copy(hashCopy, hash)
return binary.LittleEndian.Uint64(hashCopy[:8])
}
// "newLevelIsValid" - Ensure that the number of levels in the relayProof is valid
func levelsIsValid(leafNumOfLevels int, totalRelays int64) (numOfLevels int, isValid bool) {
return leafNumOfLevels, nextPowerOfTwo(uint(totalRelays)) == uint(math.Pow(2, float64(leafNumOfLevels)))
}
// "nextPowrOfTwo" - Computes the next power of 2 given an u-integer
func nextPowerOfTwo(v uint) uint {
v--
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
v++
return v
}
// "GenerateProofs" - Generates the merkle Proof object from the leaf node data and the index
func GenerateProofs(p []Proof, index int) (mProof MerkleProof, leaf Proof) {
data, proofs := structureProofs(p) // proofs are already sorted
// make a copy of the data because the merkle proof function will manipulate the slice
dataCopy := make([]HashRange, len(data))
// Copy from the original map to the target map
copy(dataCopy, data)
// generate Proof for leaf
mProof = merkleProof(data, index, &MerkleProof{})
// reset leaf index
mProof.TargetIndex = index
// get the leaf
leaf = proofs[index]
// get the targetHashRange
mProof.Target = dataCopy[index]
// return merkleProofs object
return
}
// "merkleProof" - recursive Proof function that generates the Proof object one level at a time
func merkleProof(data []HashRange, index int, p *MerkleProof) MerkleProof {
if index%2 == 1 { // odd index so sibling to the left
p.HashRanges = append(p.HashRanges, data[index-1])
} else { // even index so sibling to the right
p.HashRanges = append(p.HashRanges, data[index+1])
}
data, atRoot := levelUp(data)
if !atRoot {
// next level index = previous index / 2
merkleProof(data, index/2, p)
}
return *p
}
// "newParentHash" - Compute the merkleHash of the parent by hashing the hashes, sum and parent
func parentHash(hash1, hash2 []byte, r Range) []byte {
return merkleHash(append(append(hash1, hash2...), r.Bytes()...))
}
// "merkleHash" - the merkleHash function used in the merkle tree
func | (data []byte) []byte {
hash := blake2b.Sum256(data)
return hash[:]
}
// "GenerateRoot" - generates the merkle root from leaf node data
func GenerateRoot(data []Proof) (r HashRange, sortedData []Proof) {
// structure the leafs
adjacentHashRanges, sortedProofs := sortAndStructure(data)
// call the root function and return
return root(adjacentHashRanges), sortedProofs
}
// "root" - Generates the root (highest level) from the merkleHash range data recursively
// CONTRACT: dataLength must be > 1 or this breaks
func root(data []HashRange) HashRange {
data, atRoot := levelUp(data)
if !atRoot {
// if not at root continue to level up
root(data)
}
// if at root return
return data[0]
}
// "levelUp" - takes the previous level data and converts it to the next level data
func levelUp(data []HashRange) (nextLevelData []HashRange, atRoot bool) {
for i, d := range data {
// if odd element, skip
if i%2 == 1 {
continue
}
// calculate the parent range, the right child upper is new upper
data[i/2].Range.Upper = data[i+1].Range.Upper
// the left child lower is new lower
data[i/2].Range.Lower = data[i].Range.Lower
// calculate the parent merkleHash
data[i/2].Hash = parentHash(d.Hash, data[i+1].Hash, data[i/2].Range)
}
// check to see if at root
dataLen := len(data) / 2
if dataLen == 1 {
return data[:dataLen], true
}
return data[:dataLen], false
}
func sortAndStructure(proofs []Proof) (d []HashRange, sortedProofs []Proof) {
// get the # of proofs
numberOfProofs := len(proofs)
// initialize the hashRange
hashRanges := make([]HashRange, numberOfProofs)
// sort the slice based on the numerical value of the upper value (just the decimal representation of the merkleHash)
if hashRanges[0].Range.Upper == 0 {
for i := range hashRanges {
// save the merkleHash and sum of the Proof in the new tree slice
hashRanges[i].Hash = merkleHash(proofs[i].Bytes())
// get the initial sum (just the dec val of the merkleHash)
hashRanges[i].Range.Upper = sumFromHash(hashRanges[i].Hash)
}
}
sortedRangesAndProofs := proofAndRanges{hashRanges, proofs}
sort.Sort(SortByProof(sortedRangesAndProofs))
hashRanges, proofs = sortedRangesAndProofs.hr, sortedRangesAndProofs.p
// keep track of previous upper (next values lower)
lower := uint64(0)
// set the lower values of each
for i := range proofs {
// the range is the previous
hashRanges[i].Range.Lower = lower
// update the lower
lower = hashRanges[i].Range.Upper
}
// calculate the proper length of the merkle tree
properLength := nextPowerOfTwo(uint(numberOfProofs))
// generate padding to make it a proper merkle tree
padding := make([]HashRange, int(properLength)-numberOfProofs)
// add it to the merkleHash ranges
hashRanges = append(hashRanges, padding...)
// add padding to the end of the hashRange
for i := numberOfProofs; i < int(properLength); i++ {
hashRanges[i] = HashRange{
Hash: merkleHash([]byte(strconv.Itoa(i))),
Range: Range{Lower: lower, Upper: lower + 1},
}
lower = hashRanges[i].Range.Upper
}
return hashRanges, proofs
}
// "structureProofs" - structure hash ranges when proofs are already sorted
func structureProofs(proofs []Proof) (d []HashRange, sortedProofs []Proof) {
// get the # of proofs
numberOfProofs := len(proofs)
// initialize the hashRange
hashRanges := make([]HashRange, numberOfProofs)
// keep track of previous upper (next values lower)
lower := uint64(0)
// compute each range's upper value (just the decimal representation of the merkleHash); proofs arrive already sorted
if hashRanges[0].Range.Upper == 0 {
for i := range hashRanges {
// save the merkleHash and sum of the Proof in the new tree slice
hashRanges[i].Hash = merkleHash(proofs[i].Bytes())
// get the initial sum (just the dec val of the merkleHash)
hashRanges[i].Range.Upper = sumFromHash(hashRanges[i].Hash)
// the range is the previous
hashRanges[i].Range.Lower = lower
// update the lower
lower = hashRanges[i].Range.Upper
}
}
properLength := nextPowerOfTwo(uint(numberOfProofs))
// generate padding to make it a proper merkle tree
padding := make([]HashRange, int(properLength)-numberOfProofs)
// add it to the merkleHash ranges
hashRanges = append(hashRanges, padding...)
// add padding to the end of the hashRange
for i := numberOfProofs; i < int(properLength); i++ {
hashRanges[i] = HashRange{
Hash: merkleHash([]byte(strconv.Itoa(i))),
Range: Range{Lower: lower, Upper: lower + 1},
}
lower = hashRanges[i].Range.Upper
}
return hashRanges, proofs
}
| merkleHash |
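
nextPowerOfTwo above uses the classic bit-smearing trick: after the decrement, each OR-shift copies the leading one bit downward until every bit below it is set, and the final increment lands on the next power of two. A standalone Go check of the same function (the main is illustrative; note the shift cascade covers 32 bits, which is fine for relay counts):

package main

import "fmt"

// nextPowerOfTwo rounds v up to the nearest power of two,
// using the same bit-smearing trick as merkle.go above.
func nextPowerOfTwo(v uint) uint {
	v--
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	v++
	return v
}

func main() {
	for _, v := range []uint{1, 3, 5, 8, 1000} {
		fmt.Printf("nextPowerOfTwo(%d) = %d\n", v, nextPowerOfTwo(v))
	}
	// Output:
	// nextPowerOfTwo(1) = 1
	// nextPowerOfTwo(3) = 4
	// nextPowerOfTwo(5) = 8
	// nextPowerOfTwo(8) = 8
	// nextPowerOfTwo(1000) = 1024
}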
converter.go | package mp_package
import (
"database/sql"
"encoding/json"
"github.com/kyma-incubator/compass/components/director/pkg/str"
"github.com/kyma-incubator/compass/components/director/internal/model"
"github.com/kyma-incubator/compass/components/director/internal/repo"
"github.com/kyma-incubator/compass/components/director/pkg/graphql"
"github.com/pkg/errors"
)
//go:generate mockery -name=AuthConverter -output=automock -outpkg=automock -case=underscore
type AuthConverter interface {
ToGraphQL(in *model.Auth) *graphql.Auth
InputFromGraphQL(in *graphql.AuthInput) *model.AuthInput
}
type converter struct {
auth AuthConverter
}
func | (auth AuthConverter) *converter {
return &converter{
auth: auth,
}
}
func (c *converter) ToEntity(in *model.Package) (*Entity, error) {
if in == nil {
return nil, nil
}
defaultInstanceAuth, err := c.marshalDefaultInstanceAuth(in.DefaultInstanceAuth)
if err != nil {
return nil, err
}
output := &Entity{
ID: in.ID,
TenantID: in.TenantID,
ApplicationID: in.ApplicationID,
Name: in.Name,
Description: repo.NewNullableString(in.Description),
DefaultInstanceAuth: repo.NewNullableString(defaultInstanceAuth),
InstanceAuthRequestJSONSchema: repo.NewNullableString(in.InstanceAuthRequestInputSchema),
}
return output, nil
}
func (c *converter) FromEntity(entity *Entity) (*model.Package, error) {
if entity == nil {
return nil, errors.New("the Package entity is nil")
}
defaultInstanceAuth, err := c.unmarshalDefaultInstanceAuth(entity.DefaultInstanceAuth)
if err != nil {
return nil, err
}
output := &model.Package{
ID: entity.ID,
TenantID: entity.TenantID,
ApplicationID: entity.ApplicationID,
Name: entity.Name,
Description: &entity.Description.String,
DefaultInstanceAuth: defaultInstanceAuth,
InstanceAuthRequestInputSchema: repo.StringPtrFromNullableString(entity.InstanceAuthRequestJSONSchema),
}
return output, nil
}
func (c *converter) ToGraphQL(in *model.Package) (*graphql.Package, error) {
if in == nil {
return nil, errors.New("the model Package is nil")
}
return &graphql.Package{
ID: in.ID,
Name: in.Name,
Description: in.Description,
InstanceAuthRequestInputSchema: c.strPtrToJSONSchemaPtr(in.InstanceAuthRequestInputSchema),
DefaultInstanceAuth: c.auth.ToGraphQL(in.DefaultInstanceAuth),
}, nil
}
func (c *converter) MultipleToGraphQL(in []*model.Package) ([]*graphql.Package, error) {
var packages []*graphql.Package
for _, r := range in {
if r == nil {
continue
}
pkg, err := c.ToGraphQL(r)
if err != nil {
return nil, errors.Wrap(err, "while converting Package to GraphQL")
}
packages = append(packages, pkg)
}
return packages, nil
}
func (c *converter) CreateInputFromGraphQL(in graphql.PackageCreateInput) (*model.PackageCreateInput, error) {
return &model.PackageCreateInput{
Name: in.Name,
Description: in.Description,
InstanceAuthRequestInputSchema: c.jsonSchemaPtrToStrPtr(in.InstanceAuthRequestInputSchema),
DefaultInstanceAuth: c.auth.InputFromGraphQL(in.DefaultInstanceAuth),
}, nil
}
func (c *converter) UpdateInputFromGraphQL(in graphql.PackageUpdateInput) (*model.PackageUpdateInput, error) {
return &model.PackageUpdateInput{
Name: in.Name,
Description: in.Description,
InstanceAuthRequestInputSchema: c.jsonSchemaPtrToStrPtr(in.InstanceAuthRequestInputSchema),
DefaultInstanceAuth: c.auth.InputFromGraphQL(in.DefaultInstanceAuth),
}, nil
}
func (c *converter) marshalDefaultInstanceAuth(defaultInstanceAuth *model.Auth) (*string, error) {
if defaultInstanceAuth == nil {
return nil, nil
}
output, err := json.Marshal(defaultInstanceAuth)
if err != nil {
return nil, errors.Wrap(err, "while marshaling default auth")
}
return str.Ptr(string(output)), nil
}
func (c *converter) unmarshalDefaultInstanceAuth(defaultInstanceAuthSql sql.NullString) (*model.Auth, error) {
var defaultInstanceAuth *model.Auth
if defaultInstanceAuthSql.Valid && defaultInstanceAuthSql.String != "" {
defaultInstanceAuth = &model.Auth{}
err := json.Unmarshal([]byte(defaultInstanceAuthSql.String), defaultInstanceAuth)
if err != nil {
return nil, errors.Wrap(err, "while unmarshalling default instance auth")
}
}
return defaultInstanceAuth, nil
}
func (c *converter) strPtrToJSONSchemaPtr(in *string) *graphql.JSONSchema {
if in == nil {
return nil
}
out := graphql.JSONSchema(*in)
return &out
}
func (c *converter) jsonSchemaPtrToStrPtr(in *graphql.JSONSchema) *string {
if in == nil {
return nil
}
out := string(*in)
return &out
}
| NewConverter |
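
marshalDefaultInstanceAuth/unmarshalDefaultInstanceAuth above implement the usual pattern for persisting an optional JSON blob in a nullable text column. A reduced Go sketch of the unmarshal half in isolation (the one-field Auth and the fromColumn helper are simplifications invented here):

package main

import (
	"database/sql"
	"encoding/json"
	"fmt"
)

type Auth struct {
	Token string `json:"token"`
}

// fromColumn decodes a nullable text column into *Auth;
// a NULL or empty column yields nil rather than an error.
func fromColumn(col sql.NullString) (*Auth, error) {
	if !col.Valid || col.String == "" {
		return nil, nil
	}
	a := &Auth{}
	if err := json.Unmarshal([]byte(col.String), a); err != nil {
		return nil, err
	}
	return a, nil
}

func main() {
	a, err := fromColumn(sql.NullString{String: `{"token":"abc"}`, Valid: true})
	fmt.Println(a, err) // &{abc} <nil>
	a, err = fromColumn(sql.NullString{}) // NULL column
	fmt.Println(a, err) // <nil> <nil>
}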
material_homogenization.py | #!/usr/bin/env python
# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.
from __future__ import print_function
from __future__ import absolute_import
import sys
sys.path.append('.')
import matplotlib as mlp
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
from sfepy.base.base import Struct, output
from sfepy.base.log import Log
from sfepy import data_dir
class MaterialSimulator(object):
@staticmethod
def create_app(filename, is_homog=False, **kwargs):
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.applications import PDESolverApp
required, other = get_standard_keywords()
if is_homog:
required.remove('equations')
conf = ProblemConf.from_file(filename, required, other,
define_args=kwargs)
options = Struct(output_filename_trunk=None,
save_ebc=False,
save_ebc_nodes=False,
save_regions=False,
save_regions_as_groups=False,
save_field_meshes=False,
solve_not=False,
)
output.set_output(filename='sfepy_log.txt', quiet=True)
if is_homog:
app = HomogenizationApp(conf, options, 'material_opt_micro:')
else:
app = PDESolverApp(conf, options, 'material_opt_macro:')
app.conf.opt_data = {}
opts = conf.options
if hasattr(opts, 'parametric_hook'): # Parametric study.
parametric_hook = conf.get_function(opts.parametric_hook)
app.parametrize(parametric_hook)
return app
def __init__(self, macro_fn, micro_fn, phis, plot_meshes_bool=False):
|
@staticmethod
def rotate_mat(D, angle):
s = np.sin(angle)
c = np.cos(angle)
s2 = s**2
c2 = c**2
sc = s * c
T = np.array([[c2, 0, s2, 0, 2*sc,0],
[0, 1, 0, 0, 0, 0],
[s2, 0, c2, 0, -2*sc, 0],
[0, 0, 0, c, 0, -s],
[-sc, 0, sc, 0, c2 - s2, 0],
[0, 0, 0, s, 0, c]])
return np.dot(np.dot(T, D), T.T)
def plot_meshes(self):
# plot mesh for micro problem
pb = self.micro_app.problem
coors = pb.domain.mesh.coors
#print(set(coors[:,2]))
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
graph_slice = np.zeros((graph.shape[0], 4))
for j in range(graph.shape[0]):
graph_slice[j,:] = graph[j,coors[graph[j,:],2] == 0]
cells_matrix = pb.domain.regions['Ym'].get_cells()
cells_fibers = pb.domain.regions['Yf'].get_cells()
fig = plt.figure(figsize = (12, 5))
ax = fig.add_subplot(121)
pc = PolyCollection(verts=coors[graph[cells_matrix,0:4],:2], facecolors='white',
edgecolors='black')
ax.add_collection(pc)
pc = PolyCollection(verts=coors[graph[cells_fibers,0:4],:2], facecolors='gray',
edgecolors='black')
ax.add_collection(pc)
ax.axis('equal')
ax.set_title('2D plot of microstructure')
ax = fig.add_subplot(122, projection='3d')
for e in range(graph.shape[0]):
if e in cells_fibers:
color = 'gray'
else:
color = 'white'
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors=color,
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_title('3D plot of microstructure')
plt.show(fig)
# plot mesh for macro problem
pb = self.macro_app.problem
coors = pb.domain.mesh.coors
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
fig2 = plt.figure(figsize=(5,6))
ax = fig2.add_subplot(111, projection='3d')
for e in range(graph.shape[0]):
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors='white',
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_xlim3d(-0.03, 0.03)
ax.set_ylim3d(-0.01, 0.01)
ax.set_zlim3d(-0.01, 0.1)
ax.set_title('3D plot of macro system')
plt.show(fig2)
return None
def mat_eval(self, x):
mic_od = self.micro_app.conf.opt_data
mac_od = self.macro_app.conf.opt_data
mic_od['coefs'] = {}
mic_od['mat_params'] = x_norm2real(x)
self.micro_app()
D = mic_od['D_homog']
comp_k = []
for phi in self.phis:
#print('phi = %d' % phi)
mac_od['D_homog'] = self.rotate_mat(D, np.deg2rad(phi))
self.macro_app()
comp_k.append(mac_od['k'])
# added by Audrey: get a plot of a slice of the mesh
if self.plot_meshes_bool:
self.plot_meshes()
return comp_k
def bounds():
x_L = [120e9, 0.2, 2e9, 0.2]
x_U = [200e9, 0.45, 8e9, 0.45]
return x_L, x_U
def x_norm2real(x):
x_L, x_U = np.array(bounds())
return x * (x_U - x_L) + x_L
def x_real2norm(x):
x_L, x_U = np.array(bounds())
return (x - x_L) / (x_U - x_L)
micro_filename = data_dir + '/examples/homogenization/' + 'homogenization_opt.py'
macro_filename = data_dir + '/examples/homogenization/' + 'linear_elasticity_opt.py'
def one_simulation(x0, plot_meshes_bool=False):
"""
This function is the main callable here: it takes in as input the parameter vector,
here x0=[E_fiber, nu_fiber, E_matrix, nu_matrix], and returns the simulated output
(here slope of the force-elongation curve obtained during a tensile test), to be compared
with the measured data.
"""
x0 = x0.reshape((-1, ))
phis = [0, 30, 60, 90]
#exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])
ms = MaterialSimulator(macro_filename, micro_filename,
phis,
plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
return qoi
def one_simulation_2params(x0, plot_meshes_bool=False):
x0 = x0.reshape((-1, ))
x0 = np.array([x0[0], 0.45, x0[1], 0.])
phis = [0, 30, 60, 90]
#exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])
ms = MaterialSimulator(macro_filename, micro_filename,
phis, plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
return qoi
def one_simulation_2params_rvs(x0, plot_meshes_bool=False):
x0 = x0.reshape((-1, ))
x0 = np.array([x0[0], 0.45, x0[1], 0.])
phis = [0, 30, 60, 90]
ms = MaterialSimulator(macro_filename, micro_filename,
phis,
plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
qoi = np.tile(np.array(qoi), 100)
return qoi
| self.macro_app = self.create_app(macro_fn, is_homog=False, is_opt=True)
self.micro_app = self.create_app(micro_fn, is_homog=True, is_opt=True)
self.phis = phis
self.plot_meshes_bool = plot_meshes_bool |
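
x_norm2real and x_real2norm above are plain min-max (de)normalization: a vector in [0,1]^n is mapped onto the physical bounds via x*(x_U - x_L) + x_L and back. The same mapping as a small Go sketch, with the bounds copied from bounds() above:

package main

import "fmt"

var (
	xL = []float64{120e9, 0.2, 2e9, 0.2}
	xU = []float64{200e9, 0.45, 8e9, 0.45}
)

// norm2real maps x in [0,1]^n onto the physical bounds: x*(xU-xL)+xL.
func norm2real(x []float64) []float64 {
	out := make([]float64, len(x))
	for i := range x {
		out[i] = x[i]*(xU[i]-xL[i]) + xL[i]
	}
	return out
}

func main() {
	// 0.5 lands mid-way between each pair of bounds.
	fmt.Println(norm2real([]float64{0.5, 0.5, 0.5, 0.5}))
	// [1.6e+11 0.325 5e+09 0.325]
}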
api_op_ListStorageLensConfigurations.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package s3control
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3controlcust "github.com/aws/aws-sdk-go-v2/service/s3control/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3control/types"
smithy "github.com/awslabs/smithy-go"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
"strings"
)
// Gets a list of Amazon S3 Storage Lens configurations. For more information about
// S3 Storage Lens, see Working with Amazon S3 Storage Lens
// (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html)
// in the Amazon Simple Storage Service Developer Guide. To use this action, you
// must have permission to perform the s3:ListStorageLensConfigurations action. For
// more information, see Setting permissions to use Amazon S3 Storage Lens
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens.html#storage_lens_IAM)
// in the Amazon Simple Storage Service Developer Guide.
func (c *Client) ListStorageLensConfigurations(ctx context.Context, params *ListStorageLensConfigurationsInput, optFns ...func(*Options)) (*ListStorageLensConfigurationsOutput, error) {
if params == nil {
params = &ListStorageLensConfigurationsInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListStorageLensConfigurations", params, optFns, addOperationListStorageLensConfigurationsMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListStorageLensConfigurationsOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListStorageLensConfigurationsInput struct {
// The account ID of the requester.
//
// This member is required.
AccountId *string
// A pagination token to request the next page of results.
NextToken *string
}
type ListStorageLensConfigurationsOutput struct {
// If the request produced more than the maximum number of S3 Storage Lens
// configuration results, you can pass this value into a subsequent request to
// retrieve the next page of results.
NextToken *string
// A list of S3 Storage Lens configurations.
StorageLensConfigurationList []types.ListStorageLensConfigurationEntry
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationListStorageLensConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpListStorageLensConfigurations{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpListStorageLensConfigurations{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addEndpointPrefix_opListStorageLensConfigurationsMiddleware(stack); err != nil {
return err
}
if err = addOpListStorageLensConfigurationsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListStorageLensConfigurations(options.Region), middleware.Before); err != nil {
return err
}
if err = addMetadataRetrieverMiddleware(stack); err != nil {
return err
}
if err = addListStorageLensConfigurationsUpdateEndpoint(stack, options); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
type endpointPrefix_opListStorageLensConfigurationsMiddleware struct {
}
func (*endpointPrefix_opListStorageLensConfigurationsMiddleware) ID() string {
return "EndpointHostPrefix"
}
func (m *endpointPrefix_opListStorageLensConfigurationsMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) {
return next.HandleSerialize(ctx, in)
} |
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
input, ok := in.Parameters.(*ListStorageLensConfigurationsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input type %T", in.Parameters)
}
var prefix strings.Builder
if input.AccountId == nil {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("AccountId forms part of the endpoint host and so may not be nil")}
} else if !smithyhttp.ValidHostLabel(*input.AccountId) {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("AccountId forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.AccountId)}
} else {
prefix.WriteString(*input.AccountId)
}
prefix.WriteString(".")
req.URL.Host = prefix.String() + req.URL.Host
return next.HandleSerialize(ctx, in)
}
func addEndpointPrefix_opListStorageLensConfigurationsMiddleware(stack *middleware.Stack) error {
return stack.Serialize.Insert(&endpointPrefix_opListStorageLensConfigurationsMiddleware{}, `OperationSerializer`, middleware.After)
}
func newServiceMetadataMiddleware_opListStorageLensConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "s3",
OperationName: "ListStorageLensConfigurations",
}
}
func copyListStorageLensConfigurationsInputForUpdateEndpoint(params interface{}) (interface{}, error) {
input, ok := params.(*ListStorageLensConfigurationsInput)
if !ok {
return nil, fmt.Errorf("expect *ListStorageLensConfigurationsInput type, got %T", params)
}
cpy := *input
return &cpy, nil
}
func backFillListStorageLensConfigurationsAccountID(input interface{}, v string) error {
in := input.(*ListStorageLensConfigurationsInput)
if in.AccountId != nil {
if !strings.EqualFold(*in.AccountId, v) {
return fmt.Errorf("error backfilling account id")
}
return nil
}
in.AccountId = &v
return nil
}
func addListStorageLensConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
return s3controlcust.UpdateEndpoint(stack, s3controlcust.UpdateEndpointOptions{
Accessor: s3controlcust.UpdateEndpointParameterAccessor{GetARNInput: nopGetARNAccessor,
BackfillAccountID: nopBackfillAccountIDAccessor,
GetOutpostIDInput: nopGetOutpostIDFromInput,
UpdateARNField: nopSetARNAccessor,
CopyInput: copyListStorageLensConfigurationsInputForUpdateEndpoint,
},
EndpointResolver: options.EndpointResolver,
EndpointResolverOptions: options.EndpointOptions,
UseDualstack: options.UseDualstack,
UseARNRegion: options.UseARNRegion,
})
} | |
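
The serialize middleware above rewrites the request host by prepending "{AccountId}." after checking that the account ID is a valid DNS label. A distilled Go sketch of just that transformation (validHostLabel is a simplified stand-in for the smithyhttp helper, and the host value is an example):

package main

import (
	"fmt"
	"strings"
)

// validHostLabel is a simplified stand-in for smithyhttp.ValidHostLabel:
// 1-63 characters drawn from [a-zA-Z0-9-].
func validHostLabel(s string) bool {
	if len(s) == 0 || len(s) > 63 {
		return false
	}
	for _, r := range s {
		ok := r == '-' ||
			(r >= '0' && r <= '9') ||
			(r >= 'a' && r <= 'z') ||
			(r >= 'A' && r <= 'Z')
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	accountID, host := "111122223333", "s3-control.us-west-2.amazonaws.com"
	if !validHostLabel(accountID) {
		panic("account ID must match [a-zA-Z0-9-]{1,63}")
	}
	var prefix strings.Builder
	prefix.WriteString(accountID)
	prefix.WriteString(".")
	fmt.Println(prefix.String() + host)
	// 111122223333.s3-control.us-west-2.amazonaws.com
}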
remove_test.go | package main
import (
"fmt"
"strings"
"testing"
)
func teardownTest(t *testing.T) {
if err := registryHelper.RefillRegistry("busybox:glibc"); err != nil {
t.Fatalf("adding image after remove failed: +%v", err)
}
}
func | (t *testing.T) {
defer teardownTest(t)
// Make sure we have busybox in list.
out, err := run("ls", domain)
if err != nil {
t.Fatalf("output: %s, error: %v", out, err)
}
expected := `REPO TAGS
alpine 3.5, latest
busybox glibc, latest, musl`
if !strings.HasSuffix(strings.TrimSpace(out), expected) {
t.Fatalf("expected to contain: %s\ngot: %s", expected, out)
}
// Remove busybox image.
if out, err := run("rm", fmt.Sprintf("%s/busybox:glibc", domain)); err != nil {
t.Fatalf("output: %s, error: %v", out, err)
}
// Make sure we have removed busybox:glibc.
out, err = run("ls", domain)
if err != nil {
t.Fatalf("output: %s, error: %v", out, err)
}
expected = `REPO TAGS
alpine 3.5, latest
busybox latest, musl`
if !strings.HasSuffix(strings.TrimSpace(out), expected) {
t.Fatalf("expected to contain: %s\ngot: %s", expected, out)
}
}
| TestRemove |
unused_import_warning_issue_45268.rs | // check-pass
#![warn(unused_imports)] // Warning explanation here, it's OK
mod test {
pub trait A {
fn a();
}
impl A for () { |
pub trait B {
fn b(self);
}
impl B for () {
fn b(self) { }
}
pub trait Unused {
}
}
use test::Unused; // This is really unused, so warning is OK
use test::A; // This is used by the test2::func() through import of super::*
use test::B; // This is used by the test2::func() through import of super::*
mod test2 {
use super::*;
pub fn func() {
let _ = <()>::a();
let _ = ().b();
test3::inner_func();
}
mod test3 {
use super::*;
pub fn inner_func() {
let _ = <()>::a();
let _ = ().b();
}
}
}
fn main() {
test2::func();
} | fn a() { }
} |
tokio_helper.rs | use once_cell::sync::Lazy;
use tokio::runtime::Runtime;
pub static TOKIO: Lazy<Runtime> = Lazy::new(|| new_runtime(None, None));
/// Instantiate a new runtime.
pub fn new_runtime(worker_threads: Option<usize>, max_blocking_threads: Option<usize>) -> Runtime {
// we want to use multiple threads
let mut builder = tokio::runtime::Builder::new_multi_thread();
builder
// we use both IO and Time tokio utilities
.enable_all()
// give our threads a descriptive name (they'll be numbered too)
.thread_name("holochain-tokio-thread");
if let Some(worker_threads) = worker_threads {
builder.worker_threads(worker_threads);
};
if let Some(max_blocking_threads) = max_blocking_threads {
builder.max_blocking_threads(max_blocking_threads);
};
builder
// build the runtime
.build()
// panic if we cannot (we cannot run without it)
.expect("can build tokio runtime")
}
fn block_on_given<F>(f: F, runtime: &Runtime) -> F::Output
where
F: futures::future::Future,
{
let _g = runtime.enter();
tokio::task::block_in_place(|| runtime.block_on(async { f.await }))
}
/// Block the current thread on `TOKIO` until the future resolves or the timeout elapses.
pub fn block_on<F>(
f: F,
timeout: std::time::Duration,
) -> Result<F::Output, tokio::time::error::Elapsed>
where
F: futures::future::Future,
{
block_on_given(async { tokio::time::timeout(timeout, f).await }, &TOKIO)
}
/// Block the current thread on `TOKIO` until the future resolves.
pub fn block_forever_on<F>(f: F) -> F::Output
where
F: futures::future::Future,
{
block_on_given(f, &TOKIO)
}
#[cfg(test)]
mod test {
use super::*;
#[tokio::test(flavor = "multi_thread")]
async fn block_forever_on_works() {
block_forever_on(async { println!("stdio can block") });
assert_eq!(1, super::block_forever_on(async { 1 }));
let r = "1";
let test1 = super::block_forever_on(async { r.to_string() });
assert_eq!("1", &test1);
// - wasm style use case -
// we are in a non-tokio context
let test2 = std::thread::spawn(|| {
let r = "2";
super::block_forever_on(async { r.to_string() })
})
.join()
.unwrap();
assert_eq!("2", &test2);
}
#[tokio::test(flavor = "multi_thread")]
async fn block_on_allows_spawning() {
let r = "works";
let test = block_forever_on(tokio::task::spawn(async move { r.to_string() })).unwrap();
assert_eq!("works", &test);
}
// test calling without an existing reactor
#[test]
fn block_on_works() |
}
| {
assert_eq!(
Ok(1),
block_on(async { 1 }, std::time::Duration::from_millis(0))
);
} |
okex.rs | use crate::WSClient;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use super::ws_client_internal::{MiscMessage, WSClientInternal};
use super::{Candlestick, OrderBook, OrderBookSnapshot, Ticker, Trade, BBO};
use log::*;
use serde_json::Value;
pub(super) const EXCHANGE_NAME: &str = "okex";
const WEBSOCKET_URL: &str = "wss://real.okex.com:8443/ws/v3";
const CLIENT_PING_INTERVAL_AND_MSG: (u64, &str) = (30, "ping");
/// The WebSocket client for OKEx.
///
/// OKEx has Spot, Future, Swap and Option markets.
///
/// * WebSocket API doc: <https://www.okex.com/docs/en/>
/// * Trading at:
/// * Spot <https://www.bitmex.com/app/trade/>
/// * Future <https://www.okex.com/derivatives/futures>
/// * Swap <https://www.okex.com/derivatives/swap>
/// * Option <https://www.okex.com/derivatives/options>
pub struct OkexWSClient<'a> {
client: WSClientInternal<'a>,
}
fn channels_to_commands(channels: &[String], subscribe: bool) -> Vec<String> {
let mut all_commands: Vec<String> = channels
.iter()
.filter(|ch| ch.starts_with('{'))
.map(|s| s.to_string())
.collect();
let channels_to_parse: Vec<&String> =
channels.iter().filter(|ch| !ch.starts_with('{')).collect();
if !channels_to_parse.is_empty() {
all_commands.append(&mut vec![format!(
r#"{{"op":"{}","args":{}}}"#,
if subscribe {
"subscribe"
} else {
"unsubscribe"
},
serde_json::to_string(&channels_to_parse).unwrap()
)])
};
all_commands
}
fn on_misc_msg(msg: &str) -> MiscMessage {
if msg == "pong" {
info!("Received {} from {}", msg, EXCHANGE_NAME);
return MiscMessage::Pong;
}
let resp = serde_json::from_str::<HashMap<String, Value>>(&msg);
if resp.is_err() {
error!("{} is not a JSON string, {}", msg, EXCHANGE_NAME);
return MiscMessage::Misc;
}
let obj = resp.unwrap();
if let Some(event) = obj.get("event") {
match event.as_str().unwrap() {
"error" => {
error!("Received {} from {}", msg, EXCHANGE_NAME);
if let Some(error_code) = obj.get("errorCode") {
#[allow(clippy::single_match)]
match error_code.as_i64().unwrap() {
30040 => {
// channel doesn't exist
panic!("Received {} from {}", msg, EXCHANGE_NAME);
}
_ => (),
}
}
}
"subscribe" => info!("Received {} from {}", msg, EXCHANGE_NAME),
"unsubscribe" => info!("Received {} from {}", msg, EXCHANGE_NAME),
_ => warn!("Received {} from {}", msg, EXCHANGE_NAME),
}
MiscMessage::Misc
} else if !obj.contains_key("table") || !obj.contains_key("data") {
error!("Received {} from {}", msg, EXCHANGE_NAME);
MiscMessage::Misc
} else {
MiscMessage::Normal
}
}
fn pair_to_market_type(pair: &str) -> &'static str {
if pair.ends_with("-SWAP") {
"swap"
} else {
let c = pair.matches('-').count();
if c == 1 {
"spot"
} else if c == 2 {
let date = &pair[(pair.len() - 6)..];
debug_assert!(date.parse::<i64>().is_ok());
"futures"
} else {
debug_assert!(pair.ends_with("-C") || pair.ends_with("-P"));
"option"
}
}
}
fn to_raw_channel(channel: &str, pair: &str) -> String |
#[rustfmt::skip]
impl_trait!(Trade, OkexWSClient, subscribe_trade, "trade", to_raw_channel);
#[rustfmt::skip]
impl_trait!(Ticker, OkexWSClient, subscribe_ticker, "ticker", to_raw_channel);
#[rustfmt::skip]
impl_trait!(OrderBook, OkexWSClient, subscribe_orderbook, "depth_l2_tbt", to_raw_channel);
impl_trait!(
OrderBookSnapshot,
OkexWSClient,
subscribe_orderbook_snapshot,
"depth5",
to_raw_channel
);
impl<'a> BBO for OkexWSClient<'a> {
fn subscribe_bbo(&self, _pairs: &[String]) {
panic!("OKEx WebSocket does NOT have BBO channel");
}
}
fn to_candlestick_raw_channel(pair: &str, interval: u32) -> String {
let valid_set: Vec<u32> = vec![
60, 180, 300, 900, 1800, 3600, 7200, 14400, 21600, 43200, 86400, 604800,
];
if !valid_set.contains(&interval) {
let joined = valid_set
.into_iter()
.map(|x| x.to_string())
.collect::<Vec<String>>()
.join(",");
panic!("OKEx has intervals {}", joined);
}
let channel = format!("candle{}s", interval);
to_raw_channel(&channel, pair)
}
impl_candlestick!(OkexWSClient);
define_client!(
OkexWSClient,
EXCHANGE_NAME,
WEBSOCKET_URL,
channels_to_commands,
on_misc_msg,
Some(CLIENT_PING_INTERVAL_AND_MSG),
None
);
#[cfg(test)]
mod tests {
#[test]
fn test_one_channel() {
let commands = super::channels_to_commands(&vec!["spot/trade:BTC-USDT".to_string()], true);
assert_eq!(1, commands.len());
assert_eq!(
r#"{"op":"subscribe","args":["spot/trade:BTC-USDT"]}"#,
commands[0]
);
}
#[test]
fn test_two_channel() {
let commands = super::channels_to_commands(
&vec![
"spot/trade:BTC-USDT".to_string(),
"ticker/trade:BTC-USDT".to_string(),
],
true,
);
assert_eq!(1, commands.len());
assert_eq!(
r#"{"op":"subscribe","args":["spot/trade:BTC-USDT","ticker/trade:BTC-USDT"]}"#,
commands[0]
);
}
#[test]
fn test_pair_to_market_type() {
assert_eq!("spot", super::pair_to_market_type("BTC-USDT"));
assert_eq!("futures", super::pair_to_market_type("BTC-USDT-210625"));
assert_eq!("swap", super::pair_to_market_type("BTC-USDT-SWAP"));
assert_eq!(
"option",
super::pair_to_market_type("BTC-USD-210625-72000-C")
);
}
}
| {
format!("{}/{}:{}", pair_to_market_type(pair), channel, pair)
} |
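
channels_to_commands above wraps raw channel names in a single op/args envelope, as the tests at the bottom of the file confirm. For cross-checking, here is the same envelope built in Go with encoding/json (channel names borrowed from those tests):

package main

import (
	"encoding/json"
	"fmt"
)

type command struct {
	Op   string   `json:"op"`
	Args []string `json:"args"`
}

func main() {
	cmd := command{
		Op:   "subscribe",
		Args: []string{"spot/trade:BTC-USDT", "ticker/trade:BTC-USDT"},
	}
	b, err := json.Marshal(cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"op":"subscribe","args":["spot/trade:BTC-USDT","ticker/trade:BTC-USDT"]}
}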
validators.go | package api
import (
"github.com/MinterTeam/minter-go-node/core/types"
)
type ValidatorResponse struct {
Pubkey string `json:"pub_key"`
VotingPower int64 `json:"voting_power"`
}
type ResponseValidators []ValidatorResponse
func Validators(height uint64, page, perPage int) (*ResponseValidators, error) {
if height == 0 {
height = blockchain.Height()
}
h := int64(height)
tmVals, err := client.Validators(&h, page, perPage)
if err != nil {
return nil, err
}
responseValidators := make(ResponseValidators, len(tmVals.Validators))
for i, val := range tmVals.Validators {
var pk types.Pubkey | VotingPower: val.VotingPower,
}
}
return &responseValidators, nil
} | copy(pk[:], val.PubKey.Bytes()[5:])
responseValidators[i] = ValidatorResponse{
Pubkey: pk.String(), |
enum-null-pointer-opt.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate core;
use core::nonzero::NonZero;
use std::mem::size_of;
use std::rc::Rc;
use std::sync::Arc;
trait Trait {}
fn main() {
// Functions
assert_eq!(size_of::<fn(int)>(), size_of::<Option<fn(int)>>());
assert_eq!(size_of::<extern "C" fn(int)>(), size_of::<Option<extern "C" fn(int)>>());
// Slices - &str / &[T] / &mut [T]
assert_eq!(size_of::<&str>(), size_of::<Option<&str>>());
assert_eq!(size_of::<&[int]>(), size_of::<Option<&[int]>>());
assert_eq!(size_of::<&mut [int]>(), size_of::<Option<&mut [int]>>());
// Traits - Box<Trait> / &Trait / &mut Trait
assert_eq!(size_of::<Box<Trait>>(), size_of::<Option<Box<Trait>>>());
assert_eq!(size_of::<&Trait>(), size_of::<Option<&Trait>>());
assert_eq!(size_of::<&mut Trait>(), size_of::<Option<&mut Trait>>());
// Pointers - Box<T>
assert_eq!(size_of::<Box<int>>(), size_of::<Option<Box<int>>>());
// The optimization can't apply to raw pointers
assert!(size_of::<Option<*const int>>() != size_of::<*const int>());
assert!(Some(0 as *const int).is_some()); // Can't collapse None to null
struct Foo {
_a: Box<int>
}
struct Bar(Box<int>);
// Should apply through structs
assert_eq!(size_of::<Foo>(), size_of::<Option<Foo>>());
assert_eq!(size_of::<Bar>(), size_of::<Option<Bar>>());
// and tuples
assert_eq!(size_of::<(u8, Box<int>)>(), size_of::<Option<(u8, Box<int>)>>());
// and fixed-size arrays
assert_eq!(size_of::<[Box<int>; 1]>(), size_of::<Option<[Box<int>; 1]>>());
// Should apply to NonZero
assert_eq!(size_of::<NonZero<uint>>(), size_of::<Option<NonZero<uint>>>());
assert_eq!(size_of::<NonZero<*mut i8>>(), size_of::<Option<NonZero<*mut i8>>>());
// Should apply to types that use NonZero internally
assert_eq!(size_of::<Vec<int>>(), size_of::<Option<Vec<int>>>());
assert_eq!(size_of::<Arc<int>>(), size_of::<Option<Arc<int>>>());
assert_eq!(size_of::<Rc<int>>(), size_of::<Option<Rc<int>>>());
| } | // Should apply to types that have NonZero transitively
assert_eq!(size_of::<String>(), size_of::<Option<String>>());
|
losses.py | import keras.backend as K
from keras.losses import categorical_crossentropy
def hard_dice_coef(y_true, y_pred, smooth=1e-3):
y_true_f = K.flatten(K.round(y_true[..., 0]))
y_pred_f = K.flatten(K.round(y_pred[..., 0]))
intersection = K.sum(y_true_f * y_pred_f)
return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def hard_dice_coef_ch1(y_true, y_pred, smooth=1e-3):
y_true_f = K.flatten(K.round(y_true[..., 1]))
y_pred_f = K.flatten(K.round(y_pred[..., 1]))
intersection = K.sum(y_true_f * y_pred_f)
return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef(y_true, y_pred, smooth=1e-3):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return K.mean((2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
def dice_coef_loss(y_true, y_pred):
return 1 - dice_coef(y_true, y_pred)
def dice_coef_loss_bce(y_true, y_pred, dice=0.5, bce=0.5):
return binary_crossentropy(y_true, y_pred) * bce + dice_coef_loss(y_true, y_pred) * dice
|
def binary_crossentropy(y, p):
return K.mean(K.binary_crossentropy(y, p))
def double_head_loss(y_true, y_pred):
mask_loss = dice_coef_loss_bce(y_true[..., 0], y_pred[..., 0])
contour_loss = dice_coef_loss_bce(y_true[..., 1], y_pred[..., 1])
return mask_loss + contour_loss
def mask_contour_mask_loss(y_true, y_pred):
mask_loss = dice_coef_loss_bce(y_true[..., 0], y_pred[..., 0])
contour_loss = dice_coef_loss_bce(y_true[..., 1], y_pred[..., 1])
full_mask = dice_coef_loss_bce(y_true[..., 2], y_pred[..., 2])
return mask_loss + 2 * contour_loss + full_mask
def softmax_dice_loss(y_true, y_pred):
return categorical_crossentropy(y_true, y_pred) * 0.6 + dice_coef_loss(y_true[..., 0], y_pred[..., 0]) * 0.2 + dice_coef_loss(y_true[..., 1], y_pred[..., 1]) * 0.2
def make_loss(loss_name):
if loss_name == 'bce_dice':
def loss(y, p):
return dice_coef_loss_bce(y, p, dice=0.5, bce=0.5)
return loss
elif loss_name == 'bce':
def loss(y, p):
return dice_coef_loss_bce(y, p, dice=0, bce=1)
return loss
elif loss_name == 'categorical_dice':
return softmax_dice_loss
elif loss_name == 'double_head_loss':
return double_head_loss
elif loss_name == 'mask_contour_mask_loss':
return mask_contour_mask_loss
else:
raise ValueError("Unknown loss.") |
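
All the dice helpers above compute the smoothed Sorensen-Dice score, (2*|X∩Y| + s) / (|X| + |Y| + s), over flattened masks. A scalar Go sketch of the same formula on hard binary masks, using the 1e-3 smoothing constant from the Keras code:

package main

import "fmt"

// hardDice computes the smoothed Dice coefficient between two
// binary masks: (2*intersection + s) / (sum(a) + sum(b) + s).
func hardDice(a, b []float64, smooth float64) float64 {
	var inter, sumA, sumB float64
	for i := range a {
		inter += a[i] * b[i]
		sumA += a[i]
		sumB += b[i]
	}
	return (2*inter + smooth) / (sumA + sumB + smooth)
}

func main() {
	y := []float64{1, 1, 0, 0}
	p := []float64{1, 0, 0, 0}
	fmt.Printf("%.4f\n", hardDice(y, p, 1e-3)) // prints 0.6668
}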
Battleground.ts | import {Scene} from "./Scene";
import {Game} from "../Game";
import * as BABYLON from 'babylonjs';
import { WaterMaterial } from 'babylonjs-materials';
import {EnvironmentCaveExit} from "./Mountains/CaveExit/EnvironmentCaveExit";
import {Fog} from "../Particles/Fog";
import {Guard} from "../Characters/npc/Guard";
export class Battleground extends Scene {
static TYPE = 99;
initScene(game: Game) {
let self = this;
let scene = new BABYLON.Scene(game.engine);
let ground, guard;
self
.setDefaults(game, scene)
.executeWhenReady(function () {
ground = BABYLON.MeshBuilder.CreateGround("Ground", {width: 256, height: 256}, scene);
new Guard(game, new BABYLON.Vector3(-12, 0, 8), new BABYLON.Vector3(0, 3, 0));
guard = new Guard(game, new BABYLON.Vector3(-15, 0, 10), new BABYLON.Vector3(0, 4, 0));
guard.mesh.name = 'questLog';
let plane = BABYLON.Mesh.CreatePlane("entrace", 16, scene);
plane.position = new BABYLON.Vector3(-25, 2, 25);
plane.layerMask = 2;
self.environment = new EnvironmentCaveExit(game);
}, function() {
let waterMaterial = new WaterMaterial("waterMaterial", scene, new BABYLON.Vector2(512, 512));
waterMaterial.bumpTexture = new BABYLON.Texture("//www.babylonjs.com/assets/waterbump.png", scene);
waterMaterial.windForce = -5;
waterMaterial.waveHeight = 0.05;
waterMaterial.bumpHeight = 0.05; | waterMaterial.waveSpeed = 1.0;
waterMaterial.colorBlendFactor = 0.1;
waterMaterial.waterColor = new BABYLON.Color3(0.1, 1, 1);
let waterMesh = BABYLON.Mesh.CreateGround("waterMesh", 256, 256, 32, scene, false);
waterMesh.position.y = 0.5;
waterMesh.material = waterMaterial;
waterMesh.layerMask = 2;
waterMesh.isPickable = false;
waterMaterial.addToRenderList(ground);
waterMaterial.addToRenderList(self.game.player.mesh);
waterMaterial.addToRenderList(guard.mesh);
});
}
} | waterMaterial.waveLength = 0.1; |
nipype_reconall_with_tracker.py | # Import modules
import os
import sys
from os.path import join as opj
import pandas as pd
import time
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.utility import IdentityInterface
from nipype.pipeline.engine import Workflow, Node
from pypapi import events, papi_high as high
import argparse
# Add paths (singularity should see these)
# FastSurfer and carbon trackers are in the mounted dir as these repos keep getting updated.
# TODO replace this with setup.py once the dependencies become stable
# sys.path.append('../../../experiment-impact-tracker/')
# sys.path.append('../../../codecarbon/')
from experiment_impact_tracker.compute_tracker import ImpactTracker
from codecarbon import EmissionsTracker, OfflineEmissionsTracker
def get_reconall(recon_directive,fs_folder):
# This node represents the actual recon-all command
reconall = Node(ReconAll(directive=recon_directive,
flags='-nuintensitycor -3T',
subjects_dir=fs_folder),
name="reconall")
return reconall
# This function returns for each subject the path to struct.nii.gz
def pathfinder(subject, foldername, filename):
from os.path import join as opj
struct_path = opj(foldername, subject, filename)
return struct_path
def main():
# setup
exp_start_time = time.time()
# argparse
parser = argparse.ArgumentParser(description='Script to run freesurfer reconall with nipype and track compute costs', epilog='$Id: fast_surfer_cnn, v 1.0 2019/09/30$')
# Data
parser.add_argument('--experiment_dir', dest='experiment_dir', help='path to directory to store freesurfer derived data.')
parser.add_argument('--data_dir', help="path to input data", default='/neurohub/ukbb/imaging/')
parser.add_argument('--subject_id', dest='subject_id', help='subject_id')
parser.add_argument('--T1_identifier', help='T1 identifier string relative to the subject directory')
# FreeSurfer
parser.add_argument('--recon_directive', dest='recon_directive', help='recon_directive (autorecon 1, 2, or 3)', default='1') #MTL
# Trackers
parser.add_argument('--tracker_log_dir', dest='tracker_log_dir',
help="log dir for experiment impact tracker",
type=str, default='./tracker_logs/')
parser.add_argument('--geo_loc', dest='geo_loc',
help="(lat,log) coords for experiment impact tracker",
type=str, default='45.4972159,-73.6103642') #MTL Beluga
parser.add_argument('--CC_offline',
help="Run CC in offline mode",
action='store_true')
parser.add_argument('--TZ', dest='TZ',
help="TimeZone",
type=str, default='America/New_York')
parser.add_argument('--iso_code', dest='iso_code',
help="Country ISO code",
type=str, default='USA')
# PAPI
parser.add_argument('--count_FLOPs', dest='count_FLOPs',help="Count FLOPs using PAPI",action='store_true')
args = parser.parse_args()
# Data
experiment_dir = args.experiment_dir
data_dir = args.data_dir
subject_id = args.subject_id
T1_identifier = args.T1_identifier
# FreeSurfer
recon_directive = args.recon_directive
# FLOPs
count_FLOPs = args.count_FLOPs
# Trackers
tracker_log_dir = args.tracker_log_dir
geo_loc = args.geo_loc
CC_offline = args.CC_offline
TZ = args.TZ
iso_code = args.iso_code
print(f'Using offline mode for CC tracker: {CC_offline}')
if CC_offline:
print(f'Using {TZ} timezone and {iso_code} country iso code')
print(f'Starting subject: {subject_id}')
# Set up the trackers
log_dir = '{}/{}/'.format(tracker_log_dir,subject_id)
log_dir_EIT = f'{log_dir}/EIT/'
log_dir_CC = f'{log_dir}/CC/'
for d in [log_dir_EIT,log_dir_CC]:
if not os.path.exists(d):
os.makedirs(d)
# Use specified geo location for the HPC
ly,lx = float(geo_loc.split(',')[0]), float(geo_loc.split(',')[1])
coords = (ly,lx)
print(f'Using geographical coordinates (long,lat): {coords}')
# EIT tracker
tracker_EIT = ImpactTracker(log_dir_EIT,coords)
tracker_EIT.launch_impact_monitor()
# CodeCarbon tracker
os.environ['TZ']= TZ
if CC_offline:
tracker_CC = OfflineEmissionsTracker(output_dir=log_dir_CC, country_iso_code=iso_code)
else:
tracker_CC = EmissionsTracker(output_dir=log_dir_CC)
| flop_csv = tracker_log_dir + 'compute_costs_flop.csv'
flop_df = pd.DataFrame(columns=['task','start_time','duration','DP'])
# Start FS processing for a given subject
subject_list = [subject_id]
fs_folder = opj(experiment_dir, 'freesurfer') # location of freesurfer folder
# Create the output folder - FreeSurfer can only run if this folder exists
os.system('mkdir -p %s' % fs_folder)
# Specify recon workflow stages
if recon_directive == 'all':
recon_directives = ['autorecon1','autorecon2','autorecon3']
else:
recon_directives = [recon_directive]
for r, recon_directive in enumerate(recon_directives):
print('\nStarting stage: {}'.format(recon_directive))
# Create the pipeline that runs the recon-all command
reconflow = Workflow(name="reconflow")
reconflow.base_dir = opj(experiment_dir, 'workingdir_reconflow')
# Some magical stuff happens here (not important for now)
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', subject_list)
# Specify recon-all stage based on recon-directive
reconall = get_reconall(recon_directive, fs_folder)
# This section connects all the nodes of the pipeline to each other
reconflow.connect([(infosource, reconall, [('subject_id', 'subject_id')]),
(infosource, reconall, [(('subject_id', pathfinder,
data_dir, T1_identifier),
'T1_files')]),
])
if count_FLOPs:
# start flop counter
start_time = time.time()
high.start_counters([events.PAPI_DP_OPS,]) #default: PAPI_FP_OPS
# This command runs the recon-all pipeline in parallel (using n_procs cores)
# reconflow.run('MultiProc', plugin_args={'n_procs': 4})
reconflow.run()
if count_FLOPs:
# stop flop counter
DP = high.stop_counters()[0]
end_time = time.time()
duration = end_time - start_time
print('Duration: {}, Flops: {}'.format(duration, DP))
flop_df.loc[r] = [recon_directive,start_time, duration, DP]
## code-carbon tracker
tracker_CC.stop()
if count_FLOPs:
flop_df.to_csv(flop_csv)
if __name__=='__main__':
main() | tracker_CC.start()
if count_FLOPs:
print('Counting flops using PAPI') |
seller_swap.rs | // Copyright 2020 The MWC Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Sell swap happy path states
use super::state::{
JOURNAL_CANCELLED_BYER_LOCK_TOO_MUCH_FUNDS, JOURNAL_CANCELLED_BY_TIMEOUT,
JOURNAL_CANCELLED_BY_USER, JOURNAL_NOT_LOCKED,
};
use crate::swap::fsm::state;
use crate::swap::fsm::state::{Input, State, StateEtaInfo, StateId, StateProcessRespond};
use crate::swap::message::Message;
use crate::swap::types::{Action, Currency, SwapTransactionsConfirmations};
use crate::swap::{swap, Context, ErrorKind, SellApi, Swap, SwapApi};
use crate::NodeClient;
use chrono::{Local, TimeZone};
use failure::_core::marker::PhantomData;
use grin_keychain::Keychain;
use std::sync::Arc;
/// State SellerOfferCreated
pub struct SellerOfferCreated {}
impl SellerOfferCreated {
/// create a new instance
pub fn new() -> Self {
Self {}
}
}
impl State for SellerOfferCreated {
fn get_state_id(&self) -> StateId {
StateId::SellerOfferCreated
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
let dt = Local.timestamp(swap.started.timestamp(), 0);
let time_str = dt.format("%B %e %H:%M:%S").to_string();
Some(StateEtaInfo::new(&format!("Offer Created at {}", time_str)))
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Ok(StateProcessRespond::new(StateId::SellerCancelled))
}
Input::Check => Ok(StateProcessRespond::new(StateId::SellerSendingOffer)),
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerOfferCreated get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
None
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerSendingOffer)
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerSendingOffer
pub struct SellerSendingOffer<'a, K>
where
K: Keychain + 'a,
{
keychain: Arc<K>,
swap_api: Arc<Box<dyn SwapApi<K> + 'a>>,
message: Option<Message>,
phantom: PhantomData<&'a K>,
}
impl<'a, K> SellerSendingOffer<'a, K>
where
K: Keychain + 'a,
{
/// Create new instance
pub fn new(keychain: Arc<K>, swap_api: Arc<Box<dyn SwapApi<K> + 'a>>) -> Self {
Self {
keychain,
swap_api,
phantom: PhantomData,
message: None,
}
}
}
impl<'a, K> State for SellerSendingOffer<'a, K>
where
K: Keychain + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerSendingOffer
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new("Sending Offer to Buyer").end_time(swap.get_time_message_offers()))
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Ok(StateProcessRespond::new(StateId::SellerCancelled))
}
Input::Check => {
if swap.posted_msg1.unwrap_or(0)
< swap::get_cur_time() - super::state::SEND_MESSAGE_RETRY_PERIOD
{
let time_limit = swap.get_time_message_offers();
if swap::get_cur_time() < time_limit {
if self.message.is_none() {
self.message = swap.message1.clone();
}
if self.message.is_none() {
let sec_update = self
.swap_api
.build_offer_message_secondary_update(&*self.keychain, swap);
self.message = Some(SellApi::offer_message(swap, sec_update)?);
}
Ok(StateProcessRespond::new(StateId::SellerSendingOffer)
.action(Action::SellerSendOfferMessage(
self.message.clone().unwrap(),
))
.time_limit(time_limit))
} else {
swap.add_journal_message(JOURNAL_CANCELLED_BY_TIMEOUT.to_string());
Ok(StateProcessRespond::new(StateId::SellerCancelled))
}
} else {
					// This is probably a rerun after some reset; we should tolerate it
Ok(StateProcessRespond::new(
StateId::SellerWaitingForAcceptanceMessage,
))
}
}
Input::Execute => {
debug_assert!(self.message.is_some()); // Check expected to be called first
if swap.message1.is_none() {
swap.message1 = Some(self.message.clone().unwrap());
}
swap.posted_msg1 = Some(swap::get_cur_time());
swap.add_journal_message("Offer message was sent".to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForAcceptanceMessage,
))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerSendingOffer get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerOfferCreated)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForAcceptanceMessage)
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerWaitingForAcceptanceMessage
pub struct SellerWaitingForAcceptanceMessage<K: Keychain> {
keychain: Arc<K>,
}
impl<K: Keychain> SellerWaitingForAcceptanceMessage<K> {
/// Create new instance
pub fn new(keychain: Arc<K>) -> Self {
Self { keychain }
}
}
impl<K: Keychain> State for SellerWaitingForAcceptanceMessage<K> {
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForAcceptanceMessage
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(
StateEtaInfo::new("Waiting For Buyer to accept the offer")
.end_time(swap.get_time_message_offers()),
)
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Ok(StateProcessRespond::new(StateId::SellerCancelled))
}
Input::Check => {
if swap.redeem_public.is_none() {
let time_limit = swap.get_time_message_offers();
if swap::get_cur_time() < time_limit {
// Check if we need to retry to send the message
if swap.posted_msg1.unwrap_or(0)
< swap::get_cur_time() - super::state::SEND_MESSAGE_RETRY_PERIOD
{
return Ok(StateProcessRespond::new(StateId::SellerSendingOffer));
}
Ok(
StateProcessRespond::new(StateId::SellerWaitingForAcceptanceMessage)
.action(Action::SellerWaitingForOfferMessage)
.time_limit(time_limit),
)
} else {
// cancelling
swap.add_journal_message(JOURNAL_CANCELLED_BY_TIMEOUT.to_string());
Ok(StateProcessRespond::new(StateId::SellerCancelled))
}
} else {
					// This is probably a rerun after some reset; we should tolerate it
Ok(StateProcessRespond::new(StateId::SellerWaitingForBuyerLock))
}
}
Input::IncomeMessage(message) => {
// Double processing should be fine
if swap.redeem_public.is_none() {
let (_, accept_offer, secondary_update) = message.unwrap_accept_offer()?;
let btc_update = secondary_update.unwrap_btc()?.unwrap_accept_offer()?;
SellApi::accepted_offer(&*self.keychain, swap, context, accept_offer)?;
let btc_data = swap.secondary_data.unwrap_btc_mut()?;
btc_data.accepted_offer(btc_update)?;
swap.add_journal_message("Processed Offer Accept message".to_string());
					swap.ack_msg1(); // The ack may be a duplicate; since we got a response, the message was delivered
}
debug_assert!(swap.redeem_public.is_some());
Ok(StateProcessRespond::new(StateId::SellerWaitingForBuyerLock))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForAcceptanceMessage get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerSendingOffer)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForBuyerLock)
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// SellerWaitingForBuyerLock state
pub struct SellerWaitingForBuyerLock<'a, K>
where
K: Keychain + 'a,
{
swap_api: Arc<Box<dyn SwapApi<K> + 'a>>,
phantom: PhantomData<&'a K>,
}
impl<'a, K> SellerWaitingForBuyerLock<'a, K>
where
K: Keychain + 'a,
{
/// Create new instance
pub fn new(swap_api: Arc<Box<dyn SwapApi<K> + 'a>>) -> Self {
Self {
swap_api,
phantom: PhantomData,
}
}
}
impl<'a, K> State for SellerWaitingForBuyerLock<'a, K>
where
K: Keychain + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForBuyerLock
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
if swap.seller_lock_first {
None
} else {
let name = match self.swap_api.get_secondary_lock_address(swap) {
Ok(address) => {
debug_assert!(address.len() > 0);
if address.len() > 1 {
format!(
"Waiting For Buyer to send {} coins to any of those addresses: {}",
swap.secondary_currency,
address.join(", ")
)
} else {
format!(
"Waiting For Buyer to send {} coins to {}",
swap.secondary_currency, address[0]
)
}
}
Err(_) => format!("Post {} to lock account", swap.secondary_currency),
};
Some(StateEtaInfo::new(&name).end_time(swap.get_time_start_lock()))
}
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Ok(StateProcessRespond::new(StateId::SellerCancelled))
}
Input::Check => {
// Check the deadline for locking
let time_limit = swap.get_time_start_lock();
if swap::get_cur_time() > time_limit {
// cancelling
swap.add_journal_message(JOURNAL_CANCELLED_BY_TIMEOUT.to_string());
return Ok(StateProcessRespond::new(StateId::SellerCancelled));
}
if swap.wait_for_backup1 {
return Ok(StateProcessRespond::new(StateId::SellerWaitingForBuyerLock)
.action(Action::WaitingForTradeBackup)
.time_limit(time_limit));
}
if swap.seller_lock_first {
// Skipping this step. Buyer waiting for us to start locking
Ok(StateProcessRespond::new(StateId::SellerPostingLockMwcSlate))
} else {
let mut conf = tx_conf.secondary_lock_conf.unwrap_or(0);
if tx_conf.secondary_lock_amount < swap.secondary_amount {
conf = 0;
}
let lock_addresses = self.swap_api.get_secondary_lock_address(swap)?;
debug_assert!(lock_addresses.len() > 0);
debug_assert!(lock_addresses.len() <= 2);
if tx_conf.secondary_lock_amount > swap.secondary_amount {
					// Posted too much; the buyer will probably cancel the deal, so we are not going to lock the MWC
swap.add_journal_message(format!(
"Cancelled because buyer sent funds greater than the agreed upon {} amount to the lock address {}",
swap.secondary_currency,
lock_addresses.join(" or "),
));
return Ok(StateProcessRespond::new(StateId::SellerCancelled));
}
					// Transactions still in the memory pool count as well
if tx_conf.secondary_lock_amount == swap.secondary_amount {
swap.other_lock_first_done = true;
}
if conf < 1 {
Ok(StateProcessRespond::new(StateId::SellerWaitingForBuyerLock)
.action(Action::WaitForSecondaryConfirmations {
name: format!("Buyer to lock {}", swap.secondary_currency),
expected_to_be_posted: swap
.secondary_amount
.saturating_sub(tx_conf.secondary_lock_amount),
currency: swap.secondary_currency,
address: self
.swap_api
.get_secondary_lock_address(swap)
.unwrap_or(vec!["XXXXX".to_string()]),
required: 1,
actual: conf,
})
.time_limit(time_limit))
} else {
swap.add_journal_message(format!(
"Buyer sent the funds to lock address {}",
lock_addresses.join(" or ")
));
swap.other_lock_first_done = true;
Ok(StateProcessRespond::new(StateId::SellerPostingLockMwcSlate))
}
}
}
Input::IncomeMessage(message) => {
// Message must be ignored. Late delivery sometimes is possible
// Still checking the type of the message
let _ = message.unwrap_accept_offer()?;
Ok(StateProcessRespond::new(StateId::SellerWaitingForBuyerLock))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForBuyerLock get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForAcceptanceMessage)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerPostingLockMwcSlate)
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerPostingLockMwcSlate
pub struct SellerPostingLockMwcSlate<'a, C>
where
C: NodeClient + 'a,
{
node_client: Arc<C>,
phantom: PhantomData<&'a C>,
}
impl<'a, C> SellerPostingLockMwcSlate<'a, C>
where
C: NodeClient + 'a,
{
/// Create an instance
pub fn new(node_client: Arc<C>) -> Self {
Self {
node_client,
phantom: PhantomData,
}
}
fn generate_cancel_respond(swap: &Swap) -> Result<StateProcessRespond, ErrorKind> {
if swap.posted_lock.is_none() {
Ok(StateProcessRespond::new(StateId::SellerCancelled))
} else {
			// Better to wait for some time: since the slate was posted, it can be published later by anybody.
			// Let's be ready to refund; we'd better stay in this state.
Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
))
}
}
}
impl<'a, C> State for SellerPostingLockMwcSlate<'a, C>
where
C: NodeClient + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerPostingLockMwcSlate
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new("Locking MWC funds").end_time(swap.get_time_start_lock()))
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
let time_limit = swap.get_time_start_lock();
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Self::generate_cancel_respond(swap)
} // Locking is not done yet, we can cancel easy way
Input::Check => {
// Check if mwc lock is already done
if tx_conf.mwc_lock_conf.is_some() {
// Going to the next step... MWC lock is already published.
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
));
}
// Check the deadline for locking
if swap::get_cur_time() > time_limit {
// cancelling because of timeout
swap.add_journal_message(JOURNAL_CANCELLED_BY_TIMEOUT.to_string());
return Self::generate_cancel_respond(swap);
}
Ok(StateProcessRespond::new(StateId::SellerPostingLockMwcSlate)
.action(Action::SellerPublishMwcLockTx)
.time_limit(time_limit))
}
Input::Execute => {
if tx_conf.mwc_lock_conf.is_some() {
// Going to the next step... MWC lock is already published.
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
));
}
// Executing the MWC lock transaction
if swap::get_cur_time() > time_limit {
// cancelling because of timeout. The last Chance to cancel easy way.
return Self::generate_cancel_respond(swap);
}
// Posting the transaction
swap::publish_transaction(&*self.node_client, &swap.lock_slate.tx, false)?;
swap.posted_lock = Some(swap::get_cur_time());
swap.add_journal_message("MWC lock slate posted".to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
))
}
Input::IncomeMessage(message) => {
// Message must be ignored. Late delivery sometimes is possible
// Still checking the type of the message
let _ = message.unwrap_accept_offer()?;
Ok(StateProcessRespond::new(StateId::SellerPostingLockMwcSlate))
} /*_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerPostingLockMwcSlate get {:?}",
input
))),*/
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForBuyerLock)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForLockConfirmations)
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerWaitingForLockConfirmations
pub struct SellerWaitingForLockConfirmations<'a, K: Keychain> {
keychain: Arc<K>,
swap_api: Arc<Box<dyn SwapApi<K> + 'a>>,
}
impl<'a, K: Keychain> SellerWaitingForLockConfirmations<'a, K> {
/// Create a new instance
pub fn new(keychain: Arc<K>, swap_api: Arc<Box<dyn SwapApi<K> + 'a>>) -> Self {
Self { keychain, swap_api }
}
}
impl<'a, K: Keychain> State for SellerWaitingForLockConfirmations<'a, K> {
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForLockConfirmations
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
let address_info = match self.swap_api.get_secondary_lock_address(swap) {
Ok(address) => {
debug_assert!(address.len() > 0);
debug_assert!(address.len() <= 2);
format!(
" {} lock address {}",
swap.secondary_currency,
address.join(" or ")
)
}
Err(_) => "".to_string(),
};
Some(
StateEtaInfo::new(&format!(
"Waiting for Lock funds confirmations.{}",
address_info
))
.end_time(swap.get_time_message_redeem()),
)
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
)) // Long cancellation path
}
Input::Check => {
let mwc_lock = tx_conf.mwc_lock_conf.unwrap_or(0);
let mut secondary_lock = tx_conf.secondary_lock_conf.unwrap_or(0);
if tx_conf.secondary_lock_amount < swap.secondary_amount {
secondary_lock = 0;
}
if tx_conf.secondary_lock_amount > swap.secondary_amount {
					// Posted too much; the buyer will probably cancel the deal, let's stay in sync
swap.add_journal_message(format!(
"{}. Expected {} {}, but get {} {}",
JOURNAL_CANCELLED_BYER_LOCK_TOO_MUCH_FUNDS,
swap.secondary_currency
.amount_to_hr_string(swap.secondary_amount, true),
swap.secondary_currency,
swap.secondary_currency
.amount_to_hr_string(tx_conf.secondary_lock_amount, true),
swap.secondary_currency
));
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
));
}
let time_limit = swap.get_time_message_redeem();
if mwc_lock < swap.mwc_confirmations
|| secondary_lock < swap.secondary_confirmations
{
// Checking for a deadline. Note time_message_redeem is fine, we can borrow time from that operation and still be safe
if swap::get_cur_time() > time_limit {
// cancelling because of timeout
swap.add_journal_message(JOURNAL_CANCELLED_BY_TIMEOUT.to_string());
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
));
}
if mwc_lock == 0
&& swap.posted_lock.clone().unwrap_or(0)
< swap::get_cur_time() - super::state::POST_MWC_RETRY_PERIOD
{
return Ok(StateProcessRespond::new(StateId::SellerPostingLockMwcSlate));
}
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
)
.action(Action::WaitForLockConfirmations {
mwc_required: swap.mwc_confirmations,
mwc_actual: mwc_lock,
currency: swap.secondary_currency,
address: self.swap_api.get_secondary_lock_address(swap)?,
sec_expected_to_be_posted: swap.secondary_amount
- tx_conf.secondary_lock_amount,
sec_required: swap.secondary_confirmations,
sec_actual: tx_conf.secondary_lock_conf,
})
.time_limit(time_limit));
}
// Waiting for own funds first. For seller it is MWC
if mwc_lock < swap.mwc_confirmations {
if mwc_lock == 0
&& swap.posted_lock.clone().unwrap_or(0)
< swap::get_cur_time() - super::state::POST_MWC_RETRY_PERIOD
{
return Ok(StateProcessRespond::new(StateId::SellerPostingLockMwcSlate));
}
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
)
.action(Action::WaitForMwcConfirmations {
name: "MWC Lock transaction".to_string(),
required: swap.mwc_confirmations,
actual: mwc_lock,
})
.time_limit(time_limit));
}
if secondary_lock < swap.secondary_confirmations {
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
)
.action(Action::WaitForSecondaryConfirmations {
name: format!("{} Locking Account", swap.secondary_currency),
expected_to_be_posted: swap.secondary_amount
- tx_conf.secondary_lock_amount,
currency: swap.secondary_currency,
address: self.swap_api.get_secondary_lock_address(swap)?,
required: swap.secondary_confirmations,
actual: secondary_lock,
})
.time_limit(time_limit));
}
swap.add_journal_message(format!(
"MWC and {} funds are Locked",
swap.secondary_currency
));
Ok(StateProcessRespond::new(
StateId::SellerWaitingForInitRedeemMessage,
))
}
Input::IncomeMessage(message) => {
// That can be late Accept offer message
if message.clone().unwrap_accept_offer().is_ok() {
return Ok(StateProcessRespond::new(
StateId::SellerSendingInitRedeemMessage,
));
}
				// We can accept a message during the wait. The Buyer may already have confirmations and be sending a message
if swap.adaptor_signature.is_none() {
let (_, init_redeem, _) = message.unwrap_init_redeem()?;
SellApi::init_redeem(&*self.keychain, swap, context, init_redeem)?;
}
debug_assert!(swap.adaptor_signature.is_some());
swap.add_journal_message("Init Redeem message is accepted".to_string());
Ok(StateProcessRespond::new(
StateId::SellerSendingInitRedeemMessage,
))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForLockConfirmations get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerPostingLockMwcSlate)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForInitRedeemMessage)
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
/// SellerWaitingForInitRedeemMessage
pub struct SellerWaitingForInitRedeemMessage<K: Keychain> {
keychain: Arc<K>,
}
impl<K: Keychain> SellerWaitingForInitRedeemMessage<K> {
/// Create an instance
pub fn new(keychain: Arc<K>) -> Self {
Self { keychain }
}
}
impl<K: Keychain> State for SellerWaitingForInitRedeemMessage<K> {
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForInitRedeemMessage
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(
StateEtaInfo::new("Waiting For Init Redeem message")
.end_time(swap.get_time_message_redeem()),
)
}
fn is_cancellable(&self) -> bool {
true
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Cancel => {
swap.add_journal_message(JOURNAL_CANCELLED_BY_USER.to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
))
}
Input::Check => {
if swap.adaptor_signature.is_some() {
// Was already processed. Can go to the next step
return Ok(StateProcessRespond::new(
StateId::SellerSendingInitRedeemMessage,
));
}
// Check if everything is still locked...
let mwc_lock = tx_conf.mwc_lock_conf.unwrap_or(0);
let secondary_lock = tx_conf.secondary_lock_conf.unwrap_or(0);
if mwc_lock < swap.mwc_confirmations
|| secondary_lock < swap.secondary_confirmations
{
swap.add_journal_message(JOURNAL_NOT_LOCKED.to_string());
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
));
}
let time_limit = swap.get_time_message_redeem();
if swap::get_cur_time() < time_limit {
Ok(
StateProcessRespond::new(StateId::SellerWaitingForInitRedeemMessage)
.action(Action::SellerWaitingForInitRedeemMessage)
.time_limit(time_limit),
)
} else {
// cancelling
swap.add_journal_message(JOURNAL_CANCELLED_BY_TIMEOUT.to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
))
}
}
Input::IncomeMessage(message) => {
if swap.adaptor_signature.is_none() {
let (_, init_redeem, _) = message.unwrap_init_redeem()?;
SellApi::init_redeem(&*self.keychain, swap, context, init_redeem)?;
}
debug_assert!(swap.adaptor_signature.is_some());
swap.add_journal_message("Init Redeem message is accepted".to_string());
Ok(StateProcessRespond::new(
StateId::SellerSendingInitRedeemMessage,
))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForInitRedeemMessage get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForLockConfirmations)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerSendingInitRedeemMessage)
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerSendingInitRedeemMessage
pub struct SellerSendingInitRedeemMessage<'a, C>
where
C: NodeClient + 'a,
{
node_client: Arc<C>,
phantom: PhantomData<&'a C>,
message: Option<Message>,
}
impl<'a, C> SellerSendingInitRedeemMessage<'a, C>
where
C: NodeClient + 'a,
{
/// Create in instance
pub fn new(node_client: Arc<C>) -> Self {
Self {
node_client,
phantom: PhantomData,
message: None,
}
}
}
impl<'a, C> State for SellerSendingInitRedeemMessage<'a, C>
where
C: NodeClient + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerSendingInitRedeemMessage
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(
StateEtaInfo::new("Sending back Redeem Message")
.end_time(swap.get_time_message_redeem()),
)
}
fn is_cancellable(&self) -> bool {
false
}
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
				// Check whether the Buyer can already redeem. The Buyer can be sneaky and try to fool us, so
				// we should assume the message was delivered and the Buyer can do the redeem.
if !swap.redeem_slate.tx.kernels().is_empty() {
if check_mwc_redeem(swap, &*self.node_client)? {
// Buyer did a redeem, we can continue processing and redeem BTC
swap.posted_msg2 = Some(u32::MAX as i64);
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
));
}
}
// Redeem is published, so we are good
if swap.redeem_kernel_updated {
swap.posted_msg2 = Some(u32::MAX as i64);
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
));
}
if swap.posted_msg2.unwrap_or(0)
< swap::get_cur_time() - super::state::SEND_MESSAGE_RETRY_PERIOD
{
// Check if everything is still locked...
let mwc_lock = tx_conf.mwc_lock_conf.unwrap_or(0);
let secondary_lock = tx_conf.secondary_lock_conf.unwrap_or(0);
if mwc_lock < swap.mwc_confirmations
|| secondary_lock < swap.secondary_confirmations
{
swap.add_journal_message(JOURNAL_NOT_LOCKED.to_string());
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForLockConfirmations,
));
}
let time_limit = swap.get_time_message_redeem();
if swap::get_cur_time() < time_limit {
if self.message.is_none() {
self.message = swap.message2.clone();
}
if self.message.is_none() {
self.message = Some(SellApi::redeem_message(swap)?);
}
Ok(
StateProcessRespond::new(StateId::SellerSendingInitRedeemMessage)
.action(Action::SellerSendRedeemMessage(
self.message.clone().unwrap(),
))
.time_limit(time_limit),
)
} else {
						// We can't cancel; we must continue to wait.
						// Since this is effectively a cancellation, ack this send:
						// sending is not needed any more.
swap.posted_msg2 = Some(u32::MAX as i64);
Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
))
}
} else {
					// This is probably a rerun after some reset; we should tolerate it
Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
))
}
}
Input::Execute => {
debug_assert!(self.message.is_some()); // Check expected to be called first
if swap.message2.is_none() {
swap.message2 = Some(self.message.clone().unwrap());
}
swap.posted_msg2 = Some(swap::get_cur_time());
swap.add_journal_message("Send response to Redeem message".to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerSendingInitRedeemMessage get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForInitRedeemMessage)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForBuyerToRedeemMwc)
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// Returns true if the MWC was redeemed by the Buyer, so we are good to claim the BTC.
pub(crate) fn check_mwc_redeem<C: NodeClient>(
swap: &mut Swap,
node_client: &C,
) -> Result<bool, ErrorKind> {
// Trying to find redeem
if let Some((kernel, _h)) = swap.find_redeem_kernel(node_client)? {
// Replace kernel
let _ = std::mem::replace(
swap.redeem_slate
.tx
.body
.kernels
.get_mut(0)
.ok_or(ErrorKind::UnexpectedAction(
"Seller Fn required_action() redeem slate not initialized, kernels are empty"
.to_string(),
))?,
kernel,
);
swap.redeem_kernel_updated = true;
swap.add_journal_message(
"Buyer redeemed MWC, transaction published on the blockchain".to_string(),
);
swap.ack_msg2();
return Ok(true);
}
Ok(false)
}
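// Note: the on-chain kernel replaces the locally stored one so that later steps work with
// the kernel the Buyer actually published; in this adaptor-signature scheme the published
// signature is (presumably) what lets the Seller recover the secret needed to redeem the
// secondary currency.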
/// State SellerWaitingForBuyerToRedeemMwc
pub struct SellerWaitingForBuyerToRedeemMwc<'a, C>
where
C: NodeClient + 'a,
{
node_client: Arc<C>,
phantom: PhantomData<&'a C>,
}
impl<'a, C> SellerWaitingForBuyerToRedeemMwc<'a, C>
where
C: NodeClient + 'a,
{
/// Create a new instance
pub fn new(node_client: Arc<C>) -> Self {
Self {
node_client,
phantom: PhantomData,
}
}
}
fn calc_mwc_unlock_time(swap: &Swap, tip: &u64) -> i64 {
swap::get_cur_time() + (swap.refund_slate.lock_height.saturating_sub(*tip) * 60) as i64
}
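// Note: the ETA above assumes an average MWC block interval of 60 seconds; e.g. with the
// refund lock height 10 blocks above the current tip, the unlock estimate is ~600 seconds.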
impl<'a, C> State for SellerWaitingForBuyerToRedeemMwc<'a, C>
where
C: NodeClient + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForBuyerToRedeemMwc
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
// Time limit is defined by the chain height
if let Ok((height, _, _)) = self.node_client.get_chain_tip() {
Some(
StateEtaInfo::new("Wait For Buyer to redeem MWC")
.end_time(calc_mwc_unlock_time(swap, &height)),
)
} else {
Some(StateEtaInfo::new("Wait For Buyer to redeem MWC"))
}
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
				// The redeem slate was already found and its kernel updated; we can go forward
if swap.redeem_kernel_updated {
return Ok(StateProcessRespond::new(
StateId::SellerRedeemSecondaryCurrency,
));
}
				// Check redeem first, because the Buyer can still redeem even when we are able to refund.
				// In that case we do the redeem, and handle any refund from the redeem branch.
if !swap.redeem_slate.tx.kernels().is_empty() {
if check_mwc_redeem(swap, &*self.node_client)? {
// Buyer did a redeem, we can continue processing and redeem BTC
return Ok(StateProcessRespond::new(
StateId::SellerRedeemSecondaryCurrency,
));
}
}
// Check the deadline for locking
//
let (height, _, _) = self.node_client.get_chain_tip()?;
if height > swap.refund_slate.lock_height {
swap.add_journal_message(
"Buyer didn't redeem, time to get a refund".to_string(),
);
					// Time to get my MWC back with a refund.
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
));
}
// Check if we need to retry to send the message
if swap.posted_msg2.unwrap_or(0)
< swap::get_cur_time() - super::state::SEND_MESSAGE_RETRY_PERIOD
{
return Ok(StateProcessRespond::new(
StateId::SellerSendingInitRedeemMessage,
));
}
// Still waiting...
Ok(
StateProcessRespond::new(StateId::SellerWaitingForBuyerToRedeemMwc)
.action(Action::SellerWaitForBuyerRedeemPublish {
mwc_tip: height,
lock_height: swap.refund_slate.lock_height,
})
.time_limit(calc_mwc_unlock_time(swap, &height)),
)
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForBuyerToRedeemMwc get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerSendingInitRedeemMessage)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerRedeemSecondaryCurrency)
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Greedy approach: check the deadline for locking.
// It is fair because this code runs only if the Buyer delays the MWC redeem for a long time.
// One of the reasons for such a delay is an attack.
fn post_refund_if_possible<C: NodeClient>(
node_client: Arc<C>,
swap: &Swap,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<(), ErrorKind> {
let (height, _, _) = node_client.get_chain_tip()?;
if height > swap.refund_slate.lock_height
&& tx_conf.mwc_redeem_conf.is_none()
&& tx_conf.mwc_refund_conf.is_none()
{
let res = swap::publish_transaction(&*node_client, &swap.refund_slate.tx, false);
if let Err(e) = res {
info!("MWC refund can be issued even likely it will fail. Trying to post it. get an error {}", e);
} else {
info!("MWC refund can be was issued, even it was expected to fail");
}
}
Ok(())
}
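// Note: this helper only posts the refund once the refund lock height has passed and neither
// the redeem nor the refund transaction is visible on-chain, so posting a transaction that is
// likely to fail is harmless and can only work in the Seller's favor.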
/// State SellerRedeemSecondaryCurrency
pub struct SellerRedeemSecondaryCurrency<'a, C, K>
where
C: NodeClient + 'a,
K: Keychain + 'a,
{
keychain: Arc<K>,
node_client: Arc<C>,
swap_api: Arc<Box<dyn SwapApi<K> + 'a>>,
phantom: PhantomData<&'a K>,
}
impl<'a, C, K> SellerRedeemSecondaryCurrency<'a, C, K>
where
C: NodeClient + 'a,
K: Keychain + 'a,
{
/// Create a new instance
pub fn new(
keychain: Arc<K>,
node_client: Arc<C>,
swap_api: Arc<Box<dyn SwapApi<K> + 'a>>,
) -> Self {
Self {
keychain,
node_client,
swap_api,
phantom: PhantomData,
}
}
}
impl<'a, C, K> State for SellerRedeemSecondaryCurrency<'a, C, K>
where
C: NodeClient + 'a,
K: Keychain + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerRedeemSecondaryCurrency
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(
// Using script lock time as more pessimistic
StateEtaInfo::new(&format!(
"Post {} Redeem Transaction, address {}",
swap.secondary_currency,
swap.unwrap_seller().unwrap_or(("XXXXXX".to_string(), 0)).0
))
.end_time(swap.get_time_btc_lock_script() - swap.get_timeinterval_btc_lock()),
)
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
// Be greedy, check the deadline for locking
post_refund_if_possible(self.node_client.clone(), swap, tx_conf)?;
if !swap.redeem_kernel_updated {
debug_assert!(false); // That shouldn't happen
// let's go back to the waiting since the data is not ready
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
));
}
// Check if already processed
if tx_conf.secondary_redeem_conf.is_some()
&& (tx_conf.secondary_redeem_conf.unwrap() > 0
|| !self.swap_api.is_secondary_tx_fee_changed(swap)?)
{
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForRedeemConfirmations,
));
}
// Ready to redeem BTC.
Ok(
// Using script lock time for ETA as more pessimistic
StateProcessRespond::new(StateId::SellerRedeemSecondaryCurrency)
.action(Action::SellerPublishTxSecondaryRedeem {
currency: swap.secondary_currency,
address: swap.unwrap_seller()?.0,
})
.time_limit(
swap.get_time_btc_lock_script() - swap.get_timeinterval_btc_lock(),
),
)
}
Input::Execute => {
self.swap_api.publish_secondary_transaction(
&*self.keychain,
swap,
context,
true,
)?;
debug_assert!(swap.secondary_data.unwrap_btc()?.redeem_tx.is_some());
swap.posted_redeem = Some(swap::get_cur_time());
swap.posted_secondary_height = Some(tx_conf.secondary_tip);
swap.add_journal_message(format!(
"{} redeem transaction is sent, address {}",
swap.secondary_currency,
swap.unwrap_seller()?.0,
));
Ok(StateProcessRespond::new(
StateId::SellerWaitingForRedeemConfirmations,
))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerRedeemSecondaryCurrency get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForBuyerToRedeemMwc)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForRedeemConfirmations)
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerWaitingForRedeemConfirmations
pub struct SellerWaitingForRedeemConfirmations<'a, C, K>
where
C: NodeClient + 'a,
K: Keychain + 'a,
{
node_client: Arc<C>,
swap_api: Arc<Box<dyn SwapApi<K> + 'a>>,
phantom: PhantomData<&'a K>,
}
impl<'a, C, K> SellerWaitingForRedeemConfirmations<'a, C, K>
where
C: NodeClient + 'a,
K: Keychain + 'a,
{
/// Create a new instance
pub fn new(node_client: Arc<C>, swap_api: Arc<Box<dyn SwapApi<K> + 'a>>) -> Self {
Self {
node_client,
swap_api,
phantom: PhantomData,
}
}
}
impl<'a, C, K> State for SellerWaitingForRedeemConfirmations<'a, C, K>
where
C: NodeClient + 'a,
K: Keychain + 'a,
{
	fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForRedeemConfirmations
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new(&format!(
"Wait For {} Redeem Tx Confirmations",
swap.secondary_currency
)))
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
// Be greedy, check the deadline for locking
post_refund_if_possible(self.node_client.clone(), swap, tx_conf)?;
// Just waiting
if let Some(conf) = tx_conf.secondary_redeem_conf {
if conf >= swap.secondary_confirmations {
// We are done
swap.add_journal_message(format!(
"{} redeem transaction has enough confirmations. Trade is complete",
swap.secondary_currency
));
return Ok(StateProcessRespond::new(StateId::SellerSwapComplete));
}
					// If the transaction was published a while ago and is still in the mempool, we need to bump the fee.
					// This is applicable to BTC only.
if swap.secondary_currency == Currency::Btc && conf == 0 {
match swap.posted_secondary_height {
Some(h) => {
if h < tx_conf.secondary_tip
- state::SECONDARY_HEIGHT_TO_INCREASE_FEE
{
								// We can bump the fee if the amount allows it. The redeem tx size is about 660 bytes, and we don't want to spend more than half of the BTC funds on fees.
if swap.secondary_fee
* state::SECONDARY_INCREASE_FEE_K * 660.0
* 2.0 < swap.secondary_amount as f32
{
swap.secondary_fee *= state::SECONDARY_INCREASE_FEE_K;
swap.posted_secondary_height = None;
swap.posted_redeem = None;
swap.add_journal_message(format!(
"Fee for {} redeem transaction is increased. New fee is {} {}",
swap.secondary_currency,
swap.secondary_fee,
swap.secondary_currency.get_fee_units().0
));
}
}
}
None => (),
}
}
// If transaction in the memory pool for a long time or fee is different now, we should do a retry
if conf == 0
&& (self.swap_api.is_secondary_tx_fee_changed(swap)?
&& swap.posted_redeem.unwrap_or(0)
< swap::get_cur_time() - super::state::POST_SECONDARY_RETRY_PERIOD)
{
return Ok(StateProcessRespond::new(
StateId::SellerRedeemSecondaryCurrency,
));
}
} else {
// might need to retry
if swap.posted_redeem.unwrap_or(0)
< swap::get_cur_time() - super::state::POST_SECONDARY_RETRY_PERIOD
{
return Ok(StateProcessRespond::new(
StateId::SellerRedeemSecondaryCurrency,
));
}
}
return Ok(
StateProcessRespond::new(StateId::SellerWaitingForRedeemConfirmations).action(
Action::WaitForSecondaryConfirmations {
name: "Redeeming funds".to_string(),
expected_to_be_posted: 0,
currency: swap.secondary_currency,
address: vec![swap.unwrap_seller()?.0],
required: swap.secondary_confirmations,
actual: tx_conf.secondary_redeem_conf.unwrap_or(0),
},
),
);
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForRedeemConfirmations get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerRedeemSecondaryCurrency)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerSwapComplete)
}
}
/////////////////////////////////////////////////////////////////////////////////
/// State SellerSwapComplete
pub struct SellerSwapComplete {}
impl SellerSwapComplete {
/// Create a new instance
pub fn new() -> Self {
Self {}
}
}
impl State for SellerSwapComplete {
fn get_state_id(&self) -> StateId {
StateId::SellerSwapComplete
}
fn get_eta(&self, _swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new("Swap completed"))
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
_swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => Ok(StateProcessRespond::new(StateId::SellerSwapComplete)),
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerSwapComplete get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForRedeemConfirmations)
}
fn get_next_swap_state(&self) -> Option<StateId> {
None
}
}
///////////////////////////////////////////////////////////////////
/// State SellerCancelled
pub struct SellerCancelled {}
impl SellerCancelled {
/// Create a new instance
pub fn new() -> Self {
Self {}
}
}
impl State for SellerCancelled {
fn get_state_id(&self) -> StateId {
StateId::SellerCancelled
}
fn get_eta(&self, _swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new(
"Swap is cancelled, no funds was locked, no refund needed",
))
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
_swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => Ok(StateProcessRespond::new(StateId::SellerCancelled)),
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerCancelled get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
None
}
fn get_next_swap_state(&self) -> Option<StateId> {
None
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Refund workflow
////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerWaitingForRefundHeight
pub struct SellerWaitingForRefundHeight<'a, C>
where
C: NodeClient + 'a,
{
node_client: Arc<C>,
phantom: PhantomData<&'a C>,
}
impl<'a, C> SellerWaitingForRefundHeight<'a, C>
where
C: NodeClient + 'a,
{
/// Create a new instance
pub fn new(node_client: Arc<C>) -> Self {
Self {
node_client,
phantom: PhantomData,
}
}
}
impl<'a, C> State for SellerWaitingForRefundHeight<'a, C>
where
C: NodeClient + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForRefundHeight
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
if let Ok((height, _, _)) = self.node_client.get_chain_tip() {
Some(
StateEtaInfo::new("Wait for MWC refund to unlock")
.end_time(calc_mwc_unlock_time(swap, &height)),
)
} else {
Some(StateEtaInfo::new("Wait for MWC refund to unlock"))
}
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
// Check the deadline for locking
//
let (height, _, _) = self.node_client.get_chain_tip()?;
if height > swap.refund_slate.lock_height {
swap.add_journal_message("MWC funds are unlocked".to_string());
return Ok(StateProcessRespond::new(StateId::SellerPostingRefundSlate));
}
// Still waiting...
Ok(
StateProcessRespond::new(StateId::SellerWaitingForRefundHeight)
.action(Action::WaitForMwcRefundUnlock {
mwc_tip: height,
lock_height: swap.refund_slate.lock_height,
})
.time_limit(calc_mwc_unlock_time(swap, &height)),
)
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForRefundHeight get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
None
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerPostingRefundSlate)
}
}
/////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerPostingRefundSlate
pub struct SellerPostingRefundSlate<'a, C>
where
C: NodeClient + 'a,
{
node_client: Arc<C>,
phantom: PhantomData<&'a C>,
}
impl<'a, C> SellerPostingRefundSlate<'a, C>
where
C: NodeClient + 'a,
{
/// Create a new instance
pub fn new(node_client: Arc<C>) -> Self {
Self {
node_client,
phantom: PhantomData,
}
}
}
impl<'a, C> State for SellerPostingRefundSlate<'a, C>
where
C: NodeClient + 'a,
{
fn get_state_id(&self) -> StateId {
StateId::SellerPostingRefundSlate
}
fn get_eta(&self, swap: &Swap) -> Option<StateEtaInfo> {
if let Ok((height, _, _)) = self.node_client.get_chain_tip() {
let start_time_limit = calc_mwc_unlock_time(swap, &height);
Some(
StateEtaInfo::new("Post MWC Refund Slate")
.start_time(start_time_limit)
.end_time(start_time_limit + swap.redeem_time_sec as i64),
)
} else {
Some(StateEtaInfo::new("Post MWC Refund Slate"))
}
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
				// Check if the MWC refund was already posted
if tx_conf.mwc_refund_conf.is_some() {
// already published.
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundConfirmations,
));
}
if tx_conf.mwc_redeem_conf.is_some() {
					// Buyer published the redeem slate; we can redeem the BTC now
swap.add_journal_message("Buyer published redeem transaction".to_string());
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
));
}
Ok(StateProcessRespond::new(StateId::SellerPostingRefundSlate)
.action(Action::SellerPublishMwcRefundTx))
}
Input::Execute => {
				// Executing the MWC refund transaction
// Posting the transaction
debug_assert!(tx_conf.mwc_refund_conf.is_none());
swap::publish_transaction(&*self.node_client, &swap.refund_slate.tx, false)?;
swap.posted_refund = Some(swap::get_cur_time());
swap.add_journal_message("MWC refund slate is posted".to_string());
Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundConfirmations,
))
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerPostingRefundSlate get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForRefundHeight)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForRefundConfirmations)
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerWaitingForRefundConfirmations
pub struct SellerWaitingForRefundConfirmations {}
impl SellerWaitingForRefundConfirmations {
/// Create a new instance
pub fn new() -> Self {
Self {}
}
}
impl State for SellerWaitingForRefundConfirmations {
fn get_state_id(&self) -> StateId {
StateId::SellerWaitingForRefundConfirmations
}
fn get_eta(&self, _swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new("Wait for MWC Refund confirmations"))
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
swap: &mut Swap,
_context: &Context,
tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => {
				// Check if the MWC refund transaction is already on-chain
if tx_conf.mwc_refund_conf.is_none() {
if tx_conf.mwc_redeem_conf.is_some() {
// Found that Buyer redeem, let's switch to that branch
swap.add_journal_message("Buyer published redeem transaction".to_string());
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForBuyerToRedeemMwc,
));
}
if swap.posted_refund.unwrap_or(0)
< swap::get_cur_time() - super::state::POST_MWC_RETRY_PERIOD
{
// We can retry to post
return Ok(StateProcessRespond::new(
StateId::SellerWaitingForRefundHeight,
));
}
}
let refund_conf = tx_conf.mwc_refund_conf.unwrap_or(0);
if refund_conf > swap.mwc_confirmations {
					// The refund has enough confirmations.
					swap.add_journal_message("MWC refund transaction has enough confirmations. Trade is cancelled and refunded".to_string());
return Ok(StateProcessRespond::new(StateId::SellerCancelledRefunded));
}
Ok(
StateProcessRespond::new(StateId::SellerWaitingForRefundConfirmations).action(
Action::WaitForMwcConfirmations {
name: "MWC Refund".to_string(),
required: swap.mwc_confirmations,
actual: refund_conf,
},
),
)
}
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerWaitingForRefundConfirmations get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerPostingRefundSlate)
}
fn get_next_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerCancelledRefunded)
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
/// State SellerCancelledRefunded
pub struct SellerCancelledRefunded {}
impl SellerCancelledRefunded {
/// Create a new instance
pub fn new() -> Self {
Self {}
}
}
impl State for SellerCancelledRefunded {
fn get_state_id(&self) -> StateId {
StateId::SellerCancelledRefunded
}
fn get_eta(&self, _swap: &Swap) -> Option<StateEtaInfo> {
Some(StateEtaInfo::new("Swap is cancelled, MWC are refunded"))
}
fn is_cancellable(&self) -> bool {
false
}
/// Process the state. Result will be the next state
fn process(
&mut self,
input: Input,
_swap: &mut Swap,
_context: &Context,
_tx_conf: &SwapTransactionsConfirmations,
) -> Result<StateProcessRespond, ErrorKind> {
match input {
Input::Check => Ok(StateProcessRespond::new(StateId::SellerCancelledRefunded)),
_ => Err(ErrorKind::InvalidSwapStateInput(format!(
"SellerCancelled get {:?}",
input
))),
}
}
fn get_prev_swap_state(&self) -> Option<StateId> {
Some(StateId::SellerWaitingForRefundConfirmations)
}
fn get_next_swap_state(&self) -> Option<StateId> {
None
}
}
frame.go
package smux
import (
"encoding/binary"
"fmt"
)
const ( // cmds
// protocol version 1:
cmdSYN byte = iota // stream open
cmdFIN // stream close, a.k.a EOF mark
cmdPSH // data push
cmdNOP // no operation
// protocol version 2 extra commands
// notify bytes consumed by remote peer-end
cmdUPD
)
const (
// data size of cmdUPD, format:
// |4B data consumed(ACK)| 4B window size(WINDOW) |
szCmdUPD = 8
)
const (
// initial peer window guess, a slow-start
initialPeerWindow = 262144
)
const (
sizeOfVer = 1
sizeOfCmd = 1
sizeOfLength = 2
sizeOfSid = 4
headerSize = sizeOfVer + sizeOfCmd + sizeOfSid + sizeOfLength
)
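// Wire layout of a frame header (multi-byte fields are little-endian),
// as implied by the rawHeader accessors below:
//
//	| 1B version | 1B cmd | 2B length | 4B stream id | length bytes of data |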
// Frame defines a packet from or to be multiplexed into a single connection
type Frame struct {
ver byte
cmd byte
sid uint32
data []byte
}
func newFrame(version byte, cmd byte, sid uint32) Frame {
return Frame{ver: version, cmd: cmd, sid: sid}
}
type rawHeader [headerSize]byte
func (h rawHeader) Version() byte {
return h[0]
}
func (h rawHeader) Cmd() byte {
return h[1]
}
func (h rawHeader) Length() uint16 {
return binary.LittleEndian.Uint16(h[2:])
}
func (h rawHeader) StreamID() uint32 {
return binary.LittleEndian.Uint32(h[4:])
}
func (h rawHeader) String() string {
return fmt.Sprintf("Version:%d Cmd:%d StreamID:%d Length:%d",
h.Version(), h.Cmd(), h.StreamID(), h.Length())
}
type updHeader [szCmdUPD]byte
func (h updHeader) Consumed() uint32 {
return binary.LittleEndian.Uint32(h[:])
}
func (h updHeader) Window() uint32 {
return binary.LittleEndian.Uint32(h[4:])
}
evalie.py
# MIT License
#
# Copyright (c) 2022 Ferhat Geçdoğan All Rights Reserved.
# Distributed under the terms of the MIT License.
#
#
# evalie - a toy evaluator using
# shunting-yard algorithm.
# ------
# github.com/ferhatgec/evalie
#
import math
class evalie:
def __init__(self):
self.precedence = {
'+': 2,
'-': 2,
'*': 3,
'/': 3,
'!': 4,
'^': 4,
'%': 4
}
self.left = 0
self.right = 0
self.op = ''
self.stack = self.evalie_values()
self.pi = str(math.pi)
self.e = str(math.e)
self.tau = str(math.tau)
self.golden_ratio = str(1.618033988749895)
class evalie_values:
def __init__(self):
self.values = []
self.operators = []
@staticmethod
def check_none(val):
return val if val is not None else -1
def get_precedence(self, ch) -> int:
return self.check_none(self.precedence.get(ch))
def perform(self):
if self.left is None:
self.left = 0
if self.right is None:
self.right = 0
match self.op:
case '+':
return self.left + self.right
case '-':
return self.right - self.left
case '*':
return self.left * self.right
case '/':
return self.right / self.left
case '^':
return self.right ** self.left
case '!':
return float(math.factorial(int(self.left)))
case '%':
return self.right % self.left
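    # Note: `left` holds the operand popped first (the top of the value stack), so the
    # non-commutative branches above compute right - left, right / left, right ** left
    # and right % left to preserve infix operand order.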
def pop(self, data):
if type(data) == float:
data = [data]
return data.pop()
if len(data) > 0:
val = data.pop()
return val
def precalc(self, data: str):
return data.replace('pi', self.pi) \
.replace('π', self.pi) \
.replace('e', self.e) \
.replace('tau', self.tau) \
.replace('τ', self.tau) \
.replace('phi', self.golden_ratio) \
.replace('φ', self.golden_ratio) \
.replace('mod', '%')\
.replace('+', ' + ')\
.replace('-', ' - ')\
.replace('/', ' / ')\
.replace('*', ' * ')
    def clear(self):
        self.left = self.right = 0
        self.op = 0
self.stack = self.evalie_values()
def eval(self, data):
data = self.precalc(data)
i = 0
while i < len(data):
match data[i]:
case ' ':
i += 1
continue
case '(':
self.stack.operators.append(data[i])
case ')':
while len(self.stack.operators) != 0 and self.stack.operators[-1] != '(':
self.left = self.pop(self.stack.values)
self.right = self.pop(self.stack.values)
self.op = self.pop(self.stack.operators)
self.stack.values.append(self.perform())
self.pop(self.stack.operators)
case _ if data[i].isdigit() or (data[i] == '-' and self.left > 0 and self.right == 0):
value = ''
while i < len(data) and (data[i].isdigit() or data[i] == '.' or data[i] == '-'):
value += data[i]
i += 1
value = float(value)
self.stack.values.append(value)
i -= 1
case _ as arg:
while (len(self.stack.operators) != 0
and self.get_precedence(self.stack.operators[-1]) >=
self.get_precedence(arg)):
self.left = self.pop(self.stack.values)
if self.stack.operators[-1] != '!':
self.right = self.pop(self.stack.values)
self.op = self.pop(self.stack.operators)
self.stack.values.append(self.perform())
self.stack.operators.append(data[i])
i += 1
while len(self.stack.operators) != 0:
self.left = self.pop(self.stack.values)
self.right = self.pop(self.stack.values)
self.op = self.pop(self.stack.operators)
            self.stack.values.append(self.perform())
if type(self.stack.values) == list and len(self.stack.values) > 0:
            return self.stack.values[-1]
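# Example usage (untested sketch):
#   e = evalie()
#   e.eval('3 + 4 * (2 - 1)')  # -> 7.0
#   e.clear()                  # reset evaluator state between expressions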
provider.go
package provider
import (
"crypto/rsa"
"io/ioutil"
"github.com/chanzuckerberg/terraform-provider-snowflake/pkg/datasources"
"github.com/chanzuckerberg/terraform-provider-snowflake/pkg/db"
"github.com/chanzuckerberg/terraform-provider-snowflake/pkg/resources"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
homedir "github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/snowflakedb/gosnowflake"
"golang.org/x/crypto/ssh"
)
// Provider returns the Terraform provider for Snowflake, defining its schema, resources and data sources
func Provider() *schema.Provider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"account": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_ACCOUNT", nil),
},
"username": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_USER", nil),
},
"password": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_PASSWORD", nil),
				Sensitive:     true,
				ConflictsWith: []string{"browser_auth", "private_key_path", "oauth_access_token"},
			},
			"oauth_access_token": {
				Type:          schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_OAUTH_ACCESS_TOKEN", nil),
Sensitive: true,
ConflictsWith: []string{"browser_auth", "private_key_path", "password"},
},
"browser_auth": {
Type: schema.TypeBool,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_USE_BROWSER_AUTH", nil),
Sensitive: false,
ConflictsWith: []string{"password", "private_key_path", "oauth_access_token"},
},
"private_key_path": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_PRIVATE_KEY_PATH", nil),
Sensitive: true,
ConflictsWith: []string{"browser_auth", "password", "oauth_access_token"},
},
"role": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_ROLE", nil),
},
"region": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("SNOWFLAKE_REGION", "us-west-2"),
},
},
ResourcesMap: map[string]*schema.Resource{
"snowflake_account_grant": resources.AccountGrant(),
"snowflake_database": resources.Database(),
"snowflake_database_grant": resources.DatabaseGrant(),
"snowflake_integration_grant": resources.IntegrationGrant(),
"snowflake_managed_account": resources.ManagedAccount(),
"snowflake_pipe": resources.Pipe(),
"snowflake_resource_monitor": resources.ResourceMonitor(),
"snowflake_resource_monitor_grant": resources.ResourceMonitorGrant(),
"snowflake_role": resources.Role(),
"snowflake_role_grants": resources.RoleGrants(),
"snowflake_schema": resources.Schema(),
"snowflake_schema_grant": resources.SchemaGrant(),
"snowflake_share": resources.Share(),
"snowflake_stage": resources.Stage(),
"snowflake_stage_grant": resources.StageGrant(),
"snowflake_storage_integration": resources.StorageIntegration(),
"snowflake_user": resources.User(),
"snowflake_view": resources.View(),
"snowflake_view_grant": resources.ViewGrant(),
"snowflake_task": resources.Task(),
"snowflake_table_grant": resources.TableGrant(),
"snowflake_warehouse": resources.Warehouse(),
"snowflake_warehouse_grant": resources.WarehouseGrant(),
},
DataSourcesMap: map[string]*schema.Resource{
"snowflake_system_get_aws_sns_iam_policy": datasources.SystemGetAWSSNSIAMPolicy(),
},
ConfigureFunc: ConfigureProvider,
}
}
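// A matching Terraform configuration might look like this (sketch; the account,
// username and region values are placeholders):
//
//	provider "snowflake" {
//	  account  = "my_account"
//	  username = "my_user"
//	  region   = "us-west-2"
//	}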
func ConfigureProvider(s *schema.ResourceData) (interface{}, error) {
account := s.Get("account").(string)
user := s.Get("username").(string)
password := s.Get("password").(string)
browserAuth := s.Get("browser_auth").(bool)
privateKeyPath := s.Get("private_key_path").(string)
oauthAccessToken := s.Get("oauth_access_token").(string)
region := s.Get("region").(string)
role := s.Get("role").(string)
dsn, err := DSN(account, user, password, browserAuth, privateKeyPath, oauthAccessToken, region, role)
if err != nil {
return nil, errors.Wrap(err, "could not build dsn for snowflake connection")
}
db, err := db.Open(dsn)
if err != nil {
return nil, errors.Wrap(err, "Could not open snowflake database.")
}
return db, nil
}
func DSN(
account,
user,
password string,
browserAuth bool,
privateKeyPath,
oauthAccessToken,
region,
role string) (string, error) {
	// us-west-2 is Snowflake's default region, but explicitly specifying it would not trigger gosnowflake's default-region code
// https://github.com/snowflakedb/gosnowflake/blob/52137ce8c32eaf93b0bd22fc5c7297beff339812/dsn.go#L61
if region == "us-west-2" {
region = ""
}
config := gosnowflake.Config{
Account: account,
User: user,
Region: region,
Role: role,
}
if privateKeyPath != "" {
rsaPrivateKey, err := ParsePrivateKey(privateKeyPath)
if err != nil {
return "", errors.Wrap(err, "Private Key could not be parsed")
}
config.PrivateKey = rsaPrivateKey
config.Authenticator = gosnowflake.AuthTypeJwt
} else if browserAuth {
config.Authenticator = gosnowflake.AuthTypeExternalBrowser
} else if oauthAccessToken != "" {
config.Authenticator = gosnowflake.AuthTypeOAuth
config.Token = oauthAccessToken
} else if password != "" {
config.Password = password
} else {
return "", errors.New("no authentication method provided")
}
return gosnowflake.DSN(&config)
}
func ParsePrivateKey(privateKeyPath string) (*rsa.PrivateKey, error) {
expandedPrivateKeyPath, err := homedir.Expand(privateKeyPath)
if err != nil {
return nil, errors.Wrap(err, "Invalid Path to private key")
}
privateKeyBytes, err := ioutil.ReadFile(expandedPrivateKeyPath)
if err != nil {
return nil, errors.Wrap(err, "Could not read private key")
}
if len(privateKeyBytes) == 0 {
return nil, errors.New("Private key is empty")
}
privateKey, err := ssh.ParseRawPrivateKey(privateKeyBytes)
if err != nil {
return nil, errors.Wrap(err, "Could not parse private key")
}
rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("privateKey not of type RSA")
}
return rsaPrivateKey, nil
}
chpass.py
from typing import Any
from typing import Iterator
from typing import List
from django import forms
from django.http import HttpRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from ocflib.account.search import user_exists
from ocflib.account.search import users_by_calnet_uid
from ocflib.ucb.directory import name_by_calnet_uid
from ocflib.ucb.groups import groups_by_student_signat
from requests.exceptions import ConnectionError
from requests.exceptions import ReadTimeout
from ocfweb.account.constants import TEST_OCF_ACCOUNTS
from ocfweb.account.constants import TESTER_CALNET_UIDS
from ocfweb.auth import calnet_required
from ocfweb.component.celery import change_password as change_password_task
from ocfweb.component.forms import Form
CALLINK_ERROR_MSG = (
"Couldn't connect to CalLink API. Resetting group "
'account passwords online is unavailable.'
)
def get_accounts_signatory_for(calnet_uid: str) -> List[Any]:
def flatten(lst: Iterator[Any]) -> List[Any]:
return [item for sublist in lst for item in sublist]
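    # e.g. flatten(iter([['grp-a'], ['grp-b', 'grp-c']])) == ['grp-a', 'grp-b', 'grp-c']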
group_accounts = flatten(
map(
lambda group: group['accounts'],
groups_by_student_signat(calnet_uid).values(),
),
)
# sanity check since we don't trust CalLink API that much:
# if >= 10 groups, can't change online, sorry
assert len(group_accounts) < 10, 'should be less than 10 group accounts'
return group_accounts
def get_accounts_for(calnet_uid: str) -> List[Any]:
accounts = users_by_calnet_uid(calnet_uid)
if calnet_uid in TESTER_CALNET_UIDS:
# these test accounts don't have to exist in LDAP
accounts.extend(TEST_OCF_ACCOUNTS)
return accounts
@calnet_required
def change_password(request: HttpRequest) -> HttpResponse:
calnet_uid = request.session['calnet_uid']
error = None
accounts = get_accounts_for(calnet_uid)
try:
accounts += get_accounts_signatory_for(calnet_uid)
except (ConnectionError, ReadTimeout):
error = CALLINK_ERROR_MSG
if not accounts and error is None:
error = mark_safe(
render_to_string(
'account/partials/chpass-no-accounts.html',
{
'calnet_uid': calnet_uid,
},
),
)
if request.method == 'POST':
form = ChpassForm(accounts, calnet_uid, request.POST)
if form.is_valid():
account = form.cleaned_data['ocf_account']
password = form.cleaned_data['new_password']
try:
calnet_name = name_by_calnet_uid(calnet_uid)
task = change_password_task.delay(
account,
password,
comment=f'Your password was reset online by {calnet_name}.',
)
result = task.wait(timeout=10)
if isinstance(result, Exception):
raise result
except ValueError as ex:
error = str(ex)
else:
# deleting this session variable will force the next
# change_password request to reauthenticate with CalNet
del request.session['calnet_uid']
return render(
request,
'account/chpass/success.html',
{
'account': account,
'title': 'Password Changed Successfully',
},
)
else:
form = ChpassForm(accounts, calnet_uid)
return render(
request,
'account/chpass/index.html',
{
'calnet_uid': calnet_uid,
'error': error,
'form': form,
'title': 'Reset Password',
},
)
class ChpassForm(Form):
# fix self.fields.keyOrder type error in mypy
field_order = [
'ocf_account',
'new_password',
'confirm_password',
]
def __init__(self, ocf_accounts: List[str], calnet_uid: str, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.calnet_uid = calnet_uid
self.fields['ocf_account'] = forms.ChoiceField(
choices=[(x, x) for x in ocf_accounts],
label='OCF account',
)
new_password = forms.CharField(
widget=forms.PasswordInput,
label='New password',
)
confirm_password = forms.CharField(
widget=forms.PasswordInput,
label='Confirm password',
)
def clean_ocf_account(self) -> str:
data = self.cleaned_data['ocf_account']
if not user_exists(data):
raise forms.ValidationError('OCF user account does not exist.') | extra = ''
ocf_accounts = get_accounts_for(self.calnet_uid)
try:
ocf_accounts += get_accounts_signatory_for(self.calnet_uid)
except ConnectionError:
extra = CALLINK_ERROR_MSG + '\n'
if data not in ocf_accounts:
raise forms.ValidationError(
extra + 'OCF user account and CalNet UID mismatch.',
)
return data
def clean_confirm_password(self) -> str:
new_password = self.cleaned_data.get('new_password')
confirm_password = self.cleaned_data.get('confirm_password')
if new_password and confirm_password:
if new_password != confirm_password:
raise forms.ValidationError("Your passwords don't match.")
return confirm_password | |
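# Usage sketch (hypothetical account names and passwords, for illustration only):
#   form = ChpassForm(['alice', 'mygroup'], '123456', {
#       'ocf_account': 'alice',
#       'new_password': 'correct horse battery staple',
#       'confirm_password': 'correct horse battery staple',
#   })
#   form.is_valid()  # runs clean_ocf_account and clean_confirm_password above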
autocomplete-edit-cell.ts | import { extend } from '@syncfusion/ej2-base';
import { IEditCell } from '../base/interface';
import { Column } from '../models/column';
import { AutoComplete } from '@syncfusion/ej2-dropdowns';
import { Query, DataManager, DataUtil } from '@syncfusion/ej2-data';
import { isEditable, getComplexFieldID, getObject } from '../base/util';
import { EditCellBase } from './edit-cell-base';
/**
* `AutoCompleteEditCell` is used to handle autocomplete cell type editing.
*
* @hidden
*/
export class | extends EditCellBase implements IEditCell {
private object: AutoComplete;
private column: Column;
public write(args: { rowData: Object, element: Element, column: Column, rowElement: HTMLElement, requestType: string }): void {
this.column = args.column;
const isInlineEdit: boolean = this.parent.editSettings.mode !== 'Dialog';
this.object = new AutoComplete(extend(
{
dataSource: this.parent.dataSource instanceof DataManager ?
this.parent.dataSource : new DataManager(this.parent.dataSource),
query: new Query().select(args.column.field), enabled: isEditable(args.column, args.requestType, args.element),
fields: { value: args.column.field },
value: getObject(args.column.field, args.rowData),
// enableRtl: this.parent.enableRtl,
actionComplete: this.selectedValues.bind(this),
placeholder: isInlineEdit ? '' : args.column.headerText,
floatLabelType: isInlineEdit ? 'Never' : 'Always'
},
args.column.edit.params));
this.object.appendTo(args.element as HTMLElement);
/* tslint:disable-next-line:no-any */
args.element.setAttribute('name', getComplexFieldID(args.column.field));
}
private selectedValues(valObj: {result: Object[] }): void {
valObj.result = DataUtil.distinct(valObj.result, this.object.fields.value, true);
if ((<DataManager>this.column.dataSource)) {
(<DataManager>this.column.dataSource).dataSource.json = valObj.result;
}
}
}
| AutoCompleteEditCell |
multiple_of.rs | use serde_json::Value;
use super::super::errors;
use super::super::scope;
use std::cmp::Ordering;
use std::f64;
#[allow(missing_copy_implementations)]
pub struct MultipleOf {
pub number: f64,
}
impl super::Validator for MultipleOf {
fn validate(&self, val: &Value, path: &str, _scope: &scope::Scope) -> super::ValidationState |
}
| {
let number = nonstrict_process!(val.as_f64(), path);
let valid = if (number.fract() == 0f64) && (self.number.fract() == 0f64) {
(number % self.number) == 0f64
} else {
let remainder: f64 = (number / self.number) % 1f64;
let remainder_less_than_epsilon = match remainder.partial_cmp(&f64::EPSILON) {
None | Some(Ordering::Less) => true,
_ => false,
};
let remainder_less_than_one = remainder < (1f64 - f64::EPSILON);
remainder_less_than_epsilon && remainder_less_than_one
};
if valid {
super::ValidationState::new()
} else {
val_error!(errors::MultipleOf {
path: path.to_string()
})
}
} |
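// Worked example (illustrative): validating 7.5 against MultipleOf { number: 2.5 }
// takes the float branch since 7.5.fract() != 0.0; (7.5 / 2.5) % 1.0 == 0.0, which
// is below f64::EPSILON, so the value is accepted. The extra
// `remainder_less_than_one` guard matters because `partial_cmp` yields `None` for
// a NaN remainder (mapped to `true` above), and NaN must still be rejected.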
symbols.go | // Package symbols is generated by gogll. Do not edit.
package symbols
type Symbol interface{
isSymbol()
IsNonTerminal() bool
String() string
}
func (NT) isSymbol() {}
func (T) isSymbol() {}
// NT is the type of non-terminals symbols
type NT int
const(
NT_Exp NT = iota
NT_Op
)
// T is the type of terminals symbols
type T int | T_2 // |
)
type Symbols []Symbol
func (ss Symbols) Strings() []string {
strs := make([]string, len(ss))
for i, s := range ss {
strs[i] = s.String()
}
return strs
}
func (NT) IsNonTerminal() bool {
return true
}
func (T) IsNonTerminal() bool {
return false
}
func (nt NT) String() string {
return ntToString[nt]
}
func (t T) String() string {
return tToString[t]
}
var ntToString = []string {
"Exp", /* NT_Exp */
"Op", /* NT_Op */
}
var tToString = []string {
"&", /* T_0 */
"id", /* T_1 */
"|", /* T_2 */
}
var stringNT = map[string]NT{
"Exp":NT_Exp,
"Op":NT_Op,
} | const(
T_0 T = iota // &
T_1 // id |
remote.rs | use std::fs::File;
use std::io::{self, ErrorKind, Read};
use std::net::{SocketAddr, TcpStream};
use std::path::Path;
use log::*;
use snafu::{ensure, ResultExt, Snafu};
use ssh2::{Channel, ExtendedData, Session};
// TODO: Probably want to do this more `struct`ured
// TODO: Add time to log
pub type Log = Vec<(String, String)>;
#[derive(Debug, Snafu)]
pub enum Error {
Ssh { source: ssh2::Error },
Io { source: io::Error },
NonZeroReturn { command: String },
}
pub struct Remote {
session: Session,
log: Log,
}
impl Remote {
pub fn connect(
socket_addr: SocketAddr,
user: &str,
private_key_file: &Path,
) -> Result<Remote, Error> {
let tcp = TcpStream::connect(socket_addr).context(Io)?;
let mut session = Session::new().context(Ssh)?;
session.set_tcp_stream(tcp);
session.handshake().context(Ssh)?;
session
.userauth_pubkey_file(user, None, private_key_file, None)
.context(Ssh)?;
Ok(Remote {
session,
log: Vec::new(),
})
}
/// Executes a command on the remote. This blocks until the command finishes and the whole
/// output has been read. The command is executed by the default shell on the remote
/// (probably bash), so commands like `echo 123 && echo abc` are valid.
pub fn execute_command(&mut self, command: &str) -> Result<(), Error> |
// TODO: This currently behaves differently than the normal `execute_command` due to the runner
// implementation detail (bash isn't used for execution). That's also the reason for the
// separate (unergonomic) `env` parameter.
pub fn execute_cancellable_command(
&mut self,
command: &str,
env: &str,
) -> Result<CancellableCommand, Error> {
// TODO: Would like to just send signals over ssh, which is actually part of the SSH
// specification; unfortunately nobody implemented that part for a long time, and
// OpenSSH only did so recently:
// https://github.com/openssh/openssh-portable/commit/cd98925c6405e972dc9f211afc7e75e838abe81c
// The currently deployed OpenSSH version doesn't contain that commit yet, and neither
// does libssh2 support it, so we're stuck with this for now...
// NOTE: Currently we rely on a helper binary (see runner) to achieve this
// NOTE: libssh2's setenv doesn't work here as that would try to set the environment of the
// remote ssh handler which is disabled by default in sshd.conf:
// https://serverfault.com/questions/427522/why-is-acceptenv-considered-insecure
self.log.push((command.to_string(), String::new()));
let mut channel = self.session.channel_session().context(Ssh)?;
channel
.handle_extended_data(ExtendedData::Merge)
.context(Ssh)?;
// Old solution without additional binary
// let command = format!("{} & read -t {}; kill $!", command, timeout_secs);
// Have to start runner with sudo to be able to kill sudo'ed children
let command = format!("{}; sudo runner {}", env, command);
debug!("Executing cancellable command: {}", command);
channel.exec(&command).context(Ssh)?;
Ok(CancellableCommand {
channel,
log: &mut self.log,
session: &mut self.session,
})
}
pub fn upload_file(
&mut self,
local_path: &Path,
remote_path: &Path,
mode: i32,
) -> Result<(), Error> {
debug!(
"Uploading file: {} -> {}",
local_path.display(),
remote_path.display()
);
let mut local_file = File::open(local_path).context(Io)?;
let size = local_file.metadata().context(Io)?.len();
let mut remote_file = self
.session
.scp_send(remote_path, mode, size, None)
.context(Ssh)?;
io::copy(&mut local_file, &mut remote_file).context(Io)?;
Ok(())
}
pub fn download_file(&mut self, remote_path: &Path) -> Result<Vec<u8>, Error> {
debug!("Downloading file {}", remote_path.display());
let (mut remote_file, stat) = self.session.scp_recv(remote_path).context(Ssh)?;
let mut contents = Vec::with_capacity(stat.size() as usize);
remote_file.read_to_end(&mut contents).context(Io)?;
Ok(contents)
}
pub fn into_log(self) -> Log {
self.log
}
}
pub struct CancellableCommand<'a> {
channel: Channel,
session: &'a mut Session,
log: &'a mut Log,
}
impl CancellableCommand<'_> {
pub fn is_running(&mut self) -> bool {
// TODO: This feels like a horrible hack but I'm unable to find another API for this...
self.session.set_blocking(false);
let mut buf = [];
let mut is_running = false;
if let Err(e) = self.channel.read(&mut buf) {
if e.kind() == ErrorKind::WouldBlock {
is_running = true;
}
}
self.session.set_blocking(true);
is_running
}
pub fn cancel(mut self) -> Result<(), Error> {
// Close stdin which causes runner to kill the command
self.channel.send_eof().context(Ssh)?;
let mut output = String::new();
self.channel.read_to_string(&mut output).context(Io)?;
self.channel.wait_close().context(Ssh)?;
// We pushed to log at the start so this can't fail
self.log.last_mut().unwrap().1 = output;
Ok(())
}
}
| {
self.log.push((command.to_string(), String::new()));
let mut channel = self.session.channel_session().context(Ssh)?;
// Merge stderr output into default stream
// We may want to do this more granularly in the future
channel
.handle_extended_data(ExtendedData::Merge)
.context(Ssh)?;
debug!("executing command: {}", command);
channel.exec(command).context(Ssh)?;
let mut output = String::new();
channel.read_to_string(&mut output).context(Io)?;
channel.wait_close().context(Ssh)?;
// We pushed to log at the start so this can't fail
self.log.last_mut().unwrap().1 = output;
ensure!(
channel.exit_status().context(Ssh)? == 0,
NonZeroReturn { command }
);
Ok(())
} |
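// Usage sketch (hypothetical address, user, key path, and commands):
//   let addr: SocketAddr = "203.0.113.7:22".parse().unwrap();
//   let mut remote = Remote::connect(addr, "deploy", Path::new("/home/me/.ssh/id_rsa"))?;
//   remote.execute_command("echo hello && uname -a")?;
//   let cmd = remote.execute_cancellable_command("./bench --forever", "export RUST_LOG=info")?;
//   // ... later: cancel() consumes the handle and records the output in the log
//   cmd.cancel()?;
//   let log = remote.into_log();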
apiparser_parser1.go | package api
import (
"reflect"
"github.com/MockyBang/antlr"
)
// Part 1
// The apiparser_parser.go file was split into multiple files because it
// was too large and caused a possible memory overflow during goctl installation.
func (s *SyntaxLitContext) GetParser() antlr.Parser { return s.parser }
func (s *SyntaxLitContext) GetSyntaxToken() antlr.Token { return s.syntaxToken }
func (s *SyntaxLitContext) GetAssign() antlr.Token { return s.assign }
func (s *SyntaxLitContext) GetVersion() antlr.Token { return s.version }
func (s *SyntaxLitContext) SetSyntaxToken(v antlr.Token) { s.syntaxToken = v }
func (s *SyntaxLitContext) SetAssign(v antlr.Token) { s.assign = v }
func (s *SyntaxLitContext) SetVersion(v antlr.Token) { s.version = v }
func (s *SyntaxLitContext) ID() antlr.TerminalNode {
return s.GetToken(ApiParserParserID, 0)
}
func (s *SyntaxLitContext) STRING() antlr.TerminalNode {
return s.GetToken(ApiParserParserSTRING, 0)
}
func (s *SyntaxLitContext) GetRuleContext() antlr.RuleContext {
return s
}
func (s *SyntaxLitContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
return antlr.TreesStringTree(s, ruleNames, recog)
}
func (s *SyntaxLitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case ApiParserVisitor:
return t.VisitSyntaxLit(s)
default:
return t.VisitChildren(s)
}
}
func (p *ApiParserParser) SyntaxLit() (localctx ISyntaxLitContext) {
localctx = NewSyntaxLitContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 4, ApiParserParserRULE_syntaxLit)
defer func() {
p.ExitRule()
}()
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
}()
p.EnterOuterAlt(localctx, 1)
match(p, "syntax")
{
p.SetState(88)
_m := p.Match(ApiParserParserID)
localctx.(*SyntaxLitContext).syntaxToken = _m
}
{
p.SetState(89)
_m := p.Match(ApiParserParserT__0)
localctx.(*SyntaxLitContext).assign = _m
}
checkVersion(p)
{
p.SetState(91)
_m := p.Match(ApiParserParserSTRING)
localctx.(*SyntaxLitContext).version = _m
}
return localctx
}
// IImportSpecContext is an interface to support dynamic dispatch.
type IImportSpecContext interface {
antlr.ParserRuleContext
// GetParser returns the parser.
GetParser() antlr.Parser
// IsImportSpecContext differentiates from other interfaces.
IsImportSpecContext()
}
type ImportSpecContext struct {
*antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyImportSpecContext() *ImportSpecContext {
p := new(ImportSpecContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = ApiParserParserRULE_importSpec
return p
}
func (*ImportSpecContext) IsImportSpecContext() {}
func NewImportSpecContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ImportSpecContext {
p := new(ImportSpecContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = ApiParserParserRULE_importSpec
return p
}
func (s *ImportSpecContext) GetParser() antlr.Parser { return s.parser }
func (s *ImportSpecContext) ImportLit() IImportLitContext {
t := s.GetTypedRuleContext(reflect.TypeOf((*IImportLitContext)(nil)).Elem(), 0)
if t == nil {
return nil
}
return t.(IImportLitContext)
}
func (s *ImportSpecContext) ImportBlock() IImportBlockContext {
t := s.GetTypedRuleContext(reflect.TypeOf((*IImportBlockContext)(nil)).Elem(), 0)
if t == nil {
return nil
}
return t.(IImportBlockContext)
}
func (s *ImportSpecContext) GetRuleContext() antlr.RuleContext {
return s
}
func (s *ImportSpecContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
return antlr.TreesStringTree(s, ruleNames, recog)
}
func (s *ImportSpecContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case ApiParserVisitor:
return t.VisitImportSpec(s)
default:
return t.VisitChildren(s)
}
}
func (p *ApiParserParser) ImportSpec() (localctx IImportSpecContext) {
localctx = NewImportSpecContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 6, ApiParserParserRULE_importSpec)
defer func() {
p.ExitRule()
}()
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
}()
p.SetState(95)
p.GetErrorHandler().Sync(p)
switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 2, p.GetParserRuleContext()) {
case 1:
p.EnterOuterAlt(localctx, 1)
{
p.SetState(93)
p.ImportLit()
}
case 2:
p.EnterOuterAlt(localctx, 2)
{
p.SetState(94)
p.ImportBlock()
}
}
return localctx
}
// IImportLitContext is an interface to support dynamic dispatch.
type IImportLitContext interface {
antlr.ParserRuleContext
// GetParser returns the parser.
GetParser() antlr.Parser
// GetImportToken returns the importToken token.
GetImportToken() antlr.Token
// SetImportToken sets the importToken token.
SetImportToken(antlr.Token)
// IsImportLitContext differentiates from other interfaces.
IsImportLitContext()
}
type ImportLitContext struct {
*antlr.BaseParserRuleContext
parser antlr.Parser
importToken antlr.Token
}
func NewEmptyImportLitContext() *ImportLitContext {
p := new(ImportLitContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = ApiParserParserRULE_importLit
return p
}
func (*ImportLitContext) IsImportLitContext() {}
func NewImportLitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ImportLitContext {
p := new(ImportLitContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = ApiParserParserRULE_importLit
return p
}
func (s *ImportLitContext) GetParser() antlr.Parser { return s.parser }
func (s *ImportLitContext) GetImportToken() antlr.Token { return s.importToken }
func (s *ImportLitContext) SetImportToken(v antlr.Token) { s.importToken = v }
func (s *ImportLitContext) ImportValue() IImportValueContext {
t := s.GetTypedRuleContext(reflect.TypeOf((*IImportValueContext)(nil)).Elem(), 0)
if t == nil {
return nil
}
return t.(IImportValueContext)
}
func (s *ImportLitContext) ID() antlr.TerminalNode {
return s.GetToken(ApiParserParserID, 0)
}
func (s *ImportLitContext) GetRuleContext() antlr.RuleContext {
return s
}
func (s *ImportLitContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
return antlr.TreesStringTree(s, ruleNames, recog)
}
func (s *ImportLitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case ApiParserVisitor:
return t.VisitImportLit(s)
default:
return t.VisitChildren(s)
}
}
func (p *ApiParserParser) ImportLit() (localctx IImportLitContext) {
localctx = NewImportLitContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 8, ApiParserParserRULE_importLit)
defer func() {
p.ExitRule()
}()
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
}()
p.EnterOuterAlt(localctx, 1)
match(p, "import")
{
p.SetState(98)
_m := p.Match(ApiParserParserID)
localctx.(*ImportLitContext).importToken = _m
}
{
p.SetState(99)
p.ImportValue()
}
return localctx
}
// IImportBlockContext is an interface to support dynamic dispatch.
type IImportBlockContext interface {
antlr.ParserRuleContext
// GetParser returns the parser.
GetParser() antlr.Parser
// GetImportToken returns the importToken token.
GetImportToken() antlr.Token
// SetImportToken sets the importToken token.
SetImportToken(antlr.Token)
// IsImportBlockContext differentiates from other interfaces.
IsImportBlockContext()
}
type ImportBlockContext struct {
*antlr.BaseParserRuleContext
parser antlr.Parser
importToken antlr.Token
}
func NewEmptyImportBlockContext() *ImportBlockContext {
p := new(ImportBlockContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = ApiParserParserRULE_importBlock
return p
}
func (*ImportBlockContext) IsImportBlockContext() {}
func NewImportBlockContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ImportBlockContext {
p := new(ImportBlockContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = ApiParserParserRULE_importBlock
return p
}
func (s *ImportBlockContext) GetParser() antlr.Parser { return s.parser }
func (s *ImportBlockContext) GetImportToken() antlr.Token { return s.importToken }
func (s *ImportBlockContext) SetImportToken(v antlr.Token) { s.importToken = v }
func (s *ImportBlockContext) ID() antlr.TerminalNode {
return s.GetToken(ApiParserParserID, 0)
}
func (s *ImportBlockContext) AllImportBlockValue() []IImportBlockValueContext {
ts := s.GetTypedRuleContexts(reflect.TypeOf((*IImportBlockValueContext)(nil)).Elem())
tst := make([]IImportBlockValueContext, len(ts))
for i, t := range ts {
if t != nil {
tst[i] = t.(IImportBlockValueContext)
}
}
return tst
}
func (s *ImportBlockContext) ImportBlockValue(i int) IImportBlockValueContext {
t := s.GetTypedRuleContext(reflect.TypeOf((*IImportBlockValueContext)(nil)).Elem(), i)
if t == nil {
return nil
}
return t.(IImportBlockValueContext)
}
func (s *ImportBlockContext) GetRuleContext() antlr.RuleContext {
return s
}
func (s *ImportBlockContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
return antlr.TreesStringTree(s, ruleNames, recog)
}
func (s *ImportBlockContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case ApiParserVisitor:
return t.VisitImportBlock(s)
default:
return t.VisitChildren(s)
}
}
func (p *ApiParserParser) ImportBlock() (localctx IImportBlockContext) {
localctx = NewImportBlockContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 10, ApiParserParserRULE_importBlock)
var _la int
defer func() {
p.ExitRule()
}()
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
}()
p.EnterOuterAlt(localctx, 1)
match(p, "import")
{
p.SetState(102)
_m := p.Match(ApiParserParserID)
localctx.(*ImportBlockContext).importToken = _m
}
{
p.SetState(103)
p.Match(ApiParserParserT__1)
}
p.SetState(105)
p.GetErrorHandler().Sync(p)
for ok := true; ok; ok = _la == ApiParserParserSTRING {
{
p.SetState(104)
p.ImportBlockValue()
}
p.SetState(107)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
}
{
p.SetState(109)
p.Match(ApiParserParserT__2)
}
return localctx
}
// IImportBlockValueContext is an interface to support dynamic dispatch.
type IImportBlockValueContext interface {
antlr.ParserRuleContext
// GetParser returns the parser.
GetParser() antlr.Parser
// IsImportBlockValueContext differentiates from other interfaces.
IsImportBlockValueContext()
}
type ImportBlockValueContext struct {
*antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyImportBlockValueContext() *ImportBlockValueContext |
func (*ImportBlockValueContext) IsImportBlockValueContext() {}
func NewImportBlockValueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ImportBlockValueContext {
p := new(ImportBlockValueContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = ApiParserParserRULE_importBlockValue
return p
}
func (s *ImportBlockValueContext) GetParser() antlr.Parser { return s.parser }
func (s *ImportBlockValueContext) ImportValue() IImportValueContext {
t := s.GetTypedRuleContext(reflect.TypeOf((*IImportValueContext)(nil)).Elem(), 0)
if t == nil {
return nil
}
return t.(IImportValueContext)
}
func (s *ImportBlockValueContext) GetRuleContext() antlr.RuleContext {
return s
}
func (s *ImportBlockValueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
return antlr.TreesStringTree(s, ruleNames, recog)
}
func (s *ImportBlockValueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
switch t := visitor.(type) {
case ApiParserVisitor:
return t.VisitImportBlockValue(s)
default:
return t.VisitChildren(s)
}
}
func (p *ApiParserParser) ImportBlockValue() (localctx IImportBlockValueContext) {
localctx = NewImportBlockValueContext(p, p.GetParserRuleContext(), p.GetState())
p.EnterRule(localctx, 12, ApiParserParserRULE_importBlockValue)
defer func() {
p.ExitRule()
}()
defer func() {
if err := recover(); err != nil {
if v, ok := err.(antlr.RecognitionException); ok {
localctx.SetException(v)
p.GetErrorHandler().ReportError(p, v)
p.GetErrorHandler().Recover(p, v)
} else {
panic(err)
}
}
}()
p.EnterOuterAlt(localctx, 1)
{
p.SetState(111)
p.ImportValue()
}
return localctx
}
// IImportValueContext is an interface to support dynamic dispatch.
type IImportValueContext interface {
antlr.ParserRuleContext
// GetParser returns the parser.
GetParser() antlr.Parser
// IsImportValueContext differentiates from other interfaces.
IsImportValueContext()
}
type ImportValueContext struct {
*antlr.BaseParserRuleContext
parser antlr.Parser
}
func NewEmptyImportValueContext() *ImportValueContext {
p := new(ImportValueContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = ApiParserParserRULE_importValue
return p
}
func (*ImportValueContext) IsImportValueContext() {}
func NewImportValueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ImportValueContext {
p := new(ImportValueContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
p.parser = parser
p.RuleIndex = ApiParserParserRULE_importValue
return p
}
| {
p := new(ImportBlockValueContext)
p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
p.RuleIndex = ApiParserParserRULE_importBlockValue
return p
} |
mod.rs | //! The main database module, allowing to interface with leveldb on
//! a key-value basis.
extern crate db_key as key;
use leveldb_sys::*;
use self::options::{Options, c_options};
use self::error::Error;
use std::ffi::CString;
use libc::c_char;
#[cfg(unix)]
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::ptr;
use comparator::{Comparator, create_comparator};
use self::key::Key;
use std::marker::PhantomData;
pub mod options;
pub mod error;
pub mod iterator;
pub mod comparator;
pub mod snapshots;
pub mod cache;
pub mod kv;
pub mod batch;
pub mod management;
pub mod compaction;
pub mod bytes;
#[allow(missing_docs)]
struct RawDB {
ptr: *mut leveldb_t,
}
#[allow(missing_docs)]
impl Drop for RawDB {
fn drop(&mut self) {
unsafe {
leveldb_close(self.ptr);
}
}
}
#[allow(missing_docs)]
struct RawComparator {
ptr: *mut leveldb_comparator_t,
}
impl Drop for RawComparator {
fn drop(&mut self) {
unsafe {
leveldb_comparator_destroy(self.ptr);
}
}
}
/// The main database object.
///
/// leveldb databases are based on ordered keys. By default, leveldb orders
/// by the binary value of the key. Additionally, a custom `Comparator` can
/// be passed when opening the database. This library ships with a `Comparator`
/// implementation for keys that are `Ord`.
///
/// When re-opening a database, you must use the same key type `K` and
/// comparator type `C`.
///
/// Multiple Database objects can be kept around, as leveldb synchronises
/// internally.
pub struct Database<K: Key> {
database: RawDB,
// this holds a reference passed into leveldb
// it is never read from Rust, but must be kept around
#[allow(dead_code)]
comparator: Option<RawComparator>,
// these hold multiple references that are used by the leveldb library
// and should survive as long as the database lives
#[allow(dead_code)]
options: Options,
marker: PhantomData<K>,
}
unsafe impl<K: Key> Sync for Database<K> {}
unsafe impl<K: Key> Send for Database<K> {}
impl<K: Key> Database<K> {
fn new(database: *mut leveldb_t,
options: Options,
comparator: Option<*mut leveldb_comparator_t>)
-> Database<K> {
let raw_comp = match comparator {
Some(p) => Some(RawComparator { ptr: p }),
None => None,
};
Database {
database: RawDB { ptr: database },
comparator: raw_comp,
options: options,
marker: PhantomData,
}
}
/// Open a new database
///
/// If the database is missing, the behaviour depends on `options.create_if_missing`.
/// The database will be created using the settings given in `options`.
pub fn | (name: &Path, options: Options) -> Result<Database<K>, Error> {
let mut error = ptr::null_mut();
unsafe {
#[cfg(unix)]
let c_string = CString::new(name.as_os_str().as_bytes()).unwrap();
#[cfg(not(unix))]
let c_string = CString::new(
name.to_str()
.ok_or_else(|| Error::new("Path is not valid Unicode".into()))?,
)
.unwrap();
let c_options = c_options(&options, None);
let db = leveldb_open(c_options as *const leveldb_options_t,
c_string.as_bytes_with_nul().as_ptr() as *const c_char,
&mut error);
leveldb_options_destroy(c_options);
if error == ptr::null_mut() {
Ok(Database::new(db, options, None))
} else {
Err(Error::new_from_char(error))
}
}
}
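    // A minimal sketch of opening a database (assumes a key type with a
    // `db_key::Key` impl and a writable path; values are illustrative):
    //   let mut options = Options::new();
    //   options.create_if_missing = true;
    //   let db = Database::<i32>::open(Path::new("/tmp/example-db"), options)?;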
/// Open a new database with a custom comparator
///
/// If the database is missing, the behaviour depends on `options.create_if_missing`.
/// The database will be created using the settings given in `options`.
///
/// The comparator must implement a total ordering over the keyspace.
///
/// For keys that implement Ord, consider the `OrdComparator`.
pub fn open_with_comparator<C: Comparator<K = K>>(name: &Path,
options: Options,
comparator: C)
-> Result<Database<K>, Error> {
let mut error = ptr::null_mut();
let comp_ptr = create_comparator(Box::new(comparator));
unsafe {
let c_string = CString::new(name.to_str().unwrap()).unwrap();
let c_options = c_options(&options, Some(comp_ptr));
let db = leveldb_open(c_options as *const leveldb_options_t,
c_string.as_bytes_with_nul().as_ptr() as *const c_char,
&mut error);
leveldb_options_destroy(c_options);
if error == ptr::null_mut() {
Ok(Database::new(db, options, Some(comp_ptr)))
} else {
Err(Error::new_from_char(error))
}
}
}
}
| open |
test_ofswitch.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ofswitch
from neutron.tests import base
class FakeReply(object):
def | (self, type):
self.type = type
class TestBundledOpenFlowBridge(base.BaseTestCase):
def setUp(self):
super(TestBundledOpenFlowBridge, self).setUp()
br = mock.Mock(spec=['install_instructions', 'foo'])
br._get_dp = lambda: (mock.Mock(), ofproto_v1_3, ofproto_v1_3_parser)
br.active_bundles = set()
self.br = ofswitch.BundledOpenFlowBridge(br, False, False)
def test_method_calls(self):
self.br.install_instructions(dummy_arg=1)
self.br.br.install_instructions.assert_called_once_with(dummy_arg=1)
def test_illegal_method_calls(self):
# With python3, this can be written as "with assertRaises..."
try:
self.br.uninstall_foo()
self.fail("Expected an exception")
except Exception as e:
self.assertIsInstance(e, AttributeError)
try:
self.br.foo()
self.fail("Expected an exception")
except Exception as e:
self.assertIsInstance(e, AttributeError)
def test_normal_bundle_context(self):
self.assertIsNone(self.br.active_bundle)
self.br.br._send_msg = mock.Mock(side_effect=[
FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
FakeReply(ofproto_v1_3.ONF_BCT_COMMIT_REPLY)])
with self.br:
self.assertIsNotNone(self.br.active_bundle)
# Do nothing
# Assert that the active bundle is gone
self.assertIsNone(self.br.active_bundle)
def test_aborted_bundle_context(self):
self.assertIsNone(self.br.active_bundle)
self.br.br._send_msg = mock.Mock(side_effect=[
FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
FakeReply(ofproto_v1_3.ONF_BCT_DISCARD_REPLY)])
try:
with self.br:
self.assertIsNotNone(self.br.active_bundle)
raise Exception()
except Exception:
pass
# Assert that the active bundle is gone
self.assertIsNone(self.br.active_bundle)
self.assertEqual(2, len(self.br.br._send_msg.mock_calls))
args, kwargs = self.br.br._send_msg.call_args_list[0]
self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST,
args[0].type)
args, kwargs = self.br.br._send_msg.call_args_list[1]
self.assertEqual(ofproto_v1_3.ONF_BCT_DISCARD_REQUEST,
args[0].type)
def test_bundle_context_with_error(self):
self.assertIsNone(self.br.active_bundle)
self.br.br._send_msg = mock.Mock(side_effect=[
FakeReply(ofproto_v1_3.ONF_BCT_OPEN_REPLY),
RuntimeError])
try:
with self.br:
saved_bundle_id = self.br.active_bundle
self.assertIsNotNone(self.br.active_bundle)
self.fail("Expected an exception")
except RuntimeError:
pass
# Assert that the active bundle is gone
self.assertIsNone(self.br.active_bundle)
self.assertIn(saved_bundle_id, self.br.br.active_bundles)
self.assertEqual(2, len(self.br.br._send_msg.mock_calls))
args, kwargs = self.br.br._send_msg.call_args_list[0]
self.assertEqual(ofproto_v1_3.ONF_BCT_OPEN_REQUEST,
args[0].type)
args, kwargs = self.br.br._send_msg.call_args_list[1]
self.assertEqual(ofproto_v1_3.ONF_BCT_COMMIT_REQUEST,
args[0].type)
| __init__ |
fork_no_p_eps.go | package stmgr
import (
"context"
"github.com/filecoin-project/go-amt-ipld"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-hamt-ipld"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/address"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
)
func (sm *StateManager) forkNoPowerEPS(ctx context.Context, pstate cid.Cid) (cid.Cid, error) {
cst := hamt.CSTFromBstore(sm.cs.Blockstore())
st, err := state.LoadStateTree(cst, pstate)
if err != nil {
return cid.Undef, xerrors.Errorf("loading parent state tree: %w", err)
}
if err := st.MutateActor(actors.StoragePowerAddress, func(spa *types.Actor) error {
var head actors.StoragePowerState
if err := cst.Get(ctx, spa.Head, &head); err != nil {
return xerrors.Errorf("reading StoragePower state: %w", err)
}
buckets, err := amt.LoadAMT(amt.WrapBlockstore(sm.cs.Blockstore()), head.ProvingBuckets)
if err != nil {
return xerrors.Errorf("opening proving buckets AMT: %w", err)
}
fixedBuckets := map[uint64]map[address.Address]struct{}{}
if err := buckets.ForEach(func(bucketId uint64, ent *typegen.Deferred) error {
var bcid cid.Cid
if err := cbor.DecodeInto(ent.Raw, &bcid); err != nil {
return xerrors.Errorf("decoding bucket cid: %w", err)
}
bucket, err := hamt.LoadNode(ctx, cst, bcid)
if err != nil {
return xerrors.Errorf("loading bucket hamt: %w", err)
}
return bucket.ForEach(ctx, func(abytes string, _ interface{}) error {
addr, err := address.NewFromBytes([]byte(abytes))
if err != nil {
return xerrors.Errorf("parsing address in proving bucket: %w", err)
}
// now find the correct bucket
miner, err := st.GetActor(addr)
if err != nil {
return xerrors.Errorf("getting miner %s: %w", addr, err)
}
var minerHead actors.StorageMinerActorState
if err := cst.Get(ctx, miner.Head, &minerHead); err != nil {
return xerrors.Errorf("reading miner %s state: %w", addr, err)
}
|
if _, ok := fixedBuckets[correctBucket]; !ok {
fixedBuckets[correctBucket] = map[address.Address]struct{}{}
}
fixedBuckets[correctBucket][addr] = struct{}{}
return nil
})
}); err != nil {
return err
}
// /////
// Write fixed buckets
fixed := amt.NewAMT(amt.WrapBlockstore(sm.cs.Blockstore()))
for bucketId, addrss := range fixedBuckets {
bucket := hamt.NewNode(cst)
for addr := range addrss {
if err := bucket.Set(ctx, string(addr.Bytes()), actors.CborNull); err != nil {
return xerrors.Errorf("setting address in bucket: %w", err)
}
}
if err := bucket.Flush(ctx); err != nil {
return xerrors.Errorf("flushing bucket amt: %w", err)
}
bcid, err := cst.Put(context.TODO(), bucket)
if err != nil {
return xerrors.Errorf("put bucket: %w", err)
}
if err := fixed.Set(bucketId, bcid); err != nil {
return xerrors.Errorf("set bucket: %w", err)
}
}
head.ProvingBuckets, err = fixed.Flush()
if err != nil {
return xerrors.Errorf("flushing bucket amt: %w", err)
}
spa.Head, err = cst.Put(ctx, &head)
if err != nil {
return xerrors.Errorf("putting actor head: %w", err)
}
return nil
}); err != nil {
return cid.Undef, err
}
return st.Flush()
} | correctBucket := minerHead.ElectionPeriodStart % build.SlashablePowerDelay
if correctBucket != bucketId {
log.Warnf("miner %s was in wrong proving bucket %d, putting in %d (eps: %d)", addr, bucketId, correctBucket, minerHead.ElectionPeriodStart)
} |
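// Bucket arithmetic, for illustration (hypothetical numbers): with
// ElectionPeriodStart = 1234 and build.SlashablePowerDelay = 200, the correct
// bucket is 1234 % 200 = 34; any miner found under a different bucket id is
// logged and moved there by the migration above.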
fake_data.js | // data
const proffys = [
{
name: "Diego Fernandes",
avatar: "https://github.com/diego3g.png",
whatsapp: "900000000",
bio: "Entusiasta das melhores tecnologias de química avançada. Apaixonado por explodir coisas em laboratório e por mudar a vida das pessoas através de experiências. Mais de 200.00 pessoas já passaram por uma das minhas explosões.",
subject: "Química",
cost: "20",
weekday: [0],
time_from: [720],
time_to: [1200]
},
{
name: "Mayk Brito",
avatar: "https://github.com/maykbrito.png",
whatsapp: "900000000", | bio: "Entusiasta das melhores tecnologias de química avançada. Apaixonado por explodir coisas em laboratório e por mudar a vida das pessoas através de experiências. Mais de 200.00 pessoas já passaram por uma das minhas explosões.",
subject: "Química",
cost: "20",
weekday: [1],
time_from: [720],
time_to: [1200]
}
] |