| file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k) |
|---|---|---|---|
paper.rs | type Dot = (usize, usize);
pub struct PaperBuilder {
dots: Vec<Dot>,
}
impl PaperBuilder {
pub fn new() -> PaperBuilder {
PaperBuilder {
dots: vec![],
}
}
pub fn add_dot(&mut self, x: usize, y: usize) {
self.dots.push((x, y));
}
pub fn build(self) -> Paper {
Paper::new(self.dots)
}
}
pub struct Paper {
dots: Vec<Dot>,
}
impl Paper {
pub fn new(dots: Vec<Dot>) -> Paper {
Paper {
dots,
}
}
pub fn len(&self) -> usize {
self.dots.len()
}
pub fn fold_horizontal(self, row: usize) -> Paper {
self.fold(row, fold_dot_horizontal)
}
pub fn fold_vertical(self, column: usize) -> Paper {
self.fold(column, fold_dot_vertical)
}
fn fold(self, offset: usize, fold_dot: fn(Dot, usize) -> Dot) -> Paper {
let mut folded_dots = vec![];
let mut add_dot = |d: Dot| {
if !folded_dots.contains(&d) {
folded_dots.push(d);
}
};
for dot in self.dots.into_iter() {
let folded_dot = fold_dot(dot, offset);
add_dot(folded_dot);
}
Paper::new(folded_dots)
}
}
// Reflect a dot across the horizontal fold line at `row`; dots above the line are unchanged.
fn fold_dot_horizontal(dot: Dot, row: usize) -> Dot {
    if dot.1 > row {
        let offset = (dot.1 - row) * 2;
        (dot.0, dot.1 - offset)
    } else if dot.1 < row {
        dot
    } else {
        panic!("Folding on a dot is not supported - row {} intersects with dot ({},{})", row, dot.0, dot.1)
    }
}
// Reflect a dot across the vertical fold line at `column`; dots left of the line are unchanged.
fn fold_dot_vertical(dot: Dot, column: usize) -> Dot {
    if dot.0 > column {
        let offset = (dot.0 - column) * 2;
        (dot.0 - offset, dot.1)
    } else if dot.0 < column {
        dot
    } else {
        panic!("Folding on a dot is not supported - column {} intersects with dot ({},{})", column, dot.0, dot.1)
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
    fn test_flip_horizontal_demo_1() {
let mut builder = PaperBuilder::new();
builder.add_dot(6, 10);
builder.add_dot(0, 14);
builder.add_dot(9, 10);
builder.add_dot(0, 3);
builder.add_dot(10, 4);
builder.add_dot(4, 11);
builder.add_dot(6, 0);
builder.add_dot(6, 12);
builder.add_dot(4, 1);
builder.add_dot(0, 13);
builder.add_dot(10, 12);
builder.add_dot(3, 4);
builder.add_dot(3, 0);
builder.add_dot(8, 4);
builder.add_dot(1, 10);
builder.add_dot(2, 14);
builder.add_dot(8, 10);
builder.add_dot(9, 0);
let paper = builder.build();
let folded = paper.fold_horizontal(7);
assert_eq!(17, folded.dots.len());
}
#[test]
fn test_flip_vertical_demo_1() {
let mut builder = PaperBuilder::new();
builder.add_dot(0, 0);
builder.add_dot(2, 0);
builder.add_dot(3, 0);
builder.add_dot(6, 0);
builder.add_dot(9, 0);
builder.add_dot(0, 1);
builder.add_dot(4, 1);
builder.add_dot(6, 2);
builder.add_dot(10, 2);
builder.add_dot(0, 3);
builder.add_dot(4, 3);
builder.add_dot(1, 4);
builder.add_dot(3, 4);
builder.add_dot(6, 4);
builder.add_dot(8, 4);
builder.add_dot(9, 4);
builder.add_dot(10, 4);
let paper = builder.build();
let folded = paper.fold_vertical(5);
assert_eq!(16, folded.dots.len());
}
}
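A minimal sketch of how the fold arithmetic in `paper.rs` behaves, assuming the `Paper` and `PaperBuilder` types above are in scope; the coordinates below are illustrative, not taken from the dataset.

```rust
fn main() {
    let mut builder = PaperBuilder::new();
    builder.add_dot(0, 14); // below the fold line at row 7
    builder.add_dot(6, 0);  // above the fold line, left unchanged
    let folded = builder.build().fold_horizontal(7);
    // (0, 14) is reflected to (0, 14 - (14 - 7) * 2) = (0, 0),
    // while (6, 0) stays put, so two distinct dots remain.
    assert_eq!(2, folded.len());
}
```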
process.go | package sanitycheck
import (
"context"
"time"
"github.com/golang/glog"
rapi "github.com/IBM/operator-for-redis-cluster/api/v1alpha1"
"github.com/IBM/operator-for-redis-cluster/pkg/config"
"github.com/IBM/operator-for-redis-cluster/pkg/controller/pod"
"github.com/IBM/operator-for-redis-cluster/pkg/redis"
)
// RunSanityChecks runs all the sanity checks on the current cluster.
// It returns actionDone = true if a modification has been made on the cluster.
func RunSanityChecks(ctx context.Context, admin redis.AdminInterface, config *config.Redis, podControl pod.RedisClusterControlInterface, cluster *rapi.RedisCluster, infos *redis.ClusterInfos, dryRun bool) (actionDone bool, err error) {
if cluster.Status.Cluster.Status == rapi.ClusterStatusRollingUpdate {
return false, nil
}
// * fix failed nodes: in some cases (cluster without enough primary after crash or scale down), some nodes may still know about fail nodes
if actionDone, err = FixFailedNodes(ctx, admin, cluster, infos, dryRun); err != nil {
return actionDone, err
} else if actionDone {
glog.V(2).Infof("FixFailedNodes executed an action on the cluster (dryRun: %v)", dryRun)
return actionDone, nil
}
// forget nodes and delete pods when a redis node is untrusted.
if actionDone, err = FixUntrustedNodes(ctx, admin, podControl, cluster, infos, dryRun); err != nil {
return actionDone, err
} else if actionDone {
glog.V(2).Infof("FixUntrustedNodes executed an action on the cluster (dryRun: %v)", dryRun)
return actionDone, nil
}
// delete pods that are stuck in terminating state
if actionDone, err = FixTerminatingPods(cluster, podControl, 5*time.Minute, dryRun); err != nil {
return actionDone, err
} else if actionDone {
glog.V(2).Infof("FixTerminatingPods executed an action on the cluster (dryRun: %v)", dryRun)
return actionDone, nil
}
// detect and fix cluster split
	if actionDone, err = FixClusterSplit(ctx, admin, config, infos, dryRun); err != nil {
		return actionDone, err
	} else if actionDone {
		glog.V(2).Infof("FixClusterSplit executed an action on the cluster (dryRun: %v)", dryRun)
		return actionDone, nil
	}
	return actionDone, err
}
udp.go | package socket
import (
"fmt"
"io"
"net"
logging "github.com/op/go-logging"
)
var logger = logging.MustGetLogger("rendez-vous")
// UDP is a thin wrapper around a net.PacketConn socket
type UDP struct {
conn net.PacketConn
}
//Close the socket
func (u *UDP) Close() error {
return u.conn.Close()
}
// Conn returns the underlying net.PacketConn
func (u *UDP) Conn() net.PacketConn {
return u.conn
}
//LocalAddr of the underlying conn
func (u *UDP) LocalAddr() net.Addr {
return u.conn.LocalAddr()
}
// Handler handles an incoming message from a remote address
type Handler func(data []byte, remote net.Addr) error
// Write sends data to the given remote address on the underlying conn
func (u *UDP) Write(data []byte, remote net.Addr) (int, error) {
return u.conn.WriteTo(data, remote)
}
// Listen loops reading incoming packets and invokes the handler for each message; it returns io.EOF on a permanent read error
func (u *UDP) Listen(h Handler) error {
conn := u.conn
var b [0x10000]byte
for {
n, addr, readErr := conn.ReadFrom(b[:])
if readErr == nil && n == len(b) {
			readErr = fmt.Errorf("received packet exceeds buffer size %d", len(b))
}
if readErr != nil {
			if x, ok := readErr.(*net.OpError); ok && x.Temporary() == false {
				return io.EOF
			}
			logger.Errorf("read error: %#v\n", readErr)
			continue
		} else if h != nil {
			x := make([]byte, n)
			copy(x, b[:n])
			go func(remote net.Addr, data []byte) {
				if err := h(data, remote); err != nil {
					logger.Error("handling error:", err)
				}
			}(addr, x)
		}
	}
}
test_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pkg_resources
import unittest
from datetime import datetime, timedelta
from pylons import tmpl_context as c
from tg import config
import mock
from alluratest.controller import setup_basic_test, setup_global_objects, setup_trove_categories
from allura.tests import decorators as td
from allura.model import User, Project, TroveCategory
from allura.lib import helpers as h
from allura import model as M
from forgeuserstats.model import stats as USM

# important to be distinct from 'test' which ForgeGit uses, so that the
# tests can run in parallel and not clobber each other
test_project_with_repo = 'test2'
with_git = td.with_tool(test_project_with_repo, 'Git',
'src-git', 'Git', type='git')
class TestUserStats(unittest.TestCase):
def setUp(self):
setup_basic_test()
setup_global_objects()
self.user = User.by_username('test-user-2')
c.user = self.user
def test_init_values(self):
artifacts = self.user.stats.getArtifacts()
tickets = self.user.stats.getTickets()
commits = self.user.stats.getCommits()
assert self.user.stats.tot_logins_count == 0
assert artifacts['created'] == 0
assert artifacts['modified'] == 0
assert tickets['assigned'] == 0
assert tickets['solved'] == 0
assert tickets['revoked'] == 0
assert tickets['averagesolvingtime'] is None
assert commits['number'] == 0
assert commits['lines'] == 0
lmartifacts = self.user.stats.getLastMonthArtifacts()
lmtickets = self.user.stats.getLastMonthTickets()
lmcommits = self.user.stats.getLastMonthCommits()
assert self.user.stats.getLastMonthLogins() == 0
assert lmartifacts['created'] == 0
assert lmartifacts['modified'] == 0
assert lmtickets['assigned'] == 0
assert lmtickets['solved'] == 0
assert lmtickets['revoked'] == 0
assert lmtickets['averagesolvingtime'] is None
assert lmcommits['number'] == 0
assert lmcommits['lines'] == 0
@td.with_user_project('test-user-2')
def test_create_artifact_stats(self):
setup_trove_categories()
p = Project.query.get(shortname='u/test-user-2')
topic = TroveCategory.query.get(shortname='scientific')
init_lm_art = self.user.stats.getLastMonthArtifacts()
init_art = self.user.stats.getArtifacts()
init_art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
init_art_by_type = self.user.stats.getArtifactsByType()
init_lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
init_art_sci = self.user.stats.getArtifacts(category=topic._id)
self.user.stats.addNewArtifact('Wiki', datetime.utcnow(), p)
lm_art = self.user.stats.getLastMonthArtifacts()
artifacts = self.user.stats.getArtifacts()
art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
art_by_type = self.user.stats.getArtifactsByType()
lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
assert lm_art['created'] == init_lm_art['created'] + 1
assert lm_art['modified'] == init_lm_art['modified']
assert artifacts['created'] == init_art['created'] + 1
assert artifacts['modified'] == init_art['modified']
assert art_wiki['created'] == init_art_wiki['created'] + 1
assert art_wiki['modified'] == init_art_wiki['modified']
assert art_by_type['Wiki'][
'created'] == init_art_by_type['Wiki']['created'] + 1
assert art_by_type['Wiki'][
'modified'] == init_art_by_type['Wiki']['modified']
assert lm_art_by_type['Wiki'][
'created'] == init_lm_art_by_type['Wiki']['created'] + 1
assert lm_art_by_type['Wiki'][
'modified'] == init_lm_art_by_type['Wiki']['modified']
        # The artifact below is older than one month, so last-month stats should not change
new_date = datetime.utcnow() + timedelta(-32)
self.user.stats.addNewArtifact('Wiki', new_date, p)
lm_art = self.user.stats.getLastMonthArtifacts()
artifacts = self.user.stats.getArtifacts()
art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
art_by_type = self.user.stats.getArtifactsByType()
lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
assert lm_art['created'] == init_lm_art['created'] + 1
assert lm_art['modified'] == init_lm_art['modified']
assert artifacts['created'] == init_art['created'] + 2
assert artifacts['modified'] == init_art['modified']
assert art_wiki['created'] == init_art_wiki['created'] + 2
assert art_wiki['modified'] == init_art_wiki['modified']
assert art_by_type['Wiki'][
'created'] == init_art_by_type['Wiki']['created'] + 2
assert art_by_type['Wiki'][
'modified'] == init_art_by_type['Wiki']['modified']
assert lm_art_by_type['Wiki'][
'created'] == init_lm_art_by_type['Wiki']['created'] + 1
assert lm_art_by_type['Wiki'][
'modified'] == init_lm_art_by_type['Wiki']['modified']
p.trove_topic = [topic._id]
self.user.stats.addNewArtifact('Wiki', datetime.utcnow(), p)
lm_art = self.user.stats.getLastMonthArtifacts()
artifacts = self.user.stats.getArtifacts()
art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
art_by_type = self.user.stats.getArtifactsByType()
lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
art_sci = self.user.stats.getArtifacts(category=topic._id)
art_by_cat = self.user.stats.getArtifactsByCategory(detailed=True)
assert lm_art['created'] == init_lm_art['created'] + 2
assert lm_art['modified'] == init_lm_art['modified']
assert artifacts['created'] == init_art['created'] + 3
assert artifacts['modified'] == init_art['modified']
assert art_wiki['created'] == init_art_wiki['created'] + 3
assert art_wiki['modified'] == init_art_wiki['modified']
assert art_by_type['Wiki'][
'created'] == init_art_by_type['Wiki']['created'] + 3
assert art_by_type['Wiki'][
'modified'] == init_art_by_type['Wiki']['modified']
assert lm_art_by_type['Wiki'][
'created'] == init_lm_art_by_type['Wiki']['created'] + 2
assert lm_art_by_type['Wiki'][
'modified'] == init_lm_art_by_type['Wiki']['modified']
assert art_sci['created'] == init_art_sci['created'] + 1
assert art_sci['modified'] == init_art_sci['modified']
assert dict(messagetype='Wiki', created=1,
modified=0) in art_by_cat[topic]
art_by_cat = self.user.stats.getArtifactsByCategory(detailed=False)
assert art_by_cat[topic]['created'] == 1 and art_by_cat[
topic]['modified'] == 0
@td.with_user_project('test-user-2')
def test_modify_artifact_stats(self):
setup_trove_categories()
p = Project.query.get(shortname='u/test-user-2')
topic = TroveCategory.query.get(shortname='scientific')
init_lm_art = self.user.stats.getLastMonthArtifacts()
init_art = self.user.stats.getArtifacts()
init_art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
init_art_by_type = self.user.stats.getArtifactsByType()
init_lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
init_art_sci = self.user.stats.getArtifacts(category=topic._id)
self.user.stats.addModifiedArtifact('Wiki', datetime.utcnow(), p)
lm_art = self.user.stats.getLastMonthArtifacts()
artifacts = self.user.stats.getArtifacts()
art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
art_by_type = self.user.stats.getArtifactsByType()
lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
assert lm_art['created'] == init_lm_art['created']
assert lm_art['modified'] == init_lm_art['modified'] + 1
assert artifacts['created'] == init_art['created']
assert artifacts['modified'] == init_art['modified'] + 1
assert art_wiki['created'] == init_art_wiki['created']
assert art_wiki['modified'] == init_art_wiki['modified'] + 1
assert art_by_type['Wiki'][
'created'] == init_art_by_type['Wiki']['created']
assert art_by_type['Wiki'][
'modified'] == init_art_by_type['Wiki']['modified'] + 1
assert lm_art_by_type['Wiki'][
'created'] == init_lm_art_by_type['Wiki']['created']
assert lm_art_by_type['Wiki'][
'modified'] == init_lm_art_by_type['Wiki']['modified'] + 1
        # The modification below is older than one month, so last-month stats should not change
new_date = datetime.utcnow() + timedelta(-32)
self.user.stats.addModifiedArtifact('Wiki', new_date, p)
lm_art = self.user.stats.getLastMonthArtifacts()
artifacts = self.user.stats.getArtifacts()
art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
art_by_type = self.user.stats.getArtifactsByType()
lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
assert lm_art['created'] == init_lm_art['created']
assert lm_art['modified'] == init_lm_art['modified'] + 1
assert artifacts['created'] == init_art['created']
assert artifacts['modified'] == init_art['modified'] + 2
assert art_wiki['created'] == init_art_wiki['created']
assert art_wiki['modified'] == init_art_wiki['modified'] + 2
assert art_by_type['Wiki'][
'created'] == init_art_by_type['Wiki']['created']
assert art_by_type['Wiki'][
'modified'] == init_art_by_type['Wiki']['modified'] + 2
assert lm_art_by_type['Wiki'][
'created'] == init_lm_art_by_type['Wiki']['created']
assert lm_art_by_type['Wiki'][
'modified'] == init_lm_art_by_type['Wiki']['modified'] + 1
p.trove_topic = [topic._id]
self.user.stats.addModifiedArtifact('Wiki', datetime.utcnow(), p)
lm_art = self.user.stats.getLastMonthArtifacts()
artifacts = self.user.stats.getArtifacts()
art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
art_by_type = self.user.stats.getArtifactsByType()
lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
art_sci = self.user.stats.getArtifacts(category=topic._id)
art_by_cat = self.user.stats.getArtifactsByCategory(detailed=True)
assert lm_art['created'] == init_lm_art['created']
assert lm_art['modified'] == init_lm_art['modified'] + 2
assert artifacts['created'] == init_art['created']
assert artifacts['modified'] == init_art['modified'] + 3
assert art_wiki['created'] == init_art_wiki['created']
assert art_wiki['modified'] == init_art_wiki['modified'] + 3
assert art_by_type['Wiki'][
'created'] == init_art_by_type['Wiki']['created']
assert art_by_type['Wiki'][
'modified'] == init_art_by_type['Wiki']['modified'] + 3
assert lm_art_by_type['Wiki'][
'created'] == init_lm_art_by_type['Wiki']['created']
assert lm_art_by_type['Wiki'][
'modified'] == init_lm_art_by_type['Wiki']['modified'] + 2
assert art_sci['created'] == init_art_sci['created']
assert art_sci['modified'] == init_art_sci['modified'] + 1
assert dict(messagetype='Wiki', created=0,
modified=1) in art_by_cat[topic]
art_by_cat = self.user.stats.getArtifactsByCategory(detailed=False)
assert art_by_cat[topic]['created'] == 0 and art_by_cat[
topic]['modified'] == 1
@td.with_user_project('test-user-2')
def test_ticket_stats(self):
setup_trove_categories()
p = Project.query.get(shortname='u/test-user-2')
topic = TroveCategory.query.get(shortname='scientific')
create_time = datetime.utcnow() + timedelta(-5)
init_lm_tickets_art = self.user.stats.getLastMonthArtifacts(
art_type='Ticket')
init_tickets_art = self.user.stats.getArtifacts(art_type='Ticket')
init_tickets_sci_art = self.user.stats.getArtifacts(category=topic._id)
init_tickets = self.user.stats.getTickets()
init_lm_tickets = self.user.stats.getLastMonthTickets()
self.user.stats.addNewArtifact('Ticket', create_time, p)
lm_tickets_art = self.user.stats.getLastMonthArtifacts(
art_type='Ticket')
tickets_art = self.user.stats.getArtifacts(art_type='Ticket')
tickets_sci_art = self.user.stats.getArtifacts(category=topic._id)
assert lm_tickets_art['created'] == init_lm_tickets_art['created'] + 1
assert lm_tickets_art['modified'] == init_lm_tickets_art['modified']
assert tickets_art['created'] == init_tickets_art['created'] + 1
assert tickets_art['modified'] == init_tickets_art['modified']
        assert tickets_sci_art['created'] == init_tickets_sci_art['created']
        assert tickets_sci_art['modified'] == init_tickets_sci_art['modified']
p.trove_topic = [topic._id]
self.user.stats.addAssignedTicket(create_time, p)
tickets = self.user.stats.getTickets()
lm_tickets = self.user.stats.getLastMonthTickets()
assert tickets['assigned'] == init_tickets['assigned'] + 1
assert tickets['revoked'] == init_tickets['revoked']
assert tickets['solved'] == init_tickets['solved']
assert tickets['averagesolvingtime'] is None
assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
assert lm_tickets['revoked'] == init_lm_tickets['revoked']
assert lm_tickets['solved'] == init_lm_tickets['solved']
assert lm_tickets['averagesolvingtime'] is None
self.user.stats.addRevokedTicket(create_time + timedelta(-32), p)
tickets = self.user.stats.getTickets()
assert tickets['assigned'] == init_tickets['assigned'] + 1
assert tickets['revoked'] == init_tickets['revoked'] + 1
assert tickets['solved'] == init_tickets['solved']
assert tickets['averagesolvingtime'] is None
assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
assert lm_tickets['revoked'] == init_lm_tickets['revoked']
assert lm_tickets['solved'] == init_lm_tickets['solved']
assert lm_tickets['averagesolvingtime'] is None
self.user.stats.addClosedTicket(
create_time, create_time + timedelta(1), p)
tickets = self.user.stats.getTickets()
lm_tickets = self.user.stats.getLastMonthTickets()
assert tickets['assigned'] == init_tickets['assigned'] + 1
assert tickets['revoked'] == init_tickets['revoked'] + 1
assert tickets['solved'] == init_tickets['solved'] + 1
solving_time = dict(seconds=0, minutes=0, days=1, hours=0)
assert tickets['averagesolvingtime'] == solving_time
assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
assert lm_tickets['revoked'] == init_lm_tickets['revoked']
assert lm_tickets['solved'] == init_lm_tickets['solved'] + 1
assert lm_tickets['averagesolvingtime'] == solving_time
p.trove_topic = []
self.user.stats.addClosedTicket(
create_time, create_time + timedelta(3), p)
tickets = self.user.stats.getTickets()
lm_tickets = self.user.stats.getLastMonthTickets()
solving_time = dict(seconds=0, minutes=0, days=2, hours=0)
assert tickets['assigned'] == init_tickets['assigned'] + 1
assert tickets['revoked'] == init_tickets['revoked'] + 1
assert tickets['solved'] == init_tickets['solved'] + 2
assert tickets['averagesolvingtime'] == solving_time
assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
assert lm_tickets['revoked'] == init_lm_tickets['revoked']
assert lm_tickets['solved'] == init_lm_tickets['solved'] + 2
assert lm_tickets['averagesolvingtime'] == solving_time
by_cat = self.user.stats.getTicketsByCategory()
lm_by_cat = self.user.stats.getLastMonthTicketsByCategory()
solving_time = dict(days=1, hours=0, minutes=0, seconds=0)
assert by_cat[topic]['assigned'] == 1
assert by_cat[topic]['revoked'] == 1
assert by_cat[topic]['solved'] == 1
assert by_cat[topic]['averagesolvingtime'] == solving_time
assert lm_by_cat[topic]['assigned'] == 1
assert lm_by_cat[topic]['revoked'] == 0
assert lm_by_cat[topic]['solved'] == 1
assert lm_by_cat[topic]['averagesolvingtime'] == solving_time
@with_git
@td.with_user_project('test-user-2')
def test_commit_stats(self):
setup_trove_categories()
p = Project.query.get(shortname='u/test-user-2')
topic = TroveCategory.query.get(shortname='scientific')
commit_time = datetime.utcnow() + timedelta(-1)
with mock.patch('allura.lib.plugin.session'):
self.user.set_password('testpassword')
self.user.claim_address('[email protected]')
addr = M.EmailAddress.get(email='[email protected]')
addr.confirmed = True
repo_dir = pkg_resources.resource_filename(
'forgeuserstats', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.name = 'testgit.git'
repo = c.app.repo
repo.refresh()
commit = repo.commit('HEAD')
init_commits = self.user.stats.getCommits()
assert init_commits['number'] == 4
init_lmcommits = self.user.stats.getLastMonthCommits()
assert init_lmcommits['number'] == 4
p.trove_topic = [topic._id]
self.user.stats.addCommit(commit, datetime.utcnow(), p)
commits = self.user.stats.getCommits()
assert commits['number'] == init_commits['number'] + 1
assert commits['lines'] == init_commits['lines'] + 1
lmcommits = self.user.stats.getLastMonthCommits()
assert lmcommits['number'] == init_lmcommits['number'] + 1
assert lmcommits['lines'] == init_lmcommits['lines'] + 1
by_cat = self.user.stats.getCommitsByCategory()
assert by_cat[topic]['number'] == 1
assert by_cat[topic]['lines'] == 1
lm_by_cat = self.user.stats.getLastMonthCommitsByCategory()
assert lm_by_cat[topic]['number'] == 1
assert lm_by_cat[topic]['lines'] == 1
self.user.stats.addCommit(
commit, datetime.utcnow() + timedelta(-40), p)
commits = self.user.stats.getCommits()
assert commits['number'] == init_commits['number'] + 2
assert commits['lines'] == init_commits['lines'] + 2
lmcommits = self.user.stats.getLastMonthCommits()
assert lmcommits['number'] == init_lmcommits['number'] + 1
assert lmcommits['lines'] == init_lmcommits['lines'] + 1
by_cat = self.user.stats.getCommitsByCategory()
assert by_cat[topic]['number'] == 2
assert by_cat[topic]['lines'] == 2
lm_by_cat = self.user.stats.getLastMonthCommitsByCategory()
assert lm_by_cat[topic]['number'] == 1
assert lm_by_cat[topic]['lines'] == 1
@td.with_user_project('test-user-2')
def test_login_stats(self):
init_logins = self.user.stats.tot_logins_count
init_lm_logins = self.user.stats.getLastMonthLogins()
login_datetime = datetime.utcnow()
self.user.stats.addLogin(login_datetime)
logins = self.user.stats.tot_logins_count
lm_logins = self.user.stats.getLastMonthLogins()
assert logins == init_logins + 1
assert lm_logins == init_lm_logins + 1
assert abs(self.user.stats.last_login -
login_datetime) < timedelta(seconds=1)
self.user.stats.addLogin(datetime.utcnow() + timedelta(-32))
logins = self.user.stats.tot_logins_count
lm_logins = self.user.stats.getLastMonthLogins()
assert logins == init_logins + 2
assert lm_logins == init_lm_logins + 1
assert abs(self.user.stats.last_login -
login_datetime) < timedelta(seconds=1)
def test_start_date(self):
        stats = USM.UserStats(registration_date=datetime(2012, 4, 1))
        self.assertEqual(stats.start_date, datetime(2012, 4, 1))
        with h.push_config(config, **{'userstats.start_date': '2013-04-01'}):
            self.assertEqual(stats.start_date, datetime(2013, 4, 1))
        with h.push_config(config, **{'userstats.start_date': '2011-04-01'}):
            self.assertEqual(stats.start_date, datetime(2012, 4, 1))
@mock.patch('allura.model.stats.difflib.unified_diff')
def test_count_loc(self, unified_diff):
stats = USM.UserStats()
newcommit = mock.Mock(
parent_ids=['deadbeef'],
diffs=mock.Mock(
changed=[mock.MagicMock()],
copied=[mock.MagicMock()],
added=[mock.MagicMock()],
),
)
unified_diff.return_value = ['+++', '---', '+line']
newcommit.tree.get_blob_by_path.return_value = mock.MagicMock()
newcommit.tree.get_blob_by_path.return_value.__iter__.return_value = [
'one']
newcommit.repo.commit(
).tree.get_blob_by_path.return_value = mock.MagicMock()
newcommit.repo.commit().tree.get_blob_by_path.return_value.__iter__.return_value = [
'two']
commit_datetime = datetime.utcnow()
project = mock.Mock(
trove_topic=[],
trove_language=[],
)
stats.addCommit(newcommit, commit_datetime, project)
self.assertEqual(stats.general[0].commits[0],
{'lines': 3, 'number': 1, 'language': None})
unified_diff.reset_mock()
with h.push_config(config, **{'userstats.count_lines_of_code': 'false'}):
stats.addCommit(newcommit, commit_datetime, project)
self.assertEqual(stats.general[0].commits[0],
{'lines': 3, 'number': 2, 'language': None})
        unified_diff.assert_not_called()
data_router.rs | use std::{collections::HashMap, net::SocketAddrV4};
use communication_utils::{
parallel_consumer::{ParallelCommonConsumer, ParallelCommonConsumerConfig},
publisher::CommonPublisher,
};
use serde::{Deserialize, Serialize};
use task_utils::task_limiter::TaskLimiter;
use crate::apps::{
default_async_task_limit,
AmqpConsumeOptions,
CommunicationMethod,
LogSettings,
MonitoringSettings,
RepositoryStaticRouting,
};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct DataRouterSettings {
pub communication_method: CommunicationMethod,
pub cache_capacity: usize,
#[serde(default = "default_async_task_limit")]
pub async_task_limit: usize,
pub kafka: Option<DataRouterConsumerKafkaSettings>,
pub amqp: Option<DataRouterAmqpSettings>,
pub grpc: Option<DataRouterGRpcSettings>,
pub monitoring: MonitoringSettings,
pub services: DataRouterServicesSettings,
#[serde(default)]
pub log: LogSettings,
#[serde(skip_serializing_if = "HashMap::is_empty", default)]
pub repositories: HashMap<String, RepositoryStaticRouting>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct DataRouterConsumerKafkaSettings {
pub brokers: String,
pub group_id: String,
pub ingest_topic: String,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct DataRouterAmqpSettings {
pub exchange_url: String,
pub tag: String,
pub ingest_queue: String,
pub consume_options: Option<AmqpConsumeOptions>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct DataRouterGRpcSettings {
pub address: SocketAddrV4,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct DataRouterServicesSettings {
pub schema_registry_url: String,
}
impl DataRouterSettings {
pub async fn consumer(&self) -> anyhow::Result<ParallelCommonConsumer> {
match (
&self.kafka,
&self.amqp,
&self.grpc,
&self.communication_method,
) {
(Some(kafka), _, _, CommunicationMethod::Kafka) => {
kafka
.parallel_consumer(TaskLimiter::new(self.async_task_limit))
.await
}
(_, Some(amqp), _, CommunicationMethod::Amqp) => {
amqp.parallel_consumer(TaskLimiter::new(self.async_task_limit))
.await
}
(_, _, Some(grpc), CommunicationMethod::Grpc) => grpc.parallel_consumer().await,
_ => anyhow::bail!("Unsupported consumer specification"),
}
}
pub async fn producer(&self) -> anyhow::Result<CommonPublisher> {
Ok(
match (
&self.kafka,
&self.amqp,
&self.grpc,
&self.communication_method,
) {
(Some(kafka), _, _, CommunicationMethod::Kafka) => {
CommonPublisher::new_kafka(&kafka.brokers).await?
}
(_, Some(amqp), _, CommunicationMethod::Amqp) => {
CommonPublisher::new_amqp(&amqp.exchange_url).await?
}
(_, _, Some(_), CommunicationMethod::Grpc) => CommonPublisher::new_grpc().await?,
_ => anyhow::bail!("Unsupported consumer specification"),
},
)
}
}
impl DataRouterConsumerKafkaSettings {
pub async fn parallel_consumer(
&self,
task_limiter: TaskLimiter,
) -> anyhow::Result<ParallelCommonConsumer> {
Ok(
ParallelCommonConsumer::new(ParallelCommonConsumerConfig::Kafka {
brokers: &self.brokers,
group_id: &self.group_id,
topic: &self.ingest_topic,
task_limiter,
})
.await?,
)
}
}
impl DataRouterAmqpSettings {
pub async fn parallel_consumer(
&self,
task_limiter: TaskLimiter,
) -> anyhow::Result<ParallelCommonConsumer> {
Ok(
ParallelCommonConsumer::new(ParallelCommonConsumerConfig::Amqp {
connection_string: &self.exchange_url,
consumer_tag: &self.tag,
queue_name: &self.ingest_queue,
options: self.consume_options.map(|o| o.into()),
task_limiter,
})
.await?,
)
}
}
impl DataRouterGRpcSettings {
pub async fn parallel_consumer(&self) -> anyhow::Result<ParallelCommonConsumer> {
Ok(
ParallelCommonConsumer::new(ParallelCommonConsumerConfig::Grpc { addr: self.address })
.await?,
)
}
}
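A small sketch of how `DataRouterSettings` might be wired up at startup: both `consumer()` and `producer()` match on `(kafka, amqp, grpc, communication_method)`, so the optional block corresponding to the configured `CommunicationMethod` must be present or they bail with "Unsupported consumer specification". The surrounding async runtime and the already-deserialized `settings` value are assumptions, not part of the original crate.

```rust
async fn connect(settings: &DataRouterSettings) -> anyhow::Result<()> {
    // Fails unless e.g. settings.kafka is Some(..) while
    // communication_method == CommunicationMethod::Kafka.
    let _consumer = settings.consumer().await?;
    let _producer = settings.producer().await?;
    // ...hand the consumer/producer pair to the router's main loop here.
    Ok(())
}
```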
media-session-api.js | module.exports={A:{A:{"2":"K D G E A B gB"},B:{"2":"2 C d J M H I"},C:{"2":"0 1 2 3 4 6 7 9 dB BB F N K D G E A B C d J M H I O P Q R S T U V W X Y Z a b c e f g h i j k l m n o L q r s t u v w x y z GB FB AB CB DB XB WB"},D:{"1":"0 1 3 4 7 9 GB FB AB CB DB QB iB KB IB LB MB NB OB","2":"2 6 F N K D G E A B C d J M H I O P Q R S T U V W X Y Z a b c e f g h i j k l m n o L q r s t u v w x y z"},E:{"2":"F N K D G E A B C PB HB RB SB TB UB VB p","16":"5 YB"},F:{"2":"0 1 5 6 8 E B C J M H I O P Q R S T U V W X Y Z a b c e f g h i j k l m n o L q r s t u v w x y z ZB aB bB cB p eB"},G:{"2":"G HB fB EB hB JB jB kB lB mB nB oB pB qB rB sB"},H:{"2":"tB"},I:{"2":"4 BB F uB vB wB xB EB yB zB"},J:{"2":"D A"},K:{"2":"5 8 A B C L p"},L:{"2":"IB"},M:{"2":"3"},N:{"2":"A B"},O:{"2":"0B"},P:{"2":"F 1B 2B 3B 4B"},Q:{"2":"5B"},R:{"2":"6B"}},B:6,C:"Media Session API"}; |
Labeling.py | import numpy
from matplotlib import pyplot
import gdal
from skimage import io,exposure
from skimage.segmentation import slic,mark_boundaries
import os
from PIL import Image
import shelve
import sys
sys.path.append('..')
from Config import config
def seg(path,n_segments=500, compactness=20):
i=io.imread(path)[:,:,[3,2,1,7]]
img=i[:,:,:3]
img=(img-img.min())/(img.max()-img.min())
img=img*255
img=img.astype(numpy.uint8)
img=exposure.adjust_gamma(img,0.5)
segment=slic(img,n_segments=n_segments, compactness=compactness,enforce_connectivity=True)
out=mark_boundaries(img,segment,color=[0,0,0.2])
#img=exposure.adjust_gamma(img,0.5)
#out=exposure.adjust_gamma(out,0.5)
wdi=(i[:,:,3]-i[:,:,1])/(i[:,:,3]+i[:,:,1])
wdi=(wdi/wdi.max())*255
return segment,out,img,wdi
def getname(path,namelist):
if namelist[0]==0:
season='ROIs1158_spring'
elif namelist[0]==1:
season='ROIs1868_summer'
elif namelist[0]==2:
season='ROIs1970_fall'
elif namelist[0]==3:
season='ROIs2017_winter'
path_s2=path+'\\'+season+'\\s2_'+str(namelist[1])+'\\'+season+'_s2_'+str(namelist[1])+'_p'+str(namelist[2])+'.tif'
return path_s2
def transform(name):
if 'spring' in name:
season=0
elif 'summer' in name:
season=1
elif 'fall' in name:
season=2
elif 'winter' in name:
season=3
l=[]
l.append(season)
l.append(int(name.split('_')[3]))
l.append(int(name.split('_')[4].split('.')[0][1:]))
return l
class UI:
def __init__(self,mode='normal',init=0):
        '''mode = normal: label normally
        mode = review: only display images that have already been labeled
        '''
self.mode=mode
self.path_label=config.path_labels
if self.mode=='normal':
with shelve.open(config.path_devision) as f:
self.imglist=f['test']
else:
self.imglist=os.listdir(config.path_labels)
self.n=init
self.ifpress=False
self.ifloadlabel=False
fig=pyplot.figure()
fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
fig.canvas.mpl_connect('key_press_event',self.on_key_press)
fig.canvas.mpl_connect('button_press_event',self.on_button_press)
fig.canvas.mpl_connect('motion_notify_event',self.on_button_move)
fig.canvas.mpl_connect('button_release_event',self.on_button_release)
self.fig=fig
self.ax1=fig.add_subplot(3,2,1)
self.ax2=fig.add_subplot(3,2,3)
self.ax4=fig.add_subplot(3,2,5)
self.ax3=fig.add_subplot(1,2,2)
pyplot.get_current_fig_manager().window.state('zoomed')
#self.ax2=fig.add_subplot(1,2,2)
self.valuelist=[]
self.label=numpy.zeros((256,256))
self.ifloadlabel=True
self.draw()
pyplot.show()
def on_key_press(self,event):
if event.key=='a' or event.key=='left':
self.n-=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='d' or event.key=='right':
if self.n+1>=len(self.imglist):
return
self.n+=1
print(self.n)
self.valuelist=[]
self.label=numpy.zeros(self.segment.shape)
self.ifloadlabel=True
self.draw()
if event.key=='e' or event.key=='enter':
self.save_label()
        if event.key=='Q':
            f=numpy.unique(self.segment).tolist()
            for i in f:
                if i not in self.valuelist:
                    self.valuelist.append(i)
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1.0,0)
self.draw()
def on_button_press(self,event):
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.ifpress=True
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.ifpress=True
self.valuelist.remove(value)
def on_button_move(self,event):
if not self.ifpress:
return
try:
r=int(event.ydata)
c=int(event.xdata)
except TypeError:
return
value=self.segment[r,c]
if event.button==1:
if value not in self.valuelist:
self.valuelist.append(value)
elif event.button==3:
if value in self.valuelist:
self.valuelist.remove(value)
def on_button_release(self,event):
if not self.ifpress:
return
self.ifpress=False
for i in range(len(self.valuelist)):
if i==0:
flag=(self.segment==self.valuelist[i])
else:
flag=flag+(self.segment==self.valuelist[i])
self.label=numpy.where(flag,1,0).astype(int)
self.draw()
def draw(self):
if self.mode=='normal':
segment,out,img,wdi=seg(getname(config.path,self.imglist[self.n]))
else:
segment,out,img,wdi=seg(getname(config.path,transform(self.imglist[self.n])))
self.segment=segment
if self.ifloadlabel:
self.read_label()
self.ifloadlabel=False
#self.ax1.imshow(out)
t=numpy.where(self.label==1,0.5,out[:,:,2])
out[:,:,2]=t
self.ax1.cla()
self.ax2.cla()
self.ax3.cla()
self.ax4.cla()
self.ax1.imshow(img)
self.ax2.imshow(wdi,cmap='gray')
self.ax3.imshow(out)
self.ax4.imshow(self.label,cmap='gray')
d=os.listdir(config.path_labels)
self.ax3.set_title(str(len(d))+'/'+str(self.n+1))
self.fig.canvas.draw_idle()
def save_label(self):
label=self.label*255
label=label.astype(numpy.uint8)
label=Image.fromarray(label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
label.save(self.path_label+'\\'+name)
def read_label(self):
dirlist=os.listdir(self.path_label)
if self.mode=='normal':
name=getname(config.path,self.imglist[self.n]).split('\\')[-1]
name=name.split('_')
name[2]='label'
name='_'.join(name)
else:
name=self.imglist[self.n]
if name in dirlist:
self.label=numpy.array(Image.open(self.path_label+'\\'+name))/255
self.label=self.label.astype(int)
self.valuelist=list(numpy.unique(numpy.where(self.label==1,self.segment,-2)))
self.valuelist.remove(-2)
def statistic():
d=os.listdir(config.path_labels)
n=numpy.array([0,0,0,0])
for i in d:
if 'spring' in i:
n[0]=n[0]+1
if 'summer' in i:
n[1]=n[1]+1
if 'fall' in i:
n[2]=n[2]+1
if 'winter' in i:
n[3]=n[3]+1
print(n)
n=n/len(d)
print(n)
if __name__=='__main__':
test=UI(mode='normal',init=100)
    #statistic()
rounded_rect.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use glib::translate::*;
use graphene::{Point, Rect, Size};
use std::mem;
#[derive(Clone, Debug)]
#[doc(alias = "GskRoundedRect")]
pub struct RoundedRect(ffi::GskRoundedRect);
impl RoundedRect {
#[doc(alias = "gsk_rounded_rect_init")]
pub fn new(
bounds: Rect,
top_left: Size,
top_right: Size,
bottom_right: Size,
bottom_left: Size,
) -> Self {
assert_initialized_main_thread!();
unsafe {
let mut rounded_rect = mem::MaybeUninit::uninit();
ffi::gsk_rounded_rect_init(
rounded_rect.as_mut_ptr(),
bounds.to_glib_none().0,
top_left.to_glib_none().0,
top_right.to_glib_none().0,
bottom_right.to_glib_none().0,
bottom_left.to_glib_none().0,
);
Self(rounded_rect.assume_init())
}
}
#[doc(alias = "gsk_rounded_rect_init_from_rect")]
#[doc(alias = "init_from_rect")]
pub fn from_rect(bounds: Rect, radius: f32) -> Self {
assert_initialized_main_thread!();
unsafe {
let mut rounded_rect = mem::MaybeUninit::uninit();
ffi::gsk_rounded_rect_init_from_rect(
rounded_rect.as_mut_ptr(),
bounds.to_glib_none().0,
radius,
);
Self(rounded_rect.assume_init())
}
}
#[doc(alias = "gsk_rounded_rect_init")]
pub fn init(
&mut self,
bounds: Rect,
top_left: Size,
top_right: Size,
bottom_right: Size,
bottom_left: Size,
) {
unsafe {
ffi::gsk_rounded_rect_init(
&mut self.0,
bounds.to_glib_none().0,
top_left.to_glib_none().0,
top_right.to_glib_none().0,
bottom_right.to_glib_none().0,
bottom_left.to_glib_none().0,
);
}
}
#[doc(alias = "gsk_rounded_rect_init_from_rect")]
pub fn init_from_rect(&mut self, bounds: Rect, radius: f32) {
unsafe {
ffi::gsk_rounded_rect_init_from_rect(&mut self.0, bounds.to_glib_none().0, radius);
}
}
#[doc(alias = "gsk_rounded_rect_normalize")]
pub fn normalize(&mut self) {
unsafe {
ffi::gsk_rounded_rect_normalize(&mut self.0);
}
}
#[doc(alias = "gsk_rounded_rect_offset")]
pub fn offset(&mut self, dx: f32, dy: f32) {
unsafe {
ffi::gsk_rounded_rect_offset(&mut self.0, dx, dy);
}
}
#[doc(alias = "gsk_rounded_rect_shrink")]
pub fn shrink(&mut self, top: f32, right: f32, bottom: f32, left: f32) {
unsafe {
ffi::gsk_rounded_rect_shrink(&mut self.0, top, right, bottom, left);
}
}
#[doc(alias = "gsk_rounded_rect_is_rectilinear")]
pub fn is_rectilinear(&self) -> bool {
unsafe { from_glib(ffi::gsk_rounded_rect_is_rectilinear(&self.0)) }
}
#[doc(alias = "gsk_rounded_rect_contains_point")]
    pub fn contains_point(&self, point: Point) -> bool {
unsafe {
from_glib(ffi::gsk_rounded_rect_contains_point(
&self.0,
point.to_glib_none().0,
))
}
}
#[doc(alias = "gsk_rounded_rect_contains_rect")]
pub fn contains_rect(&self, rect: Rect) -> bool {
unsafe {
from_glib(ffi::gsk_rounded_rect_contains_rect(
&self.0,
rect.to_glib_none().0,
))
}
}
#[doc(alias = "gsk_rounded_rect_intersects_rect")]
pub fn intersects_rect(&self, rect: Rect) -> bool {
unsafe {
from_glib(ffi::gsk_rounded_rect_intersects_rect(
&self.0,
rect.to_glib_none().0,
))
}
}
}
#[doc(hidden)]
impl FromGlibPtrNone<*const ffi::GskRoundedRect> for RoundedRect {
unsafe fn from_glib_none(ptr: *const ffi::GskRoundedRect) -> Self {
Self(*ptr)
}
}
#[doc(hidden)]
impl<'a> ToGlibPtr<'a, *const ffi::GskRoundedRect> for RoundedRect {
type Storage = &'a Self;
fn to_glib_none(&'a self) -> Stash<*const ffi::GskRoundedRect, Self> {
Stash(&self.0, self)
}
}
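A short usage sketch for the `RoundedRect` wrapper above. It assumes GTK/GSK is initialized on the main thread (the constructors assert this); the rectangle size, radius, offset, and test point are placeholders.

```rust
use graphene::{Point, Rect};

fn hit_test_example() -> bool {
    // 100x50 rectangle with every corner rounded to radius 8,
    // shifted by (10, 10), then hit-tested against a point.
    let bounds = Rect::new(0.0, 0.0, 100.0, 50.0);
    let mut rounded = RoundedRect::from_rect(bounds, 8.0);
    rounded.offset(10.0, 10.0);
    rounded.contains_point(Point::new(20.0, 20.0))
}
```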
distributed.py | import copy
import inspect
import itertools
import logging
import os
import warnings
from contextlib import contextmanager
from typing import NamedTuple
import torch
import torch.distributed as dist
RPC_AVAILABLE = False
if dist.is_available():
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.distributed_c10d import _get_default_group
if torch.distributed.rpc.is_available():
RPC_AVAILABLE = True
from torch.distributed.rpc import RRef
from torch._utils import _get_device_index
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import scatter_kwargs, gather, is_namedtuple
def _find_tensors(obj):
r"""
Recursively find all tensors contained in the specified object.
"""
if RPC_AVAILABLE and isinstance(obj, RRef):
# If the current node is the owner of the RRef, unwrap it and try to
# find Tensors.
# TODO: Expand to remote RRefs.
if obj.is_owner():
return _find_tensors(obj.local_value())
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
def _dump_DDP_relevant_env_vars():
relevant_env_vars = [
"RANK",
"LOCAL_RANK",
"WORLD_SIZE",
"MASTER_PORT",
"MASTER_ADDR",
"CUDA_VISIBLE_DEVICES",
"GLOO_SOCKET_IFNAME",
"GLOO_DEVICE_TRANSPORT",
"NCCL_SOCKET_IFNAME",
"NCCL_BLOCKING_WAIT",
"NCCL_DEBUG",
"NCCL_DEBUG_SUBSYS",
"NCCL_IB_DISABLE",
# More NCCL env vars:
"NCCL_P2P_DISABLE",
"NCCL_P2P_LEVEL",
"NCCL_SHM_DISABLE",
"NCCL_SOCKET_NTHREADS",
"NCCL_NSOCKS_PERTHREAD",
"NCCL_BUFFSIZE",
"NCCL_NTHREADS",
"NCCL_RINGS",
"NCCL_MAX_NCHANNELS",
"NCCL_MIN_NCHANNELS",
"NCCL_CHECKS_DISABLE",
"NCCL_CHECK_POINTERS",
"NCCL_LAUNCH_MODE",
"NCCL_IB_HCA",
"NCCL_IB_TIMEOUT",
"NCCL_IB_RETRY_CNT",
"NCCL_IB_GID_INDEX",
"NCCL_IB_SL",
"NCCL_IB_TC",
"NCCL_IB_AR_THRESHOLD",
"NCCL_IB_CUDA_SUPPORT",
"NCCL_NET_GDR_LEVEL",
"NCCL_NET_GDR_READ",
"NCCL_SINGLE_RING_THRESHOLD",
"NCCL_LL_THRESHOLD",
"NCCL_TREE_THRESHOLD",
"NCCL_ALGO",
"NCCL_PROTO",
"NCCL_IGNORE_CPU_AFFINITY",
"NCCL_DEBUG_FILE",
"NCCL_COLLNET_ENABLE",
"NCCL_TOPO_FILE",
"NCCL_TOPO_DUMP_FILE",
]
formatted_output = ""
for var in relevant_env_vars:
value = os.environ[var] if var in os.environ else "N/A"
formatted_output += "env:%s=%s\n" % (var, value)
print(formatted_output)
class _DDPUnevenInputsConfig(NamedTuple):
ddp_join_enabled: bool
ddp_join_divide_by_initial_world_size: bool
class DistributedDataParallel(Module):
r"""Implements distributed data parallelism that is based on
``torch.distributed`` package at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine and each device, and
each such replica handles a portion of the input. During the backwards
pass, gradients from each node are averaged.
The batch size should be larger than the number of GPUs used locally.
See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
The same constraints on input as in :class:`torch.nn.DataParallel` apply.
Creation of this class requires that ``torch.distributed`` to be already
initialized, by calling :func:`torch.distributed.init_process_group`.
``DistributedDataParallel`` is proven to be significantly faster than
:class:`torch.nn.DataParallel` for single-node multi-GPU data
parallel training.
To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
up ``N`` processes, ensuring that each process exclusively works on a single
GPU from 0 to N-1. This can be done by either setting
``CUDA_VISIBLE_DEVICES`` for every process or by calling:
>>> torch.cuda.set_device(i)
where i is from 0 to N-1. In each process, you should refer the following
to construct this module:
>>> torch.distributed.init_process_group(
>>> backend='nccl', world_size=N, init_method='...'
>>> )
>>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
In order to spawn up multiple processes per node, you can use either
``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
.. note::
Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
for a brief introduction to all features related to distributed training.
.. note::
``DistributedDataParallel`` can be used in conjunction with
:class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce
per-rank optimizer states memory footprint. Please refer to
`ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__
for more details.
.. note:: ``nccl`` backend is currently the fastest and highly recommended
backend when using GPUs. This applies to both single-node and
multi-node distributed training.
.. note:: This module also supports mixed-precision distributed training.
This means that your model can have different types of parameters such
as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these
mixed types of parameters will just work fine.
.. note:: If you use ``torch.save`` on one process to checkpoint the module,
and ``torch.load`` on some other processes to recover it, make sure that
``map_location`` is configured properly for every process. Without
``map_location``, ``torch.load`` would recover the module to devices
where the module was saved from.
.. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
gradient will be ``M`` times smaller when compared to the same model
trained on a single node with ``batch=M*N`` if the loss is summed (NOT
averaged as usual) across instances in a batch (because the gradients
between different nodes are averaged). You should take this into
consideration when you want to obtain a mathematically equivalent
training process compared to the local training counterpart. But in most
cases, you can just treat a DistributedDataParallel wrapped model, a
DataParallel wrapped model and an ordinary model on a single GPU as the
same (E.g. using the same learning rate for equivalent batch size).
.. note::
Parameters are never broadcast between processes. The module performs
an all-reduce step on gradients and assumes that they will be modified
by the optimizer in all processes in the same way. Buffers
(e.g. BatchNorm stats) are broadcast from the module in process of rank
0, to all other replicas in the system in every iteration.
.. note::
If you are using DistributedDataParallel in conjunction with the
:ref:`distributed-rpc-framework`, you should always use
:meth:`torch.distributed.autograd.backward` to compute gradients and
:class:`torch.distributed.optim.DistributedOptimizer` for optimizing
parameters.
Example::
>>> import torch.distributed.autograd as dist_autograd
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>> from torch.distributed.rpc import RRef
>>>
>>> t1 = torch.rand((3, 3), requires_grad=True)
>>> t2 = torch.rand((3, 3), requires_grad=True)
>>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
>>> ddp_model = DDP(my_model)
>>>
>>> # Setup optimizer
>>> optimizer_params = [rref]
>>> for param in ddp_model.parameters():
>>> optimizer_params.append(RRef(param))
>>>
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> optimizer_params,
>>> lr=0.05,
>>> )
>>>
>>> with dist_autograd.context() as context_id:
>>> pred = ddp_model(rref.to_here())
>>> loss = loss_func(pred, loss)
>>> dist_autograd.backward(context_id, loss)
>>> dist_optim.step()
.. note::
To let a non-DDP model load a state dict from a DDP model,
:meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
needs to be applied to strip the prefix "module." in the DDP state dict before loading.
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) are distributed synchronization
points. Take that into account in case different processes might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added nor removed later.
Same applies to buffers.
.. warning::
This module assumes all parameters are registered in the model of each
distributed processes are in the same order. The module itself will
conduct gradient ``allreduce`` following the reverse order of the
registered parameters of the model. In other words, it is users'
responsibility to ensure that each distributed process has the exact
same model and thus the exact same parameter registration order.
.. warning::
This module allows parameters with non-rowmajor-contiguous strides.
For example, your model may contain some parameters whose
:class:`torch.memory_format` is ``torch.contiguous_format``
and others whose format is ``torch.channels_last``. However,
corresponding parameters in different processes must have the
same strides.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
.. warning::
If you plan on using this module with a ``nccl`` backend or a ``gloo``
backend (that uses Infiniband), together with a DataLoader that uses
multiple workers, please change the multiprocessing start method to
``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
likely experience deadlocks if you don't change this setting.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
won't be invoked anymore, unless the hooks are initialized in the
:meth:`forward` method.
.. warning::
You should never try to change your model's parameters after wrapping
up your model with ``DistributedDataParallel``. Because, when
wrapping up your model with ``DistributedDataParallel``, the constructor
of ``DistributedDataParallel`` will register the additional gradient
reduction functions on all the parameters of the model itself at the
time of construction. If you change the model's parameters afterwards,
        gradient reduction functions no longer match the correct set of
parameters.
.. warning::
Using ``DistributedDataParallel`` in conjunction with the
:ref:`distributed-rpc-framework` is experimental and subject to change.
.. warning::
The ``gradient_as_bucket_view`` mode does not yet work with Automatic
Mixed Precision (AMP). AMP maintains stashed gradients that are used for
unscaling gradients. With ``gradient_as_bucket_view=True``, these
stashed gradients will point to communication buckets in the first
iteration. In the next iteration, the communication buckets are mutated
and thus these stashed gradients will be unexpectedly mutated as well,
which might lead to wrong results.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices.
1) For single-device modules, ``device_ids`` can
contain exactly one device id, which represents the only
CUDA device where the input module corresponding to this process resides.
Alternatively, ``device_ids`` can also be ``None``.
2) For multi-device modules and CPU modules,
``device_ids`` must be ``None``.
When ``device_ids`` is ``None`` for both cases,
both the input data for the forward pass and the actual module
must be placed on the correct device.
(default: ``None``)
output_device (int or torch.device): Device location of output for
single-device CUDA modules. For multi-device modules and
CPU modules, it must be ``None``, and the module itself
dictates the output location. (default: ``device_ids[0]``
for single-device modules)
broadcast_buffers (bool): Flag that enables syncing (broadcasting)
buffers of the module at beginning of the ``forward``
function. (default: ``True``)
process_group: The process group to be used for distributed data
all-reduction. If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
multiple buckets so that gradient reduction of each
bucket can potentially overlap with backward computation.
:attr:`bucket_cap_mb` controls the bucket size in
MegaBytes (MB). (default: 25)
find_unused_parameters (bool): Traverse the autograd graph from all
tensors contained in the return value of the
wrapped module's ``forward`` function. Parameters
that don't receive gradients as part of this
graph are preemptively marked as being ready to
be reduced. Note that all ``forward`` outputs
that are derived from module parameters must
participate in calculating loss and later the
gradient computation. If they don't, this wrapper
will hang waiting for autograd to produce
gradients for those parameters. Any outputs
derived from module parameters that are otherwise
unused can be detached from the autograd graph
using ``torch.Tensor.detach``. (default: ``False``)
check_reduction: This argument is deprecated.
gradient_as_bucket_view (bool): This is a prototype feature and subject
to changes. When set to ``True``, gradients will be views
pointing to different offsets of ``allreduce`` communication
buckets. This can reduce peak memory usage, where the
saved memory size will be equal to the total gradients
size. Moreover, it avoids the overhead of copying between
gradients and ``allreduce`` communication buckets. When
gradients are views, ``detach_()`` cannot be called on the
gradients. If hitting such errors, please fix it by
referring to the :meth:`~torch.optim.Optimizer.zero_grad`
function in ``torch/optim/optimizer.py`` as a solution.
Attributes:
module (Module): the module to be parallelized.
Example::
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
>>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
"""
def __init__(
self,
module,
device_ids=None,
output_device=None,
dim=0,
broadcast_buffers=True,
process_group=None,
bucket_cap_mb=25,
find_unused_parameters=False,
check_reduction=False,
gradient_as_bucket_view=False,
):
super(DistributedDataParallel, self).__init__()
assert any((p.requires_grad for p in module.parameters())), (
"DistributedDataParallel is not needed when a module "
"doesn't have any parameter that requires a gradient."
)
if device_ids is not None and len(device_ids) > 1:
raise ValueError("device_ids can only be None or contain a single element.")
self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1
distinct_device_types = {p.device.type for p in module.parameters()}
if len(distinct_device_types) != 1:
raise ValueError(
"DistributedDataParallel's input module must be on "
"the same type of devices, but input module parameters locate in {}.".format(
distinct_device_types
)
)
self.device_type = list(distinct_device_types)[0]
if (
device_ids is None
or len(device_ids) == 0 # For backward compatibility.
or self.device_type == "cpu"
or self.is_multi_device_module
):
if device_ids or output_device:
raise ValueError(
"DistributedDataParallel device_ids and output_device arguments "
"only work with single-device/multiple-device GPU modules or CPU modules, "
"but got device_ids {}, output_device {}, and module parameters {}.".format(
device_ids,
output_device,
{p.device for p in module.parameters()},
)
)
self.device_ids = None
self.output_device = None
else:
self.device_ids = [_get_device_index(x, True) for x in device_ids]
if output_device is None:
output_device = device_ids[0]
self.output_device = _get_device_index(output_device, True)
if process_group is None:
self.process_group = _get_default_group()
else:
self.process_group = process_group
self.dim = dim
self.module = module
self.device = list(self.module.parameters())[0].device
self.broadcast_buffers = broadcast_buffers
self.find_unused_parameters = find_unused_parameters
self.require_backward_grad_sync = True
self.require_forward_param_sync = True
self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False
)
self.gradient_as_bucket_view = gradient_as_bucket_view
if hasattr(module, "_ddp_params_and_buffers_to_ignore"):
self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore
else:
self.parameters_to_ignore = []
if check_reduction:
# This argument is no longer used since the reducer
# will ensure reduction completes even if some parameters
# do not receive gradients.
warnings.warn(
"The `check_reduction` argument in `DistributedDataParallel` "
"module is deprecated. Please avoid using it."
)
# Check that a module does not have Uninitialized parameters
for param in module.parameters():
if isinstance(param, torch.nn.parameter.UninitializedParameter):
raise RuntimeError(
"Modules with uninitialized parameters can't be used with `DistributedDataParallel`. "
"Run a dummy forward pass to correctly initialize the modules"
)
        # used for intra-node param sync and inter-node sync as well
self.broadcast_bucket_size = int(250 * 1024 * 1024)
# reduction bucket size
self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)
# Whether to perform input tensor CPU to GPU copies on a side-stream
self.use_side_stream_for_tensor_copies = (
os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1"
)
# TODO(wayi@): Remove this field since SPMD is no longer supported,
# and also remove all the relevant unnecessary loops.
# Module replication within process (single-process multi device)
self._module_copies = [self.module]
# Build parameters for reducer.
parameters, expect_sparse_gradient = self._build_params_for_reducer()
# Verify model equivalence.
dist._verify_model_across_ranks(self.process_group, parameters)
# Sync params and buffers. Ensures all DDP models start off at the same value.
self._sync_params_and_buffers(authoritative_rank=0)
# Builds reducer.
self._ddp_init_helper(parameters, expect_sparse_gradient)
def _sync_params_and_buffers(self, authoritative_rank=0):
module_states = []
for name, param in self.module.state_dict().items():
if name not in self.parameters_to_ignore:
module_states.append(param)
if len(module_states) > 0:
self._distributed_broadcast_coalesced(
module_states, self.broadcast_bucket_size, authoritative_rank
)
def _ddp_init_helper(self, parameters, expect_sparse_gradient):
"""
Initialization helper function that does the following:
(1) bucketing the parameters for reductions
(2) resetting the bucketing states
(3) registering the grad hooks
(4) logging construction-time DDP logging data
(5) passing a handle of DDP to SyncBatchNorm layer
"""
# The bucket size limit is specified in the constructor.
# Additionally, we allow for a single small bucket for parameters
# that are defined first, such that their gradients don't spill into
# a much larger bucket, adding unnecessary latency after gradient
# computation finishes. Experiments showed 1MB is a reasonable value.
bucket_indices = dist._compute_bucket_assignment_by_size(
parameters[0],
[dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],
expect_sparse_gradient[0],
)
# Note: reverse list of buckets because we want to approximate the
# order in which their gradients are produced, and assume they
# are used in the forward pass in the order they are defined.
self.reducer = dist.Reducer(
parameters,
list(reversed(bucket_indices)),
self.process_group,
expect_sparse_gradient,
self.bucket_bytes_cap,
self.find_unused_parameters,
self.gradient_as_bucket_view,
)
self.logger = dist.Logger(self.reducer)
# Set logging data that can be got during construction time.
self.logger.set_construction_data_and_log(
self.module.__class__.__name__,
[] if self.device_ids is None else self.device_ids,
-1 if self.output_device is None else self.output_device,
self.broadcast_buffers,
)
# passing a handle to torch.nn.SyncBatchNorm layer
self._passing_sync_batchnorm_handle(self._module_copies)
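# A minimal, illustrative sketch of the idea behind
# dist._compute_bucket_assignment_by_size (the real implementation lives in C++ and
# is not reproduced here): greedily pack parameter indices into buckets by byte
# size, using a small first cap so early-defined parameters get their own bucket.
# `params` and the caps below are hypothetical inputs for illustration only.
#
# def sketch_bucket_assignment(params, caps=(1 << 20, 25 << 20)):
#     buckets, current, current_bytes, cap_idx = [], [], 0, 0
#     for idx, p in enumerate(params):
#         nbytes = p.numel() * p.element_size()
#         if current and current_bytes + nbytes > caps[cap_idx]:
#             buckets.append(current)
#             current, current_bytes = [], 0
#             cap_idx = min(cap_idx + 1, len(caps) - 1)
#         current.append(idx)
#         current_bytes += nbytes
#     if current:
#         buckets.append(current)
#     return buckets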
def __getstate__(self):
self._check_default_group()
attrs = copy.copy(self.__dict__)
del attrs["process_group"]
del attrs["reducer"]
del attrs["logger"]
return attrs
def __setstate__(self, state):
# If serializable, then the process group should be the default one
self.process_group = _get_default_group()
super(DistributedDataParallel, self).__setstate__(state)
self.__dict__.setdefault("require_forward_param_sync", True)
self.__dict__.setdefault("require_backward_grad_sync", True)
parameters, expect_sparse_gradient = self._build_params_for_reducer()
self._ddp_init_helper(parameters, expect_sparse_gradient)
def _build_params_for_reducer(self):
# Build tuple of (module, parameter) for all parameters that require grads.
modules_and_parameters = [
[
(module, parameter)
for module_name, module in replica.named_modules()
for parameter in [
param
# Note that we access module.named_parameters instead of
# parameters(module). parameters(module) is only needed in the
# single-process multi device case, where it accesses replicated
# parameters through _former_parameters.
for param_name, param in module.named_parameters(recurse=False)
if param.requires_grad
and f"{module_name}.{param_name}"
not in self.parameters_to_ignore
]
]
for replica in self._module_copies
]
# Deduplicate any parameters that might be shared across child modules.
memo = set()
modules_and_parameters = [
# "p not in memo" is the deduplication check.
# "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed.
[(m, p) for m, p in replica_mps if p not in memo and not memo.add(p)]
for replica_mps in modules_and_parameters
]
# Build list of parameters.
parameters = [
list(parameter for _, parameter in replica)
for replica in modules_and_parameters
]
# Checks if a module will produce a sparse gradient.
def produces_sparse_gradient(module):
if isinstance(module, torch.nn.Embedding) or isinstance(
module, torch.nn.EmbeddingBag
):
return module.sparse
return False
# Build list of booleans indicating whether or not to expect sparse
# gradients for the corresponding parameters.
expect_sparse_gradient = [
list(produces_sparse_gradient(module) for module, _ in replica)
for replica in modules_and_parameters
]
# The following modules_params and modules_buffers are used for
# param/buffer sync in _sync_params.
self.modules_params = [
list(self._get_parameters(m)) for m in self._module_copies
]
# Collect buffers for modules, filtering out buffers that should be ignored.
named_module_buffers = [
[(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]
for m in self._module_copies
]
self.modules_buffers = [
[
buffer
for (buffer, buffer_name) in module_buffers
if buffer_name not in self.parameters_to_ignore
]
for module_buffers in named_module_buffers
]
return parameters, expect_sparse_gradient
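# Side note on the deduplication idiom above (a generic Python pattern, not a DDP
# API): set.add() returns None, so `not memo.add(p)` is always True and merely
# records p as seen; the real filter is `p not in memo`. A tiny standalone example:
#
# memo = set()
# items = ["a", "b", "a", "c", "b"]
# first_seen = [x for x in items if x not in memo and not memo.add(x)]
# assert first_seen == ["a", "b", "c"]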
def _get_parameters(self, m, recurse=True):
"""
Returns a generator of module parameters
"""
def model_parameters(m):
ps = (
m._former_parameters.values()
if hasattr(m, "_former_parameters")
else m.parameters(recurse=False)
)
for p in ps:
yield p
for m in m.modules() if recurse else [m]:
for p in model_parameters(m):
yield p
def _check_default_group(self):
pickle_not_supported = False
try:
if self.process_group != _get_default_group():
pickle_not_supported = True
except RuntimeError:
pickle_not_supported = True
if pickle_not_supported:
raise RuntimeError(
"DDP Pickling/Unpickling are only supported "
"when using DDP with the default process "
"group. That is, when you have called "
"init_process_group and have not passed "
"process_group argument to DDP constructor"
)
@contextmanager
def no_sync(self):
r"""
A context manager to disable gradient synchronizations across DDP
processes. Within this context, gradients will be accumulated on module
variables, which will later be synchronized in the first
forward-backward pass exiting the context.
Example::
>>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
>>> with ddp.no_sync():
>>> for input in inputs:
>>> ddp(input).backward() # no synchronization, accumulate grads
>>> ddp(another_input).backward() # synchronize grads
"""
old_require_backward_grad_sync = self.require_backward_grad_sync
self.require_backward_grad_sync = False
try:
yield
finally:
self.require_backward_grad_sync = old_require_backward_grad_sync
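# Usage sketch for no_sync() (hypothetical names: `ddp`, `batches`, `accum_steps`,
# `optimizer`; assumes `import contextlib`): a common gradient-accumulation pattern
# skips allreduce on all but every accum_steps-th micro-batch.
#
# for step, batch in enumerate(batches):
#     sync_now = (step + 1) % accum_steps == 0
#     ctx = contextlib.nullcontext() if sync_now else ddp.no_sync()
#     with ctx:
#         ddp(batch).sum().backward()
#     if sync_now:
#         optimizer.step()
#         optimizer.zero_grad()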
def forward(self, *inputs, **kwargs):
self.reducer.save_thread_local_state()
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.logger.set_runtime_stats_and_log()
self.reducer.prepare_for_forward()
if self.ddp_uneven_inputs_config.ddp_join_enabled:
ones = torch.ones(1, device=self.device)
work = dist.all_reduce(ones, group=self.process_group, async_op=True)
self.reducer._set_forward_pass_work_handle(
work,
self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size,
)
# Calling _rebuild_buckets before forward computation,
# which may allocate new buckets before deallocating old buckets
# inside _rebuild_buckets. To save peak memory usage,
# call _rebuild_buckets before the peak memory usage increases
# during forward computation.
# This should be called only once during the whole training period.
if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
logging.info("Reducer buckets have been rebuilt in this iteration.")
if self.require_forward_param_sync:
self._sync_params()
if self.ddp_uneven_inputs_config.ddp_join_enabled:
# Notify joined ranks whether they should sync in backwards pass or not.
self._check_global_requires_backward_grad_sync(is_joined_rank=False)
if self.device_ids:
inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])
output = self.module(*inputs[0], **kwargs[0])
else:
output = self.module(*inputs, **kwargs)
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.require_forward_param_sync = True
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
# unused parameters. Only if `find_unused_parameters` is set.
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
self.require_forward_param_sync = False
return output
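# Minimal end-to-end usage sketch of the forward/backward flow above (hypothetical
# names: `rank`, `world_size`, `model`, `data_loader`, `loss_fn`, `opt`): forward
# runs through the wrapped module, backward triggers the reducer's bucketed
# allreduce, and the optimizer then steps on synchronized gradients.
#
# dist.init_process_group("nccl", rank=rank, world_size=world_size)
# ddp = DistributedDataParallel(model.to(rank), device_ids=[rank])
# for inputs, targets in data_loader:
#     opt.zero_grad()
#     loss = loss_fn(ddp(inputs.to(rank)), targets.to(rank))
#     loss.backward()
#     opt.step()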
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def _recursive_to(self, inputs, target_gpu):
r"""
Recursively moves input to the target_gpu.
"""
def | (obj):
if isinstance(obj, torch.Tensor):
if not self.use_side_stream_for_tensor_copies:
return (obj.to(target_gpu),)
else:
# Perform CPU -> GPU copies in a background stream. This code is
# motivated from similar logic in torch/nn/parallel/_functions.py
stream = _get_stream(target_gpu)
with torch.cuda.stream(stream):
output = obj.to(target_gpu)
# synchronize with the copy stream
with torch.cuda.device(target_gpu):
current_stream = torch.cuda.current_stream()
# Sync the current stream with the copy stream
current_stream.wait_stream(stream)
# Ensure tensor memory is not reused until work on
# main stream is complete
output.record_stream(current_stream)
return (output,)
if is_namedtuple(obj):
return [type(obj)(*args) for args in zip(*map(to_map, obj))]
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(to_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return [list(i) for i in zip(*map(to_map, obj))]
if isinstance(obj, dict) and len(obj) > 0:
return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]
return [obj]
# Avoid reference cycle
try:
res = to_map(inputs)
finally:
to_map = None
return res
def to_kwargs(self, inputs, kwargs, device_id):
inputs = self._recursive_to(inputs, device_id) if inputs else []
kwargs = self._recursive_to(kwargs, device_id) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def train(self, mode=True):
super(DistributedDataParallel, self).train(mode)
for module in self._module_copies[1:]:
module.train(mode)
return self
# When running in join mode, schedules an allreduce to match the one in the
# forward pass to determine the no. of currently active processes and whether
# all processes have joined.
def _schedule_shadow_all_reduce_for_fwd_pass(self):
all_active_procs = torch.zeros(1, device=self.device)
dist.all_reduce(all_active_procs, group=self.process_group)
return all_active_procs.item()
# When running in join mode, schedules an allreduce to notify joined ranks
# of whether backwards pass synchronization will run this iteration or not.
def _check_global_requires_backward_grad_sync(self, is_joined_rank):
if not is_joined_rank and self.require_backward_grad_sync:
requires_sync_tensor = torch.ones(1, device=self.device)
else:
requires_sync_tensor = torch.zeros(1, device=self.device)
work = dist.all_reduce(
requires_sync_tensor, group=self.process_group, async_op=True
)
return work, requires_sync_tensor
# When running in join mode, checks and performs sync of module buffers if
# the models have buffers that should be synchronized in the forward pass.
def _check_and_sync_module_buffers(self):
if self.will_sync_module_buffers():
authoritative_rank = self._find_common_rank(self._distributed_rank, False)
self._distributed_broadcast_coalesced(
self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank
)
# When running in join mode, agrees upon a common rank and broadcasts model
# parameters to all other ranks.
def _sync_final_model(self, is_last_joiner):
# Agree upon the process that will be the authoritative model copy.
# The current rank is a candidate for being the authoritative copy if
# is_last_joiner=True. We break ties via picking the larger rank.
self._authoritative_rank = self._find_common_rank(
self._distributed_rank, is_last_joiner
)
self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)
# Schedule allreduce ops to match those scheduled in the reducer's backward
# pass.
def _match_all_reduce_for_bwd_pass(self):
allreduce_work = []
# Schedule allreduce in the same order as Reducer schedules them, i.e.
# the order of the buckets. Retrieving the bucket order from the reducer
# ensures that we keep the same order in join mode, such as when bucket
# order is rebuilt dynamically.
all_bucket_tensors = self.reducer.get_bucket_tensors()
for bucket_tensors in all_bucket_tensors:
# Joined processes contribute zero gradient. In the case that
# divide_by_initial_world_size=True, we divide grads by the static
# world size, if not, the dividing factor is reduced by the number
# of joined processes.
zero_tensors = [torch.zeros_like(t) for t in bucket_tensors]
work = self.process_group.allreduce(zero_tensors)
allreduce_work.append(work)
for work in allreduce_work:
work.wait()
# Allreduces the used parameter mapping across ranks.
def _match_unused_params_allreduce(self):
locally_used_param_maps = self.reducer._get_local_used_maps()
self.process_group.allreduce(locally_used_param_maps)
@contextmanager
def join(self, divide_by_initial_world_size=True, enable=True):
r"""
A context manager to be used in conjunction with an instance of
:class:`torch.nn.parallel.DistributedDataParallel` to be
able to train with uneven inputs across participating processes.
This context manager will keep track of already-joined DDP processes,
and "shadow" the forward and backward passes by inserting collective
communication operations to match with the ones created by non-joined
DDP processes. This will ensure each collective call has a corresponding
call by already-joined DDP processes, preventing hangs or errors that
would otherwise happen when training with uneven inputs across
processes.
Once all DDP processes have joined, the context manager will broadcast
the model corresponding to the last joined process to all processes to
ensure the model is the same across all processes
(which is guaranteed by DDP).
To use this to enable training with uneven inputs across processes,
simply wrap this context manager around your training loop. No further
modifications to the model or data loading are required.
.. warning::
This module currently does not support custom distributed collective
operations in the forward pass, such as ``SyncBatchNorm`` or other
custom defined collectives in the model's forward pass.
Args:
divide_by_initial_world_size (bool): If ``True``, will divide
gradients by the initial ``world_size`` DDP training was launched
with. If ``False``, will compute the effective world size
(number of ranks that have not depleted their inputs yet) and
divide gradients by that during allreduce. Set
``divide_by_initial_world_size=True`` to ensure every input
sample including the uneven inputs have equal weight in terms of
how much they contribute to the global gradient. This is
achieved by always dividing the gradient by the initial
``world_size`` even when we encounter uneven inputs. If you set
this to ``False``, we divide the gradient by the remaining
number of nodes. This ensures parity with training on a smaller
``world_size`` although it also means the uneven inputs would
contribute more towards the global gradient. Typically, you
would want to set this to ``True`` for cases where the last few
inputs of your training job are uneven. In extreme cases, where
there is a large discrepancy in the number of inputs, setting
this to ``False`` might provide better results.
enable (bool): Whether to enable uneven input detection or not. Pass
in ``enable=False`` to disable in cases where you know that
inputs are even across participating processes. Default is
``True``.
Example::
>>> import torch
>>> import torch.distributed as dist
>>> import os
>>> import torch.multiprocessing as mp
>>> import torch.nn as nn
>>> # On each spawned worker
>>> def worker(rank):
>>> dist.init_process_group("nccl", rank=rank, world_size=2)
>>> torch.cuda.set_device(rank)
>>> model = nn.Linear(1, 1, bias=False).to(rank)
>>> model = torch.nn.parallel.DistributedDataParallel(
>>> model, device_ids=[rank], output_device=rank
>>> )
>>> # Rank 1 gets one more input than rank 0.
>>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]
>>> with model.join():
>>> for _ in range(5):
>>> for inp in inputs:
>>> loss = model(inp).sum()
>>> loss.backward()
>>> # Without the join() API, the below synchronization will hang
>>> # blocking for rank 1's allreduce to complete.
>>> torch.cuda.synchronize(device=rank)
"""
# Log uneven input API usage.
self.logger._set_uneven_input_join()
try:
has_error = False
self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
ddp_join_enabled=enable,
ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,
)
yield
except Exception as e:
# Set to skip any processing in the finally block.
has_error = True
raise e
finally:
# Skip any processing to let the exception immediately be raised if
# there was one.
if enable and not has_error:
all_procs_joined = False
is_last_joiner = True
i = 0
WARN_THRESHOLD = 1000
warnings.simplefilter("once")
while not all_procs_joined:
if i > WARN_THRESHOLD:
my_rank = self._distributed_rank
warnings.warn(
"Detected uneven input skew of greater "
f"than {WARN_THRESHOLD}. This means that rank {my_rank} "
f"has at least {WARN_THRESHOLD} fewer inputs than "
"other currently active ranks. This level of skew could "
"lead to performance degradation during training."
)
# Schedules allreduce to match fwd pass allreduce in non-joined procs
num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()
if num_active_procs == 0:
all_procs_joined = True
else:
# Some DDP process still needs to be joined.
if is_last_joiner:
is_last_joiner = False
# It will rebuild buckets only once during training period
self.reducer._rebuild_buckets()
# Schedule a corresponding broadcast if we are syncing module
# buffers in the forward pass.
self._check_and_sync_module_buffers()
(
work,
should_sync_backwards_tensor,
) = self._check_global_requires_backward_grad_sync(
is_joined_rank=True
)
work.wait()
# If nonzero, then we should sync in the bwd pass.
should_sync_backwards = should_sync_backwards_tensor.item() != 0
# Forward param sync is disabled in the next iteration
# if we are skipping grad sync this iteration. Hence, we
# set require_forward_param_sync appropriately here.
self.require_forward_param_sync = should_sync_backwards
if not should_sync_backwards:
continue
# Schedules one allreduce per gradient bucket to match
# the backwards pass allreduce.
self._match_all_reduce_for_bwd_pass()
# Check if we need to allreduce locally unused params.
if self.find_unused_parameters:
self._match_unused_params_allreduce()
# It will push rebuilt params only once during training period
self.reducer._push_all_rebuilt_params()
i += 1
# All procs joined. Agree on authoritative rank and broadcast the model.
self._sync_final_model(is_last_joiner)
def register_comm_hook(self, state: object, hook: callable):
r"""
Registers a communication hook which is an enhancement that provides a
flexible hook to users where they can specify how DDP aggregates gradients
across multiple workers.
This hook would be very useful for researchers to try out new ideas. For
example, this hook can be used to implement several algorithms like GossipGrad
and gradient compression which involve different communication strategies for
parameter syncs while running Distributed DataParallel training.
Args:
state (object): Passed to the hook to maintain any state information during the training process.
Examples include error feedback in gradient compression,
peers to communicate with next in GossipGrad, etc.
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
hook (callable): Averages gradient tensors across workers and defined as:
``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future``:
This function is called once the bucket is ready. The
hook can perform whatever processing is needed and return
a Future indicating completion of any async work (ex: allreduce).
If the hook doesn't perform any communication, it can also
just return a completed Future. The Future should hold the
new value of grad bucket's tensors. Once a bucket is ready,
c10d reducer would call this hook and use the tensors returned
by the Future and copy grads to individual parameters.
We also provide an API called ``get_future`` to retrieve a
Future associated with the completion of ``c10d.ProcessGroup.work``.
.. warning ::
Grad bucket's tensors will not be predivided by world_size. The user is responsible
for dividing by the world_size when using operations like allreduce.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
The Future object that the hook returns should contain a result that has the same
shape as the tensors inside the grad bucket.
.. warning ::
DDP communication hook does not support single-process multiple-device mode.
Gradbucket tensors should consist of only a single tensor.
.. warning ::
``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``
which is an internal type and should be used with caution. It can still be used by
``register_comm_hook`` API, but it is subject to some subtle differences compared
to ``torch.futures.Future``.
.. warning ::
DDP communication hook is experimental and subject to change.
Example::
Below is an example of a noop hook that returns the same tensors.
>>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future:
>>> fut = torch.futures.Future()
>>> fut.set_result(bucket.get_tensors())
>>> return fut
>>> ddp.register_comm_hook(state = None, hook = noop)
Example::
Below is an example of a Parallel SGD algorithm where gradients are encoded before
allreduce, and then decoded after allreduce.
>>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future:
>>> tensors = [t / process_group.world_size for t in bucket.get_tensors()]
>>> encoded_tensors = encode(tensors) # encode gradients
>>> fut = process_group.allreduce(encoded_tensors).get_future()
>>> # Define the then callback to decode.
>>> def decode_callback(fut):
>>>     decoded_tensors = decode(fut.value()) # decode gradients
>>>     return decoded_tensors
>>> return fut.then(decode_callback)
>>> ddp.register_comm_hook(state = None, hook = encode_and_decode)
"""
self._check_comm_hook(hook)
self.logger._set_comm_hook_name(hook.__qualname__)
dist._register_comm_hook(self.reducer, state, hook)
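# Hedged example of a hook one might register here (assumes the GradBucket and
# get_future() APIs described in the docstring above; `some_process_group` is a
# hypothetical NCCL process group): average gradients by world size and return the
# allreduce Future.
#
# def allreduce_avg_hook(process_group, bucket):
#     tensors = [t / process_group.size() for t in bucket.get_tensors()]
#     return process_group.allreduce(tensors).get_future()
#
# ddp.register_comm_hook(state=some_process_group, hook=allreduce_avg_hook)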
def _register_builtin_comm_hook(self, comm_hook_type):
r"""
Registers a built-in communication hook that specifies how DDP
aggregates gradients across multiple workers.
The built-in hooks aim to provide efficient C++ implementations for certain hooks,
which might not be as efficient if implemented in Python using a Python communication hook.
Args:
comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as
ALLREDUCE, FP16_COMPRESS, etc.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
DDP communication hook does not support single-process multiple-device mode.
Gradbucket tensors should consist of only a single tensor.
.. warning ::
DDP communication hook is experimental and subject to change.
Example::
Below is an example of a FP16 compression where gradients are
compressed into 16-bit floating-point numbers before allreduce, and
then decompressed after allreduce.
>>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)
"""
self.logger._set_comm_hook_name(str(comm_hook_type))
dist._register_builtin_comm_hook(self.reducer, comm_hook_type)
def _distributed_broadcast_coalesced(
self, tensors, buffer_size, authoritative_rank=0
):
dist._broadcast_coalesced(
self.process_group, tensors, buffer_size, authoritative_rank
)
def will_sync_module_buffers(self):
return (
self.require_forward_param_sync
and self.broadcast_buffers
and len(self.modules_buffers[0]) > 0
)
def _find_common_rank(self, input_rank, rank_cond):
# -1 indicates that this rank is not under consideration to be the
# common_rank
rank_to_use = torch.tensor(
[input_rank if rank_cond else -1],
device=self.device,
)
dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)
if rank_to_use.item() == -1:
raise ValueError(
"BUG! Expected rank_cond to be true for at least one process."
)
return rank_to_use.item()
def _sync_params(self):
with torch.no_grad():
# module buffer sync
if self.will_sync_module_buffers():
# Synchronize buffers across processes.
# If we are running DDP with the join manager, we have to agree
# upon a rank to sync module buffers from, since rank 0 may
# already have been joined and have stale module buffers.
if self.ddp_uneven_inputs_config.ddp_join_enabled:
authoritative_rank = self._find_common_rank(
self._distributed_rank, True
)
else:
# The process with rank 0 is considered the authoritative copy.
authoritative_rank = 0
self._distributed_broadcast_coalesced(
self.modules_buffers[0],
self.broadcast_bucket_size,
authoritative_rank,
)
def _passing_sync_batchnorm_handle(self, module_copies):
for dev_idx, module in enumerate(module_copies):
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm):
assert (
self.device_type != "cpu"
), "SyncBatchNorm layers only work with GPU modules"
layer._specify_ddp_gpu_num(1)
def _check_comm_hook(self, hook):
if not callable(hook):
raise TypeError("Communication hook must be callable.")
sig = inspect.signature(hook)
if (
sig.parameters["bucket"].annotation != inspect._empty
and sig.parameters["bucket"].annotation != dist.GradBucket
):
raise ValueError(
"Communication hook: bucket annotation should be dist.GradBucket."
)
if sig.return_annotation != inspect._empty and (
sig.return_annotation != torch.futures.Future
and sig.return_annotation != torch._C.Future
):
raise ValueError(
"Communication hook: return annotation should be torch.futures.Future or torch._C.Future."
)
@property
def _distributed_rank(self):
return dist.get_rank(self.process_group)
@staticmethod
def _set_params_and_buffers_to_ignore_for_model(
module, params_and_buffers_to_ignore
):
# This is a workaround to set parameters and buffers DDP should ignore
# during synchronization. It will be removed when the API is finalized
# as part of addressing https://github.com/pytorch/pytorch/issues/43690.
module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore
def get_ddp_logging_data(self):
r"""
This interface can be called after DistributedDataParallel() is
constructed. It returns DDPLoggingData for debugging and analysis.
More detailed explanation of the fields in DDPLoggingData are in
``torch/c10/util/Logging.h``.
"""
return self.logger._get_ddp_logging_data()
def set_ddp_runtime_logging_sample_rate(self, sample_rate):
r"""
This interface allows users to set the sample_rate for collecting
runtime stats. Runtime stats are recorded for the first 10 iterations;
after that, they are recorded once every "sample_rate" training
iterations. By default, runtime stats are recorded for the first 10
iterations and then once every "kDDPRuntimeLoggingSampleRate=100"
training iterations.
"""
if sample_rate < 1:
raise ValueError(
"DDP runtime logging sample rate should be equal or greater than 1"
)
self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)
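# Usage sketch for the two logging interfaces above (hypothetical `ddp` instance):
# record runtime stats every 50 iterations after the first 10, then inspect the
# construction-time and runtime logging data.
#
# ddp.set_ddp_runtime_logging_sample_rate(50)
# logging_data = ddp.get_ddp_logging_data()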
| to_map |
variables_b.js | var searchData= | ]; | [
['mat',['mat',['../structsalle__s.html#ad9934630b58b551f8f185dead2006ba6',1,'salle_s']]] |
lexer.go | package lexer
import (
"fmt"
"io"
"log"
)
var (
UnexpectedTokenErr = fmt.Errorf("unexpected token")
)
type Tokenizer interface {
FromStrLit(lit string, lastToken int) int
}
type LexerImpl struct {
scanner *Scanner
tokenizer Tokenizer
Result interface{}
}
type LexerResult struct {
Token int
Literal string
}
func NewLexerImpl(r io.Reader, t Tokenizer) *LexerImpl |
func (li *LexerImpl) Lex(lastToken int) (*LexerResult, error) {
result := &LexerResult{}
SCAN:
tok, lit := li.scanner.Scan()
switch tok {
case EOF:
// Stop lex
case IDENT, NUMBER, LEFT_PARENTHESIS, RIGHT_PARENTHESIS, COMMA, SEMICOLON, EQUAL, ANGLE_LEFT, ANGLE_RIGHT:
result.Literal = lit
case WS:
// Skip
goto SCAN
default:
log.Printf("UnexpectedToken: tok is %d, lit is %s\n", tok, lit)
return nil, UnexpectedTokenErr
}
result.Token = li.tokenizer.FromStrLit(lit, lastToken)
return result, nil
}
| {
return &LexerImpl{
scanner: NewScanner(r),
tokenizer: t,
}
} |
Widget_nl.js | // All material copyright ESRI, All Rights Reserved, unless otherwise specified.
// See http://js.arcgis.com/3.15/esri/copyright.txt and http://www.arcgis.com/apps/webappbuilder/copyright.txt for details.
//>>built | define({"widgets/Stream/nls/strings":{_widgetLabel:"Stroom",noStreamLayer:"Geen vooraf geconfigureerde stroomlaagopties beschikbaar.",streamControls:"Bedieningselementen voor streaming",startStreaming:"Streaming starten",stopStreaming:"Streaming stoppen",clearObservation:"Vorige observaties wissen",streamFilter:"Filter",showAllObservations:"Alle observaties weergeven",showMapAreaObservations:"Observaties beperken tot het huidige kaartgebied",showObservationsByDrawing:"Observaties beperken tot tekenen op de kaart",
useSpatialFilter:"Ruimtelijke filter gebruiken om observaties te beperken",useAttributeFilter:"Attribuutfilter gebruiken om observaties te beperken",selectStreamLayer:"Stroomlaag selecteren",previousObservations:"Vorige observaties tekenen",applyAttrFilter:"Toepassen",_localized:{}}}); |
|
environments.go | package timeseriesinsights
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// EnvironmentsClient is the time Series Insights client
type EnvironmentsClient struct {
BaseClient
}
// NewEnvironmentsClient creates an instance of the EnvironmentsClient client.
func NewEnvironmentsClient(subscriptionID string) EnvironmentsClient {
return NewEnvironmentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewEnvironmentsClientWithBaseURI creates an instance of the EnvironmentsClient client.
func NewEnvironmentsClientWithBaseURI(baseURI string, subscriptionID string) EnvironmentsClient {
return EnvironmentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate create or update an environment in the specified subscription and resource group.
// Parameters:
// resourceGroupName - name of an Azure Resource group.
// environmentName - name of the environment
// parameters - parameters for creating an environment resource.
func (client EnvironmentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, environmentName string, parameters EnvironmentCreateOrUpdateParameters) (result EnvironmentsCreateOrUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnvironmentsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: environmentName,
Constraints: []validation.Constraint{{Target: "environmentName", Name: validation.MaxLength, Rule: 90, Chain: nil},
{Target: "environmentName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "environmentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.Sku.Capacity", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.Sku.Capacity", Name: validation.InclusiveMaximum, Rule: int64(10), Chain: nil},
{Target: "parameters.Sku.Capacity", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil},
}},
}},
{Target: "parameters.EnvironmentCreationProperties", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.EnvironmentCreationProperties.DataRetentionTime", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("timeseriesinsights.EnvironmentsClient", "CreateOrUpdate", err.Error())
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, environmentName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client EnvironmentsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, environmentName string, parameters EnvironmentCreateOrUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"environmentName": autorest.Encode("path", environmentName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-11-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters)) | return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client EnvironmentsClient) CreateOrUpdateSender(req *http.Request) (future EnvironmentsCreateOrUpdateFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client EnvironmentsClient) CreateOrUpdateResponder(resp *http.Response) (result EnvironmentResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusNotFound),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the environment with the specified name in the specified subscription and resource group.
// Parameters:
// resourceGroupName - name of an Azure Resource group.
// environmentName - the name of the Time Series Insights environment associated with the specified resource
// group.
func (client EnvironmentsClient) Delete(ctx context.Context, resourceGroupName string, environmentName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnvironmentsClient.Delete")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, environmentName)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Delete", resp, "Failure responding to request")
}
return
}
// DeletePreparer prepares the Delete request.
func (client EnvironmentsClient) DeletePreparer(ctx context.Context, resourceGroupName string, environmentName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"environmentName": autorest.Encode("path", environmentName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-11-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client EnvironmentsClient) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client EnvironmentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the environment with the specified name in the specified subscription and resource group.
// Parameters:
// resourceGroupName - name of an Azure Resource group.
// environmentName - the name of the Time Series Insights environment associated with the specified resource
// group.
// expand - setting $expand=status will include the status of the internal services of the environment in the
// Time Series Insights service.
func (client EnvironmentsClient) Get(ctx context.Context, resourceGroupName string, environmentName string, expand string) (result EnvironmentResource, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnvironmentsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, environmentName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client EnvironmentsClient) GetPreparer(ctx context.Context, resourceGroupName string, environmentName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"environmentName": autorest.Encode("path", environmentName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-11-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client EnvironmentsClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client EnvironmentsClient) GetResponder(resp *http.Response) (result EnvironmentResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByResourceGroup lists all the available environments associated with the subscription and within the specified
// resource group.
// Parameters:
// resourceGroupName - name of an Azure Resource group.
func (client EnvironmentsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result EnvironmentListResponse, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnvironmentsClient.ListByResourceGroup")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "ListByResourceGroup", nil, "Failure preparing request")
return
}
resp, err := client.ListByResourceGroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "ListByResourceGroup", resp, "Failure sending request")
return
}
result, err = client.ListByResourceGroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "ListByResourceGroup", resp, "Failure responding to request")
}
return
}
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
func (client EnvironmentsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-11-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
// http.Response Body if it receives an error.
func (client EnvironmentsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client EnvironmentsClient) ListByResourceGroupResponder(resp *http.Response) (result EnvironmentListResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListBySubscription lists all the available environments within a subscription, irrespective of the resource groups.
func (client EnvironmentsClient) ListBySubscription(ctx context.Context) (result EnvironmentListResponse, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnvironmentsClient.ListBySubscription")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.ListBySubscriptionPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "ListBySubscription", nil, "Failure preparing request")
return
}
resp, err := client.ListBySubscriptionSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "ListBySubscription", resp, "Failure sending request")
return
}
result, err = client.ListBySubscriptionResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "ListBySubscription", resp, "Failure responding to request")
}
return
}
// ListBySubscriptionPreparer prepares the ListBySubscription request.
func (client EnvironmentsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-11-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.TimeSeriesInsights/environments", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
// http.Response Body if it receives an error.
func (client EnvironmentsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
// closes the http.Response Body.
func (client EnvironmentsClient) ListBySubscriptionResponder(resp *http.Response) (result EnvironmentListResponse, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Update updates the environment with the specified name in the specified subscription and resource group.
// Parameters:
// resourceGroupName - name of an Azure Resource group.
// environmentName - the name of the Time Series Insights environment associated with the specified resource
// group.
// environmentUpdateParameters - request object that contains the updated information for the environment.
func (client EnvironmentsClient) Update(ctx context.Context, resourceGroupName string, environmentName string, environmentUpdateParameters EnvironmentUpdateParameters) (result EnvironmentsUpdateFuture, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EnvironmentsClient.Update")
defer func() {
sc := -1
if result.Response() != nil {
sc = result.Response().StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.UpdatePreparer(ctx, resourceGroupName, environmentName, environmentUpdateParameters)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Update", nil, "Failure preparing request")
return
}
result, err = client.UpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "timeseriesinsights.EnvironmentsClient", "Update", result.Response(), "Failure sending request")
return
}
return
}
// UpdatePreparer prepares the Update request.
func (client EnvironmentsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, environmentName string, environmentUpdateParameters EnvironmentUpdateParameters) (*http.Request, error) {
pathParameters := map[string]interface{}{
"environmentName": autorest.Encode("path", environmentName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-11-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPatch(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TimeSeriesInsights/environments/{environmentName}", pathParameters),
autorest.WithJSON(environmentUpdateParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client EnvironmentsClient) UpdateSender(req *http.Request) (future EnvironmentsUpdateFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client EnvironmentsClient) UpdateResponder(resp *http.Response) (result EnvironmentResource, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
} | |
__init__.py | import logging
import os
import jsons
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_socketio import SocketIO
from flaskr import db
from flaskr.api.vasps import Vasps
from flaskr.models.user_wallet import UserWallet
from flaskr.models.vasp_details import VaspDetails
from flaskr.service.socket_manager import SocketManager
from flaskr.simulator.vasp_simulator import VaspSimulator
def create_app(test_config=None):
# create and configure the app
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__, instance_relative_config=True)
CORS(app)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'vasps.sqlite')
)
|
print(jsons.dump(app.config))
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
simulator = setup_simulator(app)
setup_api(app)
setup_websockets(app, simulator)
return app
def setup_simulator(app: Flask):
# TODO: config file tells us which simulator to launch
vasp_details = VaspDetails('BOB-GUID', 'BobVASP', 'description', None, 'private-Key', 'public-key',
[
UserWallet('ROBERT-GUID', '18nxAxBktHZDrMoJ3N2fk9imLX8xNnYbNh'),
UserWallet('AMY-GUID', 'amy@bobvasp')
])
return VaspSimulator(vasp_details)
def setup_api(app: Flask):
api = Api(app)
api.add_resource(Vasps, '/vasps')
def setup_websockets(app: Flask, simulator: VaspSimulator):
socketio = SocketIO(app, cors_allowed_origins='*', logger=True, engineio_logger=True)
socketio.run(app)
socket_manager = SocketManager(socketio, simulator) | db.init_app(app)
logging.basicConfig(level=logging.DEBUG) |
day5.rs | use super::lib::*;
pub fn run() -> (Option<String>, Option<String>) {
let filename = "inputs/day5.txt";
let inputs = read_inputs(&filename);
let seat_ids = inputs
.lines()
.map(|line| convert_boarding_pass_to_binary(line))
.filter_map(|line| convert_from_binary(&line).ok());
let part_one = seat_ids.clone().max().unwrap();
let part_two = find_gap(&seat_ids.collect::<Vec<usize>>()).unwrap();
(Some(part_one.to_string()), Some(part_two.to_string()))
}
fn convert_boarding_pass_to_binary(line: &str) -> String {
line.replace("F", "0")
.replace("B", "1")
.replace("L", "0")
.replace("R", "1")
}
fn convert_from_binary(line: &str) -> Result<usize, std::num::ParseIntError> {
usize::from_str_radix(line, 2)
}
fn find_gap(list: &[usize]) -> Option<usize> {
let mut sorted_list = list.to_owned();
sorted_list.sort_unstable();
let mut gap = None;
for idx in 1..sorted_list.len() {
let current = *sorted_list.get(idx).unwrap();
let previous = *sorted_list.get(idx - 1).unwrap();
if current != previous + 1 {
gap = Some(previous + 1); | gap
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_convert_boarding_pass_to_binary() {
assert_eq!("0101100", convert_boarding_pass_to_binary("FBFBBFF"));
}
#[test]
fn test_convert_from_binary() -> Result<(), std::num::ParseIntError> {
assert_eq!(4, convert_from_binary("0100")?);
assert_eq!(44, convert_from_binary("0101100")?);
Ok(())
}
#[test]
fn test_find_gap() {
assert_eq!(Some(4), find_gap(&[1, 2, 3, 5, 6]));
assert_eq!(Some(4), find_gap(&[1, 2, 3, 5]));
}
} | break;
}
} |
sonar_interface.py | #!/usr/bin/env python
from ping360_sonar.sensor import Ping360
from numpy import pi, sqrt, tan, cos, sign
from brping import definitions
class SonarInterface:
samplePeriodTickDuration = 25e-9
firmwareMinTransmitDuration = 5
firmwareMaxTransmitDuration = 500
firmwareMaxSamples = 1200
firmwareMinSamplePeriod = 80
maxDurationRatio = 64e6
def __init__(self, port, baudrate, fallback_emulated):
self.angle = 0
try:
self.sonar = Ping360(port, baudrate)
if self.sonar.initialize():
return
except:
pass
if not fallback_emulated:
raise RuntimeError('Cannot initialize sonar')
print('Using emulated sonar')
self.sonar = None
def configureAngles(self, aperture_deg, step_deg, ensure_divisor):
# to gradians
target_half_aperture = int(aperture_deg*200/360+0.5)
best_half_aperture = target_half_aperture
self.angle_step = int(round(step_deg*400/360))
# ensure angle_step is a divisor of max-min in gradians, necessary for LaserScan messages
if ensure_divisor:
# look around step, allow increased aperture
target_step = self.angle_step
# not too far from requested aperture, as close as possible to requested step (impacts turn duration)
computeCost = lambda step,half_aperture: 1000 if half_aperture%step != 0 else abs(step-target_step) + abs(half_aperture-target_half_aperture)
best_cost = computeCost(self.angle_step, target_half_aperture)
if best_cost != 0:
for step in range(1, target_step*2):
for half_aperture in range(target_half_aperture, min(target_half_aperture+10, 200)+1):
cost = computeCost(step, half_aperture)
if cost < best_cost:
best_cost = cost
self.angle_step = step
best_half_aperture = half_aperture
self.angle_min = -best_half_aperture
self.angle_max = best_half_aperture
if self.angle_max == 200:
self.angle_max -= self.angle_step
if self.angle < self.angle_min or self.angle > self.angle_max or (self.angle-self.angle_min) % self.angle_step != 0:
self.angle = 0
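# Worked example of the conversion and divisor search above (illustrative numbers):
# aperture_deg=90, step_deg=3 gives target_half_aperture = int(90*200/360 + 0.5) = 50
# gradians and angle_step = round(3*400/360) = 3. Since 50 % 3 != 0, the search
# considers nearby (step, half_aperture) pairs and settles on angle_step=2 with a
# half aperture of 50 (cost |2-3| = 1), trading a slightly finer step for divisibility.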
@staticmethod
def grad2rad(grad):
return grad*pi/200
def angleMin(self):
return self.grad2rad(self.angle_min)
def angleMax(self):
return self.grad2rad(self.angle_max)
def angleStep(self):
return self.grad2rad(self.angle_step)
def currentAngle(self):
return self.grad2rad(self.angle)
def angleCount(self):
return (self.angle_max-self.angle_min)//self.angle_step
def angleIndex(self):
if self.angle_step > 0:
return (self.angle-self.angle_min)//self.angle_step
return (self.angle-self.angle_max)//self.angle_step
def rangeFrom(self, index):
return (index+1)*self.max_range/self.samples
def configureTransducer(self, gain, frequency, speed_of_sound, max_range):
self.gain = gain
self.frequency = frequency
self.samples = int(min(self.firmwareMaxSamples,2*max_range/(self.firmwareMinSamplePeriod*speed_of_sound*self.samplePeriodTickDuration)))
        self.sample_period = int((2.*max_range)/(self.samples*speed_of_sound*self.samplePeriodTickDuration))
        # Per firmware engineer:
        # 1. Starting point is TxPulse in usec =
        #    ((one-way range in metres) * 8000) / (velocity of sound in metres per second)
        # 2. Then check that TxPulse is wide enough for the currently selected sample interval
        #    in usec, i.e. if TxPulse < (2.5 * sample interval) then TxPulse = (2.5 * sample interval)
        # 3. Perform limit checking
        # 1
one_way_duration_us = (8000.*max_range)/speed_of_sound
# 2 (transmit duration is microseconds, sample_period_ns is nanoseconds) | self.transmit_duration = max(2.5*sample_period_ns/1000, one_way_duration_us)
# 3 ensure bounds
if self.transmit_duration < self.firmwareMinTransmitDuration:
self.transmit_duration = self.firmwareMinTransmitDuration
else:
max_duration = min(self.firmwareMaxTransmitDuration, sample_period_ns*self.maxDurationRatio)
if self.transmit_duration > max_duration:
self.transmit_duration = max_duration
self.transmit_duration = int(self.transmit_duration)
def transmitDuration(self):
# microseconds to seconds
return self.transmit_duration/1e6
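    # updateAngle advances the head by one step. For a full 360° scan (angle_min == -200)
    # it wraps back to angle_min after the last angle and reports the end of the turn;
    # for a sector scan it reverses direction at the sector edges instead.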
def updateAngle(self):
self.angle += self.angle_step
if self.angle_min == -200:
# full scan
end_turn = self.angle + self.angle_step > self.angle_max
if self.angle > self.angle_max:
self.angle = self.angle_min
return end_turn
# sector scan, check near end of sector
if self.angle + self.angle_step >= self.angle_max or self.angle + self.angle_step <= self.angle_min:
self.angle_step *= -1
return True
return False
def read(self):
# update angle before transmit
end_turn = self.updateAngle()
if self.sonar is not None:
print(f'transmit: {self.transmit_duration}')
self.sonar.control_transducer(
0, # reserved
self.gain,
self.angle,
self.transmit_duration,
self.sample_period,
self.frequency,
self.samples,
1,
0)
self.sonar.wait_message([definitions.PING360_DEVICE_DATA, definitions.COMMON_NACK], 4.0)
self.data = bytearray(self.sonar._data)
return (len(self.data) != 0, end_turn)
# emulated sonar
from random import randint
from time import sleep
self.data = [0 for _ in range(self.samples)]
scale = 5*abs((self.angle+400) % 400 - 200)
for i in range(self.samples):
if randint(self.samples,2*self.samples) < 1.1*i + scale:
self.data[i] = randint(220, 255)
# emulate transmit duration in microseconds
#sleep(self.transmit_duration/1000000)
return (True, end_turn)
# handles an angular sector of the image
class Bound:
radius = 0
def __init__(self, x, tm, tM):
self.x = x
if type(tM) == int:
self.low = Bound.clamp(tm*x)
self.up = int(tM*sqrt(Bound.radius**2-x**2-1))
else:
self.low = Bound.clamp(x*tm)
self.up = Bound.clamp(x*tM)
if self.up**2 + x**2 > Bound.radius**2:
self.up = int(sign(self.up) * sqrt(Bound.radius**2-x**2-1))
if self.up < self.low:
self.low,self.up = self.up,self.low
    @staticmethod
def clamp(coord):
if coord < -Bound.radius+1:
return -Bound.radius+1
elif coord > Bound.radius-1:
return Bound.radius-1
return int(coord)
class Sector:
def __init__(self):
self.dr = None
def configure(self, samples, radius):
self.dr = radius/samples
Bound.radius = radius
def init(self, angle, step):
angle_min = angle-step/2
angle_max = angle+step/2
xmin, xmax,same_side = self.xLimits(angle_min, angle_max)
tm, tM = tan(angle_min), tan(angle_max)
self.bounds = []
if same_side:
# same side
if abs(tm) > abs(tM):
tm,tM = tM,tm
for x in range(xmin, xmax+1):
self.bounds.append(Bound(x,tm,tM))
else:
f = 1 if abs(angle-pi/2) < abs(angle+pi/2) else -1
if f == -1:
tm,tM = tM,tm
for x in range(xmin, 0):
self.bounds.append(Bound(x, tM,f))
for x in range(0, xmax+1):
self.bounds.append(Bound(x, tm,f))
self.cur = -1
def xLimits(self, angle_min, angle_max):
cm = cos(angle_min)
cM = cos(angle_max)
if cM < cm:
cm,cM = cM,cm
if cm*cM > 0:
if cM < 0:
cM = 0
else:
cm = 0
return Bound.clamp(round(Bound.radius*cm)), Bound.clamp(round(Bound.radius*cM)), cm*cM >= 0
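    # nextPoint is an iterator-style helper: each call yields (True, x, y, sample_index)
    # for one pixel of the sector, where sample_index is the distance-based index into
    # the sample array, and (False, 0, 0, 0) once the sector is exhausted.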
def nextPoint(self, x, y):
if self.cur == -1:
self.cur = 0
x = self.bounds[0].x
y = self.bounds[0].low
elif y < self.bounds[self.cur].up:
y += 1
else:
self.cur += 1
if self.cur == len(self.bounds):
return False, 0, 0, 0
x = self.bounds[self.cur].x
y = self.bounds[self.cur].low
return True, x, y, int(round(sqrt(x*x+y*y)/self.dr)) | sample_period_ns = self.sample_period * self.samplePeriodTickDuration |
about.js | /* eslint-disable react/display-name */
import React from 'react';
import { Link } from 'gatsby';
import AboutSection from '../components/aboutSection';
import Header from '../components/header'; |
import styles from './index.module.scss';
export default () => (
<div>
<Galaxy />
<Header />
<div className={styles.linkWrapper}>
<Link to="/" id={styles.link} className={styles.glow}>Home</Link>
</div>
<SocialNav />
<main>
<div className={styles.pageContainer}>
<AboutSection />
</div>
</main>
</div>
); | import SocialNav from '../components/socialNav';
import Galaxy from '../components/galaxy/galaxy'; |
setShInchAction.ts | import { IAction } from "../typeDefs/action";
import { IAtcViolMsgAppState } from "../typeDefs/atcViolMsgAppState";
import { ActionType } from "./actionType";
export interface ISetShInchPayload {
shInch: string
}
| payload: ISetShInchPayload
}
export function setShInchAction(shInch: string): ISetShInchAction {
return {
type: ActionType.SET_SH_INCH,
payload: { shInch }
};
}
export const setAtcPageShInchReducer = (state: IAtcViolMsgAppState, action: ISetShInchAction): IAtcViolMsgAppState => {
return {
...state,
ui: {
...state.ui,
sInChargeStr: action.payload.shInch
}
};
} | export interface ISetShInchAction extends IAction {
type: ActionType.SET_SH_INCH, |
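// Usage sketch (assumes a Redux-style dispatch wired to setAtcPageShInchReducer):
//   dispatch(setShInchAction("J. Smith"));  // ui.sInChargeStr becomes "J. Smith"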
merger.rs | use std::cmp;
use std::collections::HashMap;
use std::sync::Arc;
use itertools::Itertools;
use measure_time::debug_time;
use tantivy_bitpacker::minmax;
use crate::core::{Segment, SegmentReader};
use crate::docset::{DocSet, TERMINATED};
use crate::error::DataCorruption;
use crate::fastfield::{
AliveBitSet, CompositeFastFieldSerializer, DynamicFastFieldReader, FastFieldDataAccess,
FastFieldReader, FastFieldStats, MultiValueLength, MultiValuedFastFieldReader,
};
use crate::fieldnorm::{FieldNormReader, FieldNormReaders, FieldNormsSerializer, FieldNormsWriter};
use crate::indexer::doc_id_mapping::{expect_field_id_for_sort_field, SegmentDocIdMapping};
use crate::indexer::SegmentSerializer;
use crate::postings::{InvertedIndexSerializer, Postings, SegmentPostings};
use crate::schema::{Cardinality, Field, FieldType, Schema};
use crate::store::StoreWriter;
use crate::termdict::{TermMerger, TermOrdinal};
use crate::{
DocId, IndexSettings, IndexSortByField, InvertedIndexReader, Order, SegmentComponent,
SegmentOrdinal,
};
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
/// We do not allow segments with more than 2^31 - 1 documents.
pub const MAX_DOC_LIMIT: u32 = 1 << 31;
fn estimate_total_num_tokens_in_single_segment(
reader: &SegmentReader,
field: Field,
) -> crate::Result<u64> {
// There are no deletes. We can simply use the exact value saved into the posting list.
// Note that this value is not necessarily exact as it could have been the result of a merge
// between segments themselves containing deletes.
if !reader.has_deletes() {
return Ok(reader.inverted_index(field)?.total_num_tokens());
}
    // When there are deletes, we use an approximation: either the fieldnorms
    // (when available) or, failing that, a pro-rata of the number of alive documents.
if let Some(fieldnorm_reader) = reader.fieldnorms_readers().get_field(field)? {
let mut count: [usize; 256] = [0; 256];
for doc in reader.doc_ids_alive() {
let fieldnorm_id = fieldnorm_reader.fieldnorm_id(doc);
count[fieldnorm_id as usize] += 1;
}
let total_num_tokens = count
.iter()
.cloned()
.enumerate()
.map(|(fieldnorm_ord, count)| {
count as u64 * u64::from(FieldNormReader::id_to_fieldnorm(fieldnorm_ord as u8))
})
.sum::<u64>();
return Ok(total_num_tokens);
}
// There are no fieldnorms available.
    // Here we just do a pro-rata between the overall number of tokens and the ratio of
    // documents alive.
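    // e.g. 1_000 tokens in the segment and 80 of 100 docs alive -> ~800 tokens.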
let segment_num_tokens = reader.inverted_index(field)?.total_num_tokens();
if reader.max_doc() == 0 {
// That supposedly never happens, but let's be a bit defensive here.
return Ok(0u64);
}
let ratio = reader.num_docs() as f64 / reader.max_doc() as f64;
Ok((segment_num_tokens as f64 * ratio) as u64)
}
fn estimate_total_num_tokens(readers: &[SegmentReader], field: Field) -> crate::Result<u64> {
let mut total_num_tokens: u64 = 0;
for reader in readers {
total_num_tokens += estimate_total_num_tokens_in_single_segment(reader, field)?;
}
Ok(total_num_tokens)
}
pub struct IndexMerger {
index_settings: IndexSettings,
schema: Schema,
pub(crate) readers: Vec<SegmentReader>,
max_doc: u32,
}
fn compute_min_max_val(
u64_reader: &impl FastFieldReader<u64>,
segment_reader: &SegmentReader,
) -> Option<(u64, u64)> {
if segment_reader.max_doc() == 0 {
return None;
}
if segment_reader.alive_bitset().is_none() {
// no deleted documents,
// we can use the previous min_val, max_val.
return Some((u64_reader.min_value(), u64_reader.max_value()));
}
// some deleted documents,
// we need to recompute the max / min
minmax(
segment_reader
.doc_ids_alive()
.map(|doc_id| u64_reader.get(doc_id)),
)
}
struct TermOrdinalMapping {
per_segment_new_term_ordinals: Vec<Vec<TermOrdinal>>,
}
impl TermOrdinalMapping {
fn new(max_term_ords: Vec<TermOrdinal>) -> TermOrdinalMapping {
TermOrdinalMapping {
per_segment_new_term_ordinals: max_term_ords
.into_iter()
.map(|max_term_ord| vec![TermOrdinal::default(); max_term_ord as usize])
.collect(),
}
}
fn register_from_to(&mut self, segment_ord: usize, from_ord: TermOrdinal, to_ord: TermOrdinal) {
self.per_segment_new_term_ordinals[segment_ord][from_ord as usize] = to_ord;
}
fn get_segment(&self, segment_ord: usize) -> &[TermOrdinal] {
&(self.per_segment_new_term_ordinals[segment_ord])[..]
}
fn max_term_ord(&self) -> TermOrdinal {
self.per_segment_new_term_ordinals
.iter()
.flat_map(|term_ordinals| term_ordinals.iter().cloned().max())
.max()
.unwrap_or_default()
}
}
struct DeltaComputer {
buffer: Vec<u32>,
}
impl DeltaComputer {
fn new() -> DeltaComputer {
DeltaComputer {
buffer: vec![0u32; 512],
}
}
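    /// Delta-encodes a sorted position list, e.g. `[3, 5, 11]` -> `[3, 2, 6]`.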
fn compute_delta(&mut self, positions: &[u32]) -> &[u32] {
if positions.len() > self.buffer.len() {
self.buffer.resize(positions.len(), 0u32);
}
let mut last_pos = 0u32;
for (cur_pos, dest) in positions.iter().cloned().zip(self.buffer.iter_mut()) {
*dest = cur_pos - last_pos;
last_pos = cur_pos;
}
&self.buffer[..positions.len()]
}
}
impl IndexMerger {
pub fn open(
schema: Schema,
index_settings: IndexSettings,
segments: &[Segment],
) -> crate::Result<IndexMerger> {
let delete_bitsets = segments.iter().map(|_| None).collect_vec();
Self::open_with_custom_alive_set(schema, index_settings, segments, delete_bitsets)
}
// Create merge with a custom delete set.
// For every Segment, a delete bitset can be provided, which
// will be merged with the existing bit set. Make sure the index
// corresponds to the segment index.
//
// If `None` is provided for custom alive set, the regular alive set will be used.
// If a delete_bitsets is provided, the union between the provided and regular
// alive set will be used.
//
// This can be used to merge but also apply an additional filter.
    // One use case is demux, which is basically taking a list of
    // segments and partitioning them, e.g. by a value in a field.
pub fn open_with_custom_alive_set(
schema: Schema,
index_settings: IndexSettings,
segments: &[Segment],
alive_bitset_opt: Vec<Option<AliveBitSet>>,
) -> crate::Result<IndexMerger> {
let mut readers = vec![];
for (segment, new_alive_bitset_opt) in segments.iter().zip(alive_bitset_opt.into_iter()) {
if segment.meta().num_docs() > 0 {
let reader =
SegmentReader::open_with_custom_alive_set(segment, new_alive_bitset_opt)?;
readers.push(reader);
}
}
let max_doc = readers.iter().map(|reader| reader.num_docs()).sum();
if let Some(sort_by_field) = index_settings.sort_by_field.as_ref() {
readers = Self::sort_readers_by_min_sort_field(readers, sort_by_field)?;
}
// sort segments by their natural sort setting
if max_doc >= MAX_DOC_LIMIT {
let err_msg = format!(
"The segment resulting from this merge would have {} docs,which exceeds the limit \
{}.",
max_doc, MAX_DOC_LIMIT
);
return Err(crate::TantivyError::InvalidArgument(err_msg));
}
Ok(IndexMerger {
index_settings,
schema,
readers,
max_doc,
})
}
fn sort_readers_by_min_sort_field(
readers: Vec<SegmentReader>,
sort_by_field: &IndexSortByField,
) -> crate::Result<Vec<SegmentReader>> {
// presort the readers by their min_values, so that when they are disjunct, we can use
// the regular merge logic (implicitly sorted)
let mut readers_with_min_sort_values = readers
.into_iter()
.map(|reader| {
let accessor = Self::get_sort_field_accessor(&reader, sort_by_field)?;
Ok((reader, accessor.min_value()))
})
.collect::<crate::Result<Vec<_>>>()?;
if sort_by_field.order.is_asc() {
readers_with_min_sort_values.sort_by_key(|(_, min_val)| *min_val);
} else {
readers_with_min_sort_values.sort_by_key(|(_, min_val)| std::cmp::Reverse(*min_val));
}
Ok(readers_with_min_sort_values
.into_iter()
.map(|(reader, _)| reader)
.collect())
}
fn | (
&self,
mut fieldnorms_serializer: FieldNormsSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let fields = FieldNormsWriter::fields_with_fieldnorm(&self.schema);
let mut fieldnorms_data = Vec::with_capacity(self.max_doc as usize);
for field in fields {
fieldnorms_data.clear();
let fieldnorms_readers: Vec<FieldNormReader> = self
.readers
.iter()
.map(|reader| reader.get_fieldnorms_reader(field))
.collect::<Result<_, _>>()?;
for (doc_id, reader_ordinal) in doc_id_mapping.iter() {
let fieldnorms_reader = &fieldnorms_readers[*reader_ordinal as usize];
let fieldnorm_id = fieldnorms_reader.fieldnorm_id(*doc_id);
fieldnorms_data.push(fieldnorm_id);
}
fieldnorms_serializer.serialize_field(field, &fieldnorms_data[..])?;
}
fieldnorms_serializer.close()?;
Ok(())
}
fn write_fast_fields(
&self,
fast_field_serializer: &mut CompositeFastFieldSerializer,
mut term_ord_mappings: HashMap<Field, TermOrdinalMapping>,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
debug_time!("write_fast_fields");
for (field, field_entry) in self.schema.fields() {
let field_type = field_entry.field_type();
match field_type {
FieldType::Facet(_) => {
let term_ordinal_mapping = term_ord_mappings.remove(&field).expect(
"Logic Error in Tantivy (Please report). Facet field should have required \
a`term_ordinal_mapping`.",
);
self.write_hierarchical_facet_field(
field,
&term_ordinal_mapping,
fast_field_serializer,
doc_id_mapping,
)?;
}
FieldType::U64(ref options)
| FieldType::I64(ref options)
| FieldType::F64(ref options)
| FieldType::Date(ref options) => match options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => {
self.write_single_fast_field(field, fast_field_serializer, doc_id_mapping)?;
}
Some(Cardinality::MultiValues) => {
self.write_multi_fast_field(field, fast_field_serializer, doc_id_mapping)?;
}
None => {}
},
FieldType::Str(_) => {
// We don't handle str fast field for the moment
// They can be implemented using what is done
// for facets in the future.
}
FieldType::Bytes(byte_options) => {
if byte_options.is_fast() {
self.write_bytes_fast_field(field, fast_field_serializer, doc_id_mapping)?;
}
}
}
}
Ok(())
}
// used both to merge field norms, `u64/i64` single fast fields.
fn write_single_fast_field(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let (min_value, max_value) = self
.readers
.iter()
.filter_map(|reader| {
let u64_reader: DynamicFastFieldReader<u64> =
reader.fast_fields().typed_fast_field_reader(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
compute_min_max_val(&u64_reader, reader)
})
.reduce(|a, b| (a.0.min(b.0), a.1.max(b.1)))
.expect("Unexpected error, empty readers in IndexMerger");
let fast_field_readers = self
.readers
.iter()
.map(|reader| {
let u64_reader: DynamicFastFieldReader<u64> =
reader.fast_fields().typed_fast_field_reader(field).expect(
"Failed to find a reader for single fast field. This is a tantivy bug and \
it should never happen.",
);
u64_reader
})
.collect::<Vec<_>>();
let stats = FastFieldStats {
min_value,
max_value,
num_vals: doc_id_mapping.len() as u64,
};
#[derive(Clone)]
struct SortedDocIdFieldAccessProvider<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: &'a Vec<DynamicFastFieldReader<u64>>,
}
impl<'a> FastFieldDataAccess for SortedDocIdFieldAccessProvider<'a> {
fn get_val(&self, doc: u64) -> u64 {
let (doc_id, reader_ordinal) = self.doc_id_mapping[doc as usize];
self.fast_field_readers[reader_ordinal as usize].get(doc_id)
}
}
let fastfield_accessor = SortedDocIdFieldAccessProvider {
doc_id_mapping,
fast_field_readers: &fast_field_readers,
};
let iter1 = doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
fast_field_reader.get(*doc_id)
});
let iter2 = doc_id_mapping.iter().map(|(doc_id, reader_ordinal)| {
let fast_field_reader = &fast_field_readers[*reader_ordinal as usize];
fast_field_reader.get(*doc_id)
});
fast_field_serializer.create_auto_detect_u64_fast_field(
field,
stats,
fastfield_accessor,
iter1,
iter2,
)?;
Ok(())
}
/// Checks if the readers are disjunct for their sort property and in the correct order to be
/// able to just stack them.
pub(crate) fn is_disjunct_and_sorted_on_sort_property(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<bool> {
let reader_ordinal_and_field_accessors =
self.get_reader_with_sort_field_accessor(sort_by_field)?;
let everything_is_in_order = reader_ordinal_and_field_accessors
.into_iter()
.map(|reader| reader.1)
.tuple_windows()
.all(|(field_accessor1, field_accessor2)| {
if sort_by_field.order.is_asc() {
field_accessor1.max_value() <= field_accessor2.min_value()
} else {
field_accessor1.min_value() >= field_accessor2.max_value()
}
});
Ok(everything_is_in_order)
}
pub(crate) fn get_sort_field_accessor(
reader: &SegmentReader,
sort_by_field: &IndexSortByField,
) -> crate::Result<impl FastFieldReader<u64>> {
let field_id = expect_field_id_for_sort_field(reader.schema(), sort_by_field)?; // for now expect fastfield, but not strictly required
let value_accessor = reader.fast_fields().u64_lenient(field_id)?;
Ok(value_accessor)
}
/// Collecting value_accessors into a vec to bind the lifetime.
pub(crate) fn get_reader_with_sort_field_accessor(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<Vec<(SegmentOrdinal, impl FastFieldReader<u64> + Clone)>> {
let reader_ordinal_and_field_accessors = self
.readers
.iter()
.enumerate()
.map(|(reader_ordinal, _)| reader_ordinal as SegmentOrdinal)
.map(|reader_ordinal: SegmentOrdinal| {
let value_accessor = Self::get_sort_field_accessor(
&self.readers[reader_ordinal as usize],
sort_by_field,
)?;
Ok((reader_ordinal, value_accessor))
})
.collect::<crate::Result<Vec<_>>>()?;
Ok(reader_ordinal_and_field_accessors)
}
/// Generates the doc_id mapping where position in the vec=new
/// doc_id.
/// ReaderWithOrdinal will include the ordinal position of the
/// reader in self.readers.
pub(crate) fn generate_doc_id_mapping(
&self,
sort_by_field: &IndexSortByField,
) -> crate::Result<SegmentDocIdMapping> {
let reader_ordinal_and_field_accessors =
self.get_reader_with_sort_field_accessor(sort_by_field)?;
// Loading the field accessor on demand causes a 15x regression
// create iterators over segment/sort_accessor/doc_id tuple
let doc_id_reader_pair =
reader_ordinal_and_field_accessors
.iter()
.map(|reader_and_field_accessor| {
let reader = &self.readers[reader_and_field_accessor.0 as usize];
reader.doc_ids_alive().map(move |doc_id| {
(
doc_id,
reader_and_field_accessor.0,
&reader_and_field_accessor.1,
)
})
});
let total_num_new_docs = self
.readers
.iter()
.map(|reader| reader.num_docs() as usize)
.sum();
let mut sorted_doc_ids = Vec::with_capacity(total_num_new_docs);
// create iterator tuple of (old doc_id, reader) in order of the new doc_ids
sorted_doc_ids.extend(
doc_id_reader_pair
.into_iter()
.kmerge_by(|a, b| {
let val1 = a.2.get(a.0);
let val2 = b.2.get(b.0);
if sort_by_field.order == Order::Asc {
val1 < val2
} else {
val1 > val2
}
})
.map(|(doc_id, reader_with_id, _)| (doc_id, reader_with_id)),
);
Ok(SegmentDocIdMapping::new(sorted_doc_ids, false))
}
// Creating the index file to point into the data, generic over `BytesFastFieldReader` and
// `MultiValuedFastFieldReader`
//
fn write_1_n_fast_field_idx_generic<T: MultiValueLength>(
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
reader_and_field_accessors: &[(&SegmentReader, T)],
) -> crate::Result<Vec<u64>> {
let mut total_num_vals = 0u64;
// In the first pass, we compute the total number of vals.
//
// This is required by the bitpacker, as it needs to know
        // what bit length to use for bitpacking.
let mut num_docs = 0;
for (reader, u64s_reader) in reader_and_field_accessors.iter() {
if let Some(alive_bitset) = reader.alive_bitset() {
num_docs += alive_bitset.num_alive_docs() as u64;
for doc in reader.doc_ids_alive() {
let num_vals = u64s_reader.get_len(doc) as u64;
total_num_vals += num_vals;
}
} else {
num_docs += reader.max_doc() as u64;
total_num_vals += u64s_reader.get_total_len();
}
}
let stats = FastFieldStats {
max_value: total_num_vals,
// The fastfield offset index contains (num_docs + 1) values.
num_vals: num_docs + 1,
min_value: 0,
};
// We can now create our `idx` serializer, and in a second pass,
// can effectively push the different indexes.
// copying into a temp vec is not ideal, but the fast field codec api requires random
// access, which is used in the estimation. It's possible to 1. calculate random
        // access on the fly or 2. change the codec api to make random access optional, but
        // both options also have major drawbacks.
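        // e.g. documents with 2, 0 and 3 values yield the offsets [0, 2, 2, 5].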
let mut offsets = Vec::with_capacity(doc_id_mapping.len());
let mut offset = 0;
for (doc_id, reader) in doc_id_mapping.iter() {
let reader = &reader_and_field_accessors[*reader as usize].1;
offsets.push(offset);
offset += reader.get_len(*doc_id) as u64;
}
offsets.push(offset);
fast_field_serializer.create_auto_detect_u64_fast_field(
field,
stats,
&offsets[..],
offsets.iter().cloned(),
offsets.iter().cloned(),
)?;
Ok(offsets)
}
/// Returns the fastfield index (index for the data, not the data).
fn write_multi_value_fast_field_idx(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<Vec<u64>> {
let reader_ordinal_and_field_accessors = self
.readers
.iter()
.map(|reader| {
let u64s_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_multi_reader(field)
.expect(
"Failed to find index for multivalued field. This is a bug in tantivy, \
please report.",
);
(reader, u64s_reader)
})
.collect::<Vec<_>>();
Self::write_1_n_fast_field_idx_generic(
field,
fast_field_serializer,
doc_id_mapping,
&reader_ordinal_and_field_accessors,
)
}
fn write_hierarchical_facet_field(
&self,
field: Field,
term_ordinal_mappings: &TermOrdinalMapping,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
debug_time!("write_hierarchical_facet_field");
// Multifastfield consists of 2 fastfields.
        // The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
// First we merge the idx fast field.
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
let fast_field_reader = self
.readers
.iter()
.map(|reader| {
let ff_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.u64s(field)
.expect("Could not find multivalued u64 fast value reader.");
ff_reader
})
.collect::<Vec<_>>();
// We can now write the actual fast field values.
// In the case of hierarchical facets, they are actually term ordinals.
let max_term_ord = term_ordinal_mappings.max_term_ord();
{
let mut serialize_vals =
fast_field_serializer.new_u64_fast_field_with_idx(field, 0u64, max_term_ord, 1)?;
let mut vals = Vec::with_capacity(100);
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
let term_ordinal_mapping: &[TermOrdinal] =
term_ordinal_mappings.get_segment(*reader_ordinal as usize);
let ff_reader = &fast_field_reader[*reader_ordinal as usize];
ff_reader.get_vals(*old_doc_id, &mut vals);
for &prev_term_ord in &vals {
let new_term_ord = term_ordinal_mapping[prev_term_ord as usize];
serialize_vals.add_val(new_term_ord)?;
}
}
serialize_vals.close_field()?;
}
Ok(())
}
    /// Creates a mapping when the segments are simply stacked. This is helpful to share code paths
    /// between the index-sorting case and the unsorted one.
pub(crate) fn get_doc_id_from_concatenated_data(&self) -> crate::Result<SegmentDocIdMapping> {
let total_num_new_docs = self
.readers
.iter()
.map(|reader| reader.num_docs() as usize)
.sum();
let mut mapping = Vec::with_capacity(total_num_new_docs);
mapping.extend(
self.readers
.iter()
.enumerate()
.flat_map(|(reader_ordinal, reader)| {
reader
.doc_ids_alive()
.map(move |doc_id| (doc_id, reader_ordinal as SegmentOrdinal))
}),
);
Ok(SegmentDocIdMapping::new(mapping, true))
}
fn write_multi_fast_field(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
        // Multifastfield consists of 2 fastfields.
        // The first serves as an index into the second one and is strictly increasing.
// The second contains the actual values.
// First we merge the idx fast field.
let offsets =
self.write_multi_value_fast_field_idx(field, fast_field_serializer, doc_id_mapping)?;
let mut min_value = u64::max_value();
let mut max_value = u64::min_value();
let mut num_vals = 0;
let mut vals = Vec::with_capacity(100);
let mut ff_readers = Vec::new();
// Our values are bitpacked and we need to know what should be
// our bitwidth and our minimum value before serializing any values.
//
// Computing those is non-trivial if some documents are deleted.
// We go through a complete first pass to compute the minimum and the
// maximum value and initialize our Serializer.
for reader in &self.readers {
let ff_reader: MultiValuedFastFieldReader<u64> = reader
.fast_fields()
.typed_fast_field_multi_reader(field)
.expect(
"Failed to find multivalued fast field reader. This is a bug in tantivy. \
Please report.",
);
for doc in reader.doc_ids_alive() {
ff_reader.get_vals(doc, &mut vals);
for &val in &vals {
min_value = cmp::min(val, min_value);
max_value = cmp::max(val, max_value);
}
num_vals += vals.len();
}
ff_readers.push(ff_reader);
// TODO optimize when no deletes
}
if min_value > max_value {
min_value = 0;
max_value = 0;
}
// We can now initialize our serializer, and push it the different values
let stats = FastFieldStats {
max_value,
num_vals: num_vals as u64,
min_value,
};
struct SortedDocIdMultiValueAccessProvider<'a> {
doc_id_mapping: &'a SegmentDocIdMapping,
fast_field_readers: &'a Vec<MultiValuedFastFieldReader<u64>>,
offsets: Vec<u64>,
}
impl<'a> FastFieldDataAccess for SortedDocIdMultiValueAccessProvider<'a> {
fn get_val(&self, pos: u64) -> u64 {
                // use the offsets index to find the doc_id which contains this position.
                // the offsets are strictly increasing so we can do a simple linear search on them.
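                // e.g. with offsets [0, 2, 2, 5], pos 3 falls in the third document
                // (new_doc_id 2), at pos_in_values 3 - 2 = 1.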
let new_doc_id = self
.offsets
.iter()
.position(|&offset| offset > pos)
.expect("pos is out of bounds")
- 1;
// now we need to find the position of `pos` in the multivalued bucket
let num_pos_covered_until_now = self.offsets[new_doc_id];
let pos_in_values = pos - num_pos_covered_until_now;
let (old_doc_id, reader_ordinal) = self.doc_id_mapping[new_doc_id as usize];
let num_vals = self.fast_field_readers[reader_ordinal as usize].get_len(old_doc_id);
assert!(num_vals >= pos_in_values);
let mut vals = vec![];
self.fast_field_readers[reader_ordinal as usize].get_vals(old_doc_id, &mut vals);
vals[pos_in_values as usize]
}
}
let fastfield_accessor = SortedDocIdMultiValueAccessProvider {
doc_id_mapping,
fast_field_readers: &ff_readers,
offsets,
};
let iter1 = doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
let ff_reader = &ff_readers[*reader_ordinal as usize];
let mut vals = vec![];
ff_reader.get_vals(*doc_id, &mut vals);
vals.into_iter()
});
let iter2 = doc_id_mapping.iter().flat_map(|(doc_id, reader_ordinal)| {
let ff_reader = &ff_readers[*reader_ordinal as usize];
let mut vals = vec![];
ff_reader.get_vals(*doc_id, &mut vals);
vals.into_iter()
});
fast_field_serializer.create_auto_detect_u64_fast_field_with_idx(
field,
stats,
fastfield_accessor,
iter1,
iter2,
1,
)?;
Ok(())
}
fn write_bytes_fast_field(
&self,
field: Field,
fast_field_serializer: &mut CompositeFastFieldSerializer,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
let reader_and_field_accessors = self
.readers
.iter()
.map(|reader| {
let bytes_reader = reader.fast_fields().bytes(field).expect(
"Failed to find index for bytes field. This is a bug in tantivy, please \
report.",
);
(reader, bytes_reader)
})
.collect::<Vec<_>>();
Self::write_1_n_fast_field_idx_generic(
field,
fast_field_serializer,
doc_id_mapping,
&reader_and_field_accessors,
)?;
let mut serialize_vals = fast_field_serializer.new_bytes_fast_field_with_idx(field, 1);
for (doc_id, reader_ordinal) in doc_id_mapping.iter() {
let bytes_reader = &reader_and_field_accessors[*reader_ordinal as usize].1;
let val = bytes_reader.get_bytes(*doc_id);
serialize_vals.write_all(val)?;
}
serialize_vals.flush()?;
Ok(())
}
fn write_postings_for_field(
&self,
indexed_field: Field,
field_type: &FieldType,
serializer: &mut InvertedIndexSerializer,
fieldnorm_reader: Option<FieldNormReader>,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<Option<TermOrdinalMapping>> {
debug_time!("write_postings_for_field");
let mut positions_buffer: Vec<u32> = Vec::with_capacity(1_000);
let mut delta_computer = DeltaComputer::new();
let mut max_term_ords: Vec<TermOrdinal> = Vec::new();
let field_readers: Vec<Arc<InvertedIndexReader>> = self
.readers
.iter()
.map(|reader| reader.inverted_index(indexed_field))
.collect::<crate::Result<Vec<_>>>()?;
let mut field_term_streams = Vec::new();
for field_reader in &field_readers {
let terms = field_reader.terms();
field_term_streams.push(terms.stream()?);
max_term_ords.push(terms.num_terms() as u64);
}
let mut term_ord_mapping_opt = match field_type {
FieldType::Facet(_) => Some(TermOrdinalMapping::new(max_term_ords)),
_ => None,
};
let mut merged_terms = TermMerger::new(field_term_streams);
// map from segment doc ids to the resulting merged segment doc id.
let mut merged_doc_id_map: Vec<Vec<Option<DocId>>> = self
.readers
.iter()
.map(|reader| {
let mut segment_local_map = vec![];
segment_local_map.resize(reader.max_doc() as usize, None);
segment_local_map
})
.collect();
for (new_doc_id, (old_doc_id, segment_ord)) in doc_id_mapping.iter().enumerate() {
let segment_map = &mut merged_doc_id_map[*segment_ord as usize];
segment_map[*old_doc_id as usize] = Some(new_doc_id as DocId);
}
// Note that the total number of tokens is not exact.
// It is only used as a parameter in the BM25 formula.
let total_num_tokens: u64 = estimate_total_num_tokens(&self.readers, indexed_field)?;
// Create the total list of doc ids
// by stacking the doc ids from the different segment.
//
// In the new segments, the doc id from the different
// segment are stacked so that :
        // - Segment 0's doc ids become doc id [0, seg0.max_doc]
        // - Segment 1's doc ids become [seg0.max_doc, seg0.max_doc + seg1.max_doc]
// - Segment 2's doc ids become [seg0.max_doc + seg1.max_doc, seg0.max_doc + seg1.max_doc +
// seg2.max_doc]
//
        // This stacking applies only when the index is not sorted; in the sorted case, the
        // doc_ids are kmerged by their sort property.
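        // e.g. when stacking with seg0.max_doc = 3 and seg1.max_doc = 2, old doc 1 of
        // segment 1 becomes doc 4 in the merged segment.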
let mut field_serializer =
serializer.new_field(indexed_field, total_num_tokens, fieldnorm_reader)?;
let field_entry = self.schema.get_field_entry(indexed_field);
        // ... set segment postings option for the new field.
let segment_postings_option = field_entry.field_type().get_index_record_option().expect(
"Encountered a field that is not supposed to be
indexed. Have you modified the schema?",
);
let mut segment_postings_containing_the_term: Vec<(usize, SegmentPostings)> = vec![];
let mut doc_id_and_positions = vec![];
while merged_terms.advance() {
segment_postings_containing_the_term.clear();
let term_bytes: &[u8] = merged_terms.key();
let mut total_doc_freq = 0;
// Let's compute the list of non-empty posting lists
for (segment_ord, term_info) in merged_terms.current_segment_ords_and_term_infos() {
let segment_reader = &self.readers[segment_ord];
let inverted_index: &InvertedIndexReader = &*field_readers[segment_ord];
let segment_postings = inverted_index
.read_postings_from_terminfo(&term_info, segment_postings_option)?;
let alive_bitset_opt = segment_reader.alive_bitset();
let doc_freq = if let Some(alive_bitset) = alive_bitset_opt {
segment_postings.doc_freq_given_deletes(alive_bitset)
} else {
segment_postings.doc_freq()
};
if doc_freq > 0u32 {
total_doc_freq += doc_freq;
segment_postings_containing_the_term.push((segment_ord, segment_postings));
}
}
// At this point, `segment_postings` contains the posting list
// of all of the segments containing the given term (and that are non-empty)
//
// These segments are non-empty and advance has already been called.
if total_doc_freq == 0u32 {
// All docs that used to contain the term have been deleted. The `term` will be
// entirely removed.
continue;
}
let to_term_ord = field_serializer.new_term(term_bytes, total_doc_freq)?;
if let Some(ref mut term_ord_mapping) = term_ord_mapping_opt {
for (segment_ord, from_term_ord) in merged_terms.matching_segments() {
term_ord_mapping.register_from_to(segment_ord, from_term_ord, to_term_ord);
}
}
// We can now serialize this postings, by pushing each document to the
// postings serializer.
for (segment_ord, mut segment_postings) in
segment_postings_containing_the_term.drain(..)
{
let old_to_new_doc_id = &merged_doc_id_map[segment_ord];
let mut doc = segment_postings.doc();
while doc != TERMINATED {
// deleted doc are skipped as they do not have a `remapped_doc_id`.
if let Some(remapped_doc_id) = old_to_new_doc_id[doc as usize] {
// we make sure to only write the term if
// there is at least one document.
let term_freq = segment_postings.term_freq();
segment_postings.positions(&mut positions_buffer);
// if doc_id_mapping exists, the doc_ids are reordered, they are
// not just stacked. The field serializer expects monotonically increasing
// doc_ids, so we collect and sort them first, before writing.
//
// I think this is not strictly necessary, it would be possible to
// avoid the loading into a vec via some form of kmerge, but then the merge
// logic would deviate much more from the stacking case (unsorted index)
if !doc_id_mapping.is_trivial() {
doc_id_and_positions.push((
remapped_doc_id,
term_freq,
positions_buffer.to_vec(),
));
} else {
let delta_positions = delta_computer.compute_delta(&positions_buffer);
field_serializer.write_doc(remapped_doc_id, term_freq, delta_positions);
}
}
doc = segment_postings.advance();
}
}
if !doc_id_mapping.is_trivial() {
doc_id_and_positions.sort_unstable_by_key(|&(doc_id, _, _)| doc_id);
for (doc_id, term_freq, positions) in &doc_id_and_positions {
let delta_positions = delta_computer.compute_delta(positions);
field_serializer.write_doc(*doc_id, *term_freq, delta_positions);
}
doc_id_and_positions.clear();
}
// closing the term.
field_serializer.close_term()?;
}
field_serializer.close()?;
Ok(term_ord_mapping_opt)
}
fn write_postings(
&self,
serializer: &mut InvertedIndexSerializer,
fieldnorm_readers: FieldNormReaders,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<HashMap<Field, TermOrdinalMapping>> {
let mut term_ordinal_mappings = HashMap::new();
for (field, field_entry) in self.schema.fields() {
let fieldnorm_reader = fieldnorm_readers.get_field(field)?;
if field_entry.is_indexed() {
if let Some(term_ordinal_mapping) = self.write_postings_for_field(
field,
field_entry.field_type(),
serializer,
fieldnorm_reader,
doc_id_mapping,
)? {
term_ordinal_mappings.insert(field, term_ordinal_mapping);
}
}
}
Ok(term_ordinal_mappings)
}
fn write_storable_fields(
&self,
store_writer: &mut StoreWriter,
doc_id_mapping: &SegmentDocIdMapping,
) -> crate::Result<()> {
debug_time!("write_storable_fields");
let store_readers: Vec<_> = self
.readers
.iter()
.map(|reader| reader.get_store_reader())
.collect::<Result<_, _>>()?;
let mut document_iterators: Vec<_> = store_readers
.iter()
.enumerate()
.map(|(i, store)| store.iter_raw(self.readers[i].alive_bitset()))
.collect();
if !doc_id_mapping.is_trivial() {
for (old_doc_id, reader_ordinal) in doc_id_mapping.iter() {
let doc_bytes_it = &mut document_iterators[*reader_ordinal as usize];
if let Some(doc_bytes_res) = doc_bytes_it.next() {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
} else {
return Err(DataCorruption::comment_only(&format!(
"unexpected missing document in docstore on merge, doc id {:?}",
old_doc_id
))
.into());
}
}
} else {
for reader in &self.readers {
let store_reader = reader.get_store_reader()?;
if reader.has_deletes()
// If there is not enough data in the store, we avoid stacking in order to
// avoid creating many small blocks in the doc store. Once we have 5 full blocks,
// we start stacking. In the worst case 2/7 of the blocks would be very small.
// [segment 1 - {1 doc}][segment 2 - {fullblock * 5}{1doc}]
// => 5 * full blocks, 2 * 1 document blocks
//
// In a more realistic scenario the segments are of the same size, so 1/6 of
// the doc stores would be on average half full, given total randomness (which
// is not the case here, but not sure how it behaves exactly).
//
// https://github.com/quickwit-oss/tantivy/issues/1053
//
// take 7 in order to not walk over all checkpoints.
|| store_reader.block_checkpoints().take(7).count() < 6
|| store_reader.compressor() != store_writer.compressor()
{
for doc_bytes_res in store_reader.iter_raw(reader.alive_bitset()) {
let doc_bytes = doc_bytes_res?;
store_writer.store_bytes(&doc_bytes)?;
}
} else {
store_writer.stack(&store_reader)?;
}
}
}
Ok(())
}
/// Writes the merged segment by pushing information
/// to the `SegmentSerializer`.
///
/// # Returns
/// The number of documents in the resulting segment.
pub fn write(&self, mut serializer: SegmentSerializer) -> crate::Result<u32> {
let doc_id_mapping = if let Some(sort_by_field) = self.index_settings.sort_by_field.as_ref()
{
// If the documents are already sorted and stackable, we ignore the mapping and execute
// it as if there was no sorting
if self.is_disjunct_and_sorted_on_sort_property(sort_by_field)? {
self.get_doc_id_from_concatenated_data()?
} else {
self.generate_doc_id_mapping(sort_by_field)?
}
} else {
self.get_doc_id_from_concatenated_data()?
};
if let Some(fieldnorms_serializer) = serializer.extract_fieldnorms_serializer() {
self.write_fieldnorms(fieldnorms_serializer, &doc_id_mapping)?;
}
let fieldnorm_data = serializer
.segment()
.open_read(SegmentComponent::FieldNorms)?;
let fieldnorm_readers = FieldNormReaders::open(fieldnorm_data)?;
let term_ord_mappings = self.write_postings(
serializer.get_postings_serializer(),
fieldnorm_readers,
&doc_id_mapping,
)?;
self.write_fast_fields(
serializer.get_fast_field_serializer(),
term_ord_mappings,
&doc_id_mapping,
)?;
self.write_storable_fields(serializer.get_store_writer(), &doc_id_mapping)?;
serializer.close()?;
Ok(self.max_doc)
}
}
#[cfg(test)]
mod tests {
use byteorder::{BigEndian, ReadBytesExt};
use futures::executor::block_on;
use schema::FAST;
use crate::collector::tests::{
BytesFastFieldTestCollector, FastFieldTestCollector, TEST_COLLECTOR_WITH_SCORE,
};
use crate::collector::{Count, FacetCollector};
use crate::core::Index;
use crate::fastfield::FastFieldReader;
use crate::query::{AllQuery, BooleanQuery, Scorer, TermQuery};
use crate::schema::{
Cardinality, Document, Facet, FacetOptions, IndexRecordOption, NumericOptions, Term,
TextFieldIndexing, INDEXED, TEXT,
};
use crate::{
assert_nearly_equals, schema, DocAddress, DocSet, IndexSettings, IndexSortByField,
IndexWriter, Order, Searcher, SegmentId,
};
#[test]
fn test_index_merger_no_deletes() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default()
.set_tokenizer("default")
.set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let date_field = schema_builder.add_date_field("date", INDEXED);
let score_fieldtype = schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader()?;
let curr_time = chrono::Utc::now();
{
let mut index_writer = index.writer_for_tests()?;
// writing the segment
index_writer.add_document(doc!(
text_field => "af b",
score_field => 3u64,
date_field => curr_time,
bytes_score_field => 3u32.to_be_bytes().as_ref()
))?;
index_writer.add_document(doc!(
text_field => "a b c",
score_field => 5u64,
bytes_score_field => 5u32.to_be_bytes().as_ref()
))?;
index_writer.add_document(doc!(
text_field => "a b c d",
score_field => 7u64,
bytes_score_field => 7u32.to_be_bytes().as_ref()
))?;
index_writer.commit()?;
// writing the segment
index_writer.add_document(doc!(
text_field => "af b",
date_field => curr_time,
score_field => 11u64,
bytes_score_field => 11u32.to_be_bytes().as_ref()
))?;
index_writer.add_document(doc!(
text_field => "a b c g",
score_field => 13u64,
bytes_score_field => 13u32.to_be_bytes().as_ref()
))?;
index_writer.commit()?;
}
{
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests()?;
block_on(index_writer.merge(&segment_ids))?;
index_writer.wait_merging_threads()?;
}
{
reader.reload()?;
let searcher = reader.searcher();
let get_doc_ids = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
searcher
.search(&query, &TEST_COLLECTOR_WITH_SCORE)
.map(|top_docs| top_docs.docs().to_vec())
};
{
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "a")])?,
vec![
DocAddress::new(0, 1),
DocAddress::new(0, 2),
DocAddress::new(0, 4)
]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "af")])?,
vec![DocAddress::new(0, 0), DocAddress::new(0, 3)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "g")])?,
vec![DocAddress::new(0, 4)]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_text(text_field, "b")])?,
vec![
DocAddress::new(0, 0),
DocAddress::new(0, 1),
DocAddress::new(0, 2),
DocAddress::new(0, 3),
DocAddress::new(0, 4)
]
);
assert_eq!(
get_doc_ids(vec![Term::from_field_date(date_field, &curr_time)])?,
vec![DocAddress::new(0, 0), DocAddress::new(0, 3)]
);
}
{
let doc = searcher.doc(DocAddress::new(0, 0))?;
assert_eq!(doc.get_first(text_field).unwrap().as_text(), Some("af b"));
}
{
let doc = searcher.doc(DocAddress::new(0, 1))?;
assert_eq!(doc.get_first(text_field).unwrap().as_text(), Some("a b c"));
}
{
let doc = searcher.doc(DocAddress::new(0, 2))?;
assert_eq!(
doc.get_first(text_field).unwrap().as_text(),
Some("a b c d")
);
}
{
let doc = searcher.doc(DocAddress::new(0, 3))?;
assert_eq!(doc.get_first(text_field).unwrap().as_text(), Some("af b"));
}
{
let doc = searcher.doc(DocAddress::new(0, 4))?;
assert_eq!(
doc.get_first(text_field).unwrap().as_text(),
Some("a b c g")
);
}
{
let get_fast_vals = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
searcher.search(&query, &FastFieldTestCollector::for_field(score_field))
};
let get_fast_vals_bytes = |terms: Vec<Term>| {
let query = BooleanQuery::new_multiterms_query(terms);
searcher.search(
&query,
&BytesFastFieldTestCollector::for_field(bytes_score_field),
)
};
assert_eq!(
get_fast_vals(vec![Term::from_field_text(text_field, "a")])?,
vec![5, 7, 13]
);
assert_eq!(
get_fast_vals_bytes(vec![Term::from_field_text(text_field, "a")])?,
vec![0, 0, 0, 5, 0, 0, 0, 7, 0, 0, 0, 13]
);
}
}
Ok(())
}
#[test]
fn test_index_merger_with_deletes() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let text_fieldtype = schema::TextOptions::default()
.set_indexing_options(
TextFieldIndexing::default().set_index_option(IndexRecordOption::WithFreqs),
)
.set_stored();
let text_field = schema_builder.add_text_field("text", text_fieldtype);
let score_fieldtype = schema::NumericOptions::default().set_fast(Cardinality::SingleValue);
let score_field = schema_builder.add_u64_field("score", score_fieldtype);
let bytes_score_field = schema_builder.add_bytes_field("score_bytes", FAST);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests()?;
let reader = index.reader().unwrap();
let search_term = |searcher: &Searcher, term: Term| {
let collector = FastFieldTestCollector::for_field(score_field);
let bytes_collector = BytesFastFieldTestCollector::for_field(bytes_score_field);
let term_query = TermQuery::new(term, IndexRecordOption::Basic);
searcher
.search(&term_query, &(collector, bytes_collector))
.map(|(scores, bytes)| {
let mut score_bytes = &bytes[..];
for &score in &scores {
assert_eq!(score as u32, score_bytes.read_u32::<BigEndian>().unwrap());
}
scores
})
};
let empty_vec = Vec::<u64>::new();
{
// a first commit
index_writer.add_document(doc!(
text_field => "a b d",
score_field => 1u64,
bytes_score_field => vec![0u8, 0, 0, 1],
))?;
index_writer.add_document(doc!(
text_field => "b c",
score_field => 2u64,
bytes_score_field => vec![0u8, 0, 0, 2],
))?;
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.add_document(doc!(
text_field => "c d",
score_field => 3u64,
bytes_score_field => vec![0u8, 0, 0, 3],
))?;
index_writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
vec![1]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
vec![1]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
vec![3]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
vec![1, 3]
);
}
{
// a second commit
index_writer.add_document(doc!(
text_field => "a d e",
score_field => 4_000u64,
bytes_score_field => vec![0u8, 0, 0, 4],
))?;
index_writer.add_document(doc!(
text_field => "e f",
score_field => 5_000u64,
bytes_score_field => vec![0u8, 0, 0, 5],
))?;
index_writer.delete_term(Term::from_field_text(text_field, "a"));
index_writer.delete_term(Term::from_field_text(text_field, "f"));
index_writer.add_document(doc!(
text_field => "f g",
score_field => 6_000u64,
bytes_score_field => vec![0u8, 0, 23, 112],
))?;
index_writer.add_document(doc!(
text_field => "g h",
score_field => 7_000u64,
bytes_score_field => vec![0u8, 0, 27, 88],
))?;
index_writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 4);
assert_eq!(searcher.segment_readers()[1].num_docs(), 1);
assert_eq!(searcher.segment_readers()[1].max_doc(), 3);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
vec![3]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
vec![3]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
vec![6_000]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
vec![6_000, 7_000]
);
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 4000);
assert_eq!(score_field_reader.max_value(), 7000);
let score_field_reader = searcher
.segment_reader(1)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 1);
assert_eq!(score_field_reader.max_value(), 3);
}
{
// merging the segments
let segment_ids = index.searchable_segment_ids()?;
block_on(index_writer.merge(&segment_ids))?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].num_docs(), 3);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
vec![3]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
vec![3]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
vec![6_000]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
vec![6_000, 7_000]
);
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// test a commit with only deletes
index_writer.delete_term(Term::from_field_text(text_field, "c"));
index_writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 3);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
vec![6_000]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
vec![6_000, 7_000]
);
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 3);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// Test merging a single segment in order to remove deletes.
let segment_ids = index.searchable_segment_ids()?;
block_on(index_writer.merge(&segment_ids))?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
assert_eq!(searcher.num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].num_docs(), 2);
assert_eq!(searcher.segment_readers()[0].max_doc(), 2);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "a"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "b"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "c"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "d"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "e"))?,
empty_vec
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "f"))?,
vec![6_000]
);
assert_eq!(
search_term(&searcher, Term::from_field_text(text_field, "g"))?,
vec![6_000, 7_000]
);
let score_field_reader = searcher
.segment_reader(0)
.fast_fields()
.u64(score_field)
.unwrap();
assert_eq!(score_field_reader.min_value(), 6000);
assert_eq!(score_field_reader.max_value(), 7000);
}
{
// Test removing all docs
index_writer.delete_term(Term::from_field_text(text_field, "g"));
index_writer.commit()?;
let segment_ids = index.searchable_segment_ids()?;
reader.reload()?;
let searcher = reader.searcher();
assert!(segment_ids.is_empty());
assert!(searcher.segment_readers().is_empty());
assert_eq!(searcher.num_docs(), 0);
}
Ok(())
}
#[test]
fn test_merge_facets_sort_none() {
test_merge_facets(None, true)
}
#[test]
fn test_merge_facets_sort_asc() {
// In the merge case this will go through the doc_id mapping code
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
                    order: Order::Asc,
}),
..Default::default()
}),
true,
);
// In the merge case this will not go through the doc_id mapping code, because the data is
// sorted and disjunct
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
                    order: Order::Asc,
}),
..Default::default()
}),
false,
);
}
#[test]
fn test_merge_facets_sort_desc() {
// In the merge case this will go through the doc_id mapping code
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
true,
);
// In the merge case this will not go through the doc_id mapping code, because the data is
// sorted and disjunct
test_merge_facets(
Some(IndexSettings {
sort_by_field: Some(IndexSortByField {
field: "intval".to_string(),
order: Order::Desc,
}),
..Default::default()
}),
false,
);
}
// force_segment_value_overlap forces the int value for sorting to have overlapping min and max
// ranges between segments so that merge algorithm can't apply certain optimizations
fn test_merge_facets(index_settings: Option<IndexSettings>, force_segment_value_overlap: bool) {
let mut schema_builder = schema::Schema::builder();
let facet_field = schema_builder.add_facet_field("facet", FacetOptions::default());
let int_options = NumericOptions::default()
.set_fast(Cardinality::SingleValue)
.set_indexed();
let int_field = schema_builder.add_u64_field("intval", int_options);
let mut index_builder = Index::builder().schema(schema_builder.build());
if let Some(settings) = index_settings {
index_builder = index_builder.settings(settings);
}
let index = index_builder.create_in_ram().unwrap();
// let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader().unwrap();
let mut int_val = 0;
{
let mut index_writer = index.writer_for_tests().unwrap();
let index_doc =
|index_writer: &mut IndexWriter, doc_facets: &[&str], int_val: &mut u64| {
let mut doc = Document::default();
for facet in doc_facets {
doc.add_facet(facet_field, Facet::from(facet));
}
doc.add_u64(int_field, *int_val);
*int_val += 1;
index_writer.add_document(doc).unwrap();
};
index_doc(
&mut index_writer,
&["/top/a/firstdoc", "/top/b"],
&mut int_val,
);
index_doc(
&mut index_writer,
&["/top/a/firstdoc", "/top/b", "/top/c"],
&mut int_val,
);
index_doc(&mut index_writer, &["/top/a", "/top/b"], &mut int_val);
index_doc(&mut index_writer, &["/top/a"], &mut int_val);
index_doc(&mut index_writer, &["/top/b", "/top/d"], &mut int_val);
if force_segment_value_overlap {
index_doc(&mut index_writer, &["/top/d"], &mut 0);
index_doc(&mut index_writer, &["/top/e"], &mut 10);
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/a"], &mut 5); // 5 is between 0 - 10 so the
// segments don' have disjunct
// ranges
} else {
index_doc(&mut index_writer, &["/top/d"], &mut int_val);
index_doc(&mut index_writer, &["/top/e"], &mut int_val);
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/a"], &mut int_val);
}
index_doc(&mut index_writer, &["/top/b"], &mut int_val);
index_doc(&mut index_writer, &["/top/c"], &mut int_val);
index_writer.commit().expect("committed");
index_doc(&mut index_writer, &["/top/e", "/top/f"], &mut int_val);
index_writer.commit().expect("committed");
}
reader.reload().unwrap();
let test_searcher = |expected_num_docs: usize, expected: &[(&str, u64)]| {
let searcher = reader.searcher();
let mut facet_collector = FacetCollector::for_field(facet_field);
facet_collector.add_facet(Facet::from("/top"));
let (count, facet_counts) = searcher
.search(&AllQuery, &(Count, facet_collector))
.unwrap();
assert_eq!(count, expected_num_docs);
let facets: Vec<(String, u64)> = facet_counts
.get("/top")
.map(|(facet, count)| (facet.to_string(), count))
.collect();
assert_eq!(
facets,
expected
.iter()
.map(|&(facet_str, count)| (String::from(facet_str), count))
.collect::<Vec<_>>()
);
};
test_searcher(
11,
&[
("/top/a", 5),
("/top/b", 5),
("/top/c", 2),
("/top/d", 2),
("/top/e", 2),
("/top/f", 1),
],
);
// Merging the segments
{
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
let mut index_writer = index.writer_for_tests().unwrap();
block_on(index_writer.merge(&segment_ids)).expect("Merging failed");
index_writer.wait_merging_threads().unwrap();
reader.reload().unwrap();
test_searcher(
11,
&[
("/top/a", 5),
("/top/b", 5),
("/top/c", 2),
("/top/d", 2),
("/top/e", 2),
("/top/f", 1),
],
);
}
// Deleting one term
{
let mut index_writer = index.writer_for_tests().unwrap();
let facet = Facet::from_path(vec!["top", "a", "firstdoc"]);
let facet_term = Term::from_facet(facet_field, &facet);
index_writer.delete_term(facet_term);
index_writer.commit().unwrap();
reader.reload().unwrap();
test_searcher(
9,
&[
("/top/a", 3),
("/top/b", 3),
("/top/c", 1),
("/top/d", 2),
("/top/e", 2),
("/top/f", 1),
],
);
}
}
#[test]
fn test_bug_merge() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let int_field = schema_builder.add_u64_field("intvals", INDEXED);
let index = Index::create_in_ram(schema_builder.build());
let mut index_writer = index.writer_for_tests().unwrap();
index_writer.add_document(doc!(int_field => 1u64))?;
index_writer.commit().expect("commit failed");
index_writer.add_document(doc!(int_field => 1u64))?;
index_writer.commit().expect("commit failed");
let reader = index.reader()?;
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index
.searchable_segment_ids()
.expect("Searchable segments failed.");
block_on(index_writer.merge(&segment_ids))?;
reader.reload()?;
// commit has not been called yet. The document should still be
// there.
assert_eq!(reader.searcher().num_docs(), 2);
Ok(())
}
#[test]
fn test_merge_multivalued_int_fields_all_deleted() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let int_options = NumericOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed();
let int_field = schema_builder.add_u64_field("intvals", int_options);
let index = Index::create_in_ram(schema_builder.build());
let reader = index.reader()?;
{
let mut index_writer = index.writer_for_tests()?;
let mut doc = Document::default();
doc.add_u64(int_field, 1);
index_writer.add_document(doc.clone())?;
index_writer.commit()?;
index_writer.add_document(doc)?;
index_writer.commit()?;
index_writer.delete_term(Term::from_field_u64(int_field, 1));
let segment_ids = index.searchable_segment_ids()?;
block_on(index_writer.merge(&segment_ids))?;
// assert delete has not been committed
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 2);
index_writer.commit()?;
index_writer.wait_merging_threads()?;
}
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.num_docs(), 0);
Ok(())
}
#[test]
fn test_merge_multivalued_int_fields_simple() -> crate::Result<()> {
let mut schema_builder = schema::Schema::builder();
let int_options = NumericOptions::default()
.set_fast(Cardinality::MultiValues)
.set_indexed();
let int_field = schema_builder.add_u64_field("intvals", int_options);
let index = Index::create_in_ram(schema_builder.build());
{
let mut index_writer = index.writer_for_tests()?;
let index_doc = |index_writer: &mut IndexWriter, int_vals: &[u64]| {
let mut doc = Document::default();
for &val in int_vals {
doc.add_u64(int_field, val);
}
index_writer.add_document(doc).unwrap();
};
index_doc(&mut index_writer, &[1, 2]);
index_doc(&mut index_writer, &[1, 2, 3]);
index_doc(&mut index_writer, &[4, 5]);
index_doc(&mut index_writer, &[1, 2]);
index_doc(&mut index_writer, &[1, 5]);
index_doc(&mut index_writer, &[3]);
index_doc(&mut index_writer, &[17]);
assert!(index_writer.commit().is_ok());
index_doc(&mut index_writer, &[20]);
assert!(index_writer.commit().is_ok());
index_doc(&mut index_writer, &[28, 27]);
index_doc(&mut index_writer, &[1_000]);
assert!(index_writer.commit().is_ok());
}
let reader = index.reader()?;
let searcher = reader.searcher();
let mut vals: Vec<u64> = Vec::new();
{
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]);
ff_reader.get_vals(1, &mut vals);
assert_eq!(&vals, &[1, 2, 3]);
ff_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[4, 5]);
ff_reader.get_vals(3, &mut vals);
assert_eq!(&vals, &[1, 2]);
ff_reader.get_vals(4, &mut vals);
assert_eq!(&vals, &[1, 5]);
ff_reader.get_vals(5, &mut vals);
assert_eq!(&vals, &[3]);
ff_reader.get_vals(6, &mut vals);
assert_eq!(&vals, &[17]);
}
{
let segment = searcher.segment_reader(1u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[28, 27]);
ff_reader.get_vals(1, &mut vals);
assert_eq!(&vals, &[1_000]);
}
{
let segment = searcher.segment_reader(2u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[20]);
}
// Merging the segments
{
let segment_ids = index.searchable_segment_ids()?;
let mut index_writer = index.writer_for_tests()?;
block_on(index_writer.merge(&segment_ids))?;
index_writer.wait_merging_threads()?;
}
reader.reload()?;
{
let searcher = reader.searcher();
let segment = searcher.segment_reader(0u32);
let ff_reader = segment.fast_fields().u64s(int_field).unwrap();
ff_reader.get_vals(0, &mut vals);
assert_eq!(&vals, &[1, 2]);
ff_reader.get_vals(1, &mut vals);
assert_eq!(&vals, &[1, 2, 3]);
ff_reader.get_vals(2, &mut vals);
assert_eq!(&vals, &[4, 5]);
ff_reader.get_vals(3, &mut vals);
assert_eq!(&vals, &[1, 2]);
ff_reader.get_vals(4, &mut vals);
assert_eq!(&vals, &[1, 5]);
ff_reader.get_vals(5, &mut vals);
assert_eq!(&vals, &[3]);
ff_reader.get_vals(6, &mut vals);
assert_eq!(&vals, &[17]);
ff_reader.get_vals(7, &mut vals);
assert_eq!(&vals, &[28, 27]);
ff_reader.get_vals(8, &mut vals);
assert_eq!(&vals, &[1_000]);
ff_reader.get_vals(9, &mut vals);
assert_eq!(&vals, &[20]);
}
Ok(())
}
#[test]
fn merges_f64_fast_fields_correctly() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let fast_multi = NumericOptions::default().set_fast(Cardinality::MultiValues);
let field = builder.add_f64_field("f64", schema::FAST);
let multi_field = builder.add_f64_field("f64s", fast_multi);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?;
// Make sure we'll attempt to merge every created segment
let mut policy = crate::indexer::LogMergePolicy::default();
policy.set_min_num_segments(2);
writer.set_merge_policy(Box::new(policy));
for i in 0..100 {
let mut doc = Document::new();
doc.add_f64(field, 42.0);
doc.add_f64(multi_field, 0.24);
doc.add_f64(multi_field, 0.27);
writer.add_document(doc)?;
if i % 5 == 0 {
writer.commit()?;
}
}
writer.commit()?;
writer.wait_merging_threads()?;
// If a merging thread fails, we should end up with more
// than one segment here
assert_eq!(1, index.searchable_segments()?.len());
Ok(())
}
#[test]
fn test_merged_index_has_blockwand() -> crate::Result<()> {
let mut builder = schema::SchemaBuilder::new();
let text = builder.add_text_field("text", TEXT);
let index = Index::create_in_ram(builder.build());
let mut writer = index.writer_for_tests()?;
let happy_term = Term::from_field_text(text, "happy");
let term_query = TermQuery::new(happy_term, IndexRecordOption::WithFreqs);
for _ in 0..62 {
writer.add_document(doc!(text=>"hello happy tax payer"))?;
}
writer.commit()?;
let reader = index.reader()?;
let searcher = reader.searcher();
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(searcher.segment_reader(0u32), 1.0)?;
assert_eq!(term_scorer.doc(), 0);
assert_nearly_equals!(term_scorer.block_max_score(), 0.0079681855);
assert_nearly_equals!(term_scorer.score(), 0.0079681855);
for _ in 0..81 {
writer.add_document(doc!(text=>"hello happy tax payer"))?;
}
writer.commit()?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 2);
for segment_reader in searcher.segment_readers() {
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(segment_reader, 1.0)?;
            // the difference compared to before is intrinsic to the bm25 formula.
            // no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
}
let segment_ids: Vec<SegmentId> = searcher
.segment_readers()
.iter()
.map(|reader| reader.segment_id())
.collect();
block_on(writer.merge(&segment_ids[..]))?;
reader.reload()?;
let searcher = reader.searcher();
assert_eq!(searcher.segment_readers().len(), 1);
let segment_reader = searcher.segment_reader(0u32);
let mut term_scorer = term_query
.specialized_weight(&searcher, true)?
.specialized_scorer(segment_reader, 1.0)?;
        // the difference compared to before is intrinsic to the bm25 formula. no worries there.
for doc in segment_reader.doc_ids_alive() {
assert_eq!(term_scorer.doc(), doc);
assert_nearly_equals!(term_scorer.block_max_score(), 0.003478312);
assert_nearly_equals!(term_scorer.score(), 0.003478312);
term_scorer.advance();
}
Ok(())
}
#[test]
fn test_max_doc() {
// this is the first time I write a unit test for a constant.
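        // Taken together, the two assertions pin MAX_DOC_LIMIT to 1 << 31 (assuming a u32
        // constant): the largest valid doc id, MAX_DOC_LIMIT - 1, must still be representable
        // as a non-negative i32.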
assert!(((super::MAX_DOC_LIMIT - 1) as i32) >= 0);
assert!((super::MAX_DOC_LIMIT as i32) < 0);
}
}
| write_fieldnorms |
_color.py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color", | **kwargs,
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
) | parent_name="parcats.line.colorbar.title.font", |
service.go | package service
import (
"io/ioutil"
"net/http"
"gopkg.in/gin-gonic/gin.v0"
"gopkg.in/jmoiron/sqlx.v0"
_ "gopkg.in/lib/pq.v0"
_ "gopkg.in/mattn/go-sqlite3.v0"
)
type Config struct {
ServiceHost string `yaml:"host,flow"`
DbDriver string `yaml:"driver,flow"`
DbSource string `yaml:"datasource,flow"`
KeyFile string
}
type AuthService struct{}
func (s *AuthService) GetHttpHandler(conf Config) (http.Handler, error) {
// database connection
dbh, err := GetDBHandler(conf)
if err != nil {
return gin.New(), err
}
// reading the private key
keyData, err := ioutil.ReadFile(conf.KeyFile)
if err != nil {
return gin.New(), err
}
// setup resource | auth.POST("/signup", resource.CreateUser)
return r, nil
}
func GetDBHandler(conf Config) (*sqlx.DB, error) {
dbh, err := sqlx.Connect(conf.DbDriver, conf.DbSource)
return dbh, err
} | resource := &AuthResource{dbh, keyData}
r := gin.Default()
auth := r.Group("/auth")
auth.POST("/login", resource.CreateSession) |
redshift_cluster_teardown.py | import boto3
import configparser
def main():
|
if __name__ == "__main__":
main() | """
Description:
    - Tears down the Redshift cluster on AWS (deletes the cluster and its IAM role)
    Returns:
        None
    """
    # Load AWS credentials and cluster settings from a config file.
    # NOTE: the 'dwh.cfg' file name is an assumption; point this at your own config file.
    config = configparser.ConfigParser()
    config.read('dwh.cfg')
    KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
DWH_CLUSTER_IDENTIFIER = config.get("DWH","DWH_CLUSTER_IDENTIFIER")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")
redshift = boto3.client('redshift',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
iam = boto3.client('iam',
region_name='us-west-2',
aws_access_key_id=KEY,
aws_secret_access_key=SECRET)
redshift.delete_cluster(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
SkipFinalClusterSnapshot=True)
# Remove role:
iam.detach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)
    print("Cluster and IAM role have been deleted")
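    # Note: delete_cluster only starts the teardown; the cluster stays in a "deleting"
    # state for a while, so poll redshift.describe_clusters() if the caller needs to
    # block until the cluster is fully gone.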
viewModel.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/gimalay/octogo/todoapp/core/viewModel/viewModel.proto
package viewModel
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type LocationType int32
const (
LocationType_Unknown LocationType = 0
LocationType_Home LocationType = 1457
LocationType_Project LocationType = 1571
LocationType_Task LocationType = 1648
)
var LocationType_name = map[int32]string{
0: "Unknown",
1457: "Home",
1571: "Project",
1648: "Task",
}
var LocationType_value = map[string]int32{
"Unknown": 0,
"Home": 1457,
"Project": 1571,
"Task": 1648,
}
func (x LocationType) String() string {
return proto.EnumName(LocationType_name, int32(x))
}
func (LocationType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{0}
}
type Location struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location) Reset() { *m = Location{} }
func (m *Location) String() string { return proto.CompactTextString(m) }
func (*Location) ProtoMessage() {}
func (*Location) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{0}
}
func (m *Location) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Location.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Location) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location.Merge(m, src)
}
func (m *Location) XXX_Size() int {
return m.Size()
}
func (m *Location) XXX_DiscardUnknown() {
xxx_messageInfo_Location.DiscardUnknown(m)
}
var xxx_messageInfo_Location proto.InternalMessageInfo
type Location_Home struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location_Home) Reset() { *m = Location_Home{} }
func (m *Location_Home) String() string { return proto.CompactTextString(m) }
func (*Location_Home) ProtoMessage() {}
func (*Location_Home) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{0, 0}
}
func (m *Location_Home) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Location_Home) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Location_Home.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Location_Home) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location_Home.Merge(m, src)
}
func (m *Location_Home) XXX_Size() int {
return m.Size()
}
func (m *Location_Home) XXX_DiscardUnknown() {
xxx_messageInfo_Location_Home.DiscardUnknown(m)
}
var xxx_messageInfo_Location_Home proto.InternalMessageInfo
type Location_Project struct {
ProjectID []byte `protobuf:"bytes,2697,opt,name=ProjectID,proto3" json:"ProjectID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location_Project) Reset() { *m = Location_Project{} }
func (m *Location_Project) String() string { return proto.CompactTextString(m) }
func (*Location_Project) ProtoMessage() {}
func (*Location_Project) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{0, 1}
}
func (m *Location_Project) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Location_Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Location_Project.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Location_Project) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location_Project.Merge(m, src)
}
func (m *Location_Project) XXX_Size() int {
return m.Size()
}
func (m *Location_Project) XXX_DiscardUnknown() {
xxx_messageInfo_Location_Project.DiscardUnknown(m)
}
var xxx_messageInfo_Location_Project proto.InternalMessageInfo
func (m *Location_Project) GetProjectID() []byte {
if m != nil {
return m.ProjectID
}
return nil
}
type Location_Task struct {
TaskID []byte `protobuf:"bytes,1717,opt,name=TaskID,proto3" json:"TaskID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location_Task) Reset() { *m = Location_Task{} }
func (m *Location_Task) String() string { return proto.CompactTextString(m) }
func (*Location_Task) ProtoMessage() {}
func (*Location_Task) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{0, 2}
}
func (m *Location_Task) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Location_Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Location_Task.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Location_Task) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location_Task.Merge(m, src)
}
func (m *Location_Task) XXX_Size() int {
return m.Size()
}
func (m *Location_Task) XXX_DiscardUnknown() {
xxx_messageInfo_Location_Task.DiscardUnknown(m)
}
var xxx_messageInfo_Location_Task proto.InternalMessageInfo
func (m *Location_Task) GetTaskID() []byte {
if m != nil {
return m.TaskID
}
return nil
}
type Location_AddTask struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Location_AddTask) Reset() { *m = Location_AddTask{} }
func (m *Location_AddTask) String() string { return proto.CompactTextString(m) }
func (*Location_AddTask) ProtoMessage() {}
func (*Location_AddTask) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{0, 3}
}
func (m *Location_AddTask) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Location_AddTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Location_AddTask.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Location_AddTask) XXX_Merge(src proto.Message) {
xxx_messageInfo_Location_AddTask.Merge(m, src)
}
func (m *Location_AddTask) XXX_Size() int {
return m.Size()
}
func (m *Location_AddTask) XXX_DiscardUnknown() {
xxx_messageInfo_Location_AddTask.DiscardUnknown(m)
}
var xxx_messageInfo_Location_AddTask proto.InternalMessageInfo
type ViewModel struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel) Reset() { *m = ViewModel{} }
func (m *ViewModel) String() string { return proto.CompactTextString(m) }
func (*ViewModel) ProtoMessage() {}
func (*ViewModel) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1}
}
func (m *ViewModel) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel.Merge(m, src)
}
func (m *ViewModel) XXX_Size() int {
return m.Size()
}
func (m *ViewModel) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel proto.InternalMessageInfo
type ViewModel_Project struct {
ID []byte `protobuf:"bytes,4947,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,4032,opt,name=name,proto3" json:"name,omitempty"`
Tasks []*ViewModel_Project_Task `protobuf:"bytes,8856,rep,name=tasks,proto3" json:"tasks,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel_Project) Reset() { *m = ViewModel_Project{} }
func (m *ViewModel_Project) String() string { return proto.CompactTextString(m) }
func (*ViewModel_Project) ProtoMessage() {}
func (*ViewModel_Project) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1, 0}
}
func (m *ViewModel_Project) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel_Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel_Project.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel_Project) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel_Project.Merge(m, src)
}
func (m *ViewModel_Project) XXX_Size() int {
return m.Size()
}
func (m *ViewModel_Project) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel_Project.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel_Project proto.InternalMessageInfo
func (m *ViewModel_Project) GetID() []byte {
if m != nil {
return m.ID
}
return nil
}
func (m *ViewModel_Project) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ViewModel_Project) GetTasks() []*ViewModel_Project_Task {
if m != nil {
return m.Tasks
}
return nil
}
type ViewModel_Project_Task struct {
ID []byte `protobuf:"bytes,5946,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,8336,opt,name=name,proto3" json:"name,omitempty"`
Emoji string `protobuf:"bytes,9790,opt,name=emoji,proto3" json:"emoji,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel_Project_Task) Reset() { *m = ViewModel_Project_Task{} }
func (m *ViewModel_Project_Task) String() string { return proto.CompactTextString(m) }
func (*ViewModel_Project_Task) ProtoMessage() {}
func (*ViewModel_Project_Task) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1, 0, 0}
}
func (m *ViewModel_Project_Task) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel_Project_Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel_Project_Task.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel_Project_Task) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel_Project_Task.Merge(m, src)
}
func (m *ViewModel_Project_Task) XXX_Size() int {
return m.Size()
}
func (m *ViewModel_Project_Task) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel_Project_Task.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel_Project_Task proto.InternalMessageInfo
func (m *ViewModel_Project_Task) GetID() []byte {
if m != nil {
return m.ID
}
return nil
}
func (m *ViewModel_Project_Task) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ViewModel_Project_Task) GetEmoji() string {
if m != nil {
return m.Emoji
}
return ""
}
type ViewModel_Task struct {
ID []byte `protobuf:"bytes,4987,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,4932,opt,name=name,proto3" json:"name,omitempty"`
Emoji string `protobuf:"bytes,2651,opt,name=emoji,proto3" json:"emoji,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel_Task) Reset() { *m = ViewModel_Task{} }
func (m *ViewModel_Task) String() string { return proto.CompactTextString(m) }
func (*ViewModel_Task) ProtoMessage() {}
func (*ViewModel_Task) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1, 1}
}
func (m *ViewModel_Task) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel_Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel_Task.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel_Task) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel_Task.Merge(m, src)
}
func (m *ViewModel_Task) XXX_Size() int {
return m.Size()
}
func (m *ViewModel_Task) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel_Task.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel_Task proto.InternalMessageInfo
func (m *ViewModel_Task) GetID() []byte {
if m != nil {
return m.ID
}
return nil
}
func (m *ViewModel_Task) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ViewModel_Task) GetEmoji() string {
if m != nil {
return m.Emoji
}
return ""
}
type ViewModel_Home struct {
Projects []*ViewModel_Home_Project `protobuf:"bytes,6910,rep,name=projects,proto3" json:"projects,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel_Home) Reset() { *m = ViewModel_Home{} }
func (m *ViewModel_Home) String() string { return proto.CompactTextString(m) }
func (*ViewModel_Home) ProtoMessage() {}
func (*ViewModel_Home) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1, 2}
}
func (m *ViewModel_Home) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel_Home) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel_Home.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel_Home) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel_Home.Merge(m, src)
}
func (m *ViewModel_Home) XXX_Size() int {
return m.Size()
}
func (m *ViewModel_Home) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel_Home.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel_Home proto.InternalMessageInfo
func (m *ViewModel_Home) GetProjects() []*ViewModel_Home_Project {
if m != nil {
return m.Projects
}
return nil
}
type ViewModel_Home_Project struct {
ID []byte `protobuf:"bytes,4947,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,4032,opt,name=name,proto3" json:"name,omitempty"`
Tasks []*ViewModel_Home_Project_Task `protobuf:"bytes,8856,rep,name=tasks,proto3" json:"tasks,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel_Home_Project) Reset() { *m = ViewModel_Home_Project{} }
func (m *ViewModel_Home_Project) String() string { return proto.CompactTextString(m) }
func (*ViewModel_Home_Project) ProtoMessage() {}
func (*ViewModel_Home_Project) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1, 2, 0}
}
func (m *ViewModel_Home_Project) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel_Home_Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel_Home_Project.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel_Home_Project) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel_Home_Project.Merge(m, src)
}
func (m *ViewModel_Home_Project) XXX_Size() int {
return m.Size()
}
func (m *ViewModel_Home_Project) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel_Home_Project.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel_Home_Project proto.InternalMessageInfo
func (m *ViewModel_Home_Project) GetID() []byte {
if m != nil {
return m.ID
}
return nil
}
func (m *ViewModel_Home_Project) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ViewModel_Home_Project) GetTasks() []*ViewModel_Home_Project_Task {
if m != nil {
return m.Tasks
}
return nil
}
type ViewModel_Home_Project_Task struct {
ID []byte `protobuf:"bytes,2946,opt,name=ID,proto3" json:"ID,omitempty"`
Name string `protobuf:"bytes,4336,opt,name=name,proto3" json:"name,omitempty"`
Emoji string `protobuf:"bytes,2990,opt,name=emoji,proto3" json:"emoji,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ViewModel_Home_Project_Task) Reset() { *m = ViewModel_Home_Project_Task{} }
func (m *ViewModel_Home_Project_Task) String() string { return proto.CompactTextString(m) }
func (*ViewModel_Home_Project_Task) ProtoMessage() {}
func (*ViewModel_Home_Project_Task) Descriptor() ([]byte, []int) {
return fileDescriptor_1a8739d099015062, []int{1, 2, 0, 0}
}
func (m *ViewModel_Home_Project_Task) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ViewModel_Home_Project_Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ViewModel_Home_Project_Task.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ViewModel_Home_Project_Task) XXX_Merge(src proto.Message) {
xxx_messageInfo_ViewModel_Home_Project_Task.Merge(m, src)
}
func (m *ViewModel_Home_Project_Task) XXX_Size() int {
return m.Size()
}
func (m *ViewModel_Home_Project_Task) XXX_DiscardUnknown() {
xxx_messageInfo_ViewModel_Home_Project_Task.DiscardUnknown(m)
}
var xxx_messageInfo_ViewModel_Home_Project_Task proto.InternalMessageInfo
func (m *ViewModel_Home_Project_Task) GetID() []byte {
if m != nil {
return m.ID
}
return nil
}
func (m *ViewModel_Home_Project_Task) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ViewModel_Home_Project_Task) GetEmoji() string {
if m != nil {
return m.Emoji
}
return ""
}
func init() {
proto.RegisterEnum("viewModel.LocationType", LocationType_name, LocationType_value)
proto.RegisterType((*Location)(nil), "viewModel.Location")
proto.RegisterType((*Location_Home)(nil), "viewModel.Location.Home")
proto.RegisterType((*Location_Project)(nil), "viewModel.Location.Project")
proto.RegisterType((*Location_Task)(nil), "viewModel.Location.Task")
proto.RegisterType((*Location_AddTask)(nil), "viewModel.Location.AddTask")
proto.RegisterType((*ViewModel)(nil), "viewModel.ViewModel")
proto.RegisterType((*ViewModel_Project)(nil), "viewModel.ViewModel.Project")
proto.RegisterType((*ViewModel_Project_Task)(nil), "viewModel.ViewModel.Project.Task")
proto.RegisterType((*ViewModel_Task)(nil), "viewModel.ViewModel.Task")
proto.RegisterType((*ViewModel_Home)(nil), "viewModel.ViewModel.Home")
proto.RegisterType((*ViewModel_Home_Project)(nil), "viewModel.ViewModel.Home.Project")
proto.RegisterType((*ViewModel_Home_Project_Task)(nil), "viewModel.ViewModel.Home.Project.Task")
}
func init() {
proto.RegisterFile("github.com/gimalay/octogo/todoapp/core/viewModel/viewModel.proto", fileDescriptor_1a8739d099015062)
}
var fileDescriptor_1a8739d099015062 = []byte{
// 438 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcc, 0x4d, 0xcc, 0x49, 0xac, 0xd4, 0xcf,
0x4f, 0x2e, 0xc9, 0x4f, 0xcf, 0xd7, 0x2f, 0xc9, 0x4f, 0xc9, 0x4f, 0x2c, 0x28, 0xd0, 0x4f, 0xce,
0x2f, 0x4a, 0xd5, 0x2f, 0xcb, 0x4c, 0x2d, 0xf7, 0xcd, 0x4f, 0x49, 0xcd, 0x41, 0xb0, 0xf4, 0x0a,
0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x38, 0xe1, 0x02, 0x4a, 0x19, 0x5c, 0x1c, 0x3e, 0xf9, 0xc9, 0x89,
0x25, 0x99, 0xf9, 0x79, 0x52, 0x6c, 0x5c, 0x2c, 0x1e, 0xf9, 0xb9, 0xa9, 0x52, 0x1a, 0x5c, 0xec,
0x01, 0x45, 0xf9, 0x59, 0xa9, 0xc9, 0x25, 0x42, 0xb2, 0x5c, 0x9c, 0x50, 0xa6, 0xa7, 0x8b, 0x44,
0xa7, 0xa8, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x42, 0x44, 0x4a, 0x9e, 0x8b, 0x25, 0x24, 0xb1, 0x38,
0x5b, 0x48, 0x9c, 0x8b, 0x0d, 0x44, 0x7b, 0xba, 0x48, 0x6c, 0xe5, 0x05, 0xab, 0x81, 0x72, 0xa5,
0x38, 0xb9, 0xd8, 0x1d, 0x53, 0x52, 0x40, 0x1c, 0xa5, 0x06, 0x16, 0x2e, 0xce, 0x30, 0x98, 0xbd,
0x52, 0xeb, 0x18, 0x11, 0x96, 0xf0, 0x73, 0x31, 0x79, 0xba, 0x48, 0x5c, 0x56, 0x03, 0xeb, 0x64,
0xf2, 0x74, 0x11, 0x12, 0xe6, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x38, 0x20, 0xaf, 0xc0, 0xa8,
0xc1, 0x19, 0x04, 0xe6, 0x08, 0x59, 0x70, 0xb1, 0x96, 0x24, 0x16, 0x67, 0x17, 0x4b, 0xcc, 0x70,
0x55, 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd4, 0x43, 0x78, 0x0b, 0x6e, 0xae, 0x1e, 0xd4, 0x4c, 0x3d,
0x90, 0x95, 0x41, 0x10, 0x0d, 0x52, 0xce, 0x50, 0x57, 0x42, 0xec, 0xd9, 0xa5, 0x87, 0x61, 0xcf,
0x04, 0x47, 0x24, 0x7b, 0x44, 0xb9, 0x58, 0x53, 0x73, 0xf3, 0xb3, 0x32, 0x25, 0xf6, 0xf9, 0x80,
0x45, 0x21, 0x3c, 0x34, 0x43, 0x7e, 0x63, 0x3a, 0xf6, 0x88, 0x1a, 0x36, 0x43, 0x6e, 0x8b, 0x20,
0x1b, 0xf2, 0x87, 0x11, 0x12, 0xc4, 0x42, 0x76, 0x5c, 0x1c, 0x05, 0x10, 0x97, 0x16, 0x4b, 0xfc,
0x33, 0xc5, 0xe3, 0x1f, 0x90, 0x6a, 0x98, 0xa7, 0x82, 0xe0, 0x7a, 0xa4, 0x36, 0x93, 0x1c, 0x7c,
0xb6, 0x68, 0xc1, 0xa7, 0x46, 0xd0, 0x3a, 0x3c, 0x61, 0xd8, 0x24, 0x8e, 0x61, 0xd9, 0x07, 0x45,
0x6c, 0xde, 0x5f, 0x27, 0x8e, 0xe4, 0x7d, 0x2d, 0x7b, 0x2e, 0x1e, 0x58, 0x62, 0x0b, 0xa9, 0x2c,
0x48, 0x15, 0xe2, 0xe6, 0x62, 0x0f, 0xcd, 0xcb, 0xce, 0xcb, 0x2f, 0xcf, 0x13, 0x60, 0x10, 0xe2,
0x84, 0x04, 0x8d, 0xc0, 0x46, 0x6e, 0x21, 0x1e, 0xb8, 0xe7, 0x04, 0x16, 0xf3, 0x80, 0x24, 0x40,
0x56, 0x0b, 0x7c, 0xe0, 0x71, 0xd2, 0x3f, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07,
0x8f, 0xe4, 0x18, 0x67, 0x3c, 0x96, 0x63, 0xe0, 0x92, 0x4a, 0xce, 0xcf, 0xd5, 0x4b, 0xad, 0x48,
0xcc, 0x2d, 0xc8, 0x49, 0xd5, 0x03, 0x25, 0xff, 0x9c, 0xcc, 0xe2, 0x12, 0xbd, 0x5c, 0x90, 0x67,
0x76, 0x31, 0x31, 0x24, 0xb1, 0x81, 0x13, 0xbc, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x0b,
0xcb, 0x97, 0x34, 0x03, 0x00, 0x00,
}
func (m *Location) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Location) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
return len(dAtA) - i, nil
}
func (m *Location_Home) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Location_Home) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Location_Home) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
return len(dAtA) - i, nil
}
func (m *Location_Project) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Location_Project) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
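// MarshalToSizedBuffer fills dAtA from the end towards the front. The three literal bytes
// written for ProjectID below are the pre-computed protobuf key for field 2697 with wire
// type 2 (2697<<3 | 2 = 21578, varint-encoded as 0xCA 0xA8 0x01); they appear in reverse
// order here because the buffer index is decremented before each write.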
func (m *Location_Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ProjectID) > 0 {
i -= len(m.ProjectID)
copy(dAtA[i:], m.ProjectID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.ProjectID)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xa8
i--
dAtA[i] = 0xca
}
return len(dAtA) - i, nil
}
func (m *Location_Task) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Location_Task) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Location_Task) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.TaskID) > 0 {
i -= len(m.TaskID)
copy(dAtA[i:], m.TaskID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.TaskID)))
i--
dAtA[i] = 0x6b
i--
dAtA[i] = 0xaa
}
return len(dAtA) - i, nil
}
func (m *Location_AddTask) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Location_AddTask) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Location_AddTask) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
return len(dAtA) - i, nil
}
func (m *ViewModel) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
return len(dAtA) - i, nil
}
func (m *ViewModel_Project) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel_Project) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel_Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Tasks) > 0 {
for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintViewModel(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4
i--
dAtA[i] = 0xa9
i--
dAtA[i] = 0xc2
}
}
if len(m.ID) > 0 {
i -= len(m.ID)
copy(dAtA[i:], m.ID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.ID)))
i--
dAtA[i] = 0x2
i--
dAtA[i] = 0xb5
i--
dAtA[i] = 0x9a
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xfc
i--
dAtA[i] = 0x82
}
return len(dAtA) - i, nil
}
func (m *ViewModel_Project_Task) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel_Project_Task) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel_Project_Task) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Emoji) > 0 {
i -= len(m.Emoji)
copy(dAtA[i:], m.Emoji)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Emoji)))
i--
dAtA[i] = 0x4
i--
dAtA[i] = 0xe3
i--
dAtA[i] = 0xf2
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x4
i--
dAtA[i] = 0x89
i--
dAtA[i] = 0x82
}
if len(m.ID) > 0 {
i -= len(m.ID)
copy(dAtA[i:], m.ID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.ID)))
i--
dAtA[i] = 0x2
i--
dAtA[i] = 0xf3
i--
dAtA[i] = 0xd2
}
return len(dAtA) - i, nil
}
func (m *ViewModel_Task) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel_Task) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel_Task) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.ID) > 0 {
i -= len(m.ID)
copy(dAtA[i:], m.ID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.ID)))
i--
dAtA[i] = 0x2
i--
dAtA[i] = 0xb7
i--
dAtA[i] = 0xda
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x2
i--
dAtA[i] = 0xb4
i--
dAtA[i] = 0xa2
}
if len(m.Emoji) > 0 {
i -= len(m.Emoji)
copy(dAtA[i:], m.Emoji)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Emoji)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xa5
i--
dAtA[i] = 0xda
}
return len(dAtA) - i, nil
}
func (m *ViewModel_Home) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel_Home) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel_Home) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Projects) > 0 {
for iNdEx := len(m.Projects) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Projects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintViewModel(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3
i--
dAtA[i] = 0xaf
i--
dAtA[i] = 0xf2
}
}
return len(dAtA) - i, nil
}
func (m *ViewModel_Home_Project) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel_Home_Project) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel_Home_Project) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Tasks) > 0 {
for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintViewModel(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4
i--
dAtA[i] = 0xa9
i--
dAtA[i] = 0xc2
}
}
if len(m.ID) > 0 {
i -= len(m.ID)
copy(dAtA[i:], m.ID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.ID)))
i--
dAtA[i] = 0x2
i--
dAtA[i] = 0xb5
i--
dAtA[i] = 0x9a
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xfc
i--
dAtA[i] = 0x82
}
return len(dAtA) - i, nil
}
func (m *ViewModel_Home_Project_Task) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ViewModel_Home_Project_Task) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ViewModel_Home_Project_Task) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0x2
i--
dAtA[i] = 0x8f
i--
dAtA[i] = 0x82
}
if len(m.Emoji) > 0 {
i -= len(m.Emoji)
copy(dAtA[i:], m.Emoji)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.Emoji)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xba
i--
dAtA[i] = 0xf2
}
if len(m.ID) > 0 {
i -= len(m.ID)
copy(dAtA[i:], m.ID)
i = encodeVarintViewModel(dAtA, i, uint64(len(m.ID)))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xb8
i--
dAtA[i] = 0x92
}
return len(dAtA) - i, nil
}
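// encodeVarintViewModel writes v as a protobuf varint into the bytes immediately before
// offset (the marshallers above fill the buffer back to front) and returns the index of
// the varint's first byte: 7 bits per byte, least-significant group first, with the high
// bit set on every byte except the last. For example, 300 encodes as 0xAC 0x02.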
func encodeVarintViewModel(dAtA []byte, offset int, v uint64) int {
offset -= sovViewModel(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Location) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Location_Home) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Location_Project) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ProjectID)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Location_Task) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.TaskID)
if l > 0 {
n += 2 + l + sovViewModel(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Location_AddTask) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel_Project) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.ID)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
if len(m.Tasks) > 0 {
for _, e := range m.Tasks {
l = e.Size()
n += 3 + l + sovViewModel(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel_Project_Task) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.Emoji)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel_Task) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Emoji)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.ID)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel_Home) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Projects) > 0 {
for _, e := range m.Projects {
l = e.Size()
n += 3 + l + sovViewModel(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel_Home_Project) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.ID)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
if len(m.Tasks) > 0 {
for _, e := range m.Tasks {
l = e.Size()
n += 3 + l + sovViewModel(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *ViewModel_Home_Project_Task) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.Emoji)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 3 + l + sovViewModel(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
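// sovViewModel returns the number of bytes needed to varint-encode x: one byte per started
// group of 7 bits, so values below 128 take a single byte and, for example, 300 takes two.
// sozViewModel first applies zigzag encoding so that small negative values stay short:
// -1 maps to 1, 1 to 2, -2 to 3, and so on.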
func sovViewModel(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozViewModel(x uint64) (n int) {
return sovViewModel(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
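// Unmarshal (and the per-message variants below) walk the wire format directly: each loop
// iteration reads one varint key and splits it into a field number (wire >> 3) and a wire
// type (wire & 0x7). Location itself declares no fields, so every key falls through to the
// default branch and the raw bytes are preserved in XXX_unrecognized.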
func (m *Location) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Location: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Location_Home) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Home: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Home: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Location_Project) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Project: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2697:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ProjectID = append(m.ProjectID[:0], dAtA[iNdEx:postIndex]...)
if m.ProjectID == nil {
m.ProjectID = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Location_Task) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Task: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1717:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TaskID = append(m.TaskID[:0], dAtA[iNdEx:postIndex]...)
if m.TaskID == nil {
m.TaskID = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Location_AddTask) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: AddTask: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: AddTask: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ViewModel: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ViewModel: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel_Project) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Project: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 4032:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4947:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = append(m.ID[:0], dAtA[iNdEx:postIndex]...)
if m.ID == nil {
m.ID = []byte{}
}
iNdEx = postIndex
case 8856:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Tasks = append(m.Tasks, &ViewModel_Project_Task{})
if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel_Project_Task) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Task: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 5946:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = append(m.ID[:0], dAtA[iNdEx:postIndex]...)
if m.ID == nil {
m.ID = []byte{}
}
iNdEx = postIndex
case 8336:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 9790:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Emoji", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Emoji = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel_Task) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Task: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2651:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Emoji", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Emoji = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4932:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4987:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = append(m.ID[:0], dAtA[iNdEx:postIndex]...)
if m.ID == nil {
m.ID = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel_Home) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Home: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Home: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 6910:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Projects", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Projects = append(m.Projects, &ViewModel_Home_Project{})
if err := m.Projects[len(m.Projects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel_Home_Project) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Project: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 4032:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4947:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = append(m.ID[:0], dAtA[iNdEx:postIndex]...)
if m.ID == nil {
m.ID = []byte{}
}
iNdEx = postIndex
case 8856:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Tasks = append(m.Tasks, &ViewModel_Home_Project_Task{})
if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ViewModel_Home_Project_Task) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Task: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 2946:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = append(m.ID[:0], dAtA[iNdEx:postIndex]...)
if m.ID == nil {
m.ID = []byte{}
}
iNdEx = postIndex
case 2990:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Emoji", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Emoji = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4336:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowViewModel
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthViewModel
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthViewModel
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipViewModel(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthViewModel
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipViewModel(dAtA []byte) (n int, err error) |
var (
ErrInvalidLengthViewModel = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowViewModel = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupViewModel = fmt.Errorf("proto: unexpected end of group")
)
| {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowViewModel
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
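		// Added note (not part of the generated file): proto wire types are 0 varint,
		// 1 fixed64, 2 length-delimited, 3/4 start/end group and 5 fixed32; each case
		// below advances iNdEx past exactly one field of that type.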
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowViewModel
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowViewModel
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthViewModel
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupViewModel
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthViewModel
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
} |
generate_cdiscount_predictions.py | import math
import tensorflow as tf
import os
import struct
import pdb
import numpy as np
from datasets import dataset_factory
from nets import nets_factory
import nets.resnet_v2 as resnet_v2
from preprocessing import preprocessing_factory
slim = tf.contrib.slim
def merge_predictions(predictions_fn):
'''
	Merge prediction/logit scores across consecutive rows that share a product id
	(one row per image) using a p=3 power mean; write the result as big-endian floats.
'''
out_f = open(predictions_fn + '_merged', 'wb')
f = open(predictions_fn, 'r')
line = f.readline().strip().split()
curr_id = line[0]
curr_scores = np.power(np.array([float(x) for x in line[1:]]), 3)
num_elems = 1
line = f.readline().strip().split()
while line != []:
id = line[0]
		# raise elements to the third power; the matching cube root is taken after averaging
scores = np.power(np.array([float(x) for x in line[1:]]), 3)
if id == curr_id:
num_elems += 1
curr_scores += scores
else:
curr_scores = np.cbrt(curr_scores / float(num_elems))
for score in curr_scores:
out_f.write(struct.pack('>f', score))
curr_scores = scores
num_elems = 1
curr_id = id
line = f.readline().strip().split()
curr_scores = np.cbrt(curr_scores / float(num_elems))
for score in curr_scores:
out_f.write(struct.pack('>f', score))
out_f.close()
f.close()
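
# Minimal illustrative sketch (added; not part of the original script): the same
# cube / cube-root averaging that merge_predictions applies per product, shown on
# two in-memory score rows. Assumes the module-level numpy import (np) above.
def _power_mean_merge_example():
	a = np.array([0.2, 0.5, 0.3])
	b = np.array([0.4, 0.4, 0.2])
	# generalized mean with p=3 weights confident predictions more than a plain mean
	return np.cbrt((a ** 3 + b ** 3) / 2.0)
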
if __name__ == '__main__':
checkpoint_dir = '/home/shunan/Code/Data/cdiscount/training'
dataset_dir = '/home/shunan/Code/Data/cdiscount/tf_records'
num_classes = 5270
image_size = 180
batch_size = 100
set_name = 'validation'
data_sizes = {'train': 12195682, 'validation': 175611, 'test': 3095080}
out_fn = os.path.join(dataset_dir, '{}_predictions.txt'.format(set_name))
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
# loading the dataset
dataset = dataset_factory.get_dataset('cdiscount', set_name, dataset_dir)
	# Dataset provider that reads examples from the TFRecord dataset.
provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=False, common_queue_capacity=2*batch_size, | # Pre-processing step.
image_preprocessing_fn = preprocessing_factory.get_preprocessing('simple', is_training=False)
image = image_preprocessing_fn(image, image_size, image_size)
images, labels, product_ids = tf.train.batch([image, label, product_id], batch_size=batch_size, num_threads=1,
capacity=5 * batch_size)
# Get the model
# network_fn = nets_factory.get_network_fn('resnet_v2_152', num_classes=num_classes, is_training=False)
with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=0.)):
logits, end_points = resnet_v2.resnet_v2_152(images, num_classes=num_classes, is_training=False)
	# Obtain the variables to restore and build a saver.
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
output_f = open(out_fn, 'w')
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
sess.run(tf.global_variables_initializer())
saver.restore(sess, checkpoint_file)
num_iters = int(math.ceil(data_sizes[set_name] / float(batch_size)))
num_last_batch = batch_size - ((num_iters * batch_size) - data_sizes[set_name])
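		# Worked example (added comment): for the validation set, ceil(175611 / 100) = 1757
		# iterations, and the wrapped final batch keeps 100 - (1757 * 100 - 175611) = 11 rows.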
for i in range(num_iters):
output, ids = sess.run([logits, product_ids])
if i == num_iters - 1:
output = output[:num_last_batch, :]
ids = ids[:num_last_batch]
for j in range(output.shape[0]):
vec_str = [str(x) for x in output[j, :]]
output_f.write(str(ids[j]) + ' ' + ' '.join(vec_str) + '\n')
output_f.close() | common_queue_min=batch_size)
[image, label, product_id] = provider.get(['image', 'label', 'product_id'])
|
test.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: go.chromium.org/luci/starlark/starlarkproto/testprotos/test.proto
package testprotos
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Enum int32
const (
Enum_ENUM_DEFAULT Enum = 0
Enum_ENUM_VAL_1 Enum = 1
)
var Enum_name = map[int32]string{
0: "ENUM_DEFAULT",
1: "ENUM_VAL_1",
}
var Enum_value = map[string]int32{
"ENUM_DEFAULT": 0,
"ENUM_VAL_1": 1,
}
func (x Enum) String() string {
return proto.EnumName(Enum_name, int32(x))
}
func (Enum) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{0}
}
type Complex_InnerEnum int32
const (
Complex_UNKNOWN Complex_InnerEnum = 0
Complex_ENUM_VAL_1 Complex_InnerEnum = 1
)
var Complex_InnerEnum_name = map[int32]string{
0: "UNKNOWN",
1: "ENUM_VAL_1",
}
var Complex_InnerEnum_value = map[string]int32{
"UNKNOWN": 0,
"ENUM_VAL_1": 1,
}
func (x Complex_InnerEnum) String() string {
return proto.EnumName(Complex_InnerEnum_name, int32(x))
}
func (Complex_InnerEnum) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{4, 0}
}
type SimpleFields struct {
I64 int64 `protobuf:"varint,1,opt,name=i64,proto3" json:"i64,omitempty"`
I64Rep []int64 `protobuf:"varint,2,rep,packed,name=i64_rep,json=i64Rep,proto3" json:"i64_rep,omitempty"`
I32 int32 `protobuf:"varint,3,opt,name=i32,proto3" json:"i32,omitempty"`
Ui64 uint64 `protobuf:"varint,4,opt,name=ui64,proto3" json:"ui64,omitempty"`
Ui32 uint32 `protobuf:"varint,5,opt,name=ui32,proto3" json:"ui32,omitempty"`
B bool `protobuf:"varint,6,opt,name=b,proto3" json:"b,omitempty"`
F32 float32 `protobuf:"fixed32,7,opt,name=f32,proto3" json:"f32,omitempty"`
F64 float64 `protobuf:"fixed64,8,opt,name=f64,proto3" json:"f64,omitempty"`
S string `protobuf:"bytes,9,opt,name=s,proto3" json:"s,omitempty"`
Bs []byte `protobuf:"bytes,10,opt,name=bs,proto3" json:"bs,omitempty"`
BsRep [][]byte `protobuf:"bytes,11,rep,name=bs_rep,json=bsRep,proto3" json:"bs_rep,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SimpleFields) Reset() { *m = SimpleFields{} }
func (m *SimpleFields) String() string { return proto.CompactTextString(m) }
func (*SimpleFields) ProtoMessage() {}
func (*SimpleFields) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{0}
}
func (m *SimpleFields) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SimpleFields.Unmarshal(m, b)
}
func (m *SimpleFields) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SimpleFields.Marshal(b, m, deterministic)
}
func (m *SimpleFields) XXX_Merge(src proto.Message) {
xxx_messageInfo_SimpleFields.Merge(m, src)
}
func (m *SimpleFields) XXX_Size() int {
return xxx_messageInfo_SimpleFields.Size(m)
}
func (m *SimpleFields) XXX_DiscardUnknown() {
xxx_messageInfo_SimpleFields.DiscardUnknown(m)
}
var xxx_messageInfo_SimpleFields proto.InternalMessageInfo
func (m *SimpleFields) GetI64() int64 {
if m != nil {
return m.I64
}
return 0
}
func (m *SimpleFields) GetI64Rep() []int64 {
if m != nil {
return m.I64Rep
}
return nil
}
func (m *SimpleFields) GetI32() int32 {
if m != nil {
return m.I32
}
return 0
}
func (m *SimpleFields) GetUi64() uint64 {
if m != nil {
return m.Ui64
}
return 0
}
func (m *SimpleFields) GetUi32() uint32 {
if m != nil {
return m.Ui32
}
return 0
}
func (m *SimpleFields) GetB() bool {
if m != nil {
return m.B
}
return false
}
func (m *SimpleFields) GetF32() float32 {
if m != nil {
return m.F32
}
return 0
}
func (m *SimpleFields) GetF64() float64 {
if m != nil {
return m.F64
}
return 0
}
func (m *SimpleFields) GetS() string {
if m != nil {
return m.S
}
return ""
}
func (m *SimpleFields) GetBs() []byte {
if m != nil {
return m.Bs
}
return nil
}
func (m *SimpleFields) GetBsRep() [][]byte {
if m != nil {
return m.BsRep
}
return nil
}
type MessageFields struct {
Single *Simple `protobuf:"bytes,1,opt,name=single,proto3" json:"single,omitempty"`
Rep []*Simple `protobuf:"bytes,2,rep,name=rep,proto3" json:"rep,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MessageFields) Reset() { *m = MessageFields{} }
func (m *MessageFields) String() string { return proto.CompactTextString(m) }
func (*MessageFields) ProtoMessage() {}
func (*MessageFields) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{1}
}
func (m *MessageFields) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MessageFields.Unmarshal(m, b)
}
func (m *MessageFields) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MessageFields.Marshal(b, m, deterministic)
}
func (m *MessageFields) XXX_Merge(src proto.Message) {
xxx_messageInfo_MessageFields.Merge(m, src)
}
func (m *MessageFields) XXX_Size() int {
return xxx_messageInfo_MessageFields.Size(m)
}
func (m *MessageFields) XXX_DiscardUnknown() {
xxx_messageInfo_MessageFields.DiscardUnknown(m)
}
var xxx_messageInfo_MessageFields proto.InternalMessageInfo
func (m *MessageFields) GetSingle() *Simple {
if m != nil {
return m.Single
}
return nil
}
func (m *MessageFields) GetRep() []*Simple {
if m != nil {
return m.Rep
}
return nil
}
type Simple struct {
I int64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
ManyI []int64 `protobuf:"varint,2,rep,packed,name=many_i,json=manyI,proto3" json:"many_i,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Simple) Reset() { *m = Simple{} }
func (m *Simple) String() string { return proto.CompactTextString(m) }
func (*Simple) ProtoMessage() {}
func (*Simple) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{2}
}
func (m *Simple) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Simple.Unmarshal(m, b)
}
func (m *Simple) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Simple.Marshal(b, m, deterministic)
}
func (m *Simple) XXX_Merge(src proto.Message) {
xxx_messageInfo_Simple.Merge(m, src)
}
func (m *Simple) XXX_Size() int {
return xxx_messageInfo_Simple.Size(m)
}
func (m *Simple) XXX_DiscardUnknown() {
xxx_messageInfo_Simple.DiscardUnknown(m)
}
var xxx_messageInfo_Simple proto.InternalMessageInfo
func (m *Simple) GetI() int64 {
if m != nil {
return m.I
}
return 0
}
func (m *Simple) GetManyI() []int64 {
if m != nil {
return m.ManyI
}
return nil
}
type AnotherSimple struct {
J int64 `protobuf:"varint,1,opt,name=j,proto3" json:"j,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AnotherSimple) Reset() { *m = AnotherSimple{} }
func (m *AnotherSimple) String() string { return proto.CompactTextString(m) }
func (*AnotherSimple) ProtoMessage() {}
func (*AnotherSimple) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{3}
}
func (m *AnotherSimple) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AnotherSimple.Unmarshal(m, b)
}
func (m *AnotherSimple) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AnotherSimple.Marshal(b, m, deterministic)
}
func (m *AnotherSimple) XXX_Merge(src proto.Message) {
xxx_messageInfo_AnotherSimple.Merge(m, src)
}
func (m *AnotherSimple) XXX_Size() int {
return xxx_messageInfo_AnotherSimple.Size(m)
}
func (m *AnotherSimple) XXX_DiscardUnknown() {
xxx_messageInfo_AnotherSimple.DiscardUnknown(m)
}
var xxx_messageInfo_AnotherSimple proto.InternalMessageInfo
func (m *AnotherSimple) GetJ() int64 {
if m != nil {
return m.J
}
return 0
}
type Complex struct {
I64 int64 `protobuf:"varint,1,opt,name=i64,proto3" json:"i64,omitempty"`
I64Rep []int64 `protobuf:"varint,2,rep,packed,name=i64_rep,json=i64Rep,proto3" json:"i64_rep,omitempty"`
EnumVal Complex_InnerEnum `protobuf:"varint,3,opt,name=enum_val,json=enumVal,proto3,enum=testprotos.Complex_InnerEnum" json:"enum_val,omitempty"`
MsgVal *Complex_InnerMessage `protobuf:"bytes,4,opt,name=msg_val,json=msgVal,proto3" json:"msg_val,omitempty"`
MsgValRep []*Complex_InnerMessage `protobuf:"bytes,5,rep,name=msg_val_rep,json=msgValRep,proto3" json:"msg_val_rep,omitempty"`
// Types that are valid to be assigned to OneofVal:
// *Complex_Simple
// *Complex_AnotherSimple
OneofVal isComplex_OneofVal `protobuf_oneof:"oneof_val"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Complex) Reset() { *m = Complex{} }
func (m *Complex) String() string { return proto.CompactTextString(m) }
func (*Complex) ProtoMessage() {}
func (*Complex) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{4}
}
func (m *Complex) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Complex.Unmarshal(m, b)
}
func (m *Complex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Complex.Marshal(b, m, deterministic)
}
func (m *Complex) XXX_Merge(src proto.Message) {
xxx_messageInfo_Complex.Merge(m, src)
}
func (m *Complex) XXX_Size() int {
return xxx_messageInfo_Complex.Size(m)
}
func (m *Complex) XXX_DiscardUnknown() {
xxx_messageInfo_Complex.DiscardUnknown(m)
}
var xxx_messageInfo_Complex proto.InternalMessageInfo
func (m *Complex) GetI64() int64 {
if m != nil {
return m.I64
}
return 0
}
func (m *Complex) GetI64Rep() []int64 {
if m != nil {
return m.I64Rep
}
return nil
}
func (m *Complex) GetEnumVal() Complex_InnerEnum {
if m != nil {
return m.EnumVal
}
return Complex_UNKNOWN
}
func (m *Complex) GetMsgVal() *Complex_InnerMessage {
if m != nil {
return m.MsgVal
}
return nil
}
func (m *Complex) GetMsgValRep() []*Complex_InnerMessage {
if m != nil {
return m.MsgValRep
}
return nil
}
type isComplex_OneofVal interface {
isComplex_OneofVal()
}
type Complex_Simple struct {
Simple *Simple `protobuf:"bytes,6,opt,name=simple,proto3,oneof"`
}
type Complex_AnotherSimple struct {
AnotherSimple *AnotherSimple `protobuf:"bytes,7,opt,name=another_simple,json=anotherSimple,proto3,oneof"`
}
func (*Complex_Simple) isComplex_OneofVal() {}
func (*Complex_AnotherSimple) isComplex_OneofVal() {}
func (m *Complex) GetOneofVal() isComplex_OneofVal {
if m != nil {
return m.OneofVal
}
return nil
}
func (m *Complex) GetSimple() *Simple {
if x, ok := m.GetOneofVal().(*Complex_Simple); ok {
return x.Simple
}
return nil
}
func (m *Complex) GetAnotherSimple() *AnotherSimple {
if x, ok := m.GetOneofVal().(*Complex_AnotherSimple); ok {
return x.AnotherSimple
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Complex) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Complex_Simple)(nil),
(*Complex_AnotherSimple)(nil),
}
}
type Complex_InnerMessage struct {
I int64 `protobuf:"varint,1,opt,name=i,proto3" json:"i,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Complex_InnerMessage) Reset() { *m = Complex_InnerMessage{} }
func (m *Complex_InnerMessage) String() string { return proto.CompactTextString(m) }
func (*Complex_InnerMessage) ProtoMessage() {}
func (*Complex_InnerMessage) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{4, 0}
}
func (m *Complex_InnerMessage) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Complex_InnerMessage.Unmarshal(m, b)
}
func (m *Complex_InnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Complex_InnerMessage.Marshal(b, m, deterministic)
}
func (m *Complex_InnerMessage) XXX_Merge(src proto.Message) {
xxx_messageInfo_Complex_InnerMessage.Merge(m, src)
}
func (m *Complex_InnerMessage) XXX_Size() int {
return xxx_messageInfo_Complex_InnerMessage.Size(m)
}
func (m *Complex_InnerMessage) XXX_DiscardUnknown() {
xxx_messageInfo_Complex_InnerMessage.DiscardUnknown(m)
}
var xxx_messageInfo_Complex_InnerMessage proto.InternalMessageInfo
func (m *Complex_InnerMessage) GetI() int64 {
if m != nil {
return m.I
}
return 0
}
type RefsOtherProtos struct {
AnotherMsg *AnotherMessage `protobuf:"bytes,1,opt,name=another_msg,json=anotherMsg,proto3" json:"another_msg,omitempty"`
Ts *timestamp.Timestamp `protobuf:"bytes,2,opt,name=ts,proto3" json:"ts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RefsOtherProtos) Reset() { *m = RefsOtherProtos{} }
func (m *RefsOtherProtos) String() string { return proto.CompactTextString(m) }
func (*RefsOtherProtos) ProtoMessage() {}
func (*RefsOtherProtos) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{5}
}
func (m *RefsOtherProtos) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RefsOtherProtos.Unmarshal(m, b)
}
func (m *RefsOtherProtos) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RefsOtherProtos.Marshal(b, m, deterministic)
}
func (m *RefsOtherProtos) XXX_Merge(src proto.Message) {
xxx_messageInfo_RefsOtherProtos.Merge(m, src)
}
func (m *RefsOtherProtos) XXX_Size() int {
return xxx_messageInfo_RefsOtherProtos.Size(m)
}
func (m *RefsOtherProtos) XXX_DiscardUnknown() {
xxx_messageInfo_RefsOtherProtos.DiscardUnknown(m)
}
var xxx_messageInfo_RefsOtherProtos proto.InternalMessageInfo
func (m *RefsOtherProtos) GetAnotherMsg() *AnotherMessage {
if m != nil {
return m.AnotherMsg
}
return nil
}
func (m *RefsOtherProtos) GetTs() *timestamp.Timestamp {
if m != nil {
return m.Ts
}
return nil
}
type MapWithPrimitiveType struct {
M map[string]int64 `protobuf:"bytes,1,rep,name=m,proto3" json:"m,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MapWithPrimitiveType) Reset() { *m = MapWithPrimitiveType{} }
func (m *MapWithPrimitiveType) String() string { return proto.CompactTextString(m) }
func (*MapWithPrimitiveType) ProtoMessage() {}
func (*MapWithPrimitiveType) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{6}
}
func (m *MapWithPrimitiveType) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MapWithPrimitiveType.Unmarshal(m, b)
}
func (m *MapWithPrimitiveType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MapWithPrimitiveType.Marshal(b, m, deterministic)
}
func (m *MapWithPrimitiveType) XXX_Merge(src proto.Message) {
xxx_messageInfo_MapWithPrimitiveType.Merge(m, src)
}
func (m *MapWithPrimitiveType) XXX_Size() int {
return xxx_messageInfo_MapWithPrimitiveType.Size(m)
}
func (m *MapWithPrimitiveType) XXX_DiscardUnknown() {
xxx_messageInfo_MapWithPrimitiveType.DiscardUnknown(m)
}
var xxx_messageInfo_MapWithPrimitiveType proto.InternalMessageInfo
func (m *MapWithPrimitiveType) GetM() map[string]int64 {
if m != nil {
return m.M
}
return nil
}
type MapWithMessageType struct {
M map[string]*Simple `protobuf:"bytes,1,rep,name=m,proto3" json:"m,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MapWithMessageType) Reset() { *m = MapWithMessageType{} }
func (m *MapWithMessageType) String() string { return proto.CompactTextString(m) }
func (*MapWithMessageType) ProtoMessage() {}
func (*MapWithMessageType) Descriptor() ([]byte, []int) {
return fileDescriptor_15b36de02fb5cc42, []int{7}
}
func (m *MapWithMessageType) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MapWithMessageType.Unmarshal(m, b)
}
func (m *MapWithMessageType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MapWithMessageType.Marshal(b, m, deterministic)
}
func (m *MapWithMessageType) XXX_Merge(src proto.Message) {
xxx_messageInfo_MapWithMessageType.Merge(m, src)
}
func (m *MapWithMessageType) XXX_Size() int {
return xxx_messageInfo_MapWithMessageType.Size(m)
}
func (m *MapWithMessageType) XXX_DiscardUnknown() {
xxx_messageInfo_MapWithMessageType.DiscardUnknown(m)
}
var xxx_messageInfo_MapWithMessageType proto.InternalMessageInfo
func (m *MapWithMessageType) GetM() map[string]*Simple {
if m != nil {
return m.M
}
return nil
}
func | () {
proto.RegisterEnum("testprotos.Enum", Enum_name, Enum_value)
proto.RegisterEnum("testprotos.Complex_InnerEnum", Complex_InnerEnum_name, Complex_InnerEnum_value)
proto.RegisterType((*SimpleFields)(nil), "testprotos.SimpleFields")
proto.RegisterType((*MessageFields)(nil), "testprotos.MessageFields")
proto.RegisterType((*Simple)(nil), "testprotos.Simple")
proto.RegisterType((*AnotherSimple)(nil), "testprotos.AnotherSimple")
proto.RegisterType((*Complex)(nil), "testprotos.Complex")
proto.RegisterType((*Complex_InnerMessage)(nil), "testprotos.Complex.InnerMessage")
proto.RegisterType((*RefsOtherProtos)(nil), "testprotos.RefsOtherProtos")
proto.RegisterType((*MapWithPrimitiveType)(nil), "testprotos.MapWithPrimitiveType")
proto.RegisterMapType((map[string]int64)(nil), "testprotos.MapWithPrimitiveType.MEntry")
proto.RegisterType((*MapWithMessageType)(nil), "testprotos.MapWithMessageType")
proto.RegisterMapType((map[string]*Simple)(nil), "testprotos.MapWithMessageType.MEntry")
}
func init() {
proto.RegisterFile("go.chromium.org/luci/starlark/starlarkproto/testprotos/test.proto", fileDescriptor_15b36de02fb5cc42)
}
var fileDescriptor_15b36de02fb5cc42 = []byte{
// 714 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xed, 0x6e, 0xd3, 0x48,
0x14, 0xed, 0xd8, 0x89, 0xd3, 0xdc, 0x7c, 0x6c, 0x34, 0xea, 0x6a, 0xbd, 0xd1, 0x56, 0x6b, 0x59,
0xbb, 0xc2, 0xaa, 0xc0, 0x11, 0x49, 0x88, 0x0a, 0xfc, 0x21, 0xa5, 0xa9, 0x5a, 0xd1, 0xa4, 0xd5,
0xd0, 0x8f, 0x9f, 0x91, 0x5d, 0x26, 0xee, 0xb4, 0xfe, 0x92, 0xc7, 0xa9, 0x08, 0x7f, 0x79, 0x04,
0xde, 0x84, 0xc7, 0xe1, 0x69, 0xd0, 0x8c, 0x9d, 0xc6, 0x15, 0x01, 0x04, 0xff, 0xce, 0x1d, 0x9f,
0x73, 0x7d, 0xee, 0xb9, 0x33, 0x30, 0xf4, 0x22, 0xfb, 0xea, 0x3a, 0x89, 0x02, 0x36, 0x0f, 0xec,
0x28, 0xf1, 0x3a, 0xfe, 0xfc, 0x8a, 0x75, 0x78, 0xea, 0x24, 0xbe, 0x93, 0xdc, 0xde, 0x83, 0x38,
0x89, 0xd2, 0xa8, 0x93, 0x52, 0x9e, 0x4a, 0xc4, 0x25, 0xb4, 0x25, 0xc6, 0xb0, 0x3a, 0x6e, 0xef,
0xff, 0x66, 0x3b, 0x27, 0x8c, 0xd2, 0x6b, 0x9a, 0x64, 0x1d, 0xdb, 0xff, 0x7a, 0x51, 0xe4, 0xf9,
0xb4, 0x23, 0x2b, 0x77, 0x3e, 0xeb, 0xa4, 0x2c, 0xa0, 0x3c, 0x75, 0x82, 0x38, 0x23, 0x98, 0x5f,
0x10, 0xd4, 0xdf, 0xb2, 0x20, 0xf6, 0xe9, 0x01, 0xa3, 0xfe, 0x3b, 0x8e, 0x5b, 0xa0, 0xb2, 0x41,
0x5f, 0x47, 0x06, 0xb2, 0x54, 0x22, 0x20, 0xfe, 0x0b, 0x2a, 0x6c, 0xd0, 0x9f, 0x26, 0x34, 0xd6,
0x15, 0x43, 0xb5, 0x54, 0xa2, 0xb1, 0x41, 0x9f, 0xd0, 0x58, 0x52, 0x7b, 0x5d, 0x5d, 0x35, 0x90,
0x55, 0x26, 0x02, 0x62, 0x0c, 0xa5, 0xb9, 0x50, 0x97, 0x0c, 0x64, 0x95, 0x88, 0xc4, 0xd9, 0x59,
0xaf, 0xab, 0x97, 0x0d, 0x64, 0x35, 0x88, 0xc4, 0xb8, 0x0e, 0xc8, 0xd5, 0x35, 0x03, 0x59, 0x9b,
0x04, 0xb9, 0xa2, 0xcf, 0xac, 0xd7, 0xd5, 0x2b, 0x06, 0xb2, 0x14, 0x22, 0xa0, 0x3c, 0x19, 0xf4,
0xf5, 0x4d, 0x03, 0x59, 0x88, 0x08, 0x28, 0x14, 0x5c, 0xaf, 0x1a, 0xc8, 0xaa, 0x12, 0xc4, 0x71,
0x13, 0x14, 0x97, 0xeb, 0x60, 0x20, 0xab, 0x4e, 0x14, 0x97, 0xe3, 0x3f, 0x41, 0x73, 0xb9, 0x74,
0x58, 0x33, 0x54, 0xab, 0x4e, 0xca, 0x2e, 0x27, 0x34, 0x36, 0x1d, 0x68, 0x8c, 0x29, 0xe7, 0x8e,
0xb7, 0x1c, 0x6e, 0x07, 0x34, 0xce, 0x42, 0xcf, 0xa7, 0x72, 0xbe, 0x5a, 0x17, 0xdb, 0xab, 0xe4,
0xec, 0x2c, 0x06, 0x92, 0x33, 0xf0, 0x7f, 0xa0, 0x2e, 0x47, 0x5e, 0x4f, 0x14, 0x9f, 0xcd, 0x27,
0xa0, 0x65, 0xa5, 0x70, 0xc8, 0xf2, 0xd8, 0x10, 0x13, 0x8e, 0x02, 0x27, 0x5c, 0x4c, 0x59, 0x9e,
0x59, 0x59, 0x54, 0x47, 0xe6, 0x36, 0x34, 0x86, 0xd9, 0x82, 0x56, 0xaa, 0x9b, 0xa5, 0xea, 0xc6,
0xfc, 0xac, 0x42, 0xe5, 0x75, 0x24, 0x3e, 0xbc, 0xff, 0x95, 0x45, 0xec, 0xc2, 0x26, 0x0d, 0xe7,
0xc1, 0xf4, 0xce, 0xf1, 0xe5, 0x36, 0x9a, 0xdd, 0xed, 0xa2, 0xdf, 0xbc, 0xa3, 0x7d, 0x14, 0x86,
0x34, 0x19, 0x85, 0xf3, 0x80, 0x54, 0x04, 0xfd, 0xc2, 0xf1, 0xf1, 0x73, 0xa8, 0x04, 0xdc, 0x93,
0xc2, 0x92, 0x4c, 0xc4, 0xf8, 0xae, 0x30, 0x4f, 0x92, 0x68, 0x01, 0xf7, 0x84, 0xf4, 0x15, 0xd4,
0x72, 0xa9, 0x74, 0x54, 0x96, 0x39, 0xfd, 0x5c, 0x5e, 0xcd, 0xe4, 0xc2, 0xf6, 0x63, 0xb1, 0x0d,
0x41, 0x91, 0x57, 0x61, 0x6d, 0xc8, 0x87, 0x1b, 0x24, 0xe7, 0xe0, 0x3d, 0x68, 0xe6, 0x77, 0x7b,
0x9a, 0xab, 0x2a, 0x52, 0xf5, 0x77, 0x51, 0xf5, 0x20, 0xdc, 0xc3, 0x0d, 0xd2, 0x70, 0x8a, 0x07,
0xed, 0x7f, 0xa0, 0x5e, 0x34, 0xf3, 0x70, 0x67, 0xa6, 0x05, 0xd5, 0xfb, 0x88, 0x70, 0x0d, 0x2a,
0xe7, 0x93, 0x37, 0x93, 0x93, 0xcb, 0x49, 0x6b, 0x03, 0x37, 0x01, 0x46, 0x93, 0xf3, 0xf1, 0xf4,
0x62, 0x78, 0x3c, 0x7d, 0xda, 0x42, 0x7b, 0x35, 0xa8, 0x46, 0x21, 0x8d, 0x66, 0x62, 0x7a, 0xf3,
0x03, 0xfc, 0x41, 0xe8, 0x8c, 0x9f, 0x88, 0xff, 0x9c, 0x4a, 0x1b, 0xf8, 0x25, 0xd4, 0x96, 0x5e,
0x03, 0xee, 0xe5, 0x97, 0xad, 0xbd, 0xc6, 0xe8, 0x32, 0x15, 0xc8, 0xe9, 0x63, 0xee, 0xe1, 0x1d,
0x50, 0x52, 0xae, 0x2b, 0xb9, 0x26, 0x7b, 0xc0, 0xf6, 0xf2, 0x01, 0xdb, 0x67, 0xcb, 0x07, 0x4c,
0x94, 0x94, 0x9b, 0x1f, 0x11, 0x6c, 0x8d, 0x9d, 0xf8, 0x92, 0xa5, 0xd7, 0xa7, 0x09, 0x0b, 0x58,
0xca, 0xee, 0xe8, 0xd9, 0x22, 0xa6, 0xf8, 0x19, 0xa0, 0x40, 0x47, 0x72, 0x27, 0x8f, 0x8a, 0xff,
0x5d, 0x47, 0xb6, 0xc7, 0xa3, 0x30, 0x4d, 0x16, 0x04, 0x05, 0xed, 0x3e, 0x68, 0x59, 0x21, 0xae,
0xdf, 0x2d, 0x5d, 0x48, 0xeb, 0x55, 0x22, 0x20, 0xde, 0x82, 0xf2, 0x9d, 0xe3, 0xcf, 0xa9, 0xb4,
0xa6, 0x92, 0xac, 0x78, 0xa1, 0xec, 0x22, 0xf3, 0x13, 0x02, 0x9c, 0x37, 0xce, 0x07, 0x92, 0x1e,
0x7a, 0x2b, 0x0f, 0xff, 0xaf, 0xf1, 0x50, 0xa0, 0x16, 0x1c, 0x1c, 0xfe, 0xc0, 0x81, 0x55, 0x74,
0xb0, 0xfe, 0x51, 0xae, 0x5c, 0xed, 0x58, 0x50, 0x92, 0x9b, 0x6c, 0x41, 0x5d, 0x2e, 0x6f, 0x7f,
0x74, 0x30, 0x3c, 0x3f, 0x3e, 0xfb, 0x76, 0x9d, 0xae, 0x26, 0x7b, 0xf4, 0xbe, 0x06, 0x00, 0x00,
0xff, 0xff, 0x93, 0xa9, 0xca, 0xd8, 0xc3, 0x05, 0x00, 0x00,
}
| init |
babel-server.js | require('./server.js'); | require("babel-register"); |
|
Utils.py | #[The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# |
from io import StringIO
def str_list(val):
with StringIO() as buf:
buf.write('[')
first = True
for item in val:
if not first:
buf.write(', ')
buf.write(str(item))
first = False
buf.write(']')
return buf.getvalue()
def escapeWhitespace(s:str, escapeSpaces:bool):
with StringIO() as buf:
for c in s:
if c==' ' and escapeSpaces:
buf.write('\u00B7')
elif c=='\t':
buf.write("\\t")
elif c=='\n':
buf.write("\\n")
elif c=='\r':
buf.write("\\r")
else:
buf.write(c)
return buf.getvalue() | |
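
# Illustrative self-check (added; not part of the original module): expected
# behaviour of the two helpers above.
if __name__ == '__main__':
    assert str_list([1, 2, 3]) == '[1, 2, 3]'
    assert escapeWhitespace('a\tb c', True) == 'a\\tb\u00B7c'
    assert escapeWhitespace('a b', False) == 'a b'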
diffie_hellman.py | # Copyright (c) Kuba Szczodrzyński 2021-5-6.
import os
from .primes import PRIMES
class DiffieHellman:
_ | prime: int
_private_key: int
_public_key: int
_shared_key: int
@staticmethod
def _to_bytes(a: int) -> bytes:
return a.to_bytes((a.bit_length() + 7) // 8, byteorder="big")
def __init__(self, group: int = 14, key_bits: int = 540) -> None:
prime_bytes = PRIMES[group]
self._prime = int.from_bytes(prime_bytes, byteorder="big")
self.generate_private_key(key_bits)
def generate_private_key(self, key_bits: int = 540) -> bytes:
private_key = os.urandom(key_bits // 8 + 8)
self.set_private_key(private_key)
return self.get_private_key()
def set_private_key(self, key: bytes) -> None:
self._private_key = int.from_bytes(key, byteorder="big")
self._public_key = pow(2, self._private_key, self._prime)
def generate_shared_key(self, other_public_key: bytes) -> bytes:
remote_key = int.from_bytes(other_public_key, "big")
self._shared_key = pow(remote_key, self._private_key, self._prime)
return self.get_shared_key()
def get_private_key(self) -> bytes:
return self._to_bytes(self._private_key)
def get_public_key(self) -> bytes:
return self._to_bytes(self._public_key)
def get_shared_key(self) -> bytes:
return self._to_bytes(self._shared_key)
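
# Illustrative usage sketch (added; not part of the original module). Both sides
# derive the same secret because pow(pow(2, a, p), b, p) == pow(pow(2, b, p), a, p).
def _example_key_exchange() -> bool:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared_a = alice.generate_shared_key(bob.get_public_key())
    shared_b = bob.generate_shared_key(alice.get_public_key())
    return shared_a == shared_b  # expected: True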
|
|
mips.rs | #![allow(unused_imports, unused_parens, unused_mut, unused_unsafe)]
#![allow(non_upper_case_globals, overflowing_literals)]
use ::mips::*;
use std::any::Any;
use std::io::{Result, Write};
use std::mem;
use byteorder::{WriteBytesExt, LE};
/// A Mips register.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Register(pub u8);
impl Into<u8> for Register {
fn into(self) -> u8 { self.0 }
}
impl Register {
pub const ZERO: Self = Register(0);
pub const AT: Self = Register(1);
pub const V0: Self = Register(2);
pub const V1: Self = Register(3);
pub const A0: Self = Register(4);
pub const A1: Self = Register(5);
pub const A2: Self = Register(6);
pub const A3: Self = Register(7);
pub const T0: Self = Register(8);
pub const T1: Self = Register(9);
pub const T2: Self = Register(10);
pub const T3: Self = Register(11);
pub const T4: Self = Register(12);
pub const T5: Self = Register(13);
pub const T6: Self = Register(14);
pub const T7: Self = Register(15);
pub const S0: Self = Register(16);
pub const S1: Self = Register(17);
pub const S2: Self = Register(18);
pub const S3: Self = Register(19);
pub const S4: Self = Register(20);
pub const S5: Self = Register(21);
pub const S6: Self = Register(22);
pub const S7: Self = Register(23);
pub const T8: Self = Register(24);
pub const T9: Self = Register(25);
pub const K0: Self = Register(26);
pub const K1: Self = Register(27);
pub const GP: Self = Register(28);
pub const SP: Self = Register(29);
pub const FP: Self = Register(30);
pub const RA: Self = Register(31);
}
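
// Added note (not part of the original generated assembler): each emitter below
// packs the classic MIPS32 R-type layout -- funct in bits 5..0, shamt in 10..6,
// rd in 15..11, rt in 20..16, rs in 25..21, opcode 0 (SPECIAL) in 31..26 -- and
// writes the resulting word out little-endian via `write_u32::<LE>`.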
/// Allows any struct that implements `Write` to assemble Mips instructions.
pub trait MipsAssembler: Write {
/// Emits a 'sll' instruction.
#[inline]
fn sll(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((0 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'movci' instruction.
#[inline]
fn movci(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((1 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'srl' instruction.
#[inline]
fn srl(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((2 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'sra' instruction.
#[inline]
fn sra(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((3 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'sllv' instruction.
#[inline]
fn sllv_r(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((4 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'srlv' instruction.
#[inline]
fn srlv(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((6 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'srav' instruction.
#[inline]
fn srav(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((7 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'jr' instruction.
#[inline]
fn jr(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((8 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'jalr' instruction.
#[inline]
fn jalr_r(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((9 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'movz' instruction.
#[inline]
fn movz(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((10 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'movn' instruction.
#[inline]
fn movn(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((11 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'syscall' instruction.
#[inline]
fn syscall(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((12 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'breakpoint' instruction.
#[inline]
fn breakpoint(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((13 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'sync' instruction.
#[inline]
fn sync(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((15 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'mfhi' instruction.
#[inline]
fn mfhi(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((16 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'mthi' instruction.
#[inline]
fn mthi(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((17 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'mflo' instruction.
#[inline]
fn mflo(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((18 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsllv' instruction.
#[inline]
fn dsllv_r(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((20 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsrlv' instruction.
#[inline]
fn dsrlv(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((22 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsrav' instruction.
#[inline]
fn dsrav(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((23 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'mult' instruction.
#[inline]
fn mult(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((24 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'multu' instruction.
#[inline]
fn multu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((25 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'div' instruction.
#[inline]
fn div(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((26 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'divu' instruction.
#[inline]
fn divu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((27 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dmult' instruction.
#[inline]
fn dmult(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((28 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dmultu' instruction.
#[inline]
fn dmultu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((29 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'ddiv' instruction.
#[inline]
fn ddiv(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((30 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'ddivu' instruction.
#[inline]
fn ddivu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((31 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits an 'add' instruction.
#[inline]
fn add(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((32 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits an 'addu' instruction.
#[inline]
fn addu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((33 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'sub' instruction.
#[inline]
fn sub(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((34 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'subu' instruction.
#[inline]
fn subu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((35 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits an 'and' instruction.
#[inline]
fn and(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((36 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits an 'or' instruction.
#[inline]
fn or(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((37 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'xor' instruction.
#[inline]
fn xor(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((38 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'nor' instruction.
#[inline]
fn nor(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((39 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'slt' instruction.
#[inline]
fn slt(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((42 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'sltu' instruction.
#[inline]
fn sltu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((43 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dadd' instruction.
#[inline]
fn dadd(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((44 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'daddu' instruction.
#[inline]
fn daddu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((45 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsub' instruction.
#[inline]
fn dsub(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((46 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsubu' instruction.
#[inline]
fn dsubu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((47 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'tge' instruction.
#[inline]
fn | (&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((48 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'tgeu' instruction.
#[inline]
fn tgeu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((49 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'tlt' instruction.
#[inline]
fn tlt(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((50 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'tltu' instruction.
#[inline]
fn tltu(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((51 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'teq' instruction.
#[inline]
fn teq(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((52 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'tne' instruction.
#[inline]
fn tne(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((54 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsll' instruction.
#[inline]
fn dsll(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((56 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dslr' instruction.
#[inline]
fn dslr(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((58 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'dsra' instruction.
#[inline]
fn dsra(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((59 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'mhc0' instruction.
#[inline]
fn mhc0(&mut self, rd: Register, rs: Register, rt: Register, shift: u8) -> Result<()> {
unsafe {
let mut rd = Into::<u8>::into(rd) as u32;
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut shift = shift as u32;
self.write_u32::<LE>(((((1073741824 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((rd & 31) << 11)) | ((shift & 31) << 6)) as _)?;
}
Ok(())
}
/// Emits a 'btlz' instruction.
#[inline]
fn btlz(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bgez' instruction.
#[inline]
fn bgez(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bltzl' instruction.
#[inline]
fn bltzl(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bgezl' instruction.
#[inline]
fn bgezl(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'sllv' instruction.
#[inline]
fn sllv_ri(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'tgei' instruction.
#[inline]
fn tgei(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'jalr' instruction.
#[inline]
fn jalr_ri(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'tlti' instruction.
#[inline]
fn tlti(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'tltiu' instruction.
#[inline]
fn tltiu(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'teqi' instruction.
#[inline]
fn teqi(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'tnei' instruction.
#[inline]
fn tnei(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bltzal' instruction.
#[inline]
fn bltzal(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bgezal' instruction.
#[inline]
fn bgezal(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bltzall' instruction.
#[inline]
fn bltzall(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'bgezall' instruction.
#[inline]
fn bgezall(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'dsllv' instruction.
#[inline]
fn dsllv_ri(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'synci' instruction.
#[inline]
fn synci(&mut self, rs: Register, target: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut target = target as u32;
self.write_u32::<LE>(((67108864 | ((rs & 31) << 16)) | ((target >> 2) & 65535)) as _)?;
}
Ok(())
}
/// Emits an 'addi' instruction.
#[inline]
fn addi(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((536870912 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits an 'addiu' instruction.
#[inline]
fn addiu(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((603979776 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits an 'andi' instruction.
#[inline]
fn andi(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((805306368 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'beq' instruction.
#[inline]
fn beq(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((268435456 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((imm & 65535) >> 2)) as _)?;
}
Ok(())
}
/// Emits a 'blez' instruction.
#[inline]
fn blez(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((402653184 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((imm & 65535) >> 2)) as _)?;
}
Ok(())
}
/// Emits a 'bne' instruction.
#[inline]
fn bne(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((335544320 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | ((imm & 65535) >> 2)) as _)?;
}
Ok(())
}
/// Emits a 'lw' instruction.
#[inline]
fn lw(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((2348810240 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'lbu' instruction.
#[inline]
fn lbu(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((2415919104 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'lhu' instruction.
#[inline]
fn lhu(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((2483027968 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'lui' instruction.
#[inline]
fn lui(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((1006632960 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits an 'ori' instruction.
#[inline]
fn ori(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((872415232 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'sb' instruction.
#[inline]
fn sb(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((2684354560 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'sh' instruction.
#[inline]
fn sh(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((2751463424 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'slti' instruction.
#[inline]
fn slti(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((671088640 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'sltiu' instruction.
#[inline]
fn sltiu(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((738197504 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'sw' instruction.
#[inline]
fn sw(&mut self, rs: Register, rt: Register, imm: u16) -> Result<()> {
unsafe {
let mut rs = Into::<u8>::into(rs) as u32;
let mut rt = Into::<u8>::into(rt) as u32;
let mut imm = imm as u32;
self.write_u32::<LE>((((2885681152 | ((rs & 31) << 21)) | ((rt & 31) << 16)) | (imm & 65535)) as _)?;
}
Ok(())
}
/// Emits a 'j' instruction.
#[inline]
fn j(&mut self, address: u32) -> Result<()> {
unsafe {
let mut address = address as u32;
self.write_u32::<LE>((134217728 | ((address >> 2) & 67108863)) as _)?;
}
Ok(())
}
/// Emits a 'jal' instruction.
#[inline]
fn jal(&mut self, address: u32) -> Result<()> {
unsafe {
let mut address = address as u32;
self.write_u32::<LE>((201326592 | ((address >> 2) & 67108863)) as _)?;
}
Ok(())
}
/// Assembles an instruction, given its opcode and operands.
///
/// # Returns
/// - `Ok(true)` if the corresponding instruction was assembled.
/// - `Ok(false)` if the corresponding instruction could not be bound.
/// - `Err(_)` if the writing operation resulted in an IO error.
fn assemble(&mut self, opcode: &str, operands: &[&Any]) -> Result<bool> {
Ok(match opcode {
"add" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.add(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"addi" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.addi(*rs, *rt, *imm)?; true },
_ => false
},
"addiu" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.addiu(*rs, *rt, *imm)?; true },
_ => false
},
"addu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.addu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"and" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.and(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"andi" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.andi(*rs, *rt, *imm)?; true },
_ => false
},
"beq" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.beq(*rs, *rt, *imm)?; true },
_ => false
},
"bgez" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bgez(*rs, *target)?; true },
_ => false
},
"bgezal" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bgezal(*rs, *target)?; true },
_ => false
},
"bgezall" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bgezall(*rs, *target)?; true },
_ => false
},
"bgezl" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bgezl(*rs, *target)?; true },
_ => false
},
"blez" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.blez(*rs, *rt, *imm)?; true },
_ => false
},
"bltzal" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bltzal(*rs, *target)?; true },
_ => false
},
"bltzall" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bltzall(*rs, *target)?; true },
_ => false
},
"bltzl" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.bltzl(*rs, *target)?; true },
_ => false
},
"bne" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.bne(*rs, *rt, *imm)?; true },
_ => false
},
"breakpoint" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.breakpoint(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"btlz" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.btlz(*rs, *target)?; true },
_ => false
},
"dadd" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dadd(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"daddu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.daddu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"ddiv" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.ddiv(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"ddivu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.ddivu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"div" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.div(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"divu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.divu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dmult" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dmult(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dmultu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dmultu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsll" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsll(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsllv" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsllv_r(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsllv" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.dsllv_ri(*rs, *target)?; true },
_ => false
},
"dslr" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dslr(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsra" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsra(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsrav" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsrav(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsrlv" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsrlv(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsub" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsub(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"dsubu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.dsubu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"j" if operands.len() == 1 => match (operands[0].downcast_ref::<u32>()) {
(Some(address)) => { self.j(*address)?; true },
_ => false
},
"jal" if operands.len() == 1 => match (operands[0].downcast_ref::<u32>()) {
(Some(address)) => { self.jal(*address)?; true },
_ => false
},
"jalr" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.jalr_r(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"jalr" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.jalr_ri(*rs, *target)?; true },
_ => false
},
"jr" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.jr(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"lbu" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.lbu(*rs, *rt, *imm)?; true },
_ => false
},
"lhu" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.lhu(*rs, *rt, *imm)?; true },
_ => false
},
"lui" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.lui(*rs, *rt, *imm)?; true },
_ => false
},
"lw" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.lw(*rs, *rt, *imm)?; true },
_ => false
},
"mfhi" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.mfhi(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"mflo" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.mflo(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"mhc0" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.mhc0(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"movci" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.movci(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"movn" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.movn(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"movz" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.movz(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"mthi" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.mthi(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"mult" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.mult(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"multu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.multu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"nor" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.nor(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"or" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.or(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"ori" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.ori(*rs, *rt, *imm)?; true },
_ => false
},
"sb" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.sb(*rs, *rt, *imm)?; true },
_ => false
},
"sh" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.sh(*rs, *rt, *imm)?; true },
_ => false
},
"sll" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.sll(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"sllv" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.sllv_r(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"sllv" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.sllv_ri(*rs, *target)?; true },
_ => false
},
"slt" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.slt(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"slti" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.slti(*rs, *rt, *imm)?; true },
_ => false
},
"sltiu" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.sltiu(*rs, *rt, *imm)?; true },
_ => false
},
"sltu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.sltu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"sra" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.sra(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"srav" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.srav(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"srl" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.srl(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"srlv" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.srlv(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"sub" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.sub(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"subu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.subu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"sw" if operands.len() == 3 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<u16>()) {
(Some(rs), Some(rt), Some(imm)) => { self.sw(*rs, *rt, *imm)?; true },
_ => false
},
"sync" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.sync(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"synci" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.synci(*rs, *target)?; true },
_ => false
},
"syscall" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.syscall(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"teq" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.teq(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"teqi" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.teqi(*rs, *target)?; true },
_ => false
},
"tge" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.tge(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"tgei" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.tgei(*rs, *target)?; true },
_ => false
},
"tgeu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.tgeu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"tlt" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.tlt(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"tlti" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.tlti(*rs, *target)?; true },
_ => false
},
"tltiu" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.tltiu(*rs, *target)?; true },
_ => false
},
"tltu" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.tltu(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"tne" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.tne(*rd, *rs, *rt, *shift)?; true },
_ => false
},
"tnei" if operands.len() == 2 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<u16>()) {
(Some(rs), Some(target)) => { self.tnei(*rs, *target)?; true },
_ => false
},
"xor" if operands.len() == 4 => match (operands[0].downcast_ref::<Register>(), operands[1].downcast_ref::<Register>(), operands[2].downcast_ref::<Register>(), operands[3].downcast_ref::<u8>()) {
(Some(rd), Some(rs), Some(rt), Some(shift)) => { self.xor(*rd, *rs, *rt, *shift)?; true },
_ => false
},
_ => false
})
}
}
/// Implementation of `MipsAssembler` for all `Write` implementations.
impl<W: Write + ?Sized> MipsAssembler for W {}
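// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor addition, not part of the generated
// assembler). It shows how the blanket `Write` impl above can be driven,
// assuming only the `Register` type, the `Result` alias, `Any`, and the emit
// methods defined earlier in this file; no concrete register constants are
// named here, since their definitions are not reproduced in this excerpt.
#[allow(dead_code)]
fn emit_example<W: Write>(out: &mut W, rd: Register, rs: Register, rt: Register) -> Result<()> {
// Any `Write` sink (for example a `Vec<u8>`) picks up the blanket impl, so
// the typed emit methods can be called on it directly.
out.add(rd, rs, rt, 0)?; // add rd, rs, rt
out.beq(rs, rt, 16)?; // beq rs, rt, <offset>
// String-driven dispatch through `assemble`: it returns Ok(false) when the
// opcode/operand combination cannot be bound to an emit method.
let bound = out.assemble("addiu", &[&rs as &Any, &rt as &Any, &8u16 as &Any])?;
debug_assert!(bound);
Ok(())
}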
| tge |
controllermanager.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
// CAUTION: If you update code in this file, you may need to also update code
// in contrib/mesos/pkg/controllermanager/controllermanager.go
package app
import (
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/pprof"
"os"
"strconv"
"time"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
certcontroller "k8s.io/kubernetes/pkg/controller/certificates"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/controller/deployment"
"k8s.io/kubernetes/pkg/controller/disruption"
endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/job"
namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
nodecontroller "k8s.io/kubernetes/pkg/controller/node"
petset "k8s.io/kubernetes/pkg/controller/petset"
"k8s.io/kubernetes/pkg/controller/podautoscaler"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/controller/podgc"
replicaset "k8s.io/kubernetes/pkg/controller/replicaset"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
routecontroller "k8s.io/kubernetes/pkg/controller/route"
"k8s.io/kubernetes/pkg/controller/scheduledjob"
servicecontroller "k8s.io/kubernetes/pkg/controller/service"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach"
persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
"k8s.io/kubernetes/pkg/healthz"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/serviceaccount"
certutil "k8s.io/kubernetes/pkg/util/cert"
"k8s.io/kubernetes/pkg/util/configz"
"k8s.io/kubernetes/pkg/util/wait"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
// Jitter used when starting controller managers
ControllerStartJitter = 1.0
)
// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand() *cobra.Command {
s := options.NewCMServer()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-controller-manager",
Long: `The Kubernetes controller manager is a daemon that embeds
the core control loops shipped with Kubernetes. In applications of robotics and
automation, a control loop is a non-terminating loop that regulates the state of
the system. In Kubernetes, a controller is a control loop that watches the shared
state of the cluster through the apiserver and makes changes attempting to move the
current state towards the desired state. Examples of controllers that ship with
Kubernetes today are the replication controller, endpoints controller, namespace
controller, and serviceaccounts controller.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
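// ResyncPeriod returns a function that computes a jittered resync period:
// MinResyncPeriod scaled by a random factor in [1, 2). Giving each consumer a
// slightly different period helps keep the controllers from relisting against
// the apiserver in lock-step.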
func ResyncPeriod(s *options.CMServer) func() time.Duration {
return func() time.Duration {
factor := rand.Float64() + 1
return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
}
}
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
if c, err := configz.New("componentconfig"); err == nil {
c.Set(s.KubeControllerManagerConfiguration)
} else {
glog.Errorf("unable to register configz: %s", err)
}
kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
if err != nil {
return err
}
kubeconfig.ContentConfig.ContentType = s.ContentType
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = s.KubeAPIQPS
kubeconfig.Burst = int(s.KubeAPIBurst)
kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
configz.InstallHandler(mux)
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}()
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controller-manager"})
run := func(stop <-chan struct{}) {
err := StartControllers(s, kubeconfig, stop, recorder)
glog.Fatalf("error running controllers: %v", err)
panic("unreachable")
}
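// When leader election is disabled, run the controllers directly; run never
// returns, so neither does this branch.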
if !s.LeaderElection.LeaderElect {
run(nil)
panic("unreachable")
}
id, err := os.Hostname() | }
leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
EndpointsMeta: api.ObjectMeta{
Namespace: "kube-system",
Name: "kube-controller-manager",
},
EndpointsClient: kubeClient,
Identity: id,
EventRecorder: recorder,
LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
RetryPeriod: s.LeaderElection.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: run,
OnStoppedLeading: func() {
glog.Fatalf("leaderelection lost")
},
},
})
panic("unreachable")
}
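// StartControllers builds a per-controller client and launches the individual
// control loops (endpoints, replication manager, pod GC, node, service, route,
// resource quota, ...), staggering their start times with ControllerStartJitter.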
func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, stop <-chan struct{}, recorder record.EventRecorder) error {
client := func(userAgent string) clientset.Interface {
return clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, userAgent))
}
discoveryClient := client("controller-discovery").Discovery()
sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), ResyncPeriod(s)())
go endpointcontroller.NewEndpointController(sharedInformers.Pods().Informer(), client("endpoint-controller")).
Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
go replicationcontroller.NewReplicationManager(
sharedInformers.Pods().Informer(),
client("replication-controller"),
ResyncPeriod(s),
replicationcontroller.BurstReplicas,
int(s.LookupCacheSizeForRC),
s.EnableGarbageCollector,
).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if s.TerminatedPodGCThreshold > 0 {
go podgc.New(client("pod-garbage-collector"), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
_, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
if err != nil {
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
}
_, serviceCIDR, err := net.ParseCIDR(s.ServiceCIDR)
if err != nil {
glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err)
}
nodeController, err := nodecontroller.NewNodeController(sharedInformers.Pods().Informer(), cloud, client("node-controller"),
s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold, s.NodeMonitorGracePeriod.Duration,
s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR,
int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
if err != nil {
glog.Fatalf("Failed to initialize nodecontroller: %v", err)
}
nodeController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
serviceController, err := servicecontroller.New(cloud, client("service-controller"), s.ClusterName)
if err != nil {
glog.Errorf("Failed to start service controller: %v", err)
} else {
serviceController.Run(int(s.ConcurrentServiceSyncs))
}
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
if cloud == nil {
glog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.")
} else if routes, ok := cloud.Routes(); !ok {
glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
} else {
routeController := routecontroller.New(routes, client("route-controller"), s.ClusterName, clusterCIDR)
routeController.Run(s.RouteReconciliationPeriod.Duration)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
} else {
glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
}
resourceQuotaControllerClient := client("resourcequota-controller")
resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
groupKindsToReplenish := []unversioned.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
api.Kind("Secret"),
api.Kind("ConfigMap"),
}
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
KubeClient: resourceQuotaControllerClient,
ResyncPeriod: controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
Registry: resourceQuotaRegistry,
ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactory(sharedInformers.Pods().Informer(), resourceQuotaControllerClient),
ReplenishmentResyncPeriod: ResyncPeriod(s),
GroupKindsToReplenish: groupKindsToReplenish,
}
go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
var versionStrings []string
err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
return true, nil
}
glog.Errorf("Failed to get api versions from server: %v", err)
return false, nil
})
if err != nil {
glog.Fatalf("Failed to get api versions from server: %v", err)
}
versions := &unversioned.APIVersions{Versions: versionStrings}
resourceMap, err := discoveryClient.ServerResources()
if err != nil {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
// TODO: should use a dynamic RESTMapper built from the discovery results.
restMapper := registered.RESTMapper()
// Find the list of namespaced resources via discovery that the namespace controller must manage
namespaceKubeClient := client("namespace-controller")
namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), restMapper, dynamic.LegacyAPIPathResolverFunc)
groupVersionResources, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
if err != nil {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
groupVersion := "extensions/v1beta1"
resources, found := resourceMap[groupVersion]
// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "horizontalpodautoscalers") {
glog.Infof("Starting horizontal pod controller.")
hpaClient := client("horizontal-pod-autoscaler")
metricsClient := metrics.NewHeapsterMetricsClient(
hpaClient,
metrics.DefaultHeapsterNamespace,
metrics.DefaultHeapsterScheme,
metrics.DefaultHeapsterService,
metrics.DefaultHeapsterPort,
)
go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient.Autoscaling(), metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
if containsResource(resources, "daemonsets") {
glog.Infof("Starting daemon set controller")
go daemon.NewDaemonSetsController(sharedInformers.DaemonSets(), sharedInformers.Pods(), sharedInformers.Nodes(), client("daemon-set-controller"), int(s.LookupCacheSizeForDaemonSet)).
Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
if containsResource(resources, "jobs") {
glog.Infof("Starting job controller")
go job.NewJobController(sharedInformers.Pods().Informer(), client("job-controller")).
Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
if containsResource(resources, "deployments") {
glog.Infof("Starting deployment controller")
go deployment.NewDeploymentController(client("deployment-controller"), ResyncPeriod(s)).
Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
if containsResource(resources, "replicasets") {
glog.Infof("Starting ReplicaSet controller")
go replicaset.NewReplicaSetController(sharedInformers.Pods().Informer(), client("replicaset-controller"), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS), s.EnableGarbageCollector).
Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
groupVersion = "policy/v1alpha1"
resources, found = resourceMap[groupVersion]
glog.Infof("Attempting to start disruption controller, full resource map %+v", resourceMap)
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "poddisruptionbudgets") {
glog.Infof("Starting disruption controller")
go disruption.NewDisruptionController(sharedInformers.Pods().Informer(), client("disruption-controller")).Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
groupVersion = "apps/v1alpha1"
resources, found = resourceMap[groupVersion]
glog.Infof("Attempting to start petset, full resource map %+v", resourceMap)
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "petsets") {
glog.Infof("Starting PetSet controller")
resyncPeriod := ResyncPeriod(s)()
go petset.NewPetSetController(
sharedInformers.Pods().Informer(),
client("petset-controller"),
resyncPeriod,
).Run(1, wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
groupVersion = "batch/v2alpha1"
resources, found = resourceMap[groupVersion]
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "scheduledjobs") {
glog.Infof("Starting scheduledjob controller")
			// TODO: this is a temp fix for allowing kubeClient list v2alpha1 sj, should switch to using clientset
kubeconfig.ContentConfig.GroupVersion = &unversioned.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
go scheduledjob.NewScheduledJobController(client("scheduledjob-controller")).
Run(wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
} else {
glog.Infof("Not starting %s apis", groupVersion)
}
alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatalf("An backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
}
volumeController := persistentvolumecontroller.NewPersistentVolumeController(
client("persistent-volume-binder"),
s.PVClaimBinderSyncPeriod.Duration,
alphaProvisioner,
ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
cloud,
s.ClusterName,
nil, // volumeSource
nil, // claimSource
nil, // classSource
nil, // eventRecorder
s.VolumeConfiguration.EnableDynamicProvisioning,
)
volumeController.Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
attachDetachController, attachDetachControllerErr :=
attachdetach.NewAttachDetachController(
client("attachdetach-controller"),
sharedInformers.Pods().Informer(),
sharedInformers.Nodes().Informer(),
sharedInformers.PersistentVolumeClaims().Informer(),
sharedInformers.PersistentVolumes().Informer(),
cloud,
ProbeAttachableVolumePlugins(s.VolumeConfiguration),
recorder)
if attachDetachControllerErr != nil {
glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
}
go attachDetachController.Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
groupVersion = "certificates.k8s.io/v1alpha1"
resources, found = resourceMap[groupVersion]
glog.Infof("Attempting to start certificates, full resource map %+v", resourceMap)
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "certificatesigningrequests") {
glog.Infof("Starting certificate request controller")
resyncPeriod := ResyncPeriod(s)()
certController, err := certcontroller.NewCertificateController(
client("certificate-controller"),
resyncPeriod,
s.ClusterSigningCertFile,
s.ClusterSigningKeyFile,
s.ApproveAllKubeletCSRsForGroup,
)
if err != nil {
glog.Errorf("Failed to start certificate controller: %v", err)
} else {
go certController.Run(1, wait.NeverStop)
}
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
var rootCA []byte
if s.RootCAFile != "" {
rootCA, err = ioutil.ReadFile(s.RootCAFile)
if err != nil {
return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
}
if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
}
} else {
rootCA = kubeconfig.CAData
}
if len(s.ServiceAccountKeyFile) > 0 {
privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
if err != nil {
glog.Errorf("Error reading key for service account token controller: %v", err)
} else {
go serviceaccountcontroller.NewTokensController(
client("tokens-controller"),
serviceaccountcontroller.TokensControllerOptions{
TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
RootCA: rootCA,
},
).Run(int(s.ConcurrentSATokenSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
serviceaccountcontroller.NewServiceAccountsController(
client("service-account-controller"),
serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
).Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if s.EnableGarbageCollector {
gcClientset := client("generic-garbage-collector")
groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
if err != nil {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
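		// Two dynamic client pools are built below: one negotiates a metadata-only codec, the other keeps the default serializer for full objects.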
config := restclient.AddUserAgent(kubeconfig, "generic-garbage-collector")
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, groupVersionResources)
if err != nil {
glog.Errorf("Failed to start the generic garbage collector: %v", err)
} else {
workers := int(s.ConcurrentGCSyncs)
go garbageCollector.Run(workers, wait.NeverStop)
}
}
sharedInformers.Start(stop)
select {}
}
func containsVersion(versions *unversioned.APIVersions, version string) bool {
for ix := range versions.Versions {
if versions.Versions[ix] == version {
return true
}
}
return false
}
func containsResource(resources *unversioned.APIResourceList, resourceName string) bool {
for ix := range resources.APIResources {
resource := resources.APIResources[ix]
if resource.Name == resourceName {
return true
}
}
return false
} | if err != nil {
return err |
main.rs | use {
anyhow::{anyhow, Error, Result},
common::input::{from_path, inputs},
std::{collections::HashMap, iter::FusedIterator, str::FromStr},
};
fn main() -> Result<()> {
let inputs = inputs(to_line, from_path("day5/data/input.txt")?);
part1(&inputs)?;
part2(&inputs)?;
Ok(())
}
fn part1(inputs: &[Line]) -> Result<()> {
let mut grid = Grid::default();
inputs.iter().for_each(|l| grid.plot(l, false));
println!("Day 5 Part 1 => {}", grid.points(2).count());
Ok(())
}
fn part2(inputs: &[Line]) -> Result<()> {
let mut grid = Grid::default();
inputs.iter().for_each(|l| grid.plot(l, true));
println!("Day 5 Part 2 => {}", grid.points(2).count());
Ok(())
}
fn to_line(s: String) -> Option<Line> |
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Ord)]
struct Point {
x: u32,
y: u32,
}
#[allow(dead_code)]
impl Point {
fn new(x: u32, y: u32) -> Self {
Self { x, y }
}
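    // Advances one grid cell toward the line's end; dx and dy are the full remaining deltas on each axis.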
fn step(&self, dx: i32, dy: i32) -> Self {
Self {
x: Self::adjust(self.x, dx, dy),
y: Self::adjust(self.y, dy, dx),
}
}
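    // Reduces the remaining delta to a unit step (-1, 0 or +1) by dividing by the larger axis delta and rounding.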
fn adjust(start: u32, delta: i32, other: i32) -> u32 {
let start = start as i32;
let max = (delta.abs().max(other.abs())) as f32;
let delta = delta as f32;
(start + (delta / max).round() as i32) as u32
}
}
impl FromStr for Point {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let mut parts = s.split(',');
if let Some(x) = parts.next() {
if let Some(y) = parts.next() {
return Ok(Point {
x: x.trim().parse()?,
y: y.trim().parse()?,
});
}
}
Err(anyhow!("Missing coordinates"))
}
}
#[derive(Debug, Clone, PartialEq, PartialOrd)]
struct Line {
start: Point,
end: Point,
}
#[allow(dead_code)]
impl Line {
fn new(start: Point, end: Point) -> Self {
Self { start, end }
}
fn coords(startx: u32, starty: u32, endx: u32, endy: u32) -> Self {
Self::new(Point::new(startx, starty), Point::new(endx, endy))
}
fn is_horizontal(&self) -> bool {
self.start.x == self.end.x
}
fn is_vertical(&self) -> bool {
self.start.y == self.end.y
}
fn is_diagonal(&self) -> bool {
(self.end.y as i32 - self.start.y as i32).abs()
== (self.end.x as i32 - self.start.x as i32).abs()
}
fn points(&self) -> PointIterator {
PointIterator::new(self)
}
}
impl FromStr for Line {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let mut parts = s.split(" -> ");
if let Some(start) = parts.next() {
if let Some(end) = parts.next() {
return Ok(Line {
start: start.trim().parse()?,
end: end.trim().parse()?,
});
}
}
Err(anyhow!("Missing coordinates"))
}
}
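// Iterator over every grid point on a line, inclusive of both endpoints.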
struct PointIterator {
current: Option<Point>,
end: Point,
}
impl PointIterator {
fn new(line: &Line) -> Self {
Self {
current: Some(line.start),
end: line.end,
}
}
fn dx(&self) -> i32 {
if let Some(current) = self.current {
self.end.x as i32 - current.x as i32
} else {
0
}
}
fn dy(&self) -> i32 {
if let Some(current) = self.current {
self.end.y as i32 - current.y as i32
} else {
0
}
}
}
impl Iterator for PointIterator {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
let result = self.current;
if let Some(current) = self.current {
let dx = self.dx();
let dy = self.dy();
self.current = if dx == 0 && dy == 0 {
None
} else {
Some(current.step(dx, dy))
};
}
result
}
}
impl FusedIterator for PointIterator {}
#[derive(Debug, Default)]
struct Grid {
points: HashMap<Point, u32>,
}
impl Grid {
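    // Marks every point of the line on the grid, incrementing its overlap count; diagonals are drawn only when allowed.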
fn plot(&mut self, line: &Line, allow_diagonal: bool) {
if line.is_horizontal() || line.is_vertical() || (allow_diagonal && line.is_diagonal()) {
for p in line.points() {
*self.points.entry(p).or_default() += 1;
}
}
}
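    // Returns the points covered by at least `threshold` lines.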
fn points(&self, threshold: u32) -> impl Iterator<Item = &Point> {
self.points
.iter()
.filter_map(move |(k, v)| if *v >= threshold { Some(k) } else { None })
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn check_point_fromstr() {
assert_eq!(" 10,2 ".parse::<Point>().unwrap(), Point { x: 10, y: 2 })
}
#[test]
fn check_line_from_str() {
assert_eq!(
"0,9 -> 5,9".parse::<Line>().unwrap(),
Line {
start: Point { x: 0, y: 9 },
end: Point { x: 5, y: 9 }
}
)
}
#[test]
fn check_horizontal() {
assert_eq!(Line::coords(0, 0, 0, 5).is_horizontal(), true);
assert_eq!(Line::coords(5, 0, 0, 0).is_horizontal(), false);
assert_eq!(Line::coords(0, 0, 5, 5).is_horizontal(), false);
assert_eq!(Line::coords(0, 0, 2, 5).is_horizontal(), false);
}
#[test]
fn check_vertical() {
assert_eq!(Line::coords(0, 0, 0, 5).is_vertical(), false);
assert_eq!(Line::coords(5, 0, 0, 0).is_vertical(), true);
assert_eq!(Line::coords(0, 0, 5, 5).is_vertical(), false);
assert_eq!(Line::coords(0, 0, 2, 5).is_vertical(), false);
}
#[test]
fn check_diagonal() {
assert_eq!(Line::coords(0, 0, 0, 5).is_diagonal(), false);
assert_eq!(Line::coords(5, 0, 0, 0).is_diagonal(), false);
assert_eq!(Line::coords(0, 0, 5, 5).is_diagonal(), true);
assert_eq!(Line::coords(0, 0, 2, 5).is_diagonal(), false);
}
#[test]
fn check_point_iter_up() {
assert_eq!(
Line::coords(0, 0, 0, 5).points().collect::<Vec<Point>>(),
vec![
Point::new(0, 0),
Point::new(0, 1),
Point::new(0, 2),
Point::new(0, 3),
Point::new(0, 4),
Point::new(0, 5)
]
);
}
#[test]
fn check_point_iter_across() {
assert_eq!(
Line::coords(0, 0, 3, 0).points().collect::<Vec<Point>>(),
vec![
Point::new(0, 0),
Point::new(1, 0),
Point::new(2, 0),
Point::new(3, 0),
]
);
}
#[test]
fn check_point_iter_diagonal() {
assert_eq!(
Line::coords(0, 0, 2, 5).points().collect::<Vec<Point>>(),
vec![
Point::new(0, 0),
Point::new(0, 1),
Point::new(1, 2),
Point::new(1, 3),
Point::new(2, 4),
Point::new(2, 5)
]
);
}
#[test]
fn check_point_iter_backwards() {
assert_eq!(
Line::coords(0, 5, 0, 0).points().collect::<Vec<Point>>(),
vec![
Point::new(0, 5),
Point::new(0, 4),
Point::new(0, 3),
Point::new(0, 2),
Point::new(0, 1),
Point::new(0, 0)
]
);
}
#[test]
fn check_grid_plot() {
let mut grid = Grid::default();
grid.plot(&Line::coords(0, 9, 5, 9), false);
assert_eq!(grid.points.len(), 6);
let mut points = grid.points.iter().collect::<Vec<(&Point, &u32)>>();
points.sort();
assert_eq!(
points,
vec![
(&Point::new(0, 9), &1),
(&Point::new(1, 9), &1),
(&Point::new(2, 9), &1),
(&Point::new(3, 9), &1),
(&Point::new(4, 9), &1),
(&Point::new(5, 9), &1)
]
);
}
#[test]
fn check_grid_plot_diagonal() {
let mut grid = Grid::default();
grid.plot(&Line::coords(0, 4, 5, 9), false);
assert_eq!(grid.points.len(), 0);
}
#[test]
fn check_grid_plot_diagonal_allowed() {
let mut grid = Grid::default();
grid.plot(&Line::coords(0, 4, 5, 9), true);
assert_eq!(grid.points.len(), 6);
let mut points = grid.points.iter().collect::<Vec<(&Point, &u32)>>();
points.sort();
assert_eq!(
points,
vec![
(&Point::new(0, 4), &1),
(&Point::new(1, 5), &1),
(&Point::new(2, 6), &1),
(&Point::new(3, 7), &1),
(&Point::new(4, 8), &1),
(&Point::new(5, 9), &1)
]
);
}
#[test]
fn check_grid_plot_backwards() {
let mut grid = Grid::default();
grid.plot(&Line::coords(5, 9, 0, 9), false);
assert_eq!(grid.points.len(), 6);
let mut points = grid.points.iter().collect::<Vec<(&Point, &u32)>>();
points.sort();
assert_eq!(
points,
vec![
(&Point::new(0, 9), &1),
(&Point::new(1, 9), &1),
(&Point::new(2, 9), &1),
(&Point::new(3, 9), &1),
(&Point::new(4, 9), &1),
(&Point::new(5, 9), &1)
]
);
}
#[test]
fn check_grid_points() {
let mut grid = Grid::default();
let lines = vec![
Line::coords(0, 9, 5, 9),
Line::coords(8, 0, 0, 8),
Line::coords(9, 4, 3, 4),
Line::coords(2, 2, 2, 1),
Line::coords(7, 0, 7, 4),
Line::coords(6, 4, 2, 0),
Line::coords(0, 9, 2, 9),
Line::coords(3, 4, 1, 4),
Line::coords(0, 0, 8, 8),
Line::coords(5, 5, 8, 2),
];
lines.iter().for_each(|l| grid.plot(&l, false));
let mut points = grid.points(2).collect::<Vec<&Point>>();
points.sort();
assert_eq!(points.len(), 5);
assert_eq!(
points,
vec![
&Point::new(0, 9),
&Point::new(1, 9),
&Point::new(2, 9),
&Point::new(3, 4),
&Point::new(7, 4)
]
);
}
}
| {
s.parse().ok()
} |
dialog.py | import tkinter as tk
from tkinter import Frame, Button, Tk, TclError
from typing import Dict, Optional
from tanager_feeder import utils
class Dialog:
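    # Generic pop-up window: a text label plus a configurable row of buttons, each wired to controller callbacks.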
def __init__(
self,
controller,
title: str,
label: str,
buttons: Dict,
width: Optional[int] = None,
height: Optional[int] = None,
allow_exit: bool = True,
button_width: int = 20,
info_string: Optional[str] = None,
start_mainloop: bool = True,
):
self.controller = controller
if self.controller is not None:
self.tk_format = utils.TkFormat(self.controller.config_info)
if width is None or height is None:
self.top = tk.Toplevel(controller.master, bg=self.tk_format.bg)
else:
self.top = tk.Toplevel(controller.master, width=width, height=height, bg=self.tk_format.bg)
if info_string is not None:
self.controller.log(info_string)
else:
self.tk_format = utils.TkFormat()
self.top = Tk()
self.top.configure(background=self.tk_format.bg)
self.top.attributes("-topmost", 1)
self.top.attributes("-topmost", 0)
self.label_frame = Frame(self.top, bg=self.tk_format.bg)
self.label_frame.pack(side=tk.TOP)
self.__label = tk.Label(self.label_frame, fg=self.tk_format.textcolor, text=label, bg=self.tk_format.bg)
self.set_label_text(label, log_string=info_string)
if label != "":
self.__label.pack(pady=(10, 10), padx=(10, 10))
self.button_width = button_width
self.buttons = buttons
self.set_buttons(buttons)
self.top.wm_title(title)
self.allow_exit = allow_exit
self.top.protocol("WM_DELETE_WINDOW", self.on_closing)
if (
self.controller is None and start_mainloop
): # If there's no controller and this is the Tk object, might want to start the mainloop here, or might want
# to make additional modifications first in a subclass.
self.top.mainloop()
@property
def label(self):
return self.__label.cget("text")
@label.setter
def label(self, val: str):
self.__label.configure(text=val)
def set_title(self, newtitle: str):
self.top.wm_title(newtitle)
def set_label_text(self, newlabel: str, log_string: Optional[str] = None):
try:
self.__label.config(fg=self.tk_format.textcolor, text=newlabel)
except TclError:
print("Could not set label.")
if log_string is not None and self.controller is not None:
self.controller.log(log_string)
def set_buttons(self, buttons: Dict, button_width: Optional[int] = None):
|
def on_closing(self):
if self.allow_exit:
if self.controller is not None:
self.controller.unfreeze()
self.top.destroy()
def reset(self):
functions = self.buttons["reset"]
self.execute(functions, close=False)
def change_ip(self):
functions = self.buttons["Change IP"]
self.execute(functions)
def close(self):
if self.controller is not None:
self.controller.unfreeze()
self.top.destroy()
def retry(self):
self.close()
functions = self.buttons["retry"]
self.execute(functions, False)
def exit(self):
self.top.destroy()
utils.exit_func()
def cont(self):
functions = self.buttons["continue"]
self.execute(functions, close=False)
def pause(self):
functions = self.buttons["pause"]
self.execute(functions, close=False)
def ok(self, event=None):
# pylint: disable = unused-argument
functions = self.buttons["ok"]
self.execute(functions)
def yes(self):
functions = self.buttons["yes"]
self.execute(functions)
def yes_to_all(self):
functions = self.buttons["yes to all"]
self.execute(functions)
def no(self):
functions = self.buttons["no"]
self.execute(functions)
def cancel(self):
functions = self.buttons["cancel"]
self.execute(functions)
def cancel_queue(self):
functions = self.buttons["cancel_queue"]
self.execute(functions, close=False)
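    # Each value in function_info is the tuple of positional arguments its callback is invoked with.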
def execute(self, function_info, close=True):
for function in function_info:
args = function_info[function]
function(*args)
if close:
self.close()
def work_offline(self):
self.close()
functions = self.buttons["work offline"]
self.execute(functions, close=False)
| self.buttons = buttons
if button_width is None:
button_width = self.button_width
else:
self.button_width = button_width
# Sloppy way to check if button_frame already exists and reset it if it does.
try:
# pylint: disable = access-member-before-definition
self.button_frame.destroy()
except AttributeError:
pass
self.button_frame = Frame(self.top, bg=self.tk_format.bg)
self.button_frame.pack(side=tk.BOTTOM)
self.tk_buttons = []
for button in buttons:
if "ok" in button.lower():
self.ok_button = Button(
self.button_frame, fg=self.tk_format.textcolor, text="OK", command=self.ok, width=self.button_width
)
self.ok_button.bind("<Return>", self.ok)
self.tk_buttons.append(self.ok_button)
self.ok_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
elif "yes to all" in button.lower():
self.yes_to_all_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Yes to all",
command=self.yes_to_all,
width=self.button_width,
)
self.yes_to_all_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.yes_to_all_button)
elif "yes" in button.lower():
self.yes_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Yes",
bg="light gray",
command=self.yes,
width=self.button_width,
)
self.tk_buttons.append(self.yes_button)
self.yes_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
elif "no" in button.lower():
self.no_button = Button(
self.button_frame, fg=self.tk_format.textcolor, text="No", command=self.no, width=self.button_width
)
self.no_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.no_button)
elif "cancel_queue" in button.lower():
self.cancel_queue_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Cancel",
command=self.cancel_queue,
width=self.button_width,
)
self.cancel_queue_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.cancel_queue_button)
elif "cancel" in button.lower():
self.cancel_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Cancel",
command=self.cancel,
width=self.button_width,
)
self.cancel_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.cancel_button)
elif "retry" in button.lower():
self.retry_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Retry",
command=self.retry,
width=self.button_width,
)
self.retry_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.retry_button)
elif "exit" in button.lower():
self.exit_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Exit",
command=self.exit,
width=self.button_width,
)
self.exit_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.exit_button)
elif "work offline" in button.lower():
self.offline_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Work offline",
command=self.work_offline,
width=self.button_width,
)
self.offline_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.offline_button)
elif "pause" in button.lower():
self.pause_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Pause",
command=self.pause,
width=self.button_width,
)
self.pause_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.pause_button)
elif "continue" in button.lower():
self.continue_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Continue",
command=self.cont,
width=self.button_width,
)
self.continue_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.continue_button)
elif "close" in button.lower():
self.close_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Close",
command=self.close,
width=self.button_width,
)
self.close_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.close_button)
elif "reset" in button.lower():
self.reset_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Reset",
command=self.reset,
width=self.button_width,
)
self.reset_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.reset_button)
elif "change ip" in button.lower():
self.ip_button = Button(
self.button_frame,
fg=self.tk_format.textcolor,
text="Change IP",
command=self.change_ip,
width=self.button_width,
)
self.ip_button.pack(side=tk.LEFT, padx=(10, 10), pady=(10, 10))
self.tk_buttons.append(self.ip_button)
for tk_button in self.tk_buttons:
tk_button.config(
fg=self.tk_format.buttontextcolor,
highlightbackground=self.tk_format.highlightbackgroundcolor,
bg=self.tk_format.buttonbackgroundcolor,
) |
bulk_operation.rs | extern crate bson;
extern crate mongo_driver;
mod helpers;
use std::env;
use bson::doc;
use mongo_driver::client::{ClientPool,Uri};
#[test]
fn test_execute_error() {
let uri = Uri::new(helpers::mongodb_test_connection_string()).unwrap();
let pool = ClientPool::new(uri, None);
let client = pool.pop();
let mut collection = client.get_collection("rust_driver_test", "bulk_operation_error");
collection.drop().unwrap_or(());
let bulk_operation = collection.create_bulk_operation(None);
let result = bulk_operation.execute();
assert!(result.is_err());
let error_message = format!("{:?}", result.err().unwrap());
assert_eq!(error_message, "BulkOperationError { error: MongoError (BsoncError: Command/CommandInvalidArg - Cannot do an empty bulk write), reply: Document({}) }");
}
#[test]
fn test_basics() {
let uri = Uri::new(helpers::mongodb_test_connection_string()).unwrap();
let pool = ClientPool::new(uri, None);
let client = pool.pop();
let mut collection = client.get_collection("rust_driver_test", "bulk_operation_basics");
collection.drop().unwrap_or(());
let bulk_operation = collection.create_bulk_operation(None);
let document = doc! {"key_1": "Value 1"};
bulk_operation.insert(&document).expect("Could not insert");
bulk_operation.execute().expect("Could not execute bulk operation");
let first_document = collection.find(&doc!{}, None).unwrap().next().unwrap().unwrap();
assert_eq!(
first_document.get("key_1").unwrap(),
&bson::Bson::String("Value 1".to_string())
);
}
#[test]
fn test_utf8() {
let uri = Uri::new(helpers::mongodb_test_connection_string()).unwrap();
let pool = ClientPool::new(uri, None);
let client = pool.pop();
let mut collection = client.get_collection("rust_driver_test", "bulk_operation_utf8");
collection.drop().unwrap_or(());
let bulk_operation = collection.create_bulk_operation(None);
let document = doc! {"key_1": "kācaṃ śaknomyattum; nopahinasti mām."};
bulk_operation.insert(&document).expect("Could not insert");
bulk_operation.execute().expect("Could not execute bulk operation");
let first_document = collection.find(&doc!{}, None).unwrap().next().unwrap().unwrap();
assert_eq!(
first_document.get("key_1").unwrap(),
&bson::Bson::String("kācaṃ śaknomyattum; nopahinasti mām.".to_string())
);
}
#[test]
fn test_insert_remove_replace_update_extended() {
if e | nv::var("SKIP_EXTENDED_BULK_OPERATION_TESTS") == Ok("true".to_string()) {
return
}
let uri = Uri::new(helpers::mongodb_test_connection_string()).unwrap();
let pool = ClientPool::new(uri, None);
let client = pool.pop();
let mut collection = client.get_collection("rust_driver_test", "bulk_operation_extended");
collection.drop().unwrap_or(());
// Insert 5 documents
{
let bulk_operation = collection.create_bulk_operation(None);
let document = doc! {
"key_1": "Value 1",
"key_2": "Value 2"
};
for _ in 0..5 {
bulk_operation.insert(&document).unwrap();
}
let result = bulk_operation.execute().expect("Could not execute bulk operation");
assert_eq!(
result.get("nInserted").unwrap(),
&bson::Bson::Int32(5)
);
assert_eq!(5, collection.count(&doc!{}, None).unwrap());
}
let query = doc!{};
let update_document = doc! {
"$set": {"key_1": "Value update"}
};
// Update one
{
let bulk_operation = collection.create_bulk_operation(None);
bulk_operation.update_one(
&query,
&update_document,
false
).unwrap();
let result = bulk_operation.execute().expect("Could not execute bulk operation");
assert_eq!(
result.get("nModified").unwrap(),
&bson::Bson::Int32(1)
);
let first_document = collection.find(&doc!{}, None).unwrap().next().unwrap().unwrap();
assert_eq!(
first_document.get("key_1").unwrap(),
&bson::Bson::String("Value update".to_string())
);
// Make sure it was updated, it should have other keys
assert!(first_document.get("key_2").is_some());
}
// Update all
{
let bulk_operation = collection.create_bulk_operation(None);
bulk_operation.update(
&query,
&update_document,
false
).unwrap();
let result = bulk_operation.execute().expect("Could not execute bulk operation");
assert_eq!(
result.get("nModified").unwrap(),
&bson::Bson::Int32(4)
);
collection.find(&doc!{}, None).unwrap().next().unwrap().unwrap();
let second_document = collection.find(&doc!{}, None).unwrap().next().unwrap().unwrap();
assert_eq!(
second_document.get("key_1").unwrap(),
&bson::Bson::String("Value update".to_string())
);
// Make sure it was updated, it should have other keys
assert!(second_document.get("key_2").is_some());
}
// Replace one
{
let replace_document = doc! { "key_1": "Value replace" };
let bulk_operation = collection.create_bulk_operation(None);
bulk_operation.replace_one(
&query,
&replace_document,
false
).unwrap();
let result = bulk_operation.execute().expect("Could not execute bulk operation");
assert_eq!(
result.get("nModified").unwrap(),
&bson::Bson::Int32(1)
);
let first_document = collection.find(&doc!{}, None).unwrap().next().unwrap().unwrap();
assert_eq!(
first_document.get("key_1").unwrap(),
&bson::Bson::String("Value replace".to_string())
);
// Make sure it was replaced, it shouldn't have other keys
assert!(first_document.get("key_2").is_none());
}
// Remove one
{
let bulk_operation = collection.create_bulk_operation(None);
bulk_operation.remove_one(&query).unwrap();
let result = bulk_operation.execute().expect("Could not execute bulk operation");
assert_eq!(
result.get("nRemoved").unwrap(),
&bson::Bson::Int32(1)
);
assert_eq!(4, collection.count(&query, None).unwrap());
}
// Remove all remaining documents
{
let bulk_operation = collection.create_bulk_operation(None);
bulk_operation.remove(&query).unwrap();
let result = bulk_operation.execute().expect("Could not execute bulk operation");
assert_eq!(
result.get("nRemoved").unwrap(),
&bson::Bson::Int32(4)
);
assert_eq!(0, collection.count(&query, None).unwrap());
}
}
|
|
wire_gen.go | // Code generated by Wire. DO NOT EDIT.
//go:generate wire
//+build !wireinject
package main
import (
"context"
"github.com/google/knative-gcp/pkg/broker/config/volume"
"github.com/google/knative-gcp/pkg/broker/ingress"
"github.com/google/knative-gcp/pkg/metrics"
"github.com/google/knative-gcp/pkg/utils/clients"
)
// Injectors from wire.go:
func InitializeHandler(ctx context.Context, port clients.Port, projectID clients.ProjectID, podName metrics.PodName, containerName metrics.ContainerName) (*ingress.Handler, error) |
var (
_wireValue = []volume.Option(nil)
)
| {
httpMessageReceiver := clients.NewHTTPMessageReceiver(port)
v := _wireValue
readonlyTargets, err := volume.NewTargetsFromFile(v...)
if err != nil {
return nil, err
}
client, err := clients.NewPubsubClient(ctx, projectID)
if err != nil {
return nil, err
}
multiTopicDecoupleSink := ingress.NewMultiTopicDecoupleSink(ctx, readonlyTargets, client)
ingressReporter, err := metrics.NewIngressReporter(podName, containerName)
if err != nil {
return nil, err
}
handler := ingress.NewHandler(ctx, httpMessageReceiver, multiTopicDecoupleSink, ingressReporter)
return handler, nil
} |
declaration_parser.rs | // Copyright (c) 2019, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use crate::expression_parser::ExpressionParser;
use crate::lexer::Lexer;
use crate::parser_env::ParserEnv;
use crate::parser_trait::{Context, ExpectedTokens, ParserTrait, SeparatedListKind};
use crate::smart_constructors::{NodeType, SmartConstructors};
use crate::statement_parser::StatementParser;
use crate::type_parser::TypeParser;
use parser_core_types::lexable_token::LexableToken;
use parser_core_types::lexable_trivia::LexableTrivia;
use parser_core_types::syntax_error::{self as Errors, SyntaxError};
use parser_core_types::token_kind::TokenKind;
use parser_core_types::trivia_kind::TriviaKind;
#[derive(Debug)]
pub struct DeclarationParser<'a, S, T>
where
S: SmartConstructors<'a, T>,
S::R: NodeType,
{
lexer: Lexer<'a, S::Token>,
env: ParserEnv,
context: Context<'a, S::Token>,
errors: Vec<SyntaxError>,
sc: S,
}
impl<'a, S, T: Clone> Clone for DeclarationParser<'a, S, T>
where
S: SmartConstructors<'a, T>,
S::R: NodeType,
{
fn clone(&self) -> Self {
Self {
lexer: self.lexer.clone(),
env: self.env.clone(),
context: self.context.clone(),
errors: self.errors.clone(),
sc: self.sc.clone(),
}
}
}
impl<'a, S, T: Clone> ParserTrait<'a, S, T> for DeclarationParser<'a, S, T>
where
S: SmartConstructors<'a, T>,
S::R: NodeType,
{
fn make(
lexer: Lexer<'a, S::Token>,
env: ParserEnv,
context: Context<'a, S::Token>,
errors: Vec<SyntaxError>,
sc: S,
) -> Self {
Self {
lexer,
env,
context,
errors,
sc,
}
}
fn into_parts(
self,
) -> (
Lexer<'a, S::Token>,
Context<'a, S::Token>,
Vec<SyntaxError>,
S,
) {
(self.lexer, self.context, self.errors, self.sc)
}
fn lexer(&self) -> &Lexer<'a, S::Token> {
&self.lexer
}
fn lexer_mut(&mut self) -> &mut Lexer<'a, S::Token> {
&mut self.lexer
}
fn continue_from<P: ParserTrait<'a, S, T>>(&mut self, other: P)
where
T: Clone,
{
let (lexer, context, errors, sc) = other.into_parts();
self.lexer = lexer;
self.context = context;
self.errors = errors;
self.sc = sc;
}
fn add_error(&mut self, error: SyntaxError) {
self.errors.push(error)
}
fn env(&self) -> &ParserEnv {
&self.env
}
fn sc_mut(&mut self) -> &mut S {
&mut self.sc
}
fn skipped_tokens_mut(&mut self) -> &mut Vec<S::Token> {
&mut self.context.skipped_tokens
}
fn skipped_tokens(&self) -> &[S::Token] {
&self.context.skipped_tokens
}
fn context_mut(&mut self) -> &mut Context<'a, S::Token> {
&mut self.context
}
fn context(&self) -> &Context<'a, S::Token> {
&self.context
}
}
impl<'a, S, T: Clone> DeclarationParser<'a, S, T>
where
S: SmartConstructors<'a, T>,
S::R: NodeType,
{
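    // Runs `f` against a temporary TypeParser seeded with this parser's state, then folds the sub-parser's state back in.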
fn with_type_parser<F, U>(&mut self, f: F) -> U
where
T: Clone,
F: Fn(&mut TypeParser<'a, S, T>) -> U,
{
let mut type_parser: TypeParser<S, T> = TypeParser::make(
self.lexer.clone(),
self.env.clone(),
self.context.clone(),
self.errors.clone(),
self.sc.clone(),
);
let res = f(&mut type_parser);
self.continue_from(type_parser);
res
}
fn parse_type_specifier(&mut self, allow_var: bool, allow_attr: bool) -> S::R {
self.with_type_parser(|p: &mut TypeParser<'a, S, T>| {
p.parse_type_specifier(allow_var, allow_attr)
})
}
fn with_statement_parser<F, U>(&mut self, f: F) -> U
where
T: Clone,
F: Fn(&mut StatementParser<'a, S, T>) -> U,
{
let mut statement_parser: StatementParser<S, T> = StatementParser::make(
self.lexer.clone(),
self.env.clone(),
self.context.clone(),
self.errors.clone(),
self.sc.clone(),
);
let res = f(&mut statement_parser);
self.continue_from(statement_parser);
res
}
fn parse_simple_type_or_type_constant(&mut self) -> S::R {
self.with_type_parser(|x: &mut TypeParser<'a, S, T>| x.parse_simple_type_or_type_constant())
}
fn parse_simple_type_or_generic(&mut self) -> S::R {
self.with_type_parser(|p: &mut TypeParser<'a, S, T>| p.parse_simple_type_or_generic())
}
fn with_expression_parser<F, U>(&mut self, f: F) -> U
where
T: Clone,
F: Fn(&mut ExpressionParser<'a, S, T>) -> U,
{
let mut expression_parser: ExpressionParser<S, T> = ExpressionParser::make(
self.lexer.clone(),
self.env.clone(),
self.context.clone(),
self.errors.clone(),
self.sc.clone(),
);
let res = f(&mut expression_parser);
self.continue_from(expression_parser);
res
}
fn parse_expression(&mut self) -> S::R {
self.with_expression_parser(|p: &mut ExpressionParser<'a, S, T>| p.parse_expression())
}
fn parse_compound_statement(&mut self) -> S::R {
self.with_statement_parser(|p: &mut StatementParser<'a, S, T>| p.parse_compound_statement())
}
fn parse_enumerator_list_opt(&mut self) -> S::R {
// SPEC
// enumerator-list:
// enumerator
// enumerator-list enumerator
//
self.parse_terminated_list(|x: &mut Self| x.parse_enumerator(), TokenKind::RightBrace)
}
fn parse_enum_declaration(&mut self, attrs: S::R) -> S::R {
//
// enum-declaration:
// attribute-specification-opt enum name enum-base type-constraint-opt /
// { enumerator-list-opt }
// enum-base:
// : int
// : string
//
// TODO: SPEC ERROR: The spec states that the only legal enum types
// are "int" and "string", but Hack allows any type, and apparently
// some of those are meaningful and desired. Figure out what types
// are actually legal and illegal as enum base types; put them in the
// spec, and add an error pass that says when they are wrong.
let enum_ = self.assert_token(TokenKind::Enum);
let name = self.require_name();
let colon = self.require_colon();
let base =
self.parse_type_specifier(false /* allow_var */, true /* allow_attr */);
let enum_type = self.parse_type_constraint_opt();
let (left_brace, enumerators, right_brace) =
self.parse_braced_list(|x: &mut Self| x.parse_enumerator_list_opt());
S!(
make_enum_declaration,
self,
attrs,
enum_,
name,
colon,
base,
enum_type,
left_brace,
enumerators,
right_brace,
)
}
fn parse_record_field(&mut self) -> S::R {
// SPEC
// record_field:
// record-constant : type field-initializer-opt,
// record-constant:
// name
// field-initializer:
// = expression
let name = self.require_name_allow_non_reserved();
let colon = self.require_colon();
let field_type = self.parse_type_specifier(false, true);
let init = self.parse_simple_initializer_opt();
let comma = self.require_comma();
S!(
make_record_field,
self,
name,
colon,
field_type,
init,
comma
)
}
fn parse_record_fields(&mut self) -> S::R {
// SPEC
// record-list:
// record-field
// record-list record-field
self.parse_terminated_list(|x| x.parse_record_field(), TokenKind::RightBrace)
}
fn parse_record_declaration(&mut self, attrs: S::R) -> S::R {
// record-declaration:
// (abstract|final) record name { record-list }
let modifier =
self.require_token_one_of(&[TokenKind::Abstract, TokenKind::Final], Errors::error1037);
let record = self.assert_token(TokenKind::RecordDec);
let name = self.require_name();
let (record_extends, record_extends_list) = self.parse_extends_opt();
let (left_brace, record_fields, right_brace) =
self.parse_braced_list(|x| x.parse_record_fields());
S!(
make_record_declaration,
self,
attrs,
modifier,
record,
name,
record_extends,
record_extends_list,
left_brace,
record_fields,
right_brace
)
}
pub fn parse_leading_markup_section(&mut self) -> Option<S::R> {
let mut parser1 = self.clone();
let (markup_section, has_suffix) =
parser1.with_statement_parser(|p: &mut StatementParser<'a, S, T>| p.parse_header());
        // proceed successfully if we've consumed <?..., or don't need it
// We purposefully ignore leading trivia before the <?hh, and handle
// the error on a later pass
if has_suffix {
self.continue_from(parser1);
Some(markup_section)
} else {
if self.lexer().source().length() > 0
&& self.lexer().source().file_path().ends_with(".php")
{
self.with_error(Errors::error1001);
}
None
}
}
fn parse_namespace_body(&mut self) -> S::R {
match self.peek_token_kind() {
TokenKind::Semicolon => {
let token = self.fetch_token();
S!(make_namespace_empty_body, self, token)
}
TokenKind::LeftBrace => {
let left = self.fetch_token();
let body = self.parse_terminated_list(
|x: &mut Self| x.parse_declaration(),
TokenKind::RightBrace,
);
let right = self.require_right_brace();
S!(make_namespace_body, self, left, body, right)
}
_ => {
// ERROR RECOVERY: return an inert namespace (one with all of its
// components 'missing'), and recover--without advancing the parser--
// back to the level that the namespace was declared in.
self.with_error(Errors::error1038);
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
let missing3 = S!(make_missing, self, self.pos());
S!(make_namespace_body, self, missing1, missing2, missing3)
}
}
}
fn is_group_use(&self) -> bool {
let mut parser = self.clone();
        // We want a heuristic to determine whether to parse the use clause as
        // a group use or a normal use clause. We distinguish the two by (1) whether
        // there is a namespace prefix -- in this case it is definitely a group use
        // clause -- or (2) whether there is a name followed by a curly brace. The
        // latter is illegal, but we should give an informative error message about it.
parser.assert_token(TokenKind::Use);
parser.parse_namespace_use_kind_opt();
let token = parser.next_token();
match token.kind() {
TokenKind::Backslash => {
let missing = S!(make_missing, parser, parser.pos());
let backslash = S!(make_token, parser, token);
let (_name, is_backslash) = parser.scan_qualified_name_extended(missing, backslash);
is_backslash || parser.peek_token_kind() == TokenKind::LeftBrace
}
TokenKind::Name => {
let token = S!(make_token, parser, token);
                let token_ref = &token as *const _;
let (name, is_backslash) = parser.scan_remaining_qualified_name_extended(token);
// Here we rely on the implementation details of
// scan_remaining_qualified_name_extended. It's returning
// *exactly* token if there is nothing except it in the name.
                is_backslash && (&name as *const _ == token_ref)
|| parser.peek_token_kind() == TokenKind::LeftBrace
}
_ => false,
}
}
fn parse_namespace_use_kind_opt(&mut self) -> S::R {
// SPEC
// namespace-use-kind:
// namespace
// function
// const
let mut parser1 = self.clone();
let token = parser1.next_token();
match token.kind() {
TokenKind::Type | TokenKind::Namespace | TokenKind::Function | TokenKind::Const => {
self.continue_from(parser1);
S!(make_token, self, token)
}
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_group_use(&mut self) -> S::R {
// See below for grammar.
let use_token = self.assert_token(TokenKind::Use);
let use_kind = self.parse_namespace_use_kind_opt();
// We already know that this is a name, qualified name, or prefix.
// If this is not a prefix, it will be detected as an error in a later pass
let prefix = self.scan_name_or_qualified_name();
let (left, clauses, right) =
self.parse_braced_comma_list_opt_allow_trailing(|x: &mut Self| {
x.parse_namespace_use_clause()
});
let semi = self.require_semicolon();
S!(
make_namespace_group_use_declaration,
self,
use_token,
use_kind,
prefix,
left,
clauses,
right,
semi,
)
}
fn parse_namespace_use_clause(&mut self) -> S::R {
// SPEC
// namespace-use-clause:
// qualified-name namespace-aliasing-clauseopt
// namespace-use-kind-clause:
// namespace-use-kind-opt qualified-name namespace-aliasing-clauseopt
// namespace-aliasing-clause:
// as name
//
let use_kind = self.parse_namespace_use_kind_opt();
let name = self.require_qualified_name();
let (as_token, alias) = if self.peek_token_kind() == TokenKind::As {
let as_token = self.next_token();
let as_token = S!(make_token, self, as_token);
let alias = self.require_name();
(as_token, alias)
} else {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
};
S!(
make_namespace_use_clause,
self,
use_kind,
name,
as_token,
alias
)
}
fn parse_namespace_use_declaration(&mut self) -> S::R {
// SPEC
// namespace-use-declaration:
// use namespace-use-kind-opt namespace-use-clauses ;
// use namespace-use-kind namespace-name-as-a-prefix
// { namespace-use-clauses } ;
// use namespace-name-as-a-prefix { namespace-use-kind-clauses } ;
//
// TODO: Add the grammar for the namespace-use-clauses; ensure that it
// indicates that trailing commas are allowed in the list.
//
// ERROR RECOVERY
// In the "simple" format, the kind may only be specified up front.
//
// The grammar in the specification says that in the "group"
// format, if the kind is specified up front then it may not
// be specified in each clause. However, HHVM's parser disallows
// the kind in each clause regardless of whether it is specified up front.
// We will fix the specification to match HHVM.
//
// The grammar in the specification also says that in the "simple" format,
// the kind may only be specified up front. But HHVM allows the kind to
// be specified in each clause. Again, we will fix the specification to match
// HHVM.
//
// TODO: Update the grammar comment above when the specification is fixed.
// (This work is being tracked by spec work items 102, 103 and 104.)
//
// We do not enforce these rules here. Rather, we allow the kind to be anywhere,
// and detect the errors in a later pass.
if self.is_group_use() {
self.parse_group_use()
} else {
let use_token = self.assert_token(TokenKind::Use);
let use_kind = self.parse_namespace_use_kind_opt();
let (clauses, _) = self.parse_comma_list_allow_trailing(
TokenKind::Semicolon,
Errors::error1004,
|x: &mut Self| x.parse_namespace_use_clause(),
);
let semi = self.require_semicolon();
S!(
make_namespace_use_declaration,
self,
use_token,
use_kind,
clauses,
semi
)
}
}
fn parse_namespace_declaration(&mut self) -> S::R {
// SPEC
// namespace-definition:
// namespace namespace-name ;
// namespace namespace-name-opt { declaration-list }
//
// TODO: An error case not caught by the parser that should be caught
// in a later pass:
// Qualified names are a superset of legal namespace names.
let namespace_token = self.assert_token(TokenKind::Namespace);
let name = match self.peek_token_kind() {
TokenKind::Name => {
let token = self.next_token();
let token = S!(make_token, self, token);
self.scan_remaining_qualified_name(token)
}
TokenKind::LeftBrace => S!(make_missing, self, self.pos()),
TokenKind::Semicolon => {
// ERROR RECOVERY Plainly the name is missing.
self.with_error(Errors::error1004);
S!(make_missing, self, self.pos())
}
_ =>
// TODO: Death to PHPisms; keywords as namespace names
{
self.require_name_allow_non_reserved()
}
};
let body = self.parse_namespace_body();
S!(
make_namespace_declaration,
self,
namespace_token,
name,
body
)
}
pub fn parse_classish_declaration(&mut self, attribute_spec: S::R) -> S::R {
let modifiers = self.parse_classish_modifiers();
let token = self.parse_classish_token();
let name = self.require_class_name();
let generic_type_parameter_list = self.parse_generic_type_parameter_list_opt();
let (classish_extends, classish_extends_list) = self.parse_extends_opt();
let (classish_implements, classish_implements_list) = self.parse_classish_implements_opt();
let classish_where_clause = self.parse_classish_where_clause_opt();
let body = self.parse_classish_body();
S!(
make_classish_declaration,
self,
attribute_spec,
modifiers,
token,
name,
generic_type_parameter_list,
classish_extends,
classish_extends_list,
classish_implements,
classish_implements_list,
classish_where_clause,
body,
)
}
fn parse_classish_where_clause_opt(&mut self) -> S::R {
if self.peek_token_kind() == TokenKind::Where {
self.parse_where_clause()
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_classish_implements_opt(&mut self) -> (S::R, S::R) {
if self.peek_token_kind() != TokenKind::Implements {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
} else {
let implements_token = self.next_token();
let implements_token = S!(make_token, self, implements_token);
let implements_list = self.parse_special_type_list();
(implements_token, implements_list)
}
}
fn parse_classish_modifiers(&mut self) -> S::R {
let mut acc = vec![];
loop {
match self.peek_token_kind() {
TokenKind::Abstract | TokenKind::Final => {
// TODO(T25649779)
let token = self.next_token();
let token = S!(make_token, self, token);
acc.push(token);
}
_ => return S!(make_list, self, acc, self.pos()),
}
}
}
fn parse_classish_token(&mut self) -> S::R {
let spellcheck_tokens = vec![TokenKind::Class, TokenKind::Trait, TokenKind::Interface];
let token_str = &self.current_token_text();
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Class | TokenKind::Trait | TokenKind::Interface => {
let token = self.next_token();
S!(make_token, self, token)
}
// Spellcheck case
TokenKind::Name if Self::is_misspelled_from(&spellcheck_tokens, token_str) => {
// Default won't be used, since we already checked is_misspelled_from
let suggested_kind = Self::suggested_kind_from(&spellcheck_tokens, token_str)
.unwrap_or(TokenKind::Name);
self.skip_and_log_misspelled_token(suggested_kind);
S!(make_missing, self, self.pos())
}
_ => {
self.with_error(Errors::error1035);
S!(make_missing, self, self.pos())
}
}
}
fn parse_special_type(&mut self) -> (S::R, bool) {
let mut parser1 = self.clone();
let token = parser1.next_xhp_class_name_or_other_token();
match token.kind() {
TokenKind::Comma => {
// ERROR RECOVERY. We expected a type but we got a comma.
// Give the error that we expected a type, not a name, even though
// not every type is legal here.
self.continue_from(parser1);
self.with_error(Errors::error1007);
let comma = S!(make_token, self, token);
let missing = S!(make_missing, self, self.pos());
let list_item = S!(make_list_item, self, missing, comma);
(list_item, false)
}
TokenKind::Backslash
| TokenKind::Namespace
| TokenKind::Name
| TokenKind::XHPClassName => {
let item = self
.parse_type_specifier(false /* allow_var */, true /* allow_attr */);
let comma = self.optional_token(TokenKind::Comma);
let is_missing = comma.is_missing();
let list_item = S!(make_list_item, self, item, comma);
(list_item, is_missing)
}
TokenKind::Parent
| TokenKind::Enum
| TokenKind::RecordDec
| TokenKind::Shape
| TokenKind::SelfToken
if self.env.hhvm_compat_mode =>
{
// HHVM allows these keywords here for some reason
let item = self.parse_simple_type_or_type_constant();
let comma = self.optional_token(TokenKind::Comma);
let is_missing = comma.is_missing();
let list_item = S!(make_list_item, self, item, comma);
(list_item, is_missing)
}
_ => {
// ERROR RECOVERY: We are expecting a type; give an error as above.
// Don't eat the offending token.
self.with_error(Errors::error1007);
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
let list_item = S!(make_list_item, self, missing1, missing2);
(list_item, true)
}
}
}
fn parse_special_type_list(&mut self) -> S::R {
// An extends / implements list is a comma-separated list of types, but
// very special types; we want the types to consist of a name and an
// optional generic type argument list.
//
// TODO: Can the type name be of the form "foo::bar"? Those do not
// necessarily start with names. Investigate this.
//
// Normally we'd use one of the separated list helpers, but there is no
// specific end token we could use to detect the end of the list, and we
// want to bail out if we get something that is not a type of the right form.
// So we have custom logic here.
//
// TODO: This is one of the rare cases in Hack where a comma-separated list
// may not have a trailing comma. Is that desirable, or was that an
// oversight when the trailing comma rules were added? If possible we
// should keep the rule as-is, and disallow the trailing comma; it makes
// parsing and error recovery easier.
let mut items = vec![];
loop {
let (item, is_missing) = self.parse_special_type();
items.push(item);
if is_missing {
break;
}
}
S!(make_list, self, items, self.pos())
}
fn parse_extends_opt(&mut self) -> (S::R, S::R) {
let token_kind = self.peek_token_kind();
if token_kind != TokenKind::Extends {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
} else {
let token = self.next_token();
let extends_token = S!(make_token, self, token);
let extends_list = self.parse_special_type_list();
(extends_token, extends_list)
}
}
fn parse_classish_body(&mut self) -> S::R {
let left_brace_token = self.require_left_brace();
let classish_element_list = self.parse_classish_element_list_opt();
let right_brace_token = self.require_right_brace();
S!(
make_classish_body,
self,
left_brace_token,
classish_element_list,
right_brace_token
)
}
fn parse_classish_element_list_opt(&mut self) -> S::R {
// TODO: ERROR RECOVERY: consider bailing if the token cannot possibly
// start a classish element.
// ERROR RECOVERY: we're in the body of a classish, so we add visibility
// modifiers to our context.
self.expect_in_new_scope(ExpectedTokens::Visibility);
let element_list = self.parse_terminated_list(
|x: &mut Self| x.parse_classish_element(),
TokenKind::RightBrace,
);
self.pop_scope(ExpectedTokens::Visibility);
element_list
}
fn parse_xhp_children_paren(&mut self) -> S::R {
// SPEC (Draft)
// ( xhp-children-expressions )
//
// xhp-children-expressions:
// xhp-children-expression
// xhp-children-expressions , xhp-children-expression
//
// TODO: The parenthesized list of children expressions is NOT allowed
// to be comma-terminated. Is this intentional? It is inconsistent with
// practice throughout the rest of Hack. There is no syntactic difficulty
// in allowing a comma before the close paren. Consider allowing it.
let (left, exprs, right) =
self.parse_parenthesized_comma_list(|x: &mut Self| x.parse_xhp_children_expression());
S!(
make_xhp_children_parenthesized_list,
self,
left,
exprs,
right
)
}
fn parse_xhp_children_term(&mut self) -> S::R {
// SPEC (Draft)
// xhp-children-term:
// ( xhp-children-expressions ) trailing-opt
// name trailing-opt
// xhp-class-name trailing-opt
// xhp-category-name trailing-opt
// trailing: * ? +
//
// Note that there may be only zero or one trailing unary operator.
// "foo*?" is not a legal xhp child expression.
//
let mut parser1 = self.clone();
let token = parser1.next_xhp_children_name_or_other();
let kind = token.kind();
let name = S!(make_token, parser1, token);
match kind {
TokenKind::Name | TokenKind::XHPClassName | TokenKind::XHPCategoryName => {
self.continue_from(parser1);
self.parse_xhp_children_trailing(name)
}
TokenKind::LeftParen => {
let term = self.parse_xhp_children_paren();
self.parse_xhp_children_trailing(term)
}
_ => {
// ERROR RECOVERY: Eat the offending token, keep going.
self.with_error(Errors::error1053);
name
}
}
}
fn parse_xhp_children_trailing(&mut self, term: S::R) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Star | TokenKind::Plus | TokenKind::Question => {
let token = self.next_token();
let token = S!(make_token, self, token);
S!(make_postfix_unary_expression, self, term, token)
}
_ => term,
}
}
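// Folds a chain of `|`-separated xhp children terms into a left-associative
// binary expression tree, recursing while bars remain.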
fn parse_xhp_children_bar(&mut self, left: S::R) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Bar => {
let token = self.next_token();
let token = S!(make_token, self, token);
let right = self.parse_xhp_children_term();
let result = S!(make_binary_expression, self, left, token, right);
self.parse_xhp_children_bar(result)
}
_ => left,
}
}
fn parse_xhp_children_expression(&mut self) -> S::R {
// SPEC (Draft)
// xhp-children-expression:
// xhp-children-term
// xhp-children-expression | xhp-children-term
//
// Note that the bar operator is left-associative. (Not that it matters
// semantically.)
let term = self.parse_xhp_children_term();
self.parse_xhp_children_bar(term)
}
fn parse_xhp_children_declaration(&mut self) -> S::R {
// SPEC (Draft)
// xhp-children-declaration:
// children empty ;
// children xhp-children-expression ;
let children = self.assert_token(TokenKind::Children);
let token_kind = self.peek_token_kind();
let expr = match token_kind {
TokenKind::Empty => {
let token = self.next_token();
S!(make_token, self, token)
}
_ => self.parse_xhp_children_expression(),
};
let semi = self.require_semicolon();
S!(make_xhp_children_declaration, self, children, expr, semi)
}
fn parse_xhp_category(&mut self) -> S::R {
let token = self.next_xhp_category_name();
let token_kind = token.kind();
let category = S!(make_token, self, token);
match token_kind {
TokenKind::XHPCategoryName => category,
_ => {
self.with_error(Errors::error1052);
category
}
}
}
fn parse_xhp_type_specifier(&mut self) -> S::R {
// SPEC (Draft)
// xhp-type-specifier:
// enum { xhp-attribute-enum-list ,-opt }
// type-specifier
//
// The list of enum values must have at least one value and can be
// comma-terminated.
//
// xhp-attribute-enum-list:
// xhp-attribute-enum-value
// xhp-attribute-enum-list , xhp-attribute-enum-value
//
// xhp-attribute-enum-value:
// any integer literal
// any single-quoted-string literal
// any double-quoted-string literal
//
// TODO: What are the semantics of encapsulated expressions in double-quoted
// string literals here?
// ERROR RECOVERY: We parse any expressions here;
// TODO: give an error in a later pass if the expressions are not literals.
// (This work is tracked by task T21175355)
//
// An empty list is illegal, but we allow it here and give an error in
// a later pass.
let mut parser1 = self.clone();
let token = parser1.next_token();
let (token, optional) = match token.kind() {
TokenKind::Question => {
let enum_token = parser1.next_token();
let token = S!(make_token, parser1, token);
(enum_token, token)
}
_ => {
let missing = S!(make_missing, parser1, self.pos());
(token, missing)
}
};
match token.kind() {
TokenKind::Enum => {
self.continue_from(parser1);
let enum_token = S!(make_token, self, token);
let (left_brace, values, right_brace) = self
.parse_braced_comma_list_opt_allow_trailing(|x: &mut Self| {
x.parse_expression()
});
S!(
make_xhp_enum_type,
self,
optional,
enum_token,
left_brace,
values,
right_brace
)
}
_ => self.parse_type_specifier(true, true),
}
}
fn parse_xhp_required_opt(&mut self) -> S::R {
// SPEC (Draft)
// xhp-required :
// @ (required | lateinit)
//
// Note that these are two tokens. They can have whitespace between them.
if self.peek_token_kind() == TokenKind::At {
let at = self.assert_token(TokenKind::At);
let req_kind = self.next_token();
let kind = req_kind.kind();
let req = S!(make_token, self, req_kind);
match kind {
TokenKind::Required => S!(make_xhp_required, self, at, req),
TokenKind::Lateinit => S!(make_xhp_lateinit, self, at, req),
_ => {
self.with_error(Errors::error1051);
S!(make_missing, self, self.pos())
}
}
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_xhp_class_attribute_typed(&mut self) -> S::R {
// xhp-type-specifier xhp-name initializer-opt xhp-required-opt
let ty = self.parse_xhp_type_specifier();
let name = self.require_xhp_name();
let init = self.parse_simple_initializer_opt();
let req = self.parse_xhp_required_opt();
S!(make_xhp_class_attribute, self, ty, name, init, req)
}
fn parse_xhp_category_declaration(&mut self) -> S::R {
// SPEC (Draft)
// xhp-category-declaration:
// category xhp-category-list ,-opt ;
//
// xhp-category-list:
// xhp-category-name
// xhp-category-list , xhp-category-name
let category = self.assert_token(TokenKind::Category);
let (items, _) = self.parse_comma_list_allow_trailing(
TokenKind::Semicolon,
Errors::error1052,
|x: &mut Self| x.parse_xhp_category(),
);
let semi = self.require_semicolon();
S!(make_xhp_category_declaration, self, category, items, semi)
}
fn parse_xhp_class_attribute(&mut self) -> S::R {
// SPEC (Draft)
// xhp-attribute-declaration:
// xhp-class-name
// xhp-type-specifier xhp-name initializer-opt xhp-required-opt
//
// ERROR RECOVERY:
// The xhp type specifier could be an xhp class name. To disambiguate we peek
// ahead a token; if it's a comma or semi, we're done. If not, then we assume
// that we are in the more complex case.
if self.is_next_xhp_class_name() {
let mut parser1 = self.clone();
let class_name = parser1.require_class_name();
match parser1.peek_token_kind() {
TokenKind::Comma | TokenKind::Semicolon => {
self.continue_from(parser1);
let type_specifier = S!(make_simple_type_specifier, self, class_name);
S!(make_xhp_simple_class_attribute, self, type_specifier)
}
_ => self.parse_xhp_class_attribute_typed(),
}
} else {
self.parse_xhp_class_attribute_typed()
}
}
fn parse_xhp_class_attribute_declaration(&mut self) -> S::R {
// SPEC: (Draft)
// xhp-class-attribute-declaration :
// attribute xhp-attribute-declaration-list ;
//
// xhp-attribute-declaration-list:
// xhp-attribute-declaration
// xhp-attribute-declaration-list , xhp-attribute-declaration
//
// TODO: The list of attributes may NOT be terminated with a trailing comma
// before the semicolon. This is inconsistent with the rest of Hack.
// Allowing a comma before the semi does not introduce any syntactic
// difficulty; consider allowing it.
let attr_token = self.assert_token(TokenKind::Attribute);
// TODO: Better error message.
let attrs =
self.parse_comma_list(TokenKind::Semicolon, Errors::error1004, |x: &mut Self| {
x.parse_xhp_class_attribute()
});
let semi = self.require_semicolon();
S!(
make_xhp_class_attribute_declaration,
self,
attr_token,
attrs,
semi
)
}
fn parse_qualified_name_type(&mut self) -> S::R {
// Here we're parsing a name followed by an optional generic type
// argument list; if we don't have a name, give an error.
match self.peek_token_kind() {
TokenKind::Backslash | TokenKind::Name => self.parse_simple_type_or_generic(),
_ => self.require_qualified_name(),
}
}
fn parse_qualified_name_type_opt(&mut self) -> S::R {
// Here we're parsing a name followed by an optional generic type
// argument list; if we don't have a name, give an error.
match self.peek_token_kind() {
TokenKind::Backslash | TokenKind::Construct | TokenKind::Name => {
self.parse_simple_type_or_generic()
}
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_require_clause(&mut self) -> S::R {
// SPEC
// require-extends-clause:
// require extends qualified-name ;
//
// require-implements-clause:
// require implements qualified-name ;
//
// We must also parse "require extends :foo;"
// TODO: What about "require extends :foo<int>;" ?
// TODO: The spec is incomplete; we need to be able to parse
// require extends Foo<int>;
// (This work is being tracked by spec issue 105.)
// TODO: Check whether we also need to handle
// require extends foo::bar
// and so on.
//
// ERROR RECOVERY: Detect if the implements/extends, name and semi are
// missing.
let req = self.assert_token(TokenKind::Require);
let token_kind = self.peek_token_kind();
let req_kind = match token_kind {
TokenKind::Implements | TokenKind::Extends => {
let req_kind_token = self.next_token();
S!(make_token, self, req_kind_token)
}
_ => {
self.with_error(Errors::error1045);
S!(make_missing, self, self.pos())
}
};
let name = if self.is_next_xhp_class_name() {
self.parse_simple_type_or_generic()
} else {
self.parse_qualified_name_type()
};
let semi = self.require_semicolon();
S!(make_require_clause, self, req, req_kind, name, semi)
}
// This duplicates work from parse_methodish_or_const_or_type_const,
// but this function is only invoked after an attribute spec, while
// parse_methodish_or_const_or_type_const is called after a modifier.
// Having this function prevents constants from having attributes, since
// this cannot be checked in parser_errors: there is no field in a constant
// declaration in which to store 'attributes'.
fn parse_methodish_or_property_or_type_constant(&mut self, attribute_spec: S::R) -> S::R {
let mut parser1 = self.clone();
let modifiers = parser1.parse_modifiers();
let current_token_kind = parser1.peek_token_kind();
let next_token = parser1.peek_token_with_lookahead(1);
let next_token_kind = next_token.kind();
match (current_token_kind, next_token_kind) {
(TokenKind::Const, TokenKind::Type) => {
self.continue_from(parser1);
let const_ = self.assert_token(TokenKind::Const);
self.parse_type_const_declaration(attribute_spec, modifiers, const_)
}
_ => self.parse_methodish_or_property(attribute_spec),
}
}
fn has_leading_trivia(token: &S::Token, kind: TriviaKind) -> bool {
token.leading().iter().any(|x| x.kind() == kind)
}
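// After an attribute spec, decides between a method and a property declaration
// by parsing the modifiers and inspecting the next two tokens.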
fn parse_methodish_or_property(&mut self, attribute_spec: S::R) -> S::R {
let modifiers = self.parse_modifiers();
// ERROR RECOVERY: match against two tokens, because if one token is
// in error but the next isn't, then it's likely that the user is
// simply still typing. Throw an error on what's being typed, then eat
// it and keep going.
let current_token_kind = self.peek_token_kind();
let next_token = self.peek_token_with_lookahead(1);
let next_token_kind = next_token.kind();
match (current_token_kind, next_token_kind) {
// Detected the usual start to a method, so continue parsing as method.
(TokenKind::Async, _) | (TokenKind::Coroutine, _) | (TokenKind::Function, _) => {
self.parse_methodish(attribute_spec, modifiers)
}
(TokenKind::LeftParen, _) => self.parse_property_declaration(attribute_spec, modifiers),
// We encountered one unexpected token, but the next still indicates that
// we should be parsing a methodish. Throw an error, process the token
// as an extra, and keep going.
(_, TokenKind::Async) | (_, TokenKind::Coroutine) | (_, TokenKind::Function)
if !(Self::has_leading_trivia(&next_token, TriviaKind::EndOfLine)) =>
{
self.with_error_on_whole_token(Errors::error1056);
self.skip_and_log_unexpected_token(false);
self.parse_methodish(attribute_spec, modifiers)
}
// Otherwise, continue parsing as a property (which might be a lambda).
_ => self.parse_property_declaration(attribute_spec, modifiers),
}
}
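// Parses the `insteadof` form of a conflict-resolution item: the already-parsed
// name, the insteadof keyword, and the list of removed trait names.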
fn parse_trait_use_precedence_item(&mut self, name: S::R) -> S::R {
let keyword = self.assert_token(TokenKind::Insteadof);
let removed_names = self.parse_trait_name_list(|x: TokenKind| x == TokenKind::Semicolon);
S!(
make_trait_use_precedence_item,
self,
name,
keyword,
removed_names
)
}
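// Parses the `as` form of a conflict-resolution item: the aliasing name, the
// `as` keyword, optional modifiers, and an optional aliased name.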
fn parse_trait_use_alias_item(&mut self, aliasing_name: S::R) -> S::R {
let keyword = self.require_token(TokenKind::As, Errors::expected_as_or_insteadof);
let modifiers = self.parse_modifiers();
let aliased_name = self.parse_qualified_name_type_opt();
S!(
make_trait_use_alias_item,
self,
aliasing_name,
keyword,
modifiers,
aliased_name
)
}
fn parse_trait_use_conflict_resolution_item(&mut self) -> S::R {
let qualifier = self.parse_qualified_name_type();
let name = if self.peek_token_kind() == TokenKind::ColonColon {
// scope resolution expression case
let cc_token = self.require_coloncolon();
let name = self
.require_token_one_of(&[TokenKind::Name, TokenKind::Construct], Errors::error1004);
S!(
make_scope_resolution_expression,
self,
qualifier,
cc_token,
name
)
} else {
// plain qualified name case
qualifier
};
match self.peek_token_kind() {
TokenKind::Insteadof => self.parse_trait_use_precedence_item(name),
TokenKind::As | _ => self.parse_trait_use_alias_item(name),
}
}
// SPEC:
// trait-use-conflict-resolution:
// use trait-name-list { trait-use-conflict-resolution-list }
//
// trait-use-conflict-resolution-list:
// trait-use-conflict-resolution-item
// trait-use-conflict-resolution-item trait-use-conflict-resolution-list
//
// trait-use-conflict-resolution-item:
// trait-use-alias-item
// trait-use-precedence-item
//
// trait-use-alias-item:
// trait-use-conflict-resolution-item-name as name;
// trait-use-conflict-resolution-item-name as visibility-modifier name;
// trait-use-conflict-resolution-item-name as visibility-modifier;
//
// trait-use-precedence-item:
// scope-resolution-expression insteadof trait-name-list
//
// trait-use-conflict-resolution-item-name:
// qualified-name
// scope-resolution-expression
fn parse_trait_use_conflict_resolution(
&mut self,
use_token: S::R,
trait_name_list: S::R,
) -> S::R {
let left_brace = self.assert_token(TokenKind::LeftBrace);
let clauses = self.parse_separated_list_opt(
TokenKind::Semicolon,
SeparatedListKind::TrailingAllowed,
TokenKind::RightBrace,
Errors::error1004,
|x: &mut Self| x.parse_trait_use_conflict_resolution_item(),
);
let right_brace = self.require_token(TokenKind::RightBrace, Errors::error1006);
S!(
make_trait_use_conflict_resolution,
self,
use_token,
trait_name_list,
left_brace,
clauses,
right_brace,
)
}
// SPEC:
// trait-use-clause:
// use trait-name-list ;
//
// trait-name-list:
// qualified-name generic-type-parameter-listopt
// trait-name-list , qualified-name generic-type-parameter-listopt
fn parse_trait_name_list<P>(&mut self, predicate: P) -> S::R
where
P: Fn(TokenKind) -> bool,
{
let (items, _, _) = self.parse_separated_list_predicate(
|x| x == TokenKind::Comma,
SeparatedListKind::NoTrailing,
predicate,
Errors::error1004,
|x: &mut Self| x.parse_qualified_name_type(),
);
items
}
fn parse_trait_use(&mut self) -> S::R {
let use_token = self.assert_token(TokenKind::Use);
let trait_name_list =
self.parse_trait_name_list(|x| x == TokenKind::Semicolon || x == TokenKind::LeftBrace);
if self.peek_token_kind() == TokenKind::LeftBrace {
self.parse_trait_use_conflict_resolution(use_token, trait_name_list)
} else {
let semi = self.require_semicolon();
S!(make_trait_use, self, use_token, trait_name_list, semi)
}
}
fn parse_property_declaration(&mut self, attribute_spec: S::R, modifiers: S::R) -> S::R {
// SPEC:
// property-declaration:
// attribute-spec-opt property-modifier type-specifier
// property-declarator-list ;
//
// property-declarator-list:
// property-declarator
// property-declarator-list , property-declarator
//
// The type specifier is optional in non-strict mode and required in
// strict mode. We give an error in a later pass.
let prop_type = match self.peek_token_kind() {
TokenKind::Variable => S!(make_missing, self, self.pos()),
_ => self.parse_type_specifier(false /* allow_var */, false /* allow_attr */),
};
let decls =
self.parse_comma_list(TokenKind::Semicolon, Errors::error1008, |x: &mut Self| {
x.parse_property_declarator()
});
let semi = self.require_semicolon();
S!(
make_property_declaration,
self,
attribute_spec,
modifiers,
prop_type,
decls,
semi
)
}
fn parse_property_declarator(&mut self) -> S::R {
// SPEC:
// property-declarator:
// variable-name property-initializer-opt
// property-initializer:
// = expression
let name = self.require_variable();
let simple_init = self.parse_simple_initializer_opt();
S!(make_property_declarator, self, name, simple_init)
}
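// Speculatively parses a type specifier followed by a name on a cloned parser;
// if no new errors were produced, the const declaration has an explicit type.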
fn is_type_in_const(&self) -> bool {
let mut parser1 = self.clone();
let _ = parser1.parse_type_specifier(false, true);
let _ = parser1.require_name_allow_all_keywords();
self.errors.len() == parser1.errors.len()
}
// SPEC:
// const-declaration:
// abstract_opt const type-specifier_opt constant-declarator-list ;
// visibility const type-specifier_opt constant-declarator-list ;
// constant-declarator-list:
// constant-declarator
// constant-declarator-list , constant-declarator
// constant-declarator:
// name constant-initializer_opt
// constant-initializer:
// = const-expression
fn parse_const_declaration(&mut self, modifiers: S::R, const_: S::R) -> S::R {
let type_spec = if self.is_type_in_const() {
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true)
} else {
S!(make_missing, self, self.pos())
};
let const_list =
self.parse_comma_list(TokenKind::Semicolon, Errors::error1004, |x: &mut Self| {
x.parse_constant_declarator()
});
let semi = self.require_semicolon();
S!(
make_const_declaration,
self,
modifiers,
const_,
type_spec,
const_list,
semi
)
}
fn parse_constant_declarator(&mut self) -> S::R {
// TODO: We allow const names to be keywords here; in particular we
// require that const string TRUE = "true"; be legal. Likely this
// should be more strict. What are the rules for which keywords are
// legal constant names and which are not?
// Note that if this logic is changed, it should be changed in
// is_type_in_const above as well.
//
// This permits abstract variables to have an initializer, and vice-versa.
// This is deliberate, and those errors will be detected after the syntax
// tree is created.
let const_name = self.require_name_allow_all_keywords();
let initializer_ = self.parse_simple_initializer_opt();
S!(make_constant_declarator, self, const_name, initializer_)
}
// SPEC:
// type-constant-declaration:
// abstract-type-constant-declaration
// concrete-type-constant-declaration
// abstract-type-constant-declaration:
// abstract const type name type-constraintopt ;
// concrete-type-constant-declaration:
// const type name type-constraintopt = type-specifier ;
//
// ERROR RECOVERY:
//
// An abstract type constant may only occur in an interface or an abstract
// class. We allow that to be parsed here, and the type checker detects the
// error.
// CONSIDER: We could detect this error in a post-parse pass; it is entirely
// syntactic. Consider moving the error detection out of the type checker.
//
// An interface may not contain a non-abstract type constant that has a
// type constraint. We allow that to be parsed here, and the type checker
// detects the error.
// CONSIDER: We could detect this error in a post-parse pass; it is entirely
// syntactic. Consider moving the error detection out of the type checker.
fn parse_type_const_declaration(
&mut self,
attributes: S::R,
modifiers: S::R,
const_: S::R,
) -> S::R {
let type_token = self.assert_token(TokenKind::Type);
let name = self.require_name_allow_non_reserved();
let generic_type_parameter_list = self.parse_generic_type_parameter_list_opt();
let type_constraint = self.parse_type_constraint_opt();
let (equal_token, type_specifier) = if self.peek_token_kind() == TokenKind::Equal {
let equal_token = self.assert_token(TokenKind::Equal);
let type_spec = self
.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true);
(equal_token, type_spec)
} else {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
};
let semicolon = self.require_semicolon();
S!(
make_type_const_declaration,
self,
attributes,
modifiers,
const_,
type_token,
name,
generic_type_parameter_list,
type_constraint,
equal_token,
type_specifier,
semicolon,
)
}
// SPEC:
// attribute_specification :=
// attribute_list
// old_attribute_specification
// attribute_list :=
// attribute
// attribute_list attribute
// attribute := @ attribute_name attribute_value_list_opt
// old_attribute_specification := << old_attribute_list >>
// old_attribute_list :=
// old_attribute
// old_attribute_list , old_attribute
// old_attribute := attribute_name attribute_value_list_opt
// attribute_name := name
// attribute_value_list := ( attribute_values_opt )
// attribute_values :=
// attribute_value
// attribute_values , attribute_value
// attribute_value := expression
//
// TODO: The list of attrs can have a trailing comma. Update the spec.
// TODO: The list of values can have a trailing comma. Update the spec.
// (Both these work items are tracked by spec issue 106.)
pub fn parse_old_attribute_specification_opt(&mut self) -> S::R {
if self.peek_token_kind() == TokenKind::LessThanLessThan {
let (left, items, right) =
self.parse_double_angled_comma_list_allow_trailing(|x: &mut Self| {
x.parse_old_attribute()
});
S!(make_old_attribute_specification, self, left, items, right)
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_file_attribute_specification_opt(&mut self) -> S::R {
if self.peek_token_kind() == TokenKind::LessThanLessThan {
let left = self.assert_token(TokenKind::LessThanLessThan);
let keyword = self.assert_token(TokenKind::File);
let colon = self.require_colon();
let (items, _) = self.parse_comma_list_allow_trailing(
TokenKind::GreaterThanGreaterThan,
Errors::expected_user_attribute,
|x: &mut Self| x.parse_old_attribute(),
);
let right = self.require_token(TokenKind::GreaterThanGreaterThan, Errors::error1029);
S!(
make_file_attribute_specification,
self,
left,
keyword,
colon,
items,
right
)
} else {
S!(make_missing, self, self.pos())
}
}
fn parse_return_type_hint_opt(&mut self) -> (S::R, S::R) {
let token_kind = self.peek_token_kind();
if token_kind == TokenKind::Colon {
let token = self.next_token();
let colon_token = S!(make_token, self, token);
let return_type =
self.with_type_parser(|p: &mut TypeParser<'a, S, T>| p.parse_return_type());
(colon_token, return_type)
} else {
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
(missing1, missing2)
}
}
pub fn parse_parameter_list_opt(&mut self) -> (S::R, S::R, S::R) {
// SPEC
// TODO: The specification is wrong in several respects concerning
// variadic parameters. Variadic parameters are permitted to have a
// type and name but this is not mentioned in the spec. And variadic
// parameters are not mentioned at all in the grammar for constructor
// parameter lists. (This is tracked by spec issue 107.)
//
// parameter-list:
// variadic-parameter
// parameter-declaration-list
// parameter-declaration-list ,
// parameter-declaration-list , variadic-parameter
//
// parameter-declaration-list:
// parameter-declaration
// parameter-declaration-list , parameter-declaration
//
// variadic-parameter:
// ...
// attribute-specification-opt visibility-modifier-opt type-specifier \
// ... variable-name
//
// This function parses the parens as well.
// ERROR RECOVERY: We allow variadic parameters in all positions; a later
// pass gives an error if a variadic parameter is in an incorrect position
// or followed by a trailing comma, or if the parameter has a
// default value.
self.parse_parenthesized_comma_list_opt_allow_trailing(|x: &mut Self| x.parse_parameter())
}
fn parse_parameter(&mut self) -> S::R {
let mut parser1 = self.clone();
let token = parser1.next_token();
match token.kind() {
TokenKind::DotDotDot => {
let next_kind = parser1.peek_token_kind();
if next_kind == TokenKind::Variable {
self.parse_parameter_declaration()
} else {
let missing1 = S!(make_missing, parser1, self.pos());
let missing2 = S!(make_missing, parser1, self.pos());
self.continue_from(parser1);
let token = S!(make_token, self, token);
S!(make_variadic_parameter, self, missing1, missing2, token)
}
}
_ => self.parse_parameter_declaration(),
}
}
fn parse_parameter_declaration(&mut self) -> S::R {
// SPEC
//
// TODO: Add call-convention-opt to the specification.
// (This work is tracked by task T22582676.)
//
// TODO: Update grammar for inout parameters.
// (This work is tracked by task T22582715.)
//
// parameter-declaration:
// attribute-specification-opt \
// call-convention-opt \
// type-specifier variable-name \
// default-argument-specifier-opt
//
// ERROR RECOVERY
// In strict mode, we require a type specifier. This error is not caught
// at parse time but rather by a later pass.
// Visibility modifiers are only legal in constructor parameter
// lists; we give an error in a later pass.
// Variadic params cannot be declared inout; we permit that here but
// give an error in a later pass.
// Variadic params and inout params cannot have default values; these
// errors are also reported in a later pass.
let attrs = self.parse_attribute_specification_opt();
let visibility = self.parse_visibility_modifier_opt();
let callconv = self.parse_call_convention_opt();
let token = self.peek_token();
let type_specifier = match token.kind() {
TokenKind::Variable | TokenKind::DotDotDot | TokenKind::Ampersand => {
S!(make_missing, self, self.pos())
}
_ => {
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr */ false)
}
};
let name = self.parse_decorated_variable_opt();
let default = self.parse_simple_initializer_opt();
S!(
make_parameter_declaration,
self,
attrs,
visibility,
callconv,
type_specifier,
name,
default
)
}
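// Parses the parameter name, which may be decorated with `...` (variadic) or
// `&` (by-reference); otherwise a plain variable name is required.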
fn parse_decorated_variable_opt(&mut self) -> S::R {
match self.peek_token_kind() {
TokenKind::DotDotDot | TokenKind::Ampersand => self.parse_decorated_variable(),
_ => self.require_variable(),
}
}
// TODO: This is wrong. The variable here is not an *expression* that has
// an optional decoration on it. It's a declaration. We shouldn't be using the
// same data structure for a decorated expression as a declaration; one
// is a *use* and the other is a *definition*.
fn parse_decorated_variable(&mut self) -> S::R {
// ERROR RECOVERY
// Detection of (variadic, byRef) inout params happens in post-parsing.
// Although a parameter can have at most one variadic/reference decorator,
// we deliberately allow multiple decorators in the initial parse and produce
// an error in a later pass.
let decorator = self.fetch_token();
let variable = match self.peek_token_kind() {
TokenKind::DotDotDot | TokenKind::Ampersand => self.parse_decorated_variable(),
_ => self.require_variable(),
};
S!(make_decorated_expression, self, decorator, variable)
}
fn parse_visibility_modifier_opt(&mut self) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Public | TokenKind::Protected | TokenKind::Private => {
let token = self.next_token();
S!(make_token, self, token)
}
_ => S!(make_missing, self, self.pos()),
}
}
// SPEC
//
// TODO: Add this to the specification.
// (This work is tracked by task T22582676.)
//
// call-convention:
// inout
fn parse_call_convention_opt(&mut self) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Inout => {
let token = self.next_token();
S!(make_token, self, token)
}
_ => S!(make_missing, self, self.pos()),
}
}
// SPEC
// default-argument-specifier:
// = const-expression
//
// constant-initializer:
// = const-expression
fn parse_simple_initializer_opt(&mut self) -> S::R {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Equal => {
let token = self.next_token();
// TODO: Detect if expression is not const
let token = S!(make_token, self, token);
let default_value = self.parse_expression();
S!(make_simple_initializer, self, token, default_value)
}
_ => S!(make_missing, self, self.pos()),
}
}
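// Parses a function declaration: modifiers, the declaration header, and a
// compound-statement body, attached to the given attribute specification.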
pub fn parse_function_declaration(&mut self, attribute_specification: S::R) -> S::R {
let modifiers = self.parse_modifiers();
let header =
self.parse_function_declaration_header(modifiers, /* is_methodish =*/ false);
let body = self.parse_compound_statement();
S!(
make_function_declaration,
self,
attribute_specification,
header,
body
)
}
fn parse_constraint_operator(&mut self) -> S::R {
// TODO: Put this in the specification
// (This work is tracked by spec issue 100.)
// constraint-operator:
// =
// as
// super
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Equal | TokenKind::As | TokenKind::Super => {
let token = self.next_token();
S!(make_token, self, token)
}
_ =>
// ERROR RECOVERY: don't eat the offending token.
// TODO: Give parse error
{
S!(make_missing, self, self.pos())
}
}
}
fn parse_where_constraint(&mut self) -> S::R {
// TODO: Put this in the specification
// (This work is tracked by spec issue 100.)
// constraint:
// type-specifier constraint-operator type-specifier
let left =
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true);
let op = self.parse_constraint_operator();
let right =
self.parse_type_specifier(/* allow_var = */ false, /* allow_attr = */ true);
S!(make_where_constraint, self, left, op, right)
}
fn parse_where_constraint_list_item(&mut self) -> Option<S::R> {
match self.peek_token_kind() {
TokenKind::Semicolon | TokenKind::LeftBrace => None,
_ => {
let where_constraint = self.parse_where_constraint();
let comma = self.optional_token(TokenKind::Comma);
let result = S!(make_list_item, self, where_constraint, comma);
Some(result)
}
}
}
fn parse_where_clause(&mut self) -> S::R {
// TODO: Add this to the specification
// (This work is tracked by spec issue 100.)
// where-clause:
// where constraint-list
//
// constraint-list:
// constraint
// constraint-list , constraint
let keyword = self.assert_token(TokenKind::Where);
let constraints =
self.parse_list_until_none(|x: &mut Self| x.parse_where_constraint_list_item());
S!(make_where_clause, self, keyword, constraints)
}
fn parse_where_clause_opt(&mut self) -> S::R {
if self.peek_token_kind() != TokenKind::Where {
S!(make_missing, self, self.pos())
} else {
self.parse_where_clause()
}
}
fn parse_function_declaration_header(&mut self, modifiers: S::R, is_methodish: bool) -> S::R {
// SPEC
// function-definition-header:
// attribute-specification-opt async-opt coroutine-opt function name /
// generic-type-parameter-list-opt ( parameter-list-opt ) : /
// return-type where-clause-opt
// TODO: The spec does not specify "where" clauses. Add them.
// (This work is tracked by spec issue 100.)
//
// In strict mode, we require a type specifier. This error is not caught
// at parse time but rather by a later pass.
let function_token = self.require_function();
let label = self.parse_function_label_opt(is_methodish);
let generic_type_parameter_list = self.parse_generic_type_parameter_list_opt();
let (left_paren_token, parameter_list, right_paren_token) = self.parse_parameter_list_opt();
let (colon_token, return_type) = self.parse_return_type_hint_opt();
let where_clause = self.parse_where_clause_opt();
S!(
make_function_declaration_header,
self,
modifiers,
function_token,
label,
generic_type_parameter_list,
left_paren_token,
parameter_list,
right_paren_token,
colon_token,
return_type,
where_clause,
)
}
// A function label is either a function name or a __construct label.
fn parse_function_label_opt(&mut self, is_methodish: bool) -> S::R {
let report_error = |x: &mut Self, token: S::Token| {
x.with_error(Errors::error1044);
let token = S!(make_token, x, token);
S!(make_error, x, token)
};
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Name | TokenKind::Construct => {
let token = self.next_token();
S!(make_token, self, token)
}
TokenKind::LeftParen => {
// It turns out, it was just a verbose lambda; YOLO PHP
S!(make_missing, self, self.pos())
}
TokenKind::Isset | TokenKind::Unset | TokenKind::Empty => {
// We need to parse those as names as they are defined in hhi
let token = self.next_token_as_name();
S!(make_token, self, token)
}
_ => {
let token = if is_methodish {
self.next_token_as_name()
} else {
self.next_token_non_reserved_as_name()
};
if token.kind() == TokenKind::Name {
S!(make_token, self, token)
} else {
// ERROR RECOVERY: Eat the offending token.
report_error(self, token)
}
}
}
}
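// An old-style attribute is syntactically a constructor call, so it is parsed
// with the expression parser.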
fn parse_old_attribute(&mut self) -> S::R {
self.with_expression_parser(|p: &mut ExpressionParser<'a, S, T>| p.parse_constructor_call())
}
pub fn parse_attribute_specification_opt(&mut self) -> S::R {
match self.peek_token_kind() {
TokenKind::At if self.env.allow_new_attribute_syntax => {
self.parse_new_attribute_specification_opt()
}
TokenKind::LessThanLessThan => self.parse_old_attribute_specification_opt(),
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_new_attribute_specification_opt(&mut self) -> S::R {
let attributes = self.parse_list_while(
|p: &mut Self| p.parse_new_attribute(),
|p: &Self| p.peek_token_kind() == TokenKind::At,
);
S!(make_attribute_specification, self, attributes)
}
fn parse_new_attribute(&mut self) -> S::R {
let at = self.assert_token(TokenKind::At);
let token = self.peek_token();
let constructor_call = match token.kind() {
TokenKind::Name => self.with_expression_parser(|p: &mut ExpressionParser<'a, S, T>| {
p.parse_constructor_call()
}),
_ => {
self.with_error(Errors::expected_user_attribute);
S!(make_missing, self, self.pos())
}
};
S!(make_attribute, self, at, constructor_call)
}
// Parses modifiers and passes them into the parse methods for the
// respective class body element.
fn parse_methodish_or_property_or_const_or_type_const(&mut self) -> S::R {
let mut parser1 = self.clone();
let modifiers = parser1.parse_modifiers();
let kind0 = parser1.peek_token_kind_with_lookahead(0);
let kind1 = parser1.peek_token_kind_with_lookahead(1);
let kind2 = parser1.peek_token_kind_with_lookahead(2);
match (kind0, kind1, kind2) {
(TokenKind::Const, TokenKind::Type, TokenKind::Semicolon) => {
self.continue_from(parser1);
let const_ = self.assert_token(TokenKind::Const);
self.parse_const_declaration(modifiers, const_)
}
(TokenKind::Const, TokenKind::Type, _) if kind2 != TokenKind::Equal => {
let attributes = S!(make_missing, self, self.pos());
let modifiers = self.parse_modifiers();
let const_ = self.assert_token(TokenKind::Const);
self.parse_type_const_declaration(attributes, modifiers, const_)
}
(TokenKind::Const, _, _) => {
self.continue_from(parser1);
let const_ = self.assert_token(TokenKind::Const);
self.parse_const_declaration(modifiers, const_)
}
_ => {
let missing = S!(make_missing, self, self.pos());
self.parse_methodish_or_property(missing)
}
}
}
// SPEC
// method-declaration:
// attribute-spec-opt method-modifiers function-definition
// attribute-spec-opt method-modifiers function-definition-header ;
// method-modifiers:
// method-modifier
// method-modifiers method-modifier
// method-modifier:
// visibility-modifier (i.e. private, public, protected)
// static
// abstract
// final
fn parse_methodish(&mut self, attribute_spec: S::R, modifiers: S::R) -> S::R {
let header =
self.parse_function_declaration_header(modifiers, /* is_methodish:*/ true);
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::LeftBrace => {
let body = self.parse_compound_statement();
let missing = S!(make_missing, self, self.pos());
S!(
make_methodish_declaration,
self,
attribute_spec,
header,
body,
missing
)
}
TokenKind::Semicolon => {
let pos = self.pos();
let token = self.next_token();
let missing = S!(make_missing, self, pos);
let semicolon = S!(make_token, self, token);
S!(
make_methodish_declaration,
self,
attribute_spec,
header,
missing,
semicolon
)
}
TokenKind::Equal => {
let equal = self.assert_token(TokenKind::Equal);
let qualifier = self.parse_qualified_name_type();
let cc_token = self.require_coloncolon();
let name = self.require_token_one_of(
&[TokenKind::Name, TokenKind::Construct],
Errors::error1004,
);
let name = S!(
make_scope_resolution_expression,
self,
qualifier,
cc_token,
name
);
let semi = self.require_semicolon();
S!(
make_methodish_trait_resolution,
self,
attribute_spec,
header,
equal,
name,
semi
)
}
_ => {
// ERROR RECOVERY: We expected either a block or a semicolon; we got
// neither. Use the offending token as the body of the method.
// TODO: Is this the right error recovery?
let pos = self.pos();
let token = self.next_token();
self.with_error(Errors::error1041);
let token = S!(make_token, self, token);
let error = S!(make_error, self, token);
let missing = S!(make_missing, self, pos);
S!(
make_methodish_declaration,
self,
attribute_spec,
header,
error,
missing
)
}
}
}
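// Collects any run of declaration modifiers (abstract, static, visibility,
// async, coroutine, final) into a list, stopping at the first non-modifier.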
fn parse_modifiers(&mut self) -> S::R {
let mut items = vec![];
loop {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::Abstract
| TokenKind::Static
| TokenKind::Public
| TokenKind::Protected
| TokenKind::Private
| TokenKind::Async
| TokenKind::Coroutine
| TokenKind::Final => {
let token = self.next_token();
let item = S!(make_token, self, token);
items.push(item)
}
_ => break,
}
}
S!(make_list, self, items, self.pos())
}
fn parse_enum_or_classish_or_function_declaration(&mut self) -> S::R {
// An enum, type alias, function, interface, trait or class may all
// begin with an attribute.
let mut parser1 = self.clone();
let attribute_specification = parser1.parse_attribute_specification_opt();
let mut parser2 = parser1.clone();
let token = parser2.next_token();
match token.kind() {
TokenKind::Enum => {
self.continue_from(parser1);
self.parse_enum_declaration(attribute_specification)
}
TokenKind::Type | TokenKind::Newtype => {
self.continue_from(parser1);
self.parse_alias_declaration(attribute_specification)
}
TokenKind::Async | TokenKind::Coroutine | TokenKind::Function => {
if attribute_specification.is_missing() {
// If the attribute section is missing, this might be either a
// function declaration or an expression statement containing an
// anonymous function; use the statement parser to determine which
// case we are in.
self.with_statement_parser(|p: &mut StatementParser<'a, S, T>| {
p.parse_possible_php_function(/* toplevel=*/ true)
})
} else {
self.continue_from(parser1);
self.parse_function_declaration(attribute_specification)
}
}
TokenKind::Abstract
| TokenKind::Final
| TokenKind::Interface
| TokenKind::Trait
| TokenKind::Class => {
self.continue_from(parser1);
self.parse_classish_declaration(attribute_specification)
}
_ => {
// ERROR RECOVERY: we encountered an unexpected token, raise an error and continue
// TODO: This is wrong; we have lost the attribute specification
// from the tree.
self.continue_from(parser2);
self.with_error(Errors::error1057(self.token_text(&token)));
let token = S!(make_token, self, token);
S!(make_error, self, token)
}
}
}
fn parse_classish_element(&mut self) -> S::R {
// We need to identify an element of a class, trait, etc. Possibilities
// are:
//
// // constant-declaration:
// const T $x = v ;
// abstract const T $x ;
// public const T $x = v ; // PHP7 only
//
// // type-constant-declaration
// const type T = X;
// abstract const type T;
//
// // property-declaration:
// public/private/protected/static T $x;
// TODO: We may wish to parse "T $x" and give an error indicating
// TODO: that we were expecting either const or public.
// Note that a visibility modifier is required; static is optional;
// any order is allowed.
//
// // method-declaration
// <<attr>> public/private/protected/abstract/final/static async function
// Note that a modifier is required, the attr and async are optional.
// TODO: Hack requires a visibility modifier, unless "static" is supplied,
// TODO: in which case the method is considered to be public. Is this
// TODO: desired? Resolve this disagreement with the spec.
//
// // constructor-declaration
// <<attr>> public/private/protected/abstract/final function __construct
// Note that we allow static constructors in this parser; we produce an
// error in the post-parse error detection pass.
//
// // trait clauses
// require extends qualified-name
// require implements qualified-name
//
// // XHP class attribute declaration
// attribute ... ;
//
// // XHP category declaration
// category ... ;
//
// // XHP children declaration
// children ... ;
//
// // Pocket Universe Enumeration
// final? enum id { ... (pocket-field ;) * }
match self.peek_token_kind() {
TokenKind::Children => self.parse_xhp_children_declaration(),
TokenKind::Category => self.parse_xhp_category_declaration(),
TokenKind::Use => self.parse_trait_use(),
TokenKind::Const
| TokenKind::Abstract
| TokenKind::Public
| TokenKind::Protected
| TokenKind::Private
| TokenKind::Static => self.parse_methodish_or_property_or_const_or_type_const(),
TokenKind::Enum => self.parse_class_enum(false),
TokenKind::Final => {
match self.peek_token_kind_with_lookahead(1) {
TokenKind::Enum => self.parse_class_enum(/* final:*/ true),
_ => {
// Parse class methods, constructors, properties
// or type constants.
let attr = self.parse_attribute_specification_opt();
self.parse_methodish_or_property_or_type_constant(attr)
}
}
}
TokenKind::Async | TokenKind::LessThanLessThan => |
TokenKind::At if self.env.allow_new_attribute_syntax => {
let attr = self.parse_attribute_specification_opt();
self.parse_methodish_or_property_or_type_constant(attr)
}
TokenKind::Require => {
// We give an error if these are found where they should not be,
// in a later pass.
self.parse_require_clause()
}
TokenKind::Attribute => self.parse_xhp_class_attribute_declaration(),
TokenKind::Function => {
// ERROR RECOVERY
// Hack requires that a function inside a class be marked
// with a visibility modifier, but PHP does not have this requirement.
// We accept the lack of a modifier here, and produce an error in
// a later pass.
let missing1 = S!(make_missing, self, self.pos());
let missing2 = S!(make_missing, self, self.pos());
self.parse_methodish(missing1, missing2)
}
kind if self.expects(kind) => S!(make_missing, self, self.pos()),
_ => {
// If this is a property declaration which is missing its visibility
// modifier, accept it here, but emit an error in a later pass.
let mut parser1 = self.clone();
let missing1 = S!(make_missing, parser1, self.pos());
let missing2 = S!(make_missing, parser1, self.pos());
let property = parser1.parse_property_declaration(missing1, missing2);
if self.errors.len() == parser1.errors.len() {
self.continue_from(parser1);
property
} else {
// TODO ERROR RECOVERY could be improved here.
let token = self.fetch_token();
self.with_error(Errors::error1033);
S!(make_error, self, token)
// Parser does not detect the error where non-static instance variables
// or methods are within abstract final classes in its first pass, but
// instead detects it in its second pass.
}
}
}
}
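// Uses a lookahead that tolerates attributized type lists: a `<` hands off to
// the type parser for generic type parameters; anything else yields a missing node.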
fn parse_generic_type_parameter_list_opt(&mut self) -> S::R {
match self.peek_token_kind_with_possible_attributized_type_list() {
TokenKind::LessThan => self.with_type_parser(|p: &mut TypeParser<'a, S, T>| {
p.parse_generic_type_parameter_list()
}),
_ => S!(make_missing, self, self.pos()),
}
}
fn parse_type_constraint_opt(&mut self) -> S::R {
self.with_type_parser(|p: &mut TypeParser<'a, S, T>| p.parse_type_constraint_opt())
}
fn parse_alias_declaration(&mut self, attr: S::R) -> S::R {
// SPEC
// alias-declaration:
// attribute-spec-opt type name
// generic-type-parameter-list-opt = type-specifier ;
// attribute-spec-opt newtype name
// generic-type-parameter-list-opt type-constraint-opt
// = type-specifier ;
let token = self.fetch_token();
// Not `require_name` but `require_name_allow_non_reserved`, because the parser
// must allow keywords in the place of identifiers; at least to parse .hhi
// files.
let name = self.require_name_allow_non_reserved();
let generic = self.parse_generic_type_parameter_list_opt();
let constr = self.parse_type_constraint_opt();
let equal = self.require_equal();
let ty = self.parse_type_specifier(false /* allow_var */, true /* allow_attr */);
let semi = self.require_semicolon();
S!(
make_alias_declaration,
self,
attr,
token,
name,
generic,
constr,
equal,
ty,
semi
)
}
fn parse_enumerator(&mut self) -> S::R {
// SPEC
// enumerator:
// enumerator-constant = constant-expression ;
// enumerator-constant:
// name
//
// TODO: Add an error to a later pass that determines the value is
// a constant.
// TODO: We must allow TRUE to be a legal enum member name; here we allow
// any keyword. Consider making this more strict.
let name = self.require_name_allow_all_keywords();
let equal = self.require_equal();
let value = self.parse_expression();
let semicolon = self.require_semicolon();
S!(make_enumerator, self, name, equal, value, semicolon)
}
fn parse_inclusion_directive(&mut self) -> S::R {
// SPEC:
// inclusion-directive:
// require-multiple-directive
// require-once-directive
//
// require-multiple-directive:
// require include-filename ;
//
// include-filename:
// expression
//
// require-once-directive:
// require_once include-filename ;
//
// In non-strict mode we allow an inclusion directive (without semi) to be
// used as an expression. It is therefore easier to actually parse this as:
//
// inclusion-directive:
// inclusion-expression ;
//
// inclusion-expression:
// require include-filename
// require_once include-filename
let expr = self.parse_expression();
let semi = self.require_semicolon();
S!(make_inclusion_directive, self, expr, semi)
}
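// Dispatches on the upcoming token(s) to parse one top-level declaration,
// falling back to the statement parser when no declaration form matches.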
fn parse_declaration(&mut self) -> S::R {
self.expect_in_new_scope(ExpectedTokens::Classish);
let mut parser1 = self.clone();
let token = parser1.next_token();
let result =
match token.kind() {
TokenKind::Include
| TokenKind::Include_once
| TokenKind::Require
| TokenKind::Require_once => self.parse_inclusion_directive(),
TokenKind::Type | TokenKind::Newtype
if {
let kind = parser1.peek_token_kind();
kind == TokenKind::Name || kind == TokenKind::Classname
} =>
{
let missing = S!(make_missing, self, self.pos());
self.parse_alias_declaration(missing)
}
TokenKind::Enum => {
let missing = S!(make_missing, self, self.pos());
self.parse_enum_declaration(missing)
}
TokenKind::RecordDec => {
let missing = S!(make_missing, self, self.pos());
self.parse_record_declaration(missing)
}
// The keyword namespace before a name should be parsed as
// "the current namespace we are in", essentially a no op.
// example:
// namespace\f1(); should be parsed as a call to the function f1 in
// the current namespace.
TokenKind::Namespace if parser1.peek_token_kind() == TokenKind::Backslash => self
.with_statement_parser(|p: &mut StatementParser<'a, S, T>| p.parse_statement()),
TokenKind::Namespace => self.parse_namespace_declaration(),
TokenKind::Use => self.parse_namespace_use_declaration(),
TokenKind::Trait | TokenKind::Interface | TokenKind::Class => {
let missing = S!(make_missing, self, self.pos());
self.parse_classish_declaration(missing)
}
TokenKind::Abstract | TokenKind::Final => {
let missing = S!(make_missing, self, self.pos());
match parser1.peek_token_kind() {
TokenKind::RecordDec => self.parse_record_declaration(missing),
_ => self.parse_classish_declaration(missing),
}
}
TokenKind::Async | TokenKind::Coroutine | TokenKind::Function => self
.with_statement_parser(|p: &mut StatementParser<'a, S, T>| {
p.parse_possible_php_function(true)
}),
TokenKind::At if self.env.allow_new_attribute_syntax => {
self.parse_enum_or_classish_or_function_declaration()
}
TokenKind::LessThanLessThan => match parser1.peek_token_kind() {
TokenKind::File
if parser1.peek_token_kind_with_lookahead(1) == TokenKind::Colon =>
{
self.parse_file_attribute_specification_opt()
}
_ => self.parse_enum_or_classish_or_function_declaration(),
},
// TODO: figure out how a global const differs from a class const
TokenKind::Const => {
let missing1 = S!(make_missing, parser1, self.pos());
self.continue_from(parser1);
let token = S!(make_token, self, token);
self.parse_const_declaration(missing1, token)
}
// TODO: What if it's not a legal statement? Do we still make progress here?
_ => self
.with_statement_parser(|p: &mut StatementParser<'a, S, T>| p.parse_statement()),
};
self.pop_scope(ExpectedTokens::Classish);
result
}
fn parse_pocket_mapping(&mut self) -> S::R {
// SPEC
// pocket-mapping ::=
// | 'type' identifier '=' type-expression
// | identifier '=' expression
match self.peek_token_kind() {
TokenKind::Type => {
let typ = self.require_token(TokenKind::Type, Errors::type_keyword);
let tyname = self.require_name();
let equal = self.require_equal();
let ty = self.parse_type_specifier(false, true);
S!(
make_pocket_mapping_type_declaration,
self,
typ,
tyname,
equal,
ty
)
}
TokenKind::Name => {
let id = self.require_name();
let equal = self.require_equal();
let simple_init = self.parse_expression();
let sc_init = S!(make_simple_initializer, self, equal, simple_init);
S!(make_pocket_mapping_id_declaration, self, id, sc_init)
}
_ => {
self.with_error(Errors::pocket_universe_invalid_field);
S!(make_missing, self, self.pos())
}
}
}
fn parse_pocket_field(&mut self) -> S::R {
// SPEC
// pocket-field ::=
// | enum-member ;
// | enum-member '(' (pocket-mapping ',')* ')' ;
// | 'case' type-expression identifier ;
// | 'case' 'type' identifier ;
//
// enum-member ::= ':@' name
match self.peek_token_kind() {
TokenKind::ColonAt => {
let glyph = self.assert_token(TokenKind::ColonAt);
let enum_name = self.require_name();
match self.peek_token_kind() {
TokenKind::LeftParen => {
let (left_paren, mappings, right_paren) =
self.parse_parenthesized_comma_list(|x| x.parse_pocket_mapping());
let semi = self.require_semicolon();
S!(
make_pocket_atom_mapping_declaration,
self,
glyph,
enum_name,
left_paren,
mappings,
right_paren,
semi,
)
}
_ => {
let missing_left = S!(make_missing, self, self.pos());
let missing_mappings = S!(make_missing, self, self.pos());
let missing_right = S!(make_missing, self, self.pos());
let semi = self.require_semicolon();
S!(
make_pocket_atom_mapping_declaration,
self,
glyph,
enum_name,
missing_left,
missing_mappings,
missing_right,
semi,
)
}
}
}
TokenKind::Case => {
let case_tok = self.assert_token(TokenKind::Case);
match self.peek_token_kind() {
TokenKind::Type => {
let type_tok = self.assert_token(TokenKind::Type);
let name = self.require_name();
let semi = self.require_semicolon();
S!(
make_pocket_field_type_declaration,
self,
case_tok,
type_tok,
name,
semi
)
}
_ => {
let ty = self.parse_type_specifier(false, true);
let name = self.require_name();
let semi = self.require_semicolon();
S!(
make_pocket_field_type_expr_declaration,
self,
case_tok,
ty,
name,
semi
)
}
}
}
_ => {
self.with_error(Errors::pocket_universe_invalid_field);
S!(make_missing, self, self.pos())
}
}
}
fn parse_pocket_fields_opt(&mut self) -> S::R {
// SPEC
// pocket-field-list:
// pocket-field
// pocket-field-list pocket-field
self.parse_terminated_list(|x| x.parse_pocket_field(), TokenKind::RightBrace)
}
fn parse_class_enum(&mut self, final_: bool /* = false */) -> S::R {
// SPEC
// 'final'? 'enum' identifier '{' pocket-field-list '}'
//
// from parse_classish_declaration.. probably could do better
// read Final
let final_tok = if final_ {
self.require_token(TokenKind::Final, Errors::pocket_universe_final_expected)
} else {
S!(make_missing, self, self.pos())
};
// read Enum
let enum_tok = self.require_token(TokenKind::Enum, Errors::pocket_universe_enum_expected);
let name = self.require_name();
let (left_brace, pocket_fields, right_brace) =
self.parse_braced_list(|x| x.parse_pocket_fields_opt());
S!(
make_pocket_enum_declaration,
self,
final_tok,
enum_tok,
name,
left_brace,
pocket_fields,
right_brace,
)
}
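// Parses a whole script: the optional leading markup section followed by
// declarations until the end-of-file token is reached.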
pub fn parse_script(&mut self) -> S::R {
// TODO(kasper): no_markup for ".hack" files
let header = self.parse_leading_markup_section();
let mut declarations = vec![];
if let Some(x) = header {
declarations.push(x)
};
loop {
let token_kind = self.peek_token_kind();
match token_kind {
TokenKind::EndOfFile => {
let token = self.next_token();
let token = S!(make_token, self, token);
let end_of_file = S!(make_end_of_file, self, token);
declarations.push(end_of_file);
break;
}
_ => declarations.push(self.parse_declaration()),
}
}
let declarations = S!(make_list, self, declarations, self.pos());
let result = S!(make_script, self, declarations);
assert_eq!(self.peek_token_kind(), TokenKind::EndOfFile);
result
}
}
| {
// Parse methods, constructors, properties, or type constants.
let attr = self.parse_attribute_specification_opt();
self.parse_methodish_or_property_or_type_constant(attr)
} |
cstorcompletedbackup.go | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// CStorCompletedBackupLister helps list CStorCompletedBackups.
type CStorCompletedBackupLister interface {
// List lists all CStorCompletedBackups in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.CStorCompletedBackup, err error)
// CStorCompletedBackups returns an object that can list and get CStorCompletedBackups.
CStorCompletedBackups(namespace string) CStorCompletedBackupNamespaceLister
CStorCompletedBackupListerExpansion
}
// cStorCompletedBackupLister implements the CStorCompletedBackupLister interface.
type cStorCompletedBackupLister struct {
indexer cache.Indexer
}
// NewCStorCompletedBackupLister returns a new CStorCompletedBackupLister.
func | (indexer cache.Indexer) CStorCompletedBackupLister {
return &cStorCompletedBackupLister{indexer: indexer}
}
// List lists all CStorCompletedBackups in the indexer.
func (s *cStorCompletedBackupLister) List(selector labels.Selector) (ret []*v1alpha1.CStorCompletedBackup, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.CStorCompletedBackup))
})
return ret, err
}
// CStorCompletedBackups returns an object that can list and get CStorCompletedBackups.
func (s *cStorCompletedBackupLister) CStorCompletedBackups(namespace string) CStorCompletedBackupNamespaceLister {
return cStorCompletedBackupNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// CStorCompletedBackupNamespaceLister helps list and get CStorCompletedBackups.
type CStorCompletedBackupNamespaceLister interface {
// List lists all CStorCompletedBackups in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.CStorCompletedBackup, err error)
// Get retrieves the CStorCompletedBackup from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.CStorCompletedBackup, error)
CStorCompletedBackupNamespaceListerExpansion
}
// cStorCompletedBackupNamespaceLister implements the CStorCompletedBackupNamespaceLister
// interface.
type cStorCompletedBackupNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all CStorCompletedBackups in the indexer for a given namespace.
func (s cStorCompletedBackupNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CStorCompletedBackup, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.CStorCompletedBackup))
})
return ret, err
}
// Get retrieves the CStorCompletedBackup from the indexer for a given namespace and name.
func (s cStorCompletedBackupNamespaceLister) Get(name string) (*v1alpha1.CStorCompletedBackup, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("cstorcompletedbackup"), name)
}
return obj.(*v1alpha1.CStorCompletedBackup), nil
}
| NewCStorCompletedBackupLister |
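// Editor's sketch, not part of the generated lister above: a minimal consumer,
// assumed to sit in the same generated listers package so that
// NewCStorCompletedBackupLister is in scope. The cache.Indexer is assumed to be
// kept in sync by a shared informer elsewhere; the namespace is a placeholder.
package v1alpha1

import (
	v1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// listCompletedBackups returns every CStorCompletedBackup cached for namespace.
func listCompletedBackups(indexer cache.Indexer, namespace string) ([]*v1alpha1.CStorCompletedBackup, error) {
	// Wrap the informer-backed indexer in the generated lister and read from the local cache.
	lister := NewCStorCompletedBackupLister(indexer)
	return lister.CStorCompletedBackups(namespace).List(labels.Everything())
}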
tracez.rs | // This file is generated by rust-protobuf 2.25.1. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![cfg_attr(rustfmt, rustfmt::skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `tracez.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_25_1;
#[derive(PartialEq,Clone,Default)]
#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))]
pub struct TracezCounts {
// message fields
pub spanname: ::std::string::String,
pub latency: ::std::vec::Vec<u32>,
pub running: u32,
pub error: u32,
// special fields
#[cfg_attr(feature = "with-serde", serde(skip))]
pub unknown_fields: ::protobuf::UnknownFields,
#[cfg_attr(feature = "with-serde", serde(skip))]
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TracezCounts {
fn default() -> &'a TracezCounts {
<TracezCounts as ::protobuf::Message>::default_instance()
}
}
impl TracezCounts {
pub fn new() -> TracezCounts {
::std::default::Default::default()
}
// string spanname = 1;
pub fn get_spanname(&self) -> &str {
&self.spanname
}
pub fn clear_spanname(&mut self) {
self.spanname.clear();
}
// Param is passed by value, moved
pub fn set_spanname(&mut self, v: ::std::string::String) {
self.spanname = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_spanname(&mut self) -> &mut ::std::string::String {
&mut self.spanname
}
// Take field
pub fn take_spanname(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.spanname, ::std::string::String::new())
}
// repeated uint32 latency = 2;
pub fn get_latency(&self) -> &[u32] {
&self.latency
}
pub fn clear_latency(&mut self) {
self.latency.clear();
}
// Param is passed by value, moved
pub fn set_latency(&mut self, v: ::std::vec::Vec<u32>) {
self.latency = v;
}
// Mutable pointer to the field.
pub fn mut_latency(&mut self) -> &mut ::std::vec::Vec<u32> {
&mut self.latency
}
// Take field
pub fn take_latency(&mut self) -> ::std::vec::Vec<u32> {
::std::mem::replace(&mut self.latency, ::std::vec::Vec::new())
}
// uint32 running = 3;
pub fn get_running(&self) -> u32 {
self.running
}
pub fn clear_running(&mut self) {
self.running = 0;
}
// Param is passed by value, moved
pub fn set_running(&mut self, v: u32) {
self.running = v;
}
// uint32 error = 4;
pub fn get_error(&self) -> u32 {
self.error
}
pub fn clear_error(&mut self) {
self.error = 0;
}
// Param is passed by value, moved
pub fn set_error(&mut self, v: u32) {
self.error = v;
}
}
impl ::protobuf::Message for TracezCounts {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.spanname)?;
},
2 => {
::protobuf::rt::read_repeated_uint32_into(wire_type, is, &mut self.latency)?;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.running = tmp;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_uint32()?;
self.error = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.spanname.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.spanname);
}
for value in &self.latency {
my_size += ::protobuf::rt::value_size(2, *value, ::protobuf::wire_format::WireTypeVarint);
};
if self.running != 0 {
my_size += ::protobuf::rt::value_size(3, self.running, ::protobuf::wire_format::WireTypeVarint);
}
if self.error != 0 {
my_size += ::protobuf::rt::value_size(4, self.error, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.spanname.is_empty() {
os.write_string(1, &self.spanname)?;
}
for v in &self.latency {
os.write_uint32(2, *v)?;
};
if self.running != 0 {
os.write_uint32(3, self.running)?;
}
if self.error != 0 {
os.write_uint32(4, self.error)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TracezCounts {
TracezCounts::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"spanname",
|m: &TracezCounts| { &m.spanname },
|m: &mut TracezCounts| { &mut m.spanname },
));
fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeUint32>(
"latency",
|m: &TracezCounts| { &m.latency },
|m: &mut TracezCounts| { &mut m.latency },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint32>(
"running",
|m: &TracezCounts| { &m.running },
|m: &mut TracezCounts| { &mut m.running },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint32>(
"error",
|m: &TracezCounts| { &m.error },
|m: &mut TracezCounts| { &mut m.error },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TracezCounts>(
"TracezCounts",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TracezCounts {
static instance: ::protobuf::rt::LazyV2<TracezCounts> = ::protobuf::rt::LazyV2::INIT;
instance.get(TracezCounts::new)
}
}
impl ::protobuf::Clear for TracezCounts {
fn clear(&mut self) {
self.spanname.clear();
self.latency.clear();
self.running = 0;
self.error = 0;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TracezCounts {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TracezCounts {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
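// Editor's sketch, not part of the generated code: a round trip through the
// generated `TracezCounts` message using the rust-protobuf 2.x `Message` API
// provided by the impls above. Field values are placeholders.
#[cfg(test)]
mod tracez_counts_roundtrip_sketch {
    use super::TracezCounts;
    use protobuf::Message;

    #[test]
    fn encode_then_decode() {
        let mut counts = TracezCounts::new();
        counts.set_spanname("checkout".to_string());
        counts.set_latency(vec![1, 0, 2]);
        counts.set_running(3);
        counts.set_error(1);

        // Serialize with the generated `write_to_bytes`, then parse it back.
        let bytes = counts.write_to_bytes().expect("serialization should succeed");
        let decoded: TracezCounts =
            Message::parse_from_bytes(&bytes).expect("parsing should succeed");
        assert_eq!(decoded, counts);
    }
}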
#[derive(PartialEq,Clone,Default)]
#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))]
pub struct LatencyData {
// message fields
pub traceid: ::std::vec::Vec<u8>,
pub spanid: ::std::vec::Vec<u8>,
pub parentid: ::std::vec::Vec<u8>,
pub starttime: u64,
pub endtime: u64,
pub attributes: ::protobuf::RepeatedField<super::common::KeyValue>,
pub events: ::protobuf::RepeatedField<super::trace::Span_Event>,
pub links: ::protobuf::RepeatedField<super::trace::Span_Link>,
// special fields
#[cfg_attr(feature = "with-serde", serde(skip))]
pub unknown_fields: ::protobuf::UnknownFields,
#[cfg_attr(feature = "with-serde", serde(skip))]
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a LatencyData {
fn default() -> &'a LatencyData {
<LatencyData as ::protobuf::Message>::default_instance()
}
}
impl LatencyData {
pub fn new() -> LatencyData {
::std::default::Default::default()
}
// bytes traceid = 1;
pub fn get_traceid(&self) -> &[u8] {
&self.traceid
}
pub fn clear_traceid(&mut self) {
self.traceid.clear();
}
// Param is passed by value, moved
pub fn set_traceid(&mut self, v: ::std::vec::Vec<u8>) {
self.traceid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_traceid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.traceid
}
// Take field
pub fn take_traceid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.traceid, ::std::vec::Vec::new())
}
// bytes spanid = 2;
pub fn get_spanid(&self) -> &[u8] {
&self.spanid
}
pub fn clear_spanid(&mut self) {
self.spanid.clear();
}
// Param is passed by value, moved
pub fn set_spanid(&mut self, v: ::std::vec::Vec<u8>) {
self.spanid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_spanid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.spanid
}
// Take field
pub fn take_spanid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.spanid, ::std::vec::Vec::new())
}
// bytes parentid = 3;
pub fn get_parentid(&self) -> &[u8] {
&self.parentid
}
pub fn clear_parentid(&mut self) {
self.parentid.clear();
}
// Param is passed by value, moved
pub fn set_parentid(&mut self, v: ::std::vec::Vec<u8>) {
self.parentid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_parentid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.parentid
}
// Take field
pub fn take_parentid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.parentid, ::std::vec::Vec::new())
}
// fixed64 starttime = 4;
pub fn get_starttime(&self) -> u64 {
self.starttime
}
pub fn clear_starttime(&mut self) {
self.starttime = 0;
}
// Param is passed by value, moved
pub fn set_starttime(&mut self, v: u64) {
self.starttime = v;
}
// fixed64 endtime = 5;
pub fn get_endtime(&self) -> u64 {
self.endtime
}
pub fn clear_endtime(&mut self) {
self.endtime = 0;
}
// Param is passed by value, moved
pub fn set_endtime(&mut self, v: u64) {
self.endtime = v;
}
// repeated .opentelemetry.proto.common.v1.KeyValue attributes = 6;
pub fn get_attributes(&self) -> &[super::common::KeyValue] {
&self.attributes
}
pub fn clear_attributes(&mut self) {
self.attributes.clear();
}
// Param is passed by value, moved
pub fn set_attributes(&mut self, v: ::protobuf::RepeatedField<super::common::KeyValue>) {
self.attributes = v;
}
// Mutable pointer to the field.
pub fn mut_attributes(&mut self) -> &mut ::protobuf::RepeatedField<super::common::KeyValue> {
&mut self.attributes
}
// Take field
pub fn take_attributes(&mut self) -> ::protobuf::RepeatedField<super::common::KeyValue> {
::std::mem::replace(&mut self.attributes, ::protobuf::RepeatedField::new())
}
// repeated .opentelemetry.proto.trace.v1.Span.Event events = 7;
pub fn get_events(&self) -> &[super::trace::Span_Event] {
&self.events
}
pub fn clear_events(&mut self) {
self.events.clear();
}
// Param is passed by value, moved
pub fn set_events(&mut self, v: ::protobuf::RepeatedField<super::trace::Span_Event>) {
self.events = v;
}
// Mutable pointer to the field.
pub fn mut_events(&mut self) -> &mut ::protobuf::RepeatedField<super::trace::Span_Event> {
&mut self.events
}
// Take field
pub fn take_events(&mut self) -> ::protobuf::RepeatedField<super::trace::Span_Event> {
::std::mem::replace(&mut self.events, ::protobuf::RepeatedField::new())
}
// repeated .opentelemetry.proto.trace.v1.Span.Link links = 8;
pub fn get_links(&self) -> &[super::trace::Span_Link] {
&self.links
}
pub fn clear_links(&mut self) {
self.links.clear();
}
// Param is passed by value, moved
pub fn set_links(&mut self, v: ::protobuf::RepeatedField<super::trace::Span_Link>) {
self.links = v;
}
// Mutable pointer to the field.
pub fn mut_links(&mut self) -> &mut ::protobuf::RepeatedField<super::trace::Span_Link> {
&mut self.links
}
// Take field
pub fn take_links(&mut self) -> ::protobuf::RepeatedField<super::trace::Span_Link> {
::std::mem::replace(&mut self.links, ::protobuf::RepeatedField::new())
}
}
impl ::protobuf::Message for LatencyData {
fn is_initialized(&self) -> bool {
for v in &self.attributes {
if !v.is_initialized() {
return false;
}
};
for v in &self.events {
if !v.is_initialized() {
return false;
}
};
for v in &self.links {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.traceid)?;
},
2 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.spanid)?;
},
3 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.parentid)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_fixed64()?;
self.starttime = tmp;
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_fixed64()?;
self.endtime = tmp;
},
6 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.attributes)?;
},
7 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.events)?;
},
8 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.links)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.traceid.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.traceid);
}
if !self.spanid.is_empty() {
my_size += ::protobuf::rt::bytes_size(2, &self.spanid);
}
if !self.parentid.is_empty() {
my_size += ::protobuf::rt::bytes_size(3, &self.parentid);
}
if self.starttime != 0 {
my_size += 9;
}
if self.endtime != 0 {
my_size += 9;
}
for value in &self.attributes {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.events {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.links {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.traceid.is_empty() {
os.write_bytes(1, &self.traceid)?;
}
if !self.spanid.is_empty() {
os.write_bytes(2, &self.spanid)?;
}
if !self.parentid.is_empty() {
os.write_bytes(3, &self.parentid)?;
}
if self.starttime != 0 {
os.write_fixed64(4, self.starttime)?;
}
if self.endtime != 0 {
os.write_fixed64(5, self.endtime)?;
}
for v in &self.attributes {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.events {
os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.links {
os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> LatencyData {
LatencyData::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"traceid",
|m: &LatencyData| { &m.traceid },
|m: &mut LatencyData| { &mut m.traceid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"spanid",
|m: &LatencyData| { &m.spanid },
|m: &mut LatencyData| { &mut m.spanid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"parentid",
|m: &LatencyData| { &m.parentid },
|m: &mut LatencyData| { &mut m.parentid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>(
"starttime",
|m: &LatencyData| { &m.starttime },
|m: &mut LatencyData| { &mut m.starttime },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>(
"endtime",
|m: &LatencyData| { &m.endtime },
|m: &mut LatencyData| { &mut m.endtime },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::common::KeyValue>>(
"attributes",
|m: &LatencyData| { &m.attributes },
|m: &mut LatencyData| { &mut m.attributes },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Span_Event>>(
"events",
|m: &LatencyData| { &m.events },
|m: &mut LatencyData| { &mut m.events },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Span_Link>>(
"links",
|m: &LatencyData| { &m.links },
|m: &mut LatencyData| { &mut m.links },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<LatencyData>(
"LatencyData",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static LatencyData {
static instance: ::protobuf::rt::LazyV2<LatencyData> = ::protobuf::rt::LazyV2::INIT;
instance.get(LatencyData::new)
}
}
impl ::protobuf::Clear for LatencyData {
fn clear(&mut self) {
self.traceid.clear();
self.spanid.clear();
self.parentid.clear();
self.starttime = 0;
self.endtime = 0;
self.attributes.clear();
self.events.clear();
self.links.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for LatencyData {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for LatencyData {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))]
pub struct RunningData {
// message fields
pub traceid: ::std::vec::Vec<u8>,
pub spanid: ::std::vec::Vec<u8>,
pub parentid: ::std::vec::Vec<u8>,
pub starttime: u64,
pub attributes: ::protobuf::RepeatedField<super::common::KeyValue>,
pub events: ::protobuf::RepeatedField<super::trace::Span_Event>,
pub links: ::protobuf::RepeatedField<super::trace::Span_Link>,
// special fields
#[cfg_attr(feature = "with-serde", serde(skip))]
pub unknown_fields: ::protobuf::UnknownFields,
#[cfg_attr(feature = "with-serde", serde(skip))]
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a RunningData {
fn default() -> &'a RunningData {
<RunningData as ::protobuf::Message>::default_instance()
}
}
impl RunningData {
pub fn new() -> RunningData {
::std::default::Default::default()
}
// bytes traceid = 1;
pub fn get_traceid(&self) -> &[u8] {
&self.traceid
}
pub fn clear_traceid(&mut self) {
self.traceid.clear();
}
// Param is passed by value, moved
pub fn set_traceid(&mut self, v: ::std::vec::Vec<u8>) {
self.traceid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_traceid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.traceid
}
// Take field
pub fn take_traceid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.traceid, ::std::vec::Vec::new())
}
// bytes spanid = 2;
pub fn get_spanid(&self) -> &[u8] {
&self.spanid
}
pub fn clear_spanid(&mut self) {
self.spanid.clear();
}
// Param is passed by value, moved
pub fn set_spanid(&mut self, v: ::std::vec::Vec<u8>) {
self.spanid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_spanid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.spanid
}
// Take field
pub fn take_spanid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.spanid, ::std::vec::Vec::new())
}
// bytes parentid = 3;
pub fn get_parentid(&self) -> &[u8] {
&self.parentid
}
pub fn clear_parentid(&mut self) {
self.parentid.clear();
}
// Param is passed by value, moved
pub fn set_parentid(&mut self, v: ::std::vec::Vec<u8>) {
self.parentid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_parentid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.parentid
}
// Take field
pub fn take_parentid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.parentid, ::std::vec::Vec::new())
}
// fixed64 starttime = 4;
pub fn get_starttime(&self) -> u64 {
self.starttime
}
pub fn clear_starttime(&mut self) {
self.starttime = 0;
}
// Param is passed by value, moved
pub fn set_starttime(&mut self, v: u64) {
self.starttime = v;
}
// repeated .opentelemetry.proto.common.v1.KeyValue attributes = 5;
pub fn get_attributes(&self) -> &[super::common::KeyValue] {
&self.attributes
}
pub fn clear_attributes(&mut self) {
self.attributes.clear();
}
// Param is passed by value, moved
pub fn set_attributes(&mut self, v: ::protobuf::RepeatedField<super::common::KeyValue>) {
self.attributes = v;
}
// Mutable pointer to the field.
pub fn mut_attributes(&mut self) -> &mut ::protobuf::RepeatedField<super::common::KeyValue> {
&mut self.attributes
}
// Take field
pub fn take_attributes(&mut self) -> ::protobuf::RepeatedField<super::common::KeyValue> {
::std::mem::replace(&mut self.attributes, ::protobuf::RepeatedField::new())
}
// repeated .opentelemetry.proto.trace.v1.Span.Event events = 6;
pub fn get_events(&self) -> &[super::trace::Span_Event] {
&self.events
}
pub fn clear_events(&mut self) {
self.events.clear();
}
// Param is passed by value, moved
pub fn set_events(&mut self, v: ::protobuf::RepeatedField<super::trace::Span_Event>) {
self.events = v;
}
// Mutable pointer to the field.
pub fn mut_events(&mut self) -> &mut ::protobuf::RepeatedField<super::trace::Span_Event> {
&mut self.events
}
// Take field
pub fn take_events(&mut self) -> ::protobuf::RepeatedField<super::trace::Span_Event> {
::std::mem::replace(&mut self.events, ::protobuf::RepeatedField::new())
}
// repeated .opentelemetry.proto.trace.v1.Span.Link links = 7;
pub fn get_links(&self) -> &[super::trace::Span_Link] {
&self.links
}
pub fn clear_links(&mut self) {
self.links.clear();
}
// Param is passed by value, moved
pub fn set_links(&mut self, v: ::protobuf::RepeatedField<super::trace::Span_Link>) {
self.links = v;
}
// Mutable pointer to the field.
pub fn mut_links(&mut self) -> &mut ::protobuf::RepeatedField<super::trace::Span_Link> {
&mut self.links
}
// Take field
pub fn take_links(&mut self) -> ::protobuf::RepeatedField<super::trace::Span_Link> {
::std::mem::replace(&mut self.links, ::protobuf::RepeatedField::new())
}
}
impl ::protobuf::Message for RunningData {
fn is_initialized(&self) -> bool {
for v in &self.attributes {
if !v.is_initialized() {
return false;
}
};
for v in &self.events {
if !v.is_initialized() {
return false;
}
};
for v in &self.links {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.traceid)?;
},
2 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.spanid)?;
},
3 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.parentid)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_fixed64()?;
self.starttime = tmp;
},
5 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.attributes)?;
},
6 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.events)?;
},
7 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.links)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.traceid.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.traceid);
}
if !self.spanid.is_empty() {
my_size += ::protobuf::rt::bytes_size(2, &self.spanid);
}
if !self.parentid.is_empty() {
my_size += ::protobuf::rt::bytes_size(3, &self.parentid);
}
if self.starttime != 0 {
my_size += 9;
}
for value in &self.attributes {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.events {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.links {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.traceid.is_empty() {
os.write_bytes(1, &self.traceid)?;
}
if !self.spanid.is_empty() {
os.write_bytes(2, &self.spanid)?;
}
if !self.parentid.is_empty() {
os.write_bytes(3, &self.parentid)?;
}
if self.starttime != 0 {
os.write_fixed64(4, self.starttime)?;
}
for v in &self.attributes {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.events {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.links {
os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> RunningData {
RunningData::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"traceid",
|m: &RunningData| { &m.traceid },
|m: &mut RunningData| { &mut m.traceid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"spanid",
|m: &RunningData| { &m.spanid },
|m: &mut RunningData| { &mut m.spanid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"parentid",
|m: &RunningData| { &m.parentid },
|m: &mut RunningData| { &mut m.parentid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>(
"starttime",
|m: &RunningData| { &m.starttime },
|m: &mut RunningData| { &mut m.starttime },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::common::KeyValue>>(
"attributes",
|m: &RunningData| { &m.attributes },
|m: &mut RunningData| { &mut m.attributes },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Span_Event>>(
"events",
|m: &RunningData| { &m.events },
|m: &mut RunningData| { &mut m.events },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Span_Link>>(
"links",
|m: &RunningData| { &m.links },
|m: &mut RunningData| { &mut m.links },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<RunningData>(
"RunningData",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static RunningData {
static instance: ::protobuf::rt::LazyV2<RunningData> = ::protobuf::rt::LazyV2::INIT;
instance.get(RunningData::new)
}
}
impl ::protobuf::Clear for RunningData {
fn clear(&mut self) {
self.traceid.clear();
self.spanid.clear();
self.parentid.clear();
self.starttime = 0;
self.attributes.clear();
self.events.clear();
self.links.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for RunningData {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for RunningData {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))]
pub struct ErrorData {
// message fields
pub traceid: ::std::vec::Vec<u8>,
pub spanid: ::std::vec::Vec<u8>,
pub parentid: ::std::vec::Vec<u8>,
pub starttime: u64,
pub attributes: ::protobuf::RepeatedField<super::common::KeyValue>,
pub events: ::protobuf::RepeatedField<super::trace::Span_Event>,
pub links: ::protobuf::RepeatedField<super::trace::Span_Link>,
pub status: ::protobuf::SingularPtrField<super::trace::Status>,
// special fields
#[cfg_attr(feature = "with-serde", serde(skip))]
pub unknown_fields: ::protobuf::UnknownFields,
#[cfg_attr(feature = "with-serde", serde(skip))]
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ErrorData {
fn default() -> &'a ErrorData {
<ErrorData as ::protobuf::Message>::default_instance()
}
}
impl ErrorData {
pub fn new() -> ErrorData {
::std::default::Default::default()
}
// bytes traceid = 1;
pub fn get_traceid(&self) -> &[u8] {
&self.traceid
}
pub fn clear_traceid(&mut self) {
self.traceid.clear();
}
// Param is passed by value, moved
pub fn set_traceid(&mut self, v: ::std::vec::Vec<u8>) {
self.traceid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_traceid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.traceid
}
// Take field
pub fn take_traceid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.traceid, ::std::vec::Vec::new())
}
// bytes spanid = 2;
pub fn get_spanid(&self) -> &[u8] {
&self.spanid
}
pub fn clear_spanid(&mut self) {
self.spanid.clear();
}
// Param is passed by value, moved
pub fn set_spanid(&mut self, v: ::std::vec::Vec<u8>) {
self.spanid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_spanid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.spanid
}
// Take field
pub fn take_spanid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.spanid, ::std::vec::Vec::new())
}
// bytes parentid = 3;
pub fn get_parentid(&self) -> &[u8] {
&self.parentid
}
pub fn clear_parentid(&mut self) {
self.parentid.clear();
}
// Param is passed by value, moved
pub fn set_parentid(&mut self, v: ::std::vec::Vec<u8>) {
self.parentid = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_parentid(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.parentid
}
// Take field
pub fn take_parentid(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.parentid, ::std::vec::Vec::new())
}
// fixed64 starttime = 4;
pub fn get_starttime(&self) -> u64 {
self.starttime
}
pub fn clear_starttime(&mut self) {
self.starttime = 0;
}
// Param is passed by value, moved
pub fn set_starttime(&mut self, v: u64) {
self.starttime = v;
}
// repeated .opentelemetry.proto.common.v1.KeyValue attributes = 5;
pub fn get_attributes(&self) -> &[super::common::KeyValue] {
&self.attributes
}
pub fn clear_attributes(&mut self) {
self.attributes.clear();
}
// Param is passed by value, moved
pub fn set_attributes(&mut self, v: ::protobuf::RepeatedField<super::common::KeyValue>) {
self.attributes = v;
}
// Mutable pointer to the field.
pub fn mut_attributes(&mut self) -> &mut ::protobuf::RepeatedField<super::common::KeyValue> {
&mut self.attributes
}
// Take field
pub fn take_attributes(&mut self) -> ::protobuf::RepeatedField<super::common::KeyValue> {
::std::mem::replace(&mut self.attributes, ::protobuf::RepeatedField::new())
}
// repeated .opentelemetry.proto.trace.v1.Span.Event events = 6;
pub fn get_events(&self) -> &[super::trace::Span_Event] {
&self.events
}
pub fn clear_events(&mut self) {
self.events.clear();
}
// Param is passed by value, moved
pub fn set_events(&mut self, v: ::protobuf::RepeatedField<super::trace::Span_Event>) {
self.events = v;
}
// Mutable pointer to the field.
pub fn mut_events(&mut self) -> &mut ::protobuf::RepeatedField<super::trace::Span_Event> {
&mut self.events
}
// Take field
pub fn take_events(&mut self) -> ::protobuf::RepeatedField<super::trace::Span_Event> {
::std::mem::replace(&mut self.events, ::protobuf::RepeatedField::new())
}
// repeated .opentelemetry.proto.trace.v1.Span.Link links = 7;
pub fn get_links(&self) -> &[super::trace::Span_Link] {
&self.links
}
pub fn clear_links(&mut self) {
self.links.clear();
}
// Param is passed by value, moved
pub fn set_links(&mut self, v: ::protobuf::RepeatedField<super::trace::Span_Link>) {
self.links = v;
}
// Mutable pointer to the field.
pub fn mut_links(&mut self) -> &mut ::protobuf::RepeatedField<super::trace::Span_Link> {
&mut self.links
}
// Take field
pub fn take_links(&mut self) -> ::protobuf::RepeatedField<super::trace::Span_Link> {
::std::mem::replace(&mut self.links, ::protobuf::RepeatedField::new())
}
// .opentelemetry.proto.trace.v1.Status status = 8;
pub fn get_status(&self) -> &super::trace::Status {
self.status.as_ref().unwrap_or_else(|| <super::trace::Status as ::protobuf::Message>::default_instance())
}
pub fn clear_status(&mut self) {
self.status.clear();
}
pub fn has_status(&self) -> bool {
self.status.is_some()
}
// Param is passed by value, moved
pub fn set_status(&mut self, v: super::trace::Status) {
self.status = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_status(&mut self) -> &mut super::trace::Status {
if self.status.is_none() {
self.status.set_default();
}
self.status.as_mut().unwrap()
}
// Take field
pub fn take_status(&mut self) -> super::trace::Status {
self.status.take().unwrap_or_else(|| super::trace::Status::new())
}
}
impl ::protobuf::Message for ErrorData {
fn is_initialized(&self) -> bool {
for v in &self.attributes {
if !v.is_initialized() {
return false;
}
};
for v in &self.events {
if !v.is_initialized() {
return false;
}
};
for v in &self.links {
if !v.is_initialized() {
return false;
}
};
for v in &self.status {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.traceid)?;
},
2 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.spanid)?;
},
3 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.parentid)?;
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_fixed64()?;
self.starttime = tmp;
},
5 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.attributes)?;
},
6 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.events)?;
},
7 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.links)?;
},
8 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.status)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.traceid.is_empty() {
my_size += ::protobuf::rt::bytes_size(1, &self.traceid);
}
if !self.spanid.is_empty() {
my_size += ::protobuf::rt::bytes_size(2, &self.spanid);
}
if !self.parentid.is_empty() {
my_size += ::protobuf::rt::bytes_size(3, &self.parentid);
}
if self.starttime != 0 {
my_size += 9;
}
for value in &self.attributes {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.events {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
for value in &self.links {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if let Some(ref v) = self.status.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.traceid.is_empty() {
os.write_bytes(1, &self.traceid)?;
}
if !self.spanid.is_empty() {
os.write_bytes(2, &self.spanid)?;
}
if !self.parentid.is_empty() {
os.write_bytes(3, &self.parentid)?;
}
if self.starttime != 0 {
os.write_fixed64(4, self.starttime)?;
}
for v in &self.attributes {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.events {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
for v in &self.links {
os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if let Some(ref v) = self.status.as_ref() {
os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ErrorData {
ErrorData::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"traceid",
|m: &ErrorData| { &m.traceid },
|m: &mut ErrorData| { &mut m.traceid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"spanid",
|m: &ErrorData| { &m.spanid },
|m: &mut ErrorData| { &mut m.spanid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"parentid",
|m: &ErrorData| { &m.parentid },
|m: &mut ErrorData| { &mut m.parentid },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>(
"starttime",
|m: &ErrorData| { &m.starttime },
|m: &mut ErrorData| { &mut m.starttime },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::common::KeyValue>>(
"attributes",
|m: &ErrorData| { &m.attributes },
|m: &mut ErrorData| { &mut m.attributes },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Span_Event>>(
"events",
|m: &ErrorData| { &m.events },
|m: &mut ErrorData| { &mut m.events },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Span_Link>>(
"links",
|m: &ErrorData| { &m.links },
|m: &mut ErrorData| { &mut m.links },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::trace::Status>>(
"status",
|m: &ErrorData| { &m.status },
|m: &mut ErrorData| { &mut m.status },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<ErrorData>(
"ErrorData",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static ErrorData {
static instance: ::protobuf::rt::LazyV2<ErrorData> = ::protobuf::rt::LazyV2::INIT;
instance.get(ErrorData::new)
}
}
impl ::protobuf::Clear for ErrorData {
fn clear(&mut self) {
self.traceid.clear();
self.spanid.clear();
self.parentid.clear();
self.starttime = 0;
self.attributes.clear();
self.events.clear();
self.links.clear();
self.status.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for ErrorData {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for ErrorData {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x0ctracez.proto\x1a*opentelemetry/proto/common/v1/common.proto\x1a.op\
entelemetry/proto/resource/v1/resource.proto\x1a(opentelemetry/proto/tra\
ce/v1/trace.proto\"t\n\x0cTracezCounts\x12\x1a\n\x08spanname\x18\x01\x20\
\x01(\tR\x08spanname\x12\x18\n\x07latency\x18\x02\x20\x03(\rR\x07latency\
\x12\x18\n\x07running\x18\x03\x20\x01(\rR\x07running\x12\x14\n\x05error\
\x18\x04\x20\x01(\rR\x05error\"\xdd\x02\n\x0bLatencyData\x12\x18\n\x07tr\
aceid\x18\x01\x20\x01(\x0cR\x07traceid\x12\x16\n\x06spanid\x18\x02\x20\
\x01(\x0cR\x06spanid\x12\x1a\n\x08parentid\x18\x03\x20\x01(\x0cR\x08pare\
ntid\x12\x1c\n\tstarttime\x18\x04\x20\x01(\x06R\tstarttime\x12\x18\n\x07\
endtime\x18\x05\x20\x01(\x06R\x07endtime\x12G\n\nattributes\x18\x06\x20\
\x03(\x0b2'.opentelemetry.proto.common.v1.KeyValueR\nattributes\x12@\n\
\x06events\x18\x07\x20\x03(\x0b2(.opentelemetry.proto.trace.v1.Span.Even\
tR\x06events\x12=\n\x05links\x18\x08\x20\x03(\x0b2'.opentelemetry.proto.\
trace.v1.Span.LinkR\x05links\"\xc3\x02\n\x0bRunningData\x12\x18\n\x07tra\
ceid\x18\x01\x20\x01(\x0cR\x07traceid\x12\x16\n\x06spanid\x18\x02\x20\ | butes\x18\x05\x20\x03(\x0b2'.opentelemetry.proto.common.v1.KeyValueR\nat\
tributes\x12@\n\x06events\x18\x06\x20\x03(\x0b2(.opentelemetry.proto.tra\
ce.v1.Span.EventR\x06events\x12=\n\x05links\x18\x07\x20\x03(\x0b2'.opent\
elemetry.proto.trace.v1.Span.LinkR\x05links\"\xff\x02\n\tErrorData\x12\
\x18\n\x07traceid\x18\x01\x20\x01(\x0cR\x07traceid\x12\x16\n\x06spanid\
\x18\x02\x20\x01(\x0cR\x06spanid\x12\x1a\n\x08parentid\x18\x03\x20\x01(\
\x0cR\x08parentid\x12\x1c\n\tstarttime\x18\x04\x20\x01(\x06R\tstarttime\
\x12G\n\nattributes\x18\x05\x20\x03(\x0b2'.opentelemetry.proto.common.v1\
.KeyValueR\nattributes\x12@\n\x06events\x18\x06\x20\x03(\x0b2(.opentelem\
etry.proto.trace.v1.Span.EventR\x06events\x12=\n\x05links\x18\x07\x20\
\x03(\x0b2'.opentelemetry.proto.trace.v1.Span.LinkR\x05links\x12<\n\x06s\
tatus\x18\x08\x20\x01(\x0b2$.opentelemetry.proto.trace.v1.StatusR\x06sta\
tusb\x06proto3\
";
static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
} | \x01(\x0cR\x06spanid\x12\x1a\n\x08parentid\x18\x03\x20\x01(\x0cR\x08pare\
ntid\x12\x1c\n\tstarttime\x18\x04\x20\x01(\x06R\tstarttime\x12G\n\nattri\ |
realms_client.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
package gaming
import (
"context"
"fmt"
"math"
"net/url"
"time"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
gtransport "google.golang.org/api/transport/grpc"
gamingpb "google.golang.org/genproto/googleapis/cloud/gaming/v1beta"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/proto"
)
var newRealmsClientHook clientHook
// RealmsCallOptions contains the retry settings for each method of RealmsClient.
type RealmsCallOptions struct {
ListRealms []gax.CallOption
GetRealm []gax.CallOption
CreateRealm []gax.CallOption
DeleteRealm []gax.CallOption
UpdateRealm []gax.CallOption
PreviewRealmUpdate []gax.CallOption
}
func defaultRealmsGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("gameservices.googleapis.com:443"),
internaloption.WithDefaultMTLSEndpoint("gameservices.mtls.googleapis.com:443"),
internaloption.WithDefaultAudience("https://gameservices.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
}
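// Editor's sketch, not part of the generated client: extra options can be
// layered on top of the defaults above, since NewRealmsClient appends caller
// options after defaultRealmsGRPCClientOptions. The regional endpoint below is
// an illustrative assumption, not a documented value.
func newRegionalRealmsClient(ctx context.Context) (*RealmsClient, error) {
	return NewRealmsClient(ctx,
		option.WithEndpoint("us-central1-gameservices.googleapis.com:443"),
	)
}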
func defaultRealmsCallOptions() *RealmsCallOptions {
return &RealmsCallOptions{
ListRealms: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 10000 * time.Millisecond,
Multiplier: 1.30,
})
}),
},
GetRealm: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 10000 * time.Millisecond,
Multiplier: 1.30,
})
}),
},
CreateRealm: []gax.CallOption{},
DeleteRealm: []gax.CallOption{},
UpdateRealm: []gax.CallOption{},
PreviewRealmUpdate: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 10000 * time.Millisecond,
Multiplier: 1.30,
})
}),
},
}
}
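// Editor's sketch, not part of the generated client: per-call retry settings
// can be tightened after construction by replacing entries in CallOptions,
// mirroring the defaults built above. The added DeadlineExceeded code and the
// backoff values are illustrative assumptions, not library defaults.
func tightenGetRealmRetry(c *RealmsClient) {
	c.CallOptions.GetRealm = []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{
				codes.Unavailable,
				codes.DeadlineExceeded,
			}, gax.Backoff{
				Initial:    250 * time.Millisecond,
				Max:        5000 * time.Millisecond,
				Multiplier: 2.0,
			})
		}),
	}
}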
// internalRealmsClient is an interface that defines the methods available from Game Services API.
type internalRealmsClient interface {
Close() error
setGoogleClientInfo(...string)
Connection() *grpc.ClientConn
ListRealms(context.Context, *gamingpb.ListRealmsRequest, ...gax.CallOption) *RealmIterator
GetRealm(context.Context, *gamingpb.GetRealmRequest, ...gax.CallOption) (*gamingpb.Realm, error)
CreateRealm(context.Context, *gamingpb.CreateRealmRequest, ...gax.CallOption) (*CreateRealmOperation, error)
CreateRealmOperation(name string) *CreateRealmOperation
DeleteRealm(context.Context, *gamingpb.DeleteRealmRequest, ...gax.CallOption) (*DeleteRealmOperation, error)
DeleteRealmOperation(name string) *DeleteRealmOperation
UpdateRealm(context.Context, *gamingpb.UpdateRealmRequest, ...gax.CallOption) (*UpdateRealmOperation, error)
UpdateRealmOperation(name string) *UpdateRealmOperation
PreviewRealmUpdate(context.Context, *gamingpb.PreviewRealmUpdateRequest, ...gax.CallOption) (*gamingpb.PreviewRealmUpdateResponse, error)
}
// RealmsClient is a client for interacting with Game Services API.
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
//
// A realm is a grouping of game server clusters that are considered
// interchangeable.
type RealmsClient struct {
// The internal transport-dependent client.
internalClient internalRealmsClient
// The call options for this service.
CallOptions *RealmsCallOptions
// LROClient is used internally to handle long-running operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient
}
// Wrapper methods routed to the internal client.
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *RealmsClient) Close() error {
return c.internalClient.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *RealmsClient) setGoogleClientInfo(keyval ...string) {
c.internalClient.setGoogleClientInfo(keyval...)
}
// Connection returns a connection to the API service.
//
// Deprecated.
func (c *RealmsClient) Connection() *grpc.ClientConn {
return c.internalClient.Connection()
}
// ListRealms lists realms in a given project and location.
func (c *RealmsClient) ListRealms(ctx context.Context, req *gamingpb.ListRealmsRequest, opts ...gax.CallOption) *RealmIterator {
return c.internalClient.ListRealms(ctx, req, opts...)
}
// GetRealm gets details of a single realm.
func (c *RealmsClient) GetRealm(ctx context.Context, req *gamingpb.GetRealmRequest, opts ...gax.CallOption) (*gamingpb.Realm, error) {
return c.internalClient.GetRealm(ctx, req, opts...)
}
// CreateRealm creates a new realm in a given project and location.
func (c *RealmsClient) CreateRealm(ctx context.Context, req *gamingpb.CreateRealmRequest, opts ...gax.CallOption) (*CreateRealmOperation, error) {
return c.internalClient.CreateRealm(ctx, req, opts...)
}
// CreateRealmOperation returns a new CreateRealmOperation from a given name.
// The name must be that of a previously created CreateRealmOperation, possibly from a different process.
func (c *RealmsClient) CreateRealmOperation(name string) *CreateRealmOperation {
return c.internalClient.CreateRealmOperation(name)
}
// DeleteRealm deletes a single realm.
func (c *RealmsClient) DeleteRealm(ctx context.Context, req *gamingpb.DeleteRealmRequest, opts ...gax.CallOption) (*DeleteRealmOperation, error) {
return c.internalClient.DeleteRealm(ctx, req, opts...)
}
// DeleteRealmOperation returns a new DeleteRealmOperation from a given name.
// The name must be that of a previously created DeleteRealmOperation, possibly from a different process.
func (c *RealmsClient) DeleteRealmOperation(name string) *DeleteRealmOperation {
return c.internalClient.DeleteRealmOperation(name)
}
// UpdateRealm patches a single realm.
func (c *RealmsClient) UpdateRealm(ctx context.Context, req *gamingpb.UpdateRealmRequest, opts ...gax.CallOption) (*UpdateRealmOperation, error) {
return c.internalClient.UpdateRealm(ctx, req, opts...)
}
// UpdateRealmOperation returns a new UpdateRealmOperation from a given name.
// The name must be that of a previously created UpdateRealmOperation, possibly from a different process.
func (c *RealmsClient) UpdateRealmOperation(name string) *UpdateRealmOperation {
return c.internalClient.UpdateRealmOperation(name)
}
// PreviewRealmUpdate previews patches to a single realm.
func (c *RealmsClient) PreviewRealmUpdate(ctx context.Context, req *gamingpb.PreviewRealmUpdateRequest, opts ...gax.CallOption) (*gamingpb.PreviewRealmUpdateResponse, error) {
return c.internalClient.PreviewRealmUpdate(ctx, req, opts...)
}
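// Editor's sketch, not part of the generated client: end-to-end use of the
// wrapper methods above. The parent resource name is supplied by the caller,
// credentials come from Application Default Credentials, and error handling is
// reduced to an early return.
func listAllRealms(ctx context.Context, parent string) ([]*gamingpb.Realm, error) {
	client, err := NewRealmsClient(ctx)
	if err != nil {
		return nil, err
	}
	defer client.Close()

	var realms []*gamingpb.Realm
	it := client.ListRealms(ctx, &gamingpb.ListRealmsRequest{Parent: parent})
	for {
		realm, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		realms = append(realms, realm)
	}
	return realms, nil
}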
// realmsGRPCClient is a client for interacting with Game Services API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type realmsGRPCClient struct {
// Connection pool of gRPC connections to the service.
connPool gtransport.ConnPool
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
disableDeadlines bool
// Points back to the CallOptions field of the containing RealmsClient
CallOptions **RealmsCallOptions
// The gRPC API client.
realmsClient gamingpb.RealmsServiceClient
// LROClient is used internally to handle long-running operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient **lroauto.OperationsClient
// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}
// NewRealmsClient creates a new realms service client based on gRPC.
// The returned client must be Closed when it is done being used to clean up its underlying connections.
//
// A realm is a grouping of game server clusters that are considered
// interchangeable.
func NewRealmsClient(ctx context.Context, opts ...option.ClientOption) (*RealmsClient, error) {
clientOpts := defaultRealmsGRPCClientOptions()
if newRealmsClientHook != nil {
hookOpts, err := newRealmsClientHook(ctx, clientHookParams{})
if err != nil {
return nil, err
}
clientOpts = append(clientOpts, hookOpts...)
}
disableDeadlines, err := checkDisableDeadlines()
if err != nil {
return nil, err
}
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
if err != nil |
client := RealmsClient{CallOptions: defaultRealmsCallOptions()}
c := &realmsGRPCClient{
connPool: connPool,
disableDeadlines: disableDeadlines,
realmsClient: gamingpb.NewRealmsServiceClient(connPool),
CallOptions: &client.CallOptions,
}
c.setGoogleClientInfo()
client.internalClient = c
client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
if err != nil {
// This error "should not happen", since we are just reusing old connection pool
// and never actually need to dial.
// If this does happen, we could leak the connection pool. However, we cannot close it:
// If the user invoked the constructor with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO: investigate error conditions.
return nil, err
}
c.LROClient = &client.LROClient
return &client, nil
}
// Connection returns a connection to the API service.
//
// Deprecated.
func (c *realmsGRPCClient) Connection() *grpc.ClientConn {
return c.connPool.Conn()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *realmsGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *realmsGRPCClient) Close() error {
return c.connPool.Close()
}
func (c *realmsGRPCClient) ListRealms(ctx context.Context, req *gamingpb.ListRealmsRequest, opts ...gax.CallOption) *RealmIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ListRealms[0:len((*c.CallOptions).ListRealms):len((*c.CallOptions).ListRealms)], opts...)
it := &RealmIterator{}
req = proto.Clone(req).(*gamingpb.ListRealmsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*gamingpb.Realm, string, error) {
resp := &gamingpb.ListRealmsResponse{}
if pageToken != "" {
req.PageToken = pageToken
}
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else if pageSize != 0 {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.realmsClient.ListRealms(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
it.Response = resp
return resp.GetRealms(), resp.GetNextPageToken(), nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.GetPageSize())
it.pageInfo.Token = req.GetPageToken()
return it
}
func (c *realmsGRPCClient) GetRealm(ctx context.Context, req *gamingpb.GetRealmRequest, opts ...gax.CallOption) (*gamingpb.Realm, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetRealm[0:len((*c.CallOptions).GetRealm):len((*c.CallOptions).GetRealm)], opts...)
var resp *gamingpb.Realm
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.realmsClient.GetRealm(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
func (c *realmsGRPCClient) CreateRealm(ctx context.Context, req *gamingpb.CreateRealmRequest, opts ...gax.CallOption) (*CreateRealmOperation, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).CreateRealm[0:len((*c.CallOptions).CreateRealm):len((*c.CallOptions).CreateRealm)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.realmsClient.CreateRealm(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &CreateRealmOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, resp),
}, nil
}
func (c *realmsGRPCClient) DeleteRealm(ctx context.Context, req *gamingpb.DeleteRealmRequest, opts ...gax.CallOption) (*DeleteRealmOperation, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).DeleteRealm[0:len((*c.CallOptions).DeleteRealm):len((*c.CallOptions).DeleteRealm)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.realmsClient.DeleteRealm(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &DeleteRealmOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, resp),
}, nil
}
func (c *realmsGRPCClient) UpdateRealm(ctx context.Context, req *gamingpb.UpdateRealmRequest, opts ...gax.CallOption) (*UpdateRealmOperation, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "realm.name", url.QueryEscape(req.GetRealm().GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).UpdateRealm[0:len((*c.CallOptions).UpdateRealm):len((*c.CallOptions).UpdateRealm)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.realmsClient.UpdateRealm(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &UpdateRealmOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, resp),
}, nil
}
func (c *realmsGRPCClient) PreviewRealmUpdate(ctx context.Context, req *gamingpb.PreviewRealmUpdateRequest, opts ...gax.CallOption) (*gamingpb.PreviewRealmUpdateResponse, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "realm.name", url.QueryEscape(req.GetRealm().GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).PreviewRealmUpdate[0:len((*c.CallOptions).PreviewRealmUpdate):len((*c.CallOptions).PreviewRealmUpdate)], opts...)
var resp *gamingpb.PreviewRealmUpdateResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.realmsClient.PreviewRealmUpdate(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// CreateRealmOperation manages a long-running operation from CreateRealm.
type CreateRealmOperation struct {
lro *longrunning.Operation
}
// CreateRealmOperation returns a new CreateRealmOperation from a given name.
// The name must be that of a previously created CreateRealmOperation, possibly from a different process.
func (c *realmsGRPCClient) CreateRealmOperation(name string) *CreateRealmOperation {
return &CreateRealmOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *CreateRealmOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*gamingpb.Realm, error) {
var resp gamingpb.Realm
if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
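//
// A sketch of the three outcomes (illustrative, not part of the generated surface):
//
//	realm, err := op.Poll(ctx)
//	switch {
//	case err != nil:
//		// RPC failure, or the operation itself completed with an error (op.Done() == true).
//	case realm == nil:
//		// Operation still running; poll again later.
//	default:
//		// Operation finished successfully; realm holds the result.
//	}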
func (op *CreateRealmOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*gamingpb.Realm, error) {
var resp gamingpb.Realm
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *CreateRealmOperation) Metadata() (*gamingpb.OperationMetadata, error) {
var meta gamingpb.OperationMetadata
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *CreateRealmOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *CreateRealmOperation) Name() string {
return op.lro.Name()
}
// DeleteRealmOperation manages a long-running operation from DeleteRealm.
type DeleteRealmOperation struct {
lro *longrunning.Operation
}
// DeleteRealmOperation returns a new DeleteRealmOperation from a given name.
// The name must be that of a previously created DeleteRealmOperation, possibly from a different process.
func (c *realmsGRPCClient) DeleteRealmOperation(name string) *DeleteRealmOperation {
return &DeleteRealmOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *DeleteRealmOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
return op.lro.WaitWithInterval(ctx, nil, time.Minute, opts...)
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *DeleteRealmOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
return op.lro.Poll(ctx, nil, opts...)
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *DeleteRealmOperation) Metadata() (*gamingpb.OperationMetadata, error) {
var meta gamingpb.OperationMetadata
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *DeleteRealmOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *DeleteRealmOperation) Name() string {
return op.lro.Name()
}
// UpdateRealmOperation manages a long-running operation from UpdateRealm.
type UpdateRealmOperation struct {
lro *longrunning.Operation
}
// UpdateRealmOperation returns a new UpdateRealmOperation from a given name.
// The name must be that of a previously created UpdateRealmOperation, possibly from a different process.
func (c *realmsGRPCClient) UpdateRealmOperation(name string) *UpdateRealmOperation {
return &UpdateRealmOperation{
lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *UpdateRealmOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*gamingpb.Realm, error) {
var resp gamingpb.Realm
if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *UpdateRealmOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*gamingpb.Realm, error) {
var resp gamingpb.Realm
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *UpdateRealmOperation) Metadata() (*gamingpb.OperationMetadata, error) {
var meta gamingpb.OperationMetadata
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *UpdateRealmOperation) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *UpdateRealmOperation) Name() string {
return op.lro.Name()
}
// RealmIterator manages a stream of *gamingpb.Realm.
type RealmIterator struct {
items []*gamingpb.Realm
pageInfo *iterator.PageInfo
nextFunc func() error
// Response is the raw response for the current page.
// It must be cast to the RPC response type.
// Calling Next() or InternalFetch() updates this value.
Response interface{}
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*gamingpb.Realm, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RealmIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
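//
// A typical drain loop (illustrative; iterator.Done comes from the google.golang.org/api/iterator
// package already imported by this file):
//
//	for {
//		realm, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: handle error.
//		}
//		_ = realm
//	}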
func (it *RealmIterator) Next() (*gamingpb.Realm, error) {
var item *gamingpb.Realm
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *RealmIterator) bufLen() int {
return len(it.items)
}
func (it *RealmIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
| {
return nil, err
} |
dicomStoreIamBinding.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package healthcare
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Three different resources help you manage your IAM policy for a Healthcare DICOM store. Each of these resources serves a different use case:
//
// * `healthcare.DicomStoreIamPolicy`: Authoritative. Sets the IAM policy for the DICOM store and replaces any existing policy already attached.
// * `healthcare.DicomStoreIamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the DICOM store are preserved.
// * `healthcare.DicomStoreIamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the DICOM store are preserved.
//
// > **Note:** `healthcare.DicomStoreIamPolicy` **cannot** be used in conjunction with `healthcare.DicomStoreIamBinding` and `healthcare.DicomStoreIamMember` or they will fight over what your policy should be.
//
// > **Note:** `healthcare.DicomStoreIamBinding` resources **can be** used in conjunction with `healthcare.DicomStoreIamMember` resources **only if** they do not grant privilege to the same role.
//
// ## google\_healthcare\_dicom\_store\_iam\_policy
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/healthcare"
// "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/organizations"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// admin, err := organizations.LookupIAMPolicy(ctx, &organizations.LookupIAMPolicyArgs{
// Bindings: []organizations.GetIAMPolicyBinding{
// organizations.GetIAMPolicyBinding{
// Role: "roles/editor",
// Members: []string{
// "user:[email protected]",
// },
// },
// },
// }, nil)
// if err != nil {
// return err
// }
// _, err = healthcare.NewDicomStoreIamPolicy(ctx, "dicomStore", &healthcare.DicomStoreIamPolicyArgs{
// DicomStoreId: pulumi.String("your-dicom-store-id"),
// PolicyData: pulumi.String(admin.PolicyData),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## google\_healthcare\_dicom\_store\_iam\_binding
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/healthcare"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := healthcare.NewDicomStoreIamBinding(ctx, "dicomStore", &healthcare.DicomStoreIamBindingArgs{
// DicomStoreId: pulumi.String("your-dicom-store-id"),
// Members: pulumi.StringArray{
// pulumi.String("user:[email protected]"),
// },
// Role: pulumi.String("roles/editor"),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## google\_healthcare\_dicom\_store\_iam\_member
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/healthcare"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := healthcare.NewDicomStoreIamMember(ctx, "dicomStore", &healthcare.DicomStoreIamMemberArgs{
// DicomStoreId: pulumi.String("your-dicom-store-id"),
// Member: pulumi.String("user:[email protected]"),
// Role: pulumi.String("roles/editor"),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// IAM member imports use space-delimited identifiers: the resource in question, the role, and the account.
//
// This member resource can be imported using the `dicom_store_id`, role, and account, e.g.
//
// ```sh
// $ pulumi import gcp:healthcare/dicomStoreIamBinding:DicomStoreIamBinding dicom_store_iam "your-project-id/location-name/dataset-name/dicom-store-name roles/viewer user:[email protected]"
// ```
//
// IAM binding imports use space-delimited identifiers: the resource in question and the role.
//
// This binding resource can be imported using the `dicom_store_id` and role, e.g.
//
// ```sh
// $ pulumi import gcp:healthcare/dicomStoreIamBinding:DicomStoreIamBinding dicom_store_iam "your-project-id/location-name/dataset-name/dicom-store-name roles/viewer"
// ```
//
// IAM policy imports use the identifier of the resource in question.
//
// This policy resource can be imported using the `dicom_store_id`, e.g.
//
// ```sh
// $ pulumi import gcp:healthcare/dicomStoreIamBinding:DicomStoreIamBinding dicom_store_iam your-project-id/location-name/dataset-name/dicom-store-name
// ```
type DicomStoreIamBinding struct {
pulumi.CustomResourceState
Condition DicomStoreIamBindingConditionPtrOutput `pulumi:"condition"`
// The DICOM store ID, in the form
// `{project_id}/{location_name}/{dataset_name}/{dicom_store_name}` or
// `{location_name}/{dataset_name}/{dicom_store_name}`. In the second form, the provider's
// project setting will be used as a fallback.
DicomStoreId pulumi.StringOutput `pulumi:"dicomStoreId"`
// (Computed) The etag of the DICOM store's IAM policy.
Etag pulumi.StringOutput `pulumi:"etag"`
Members pulumi.StringArrayOutput `pulumi:"members"`
// The role that should be applied. Only one
// `healthcare.DicomStoreIamBinding` can be used per role. Note that custom roles must be of the format
// `[projects|organizations]/{parent-name}/roles/{role-name}`.
Role pulumi.StringOutput `pulumi:"role"`
}
// NewDicomStoreIamBinding registers a new resource with the given unique name, arguments, and options.
func NewDicomStoreIamBinding(ctx *pulumi.Context,
name string, args *DicomStoreIamBindingArgs, opts ...pulumi.ResourceOption) (*DicomStoreIamBinding, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.DicomStoreId == nil {
return nil, errors.New("invalid value for required argument 'DicomStoreId'")
}
if args.Members == nil {
return nil, errors.New("invalid value for required argument 'Members'")
}
if args.Role == nil {
return nil, errors.New("invalid value for required argument 'Role'")
}
var resource DicomStoreIamBinding
err := ctx.RegisterResource("gcp:healthcare/dicomStoreIamBinding:DicomStoreIamBinding", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetDicomStoreIamBinding gets an existing DicomStoreIamBinding resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
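//
// A minimal lookup sketch (illustrative; the resource name and ID string are assumptions, not values defined in this file):
//
// ```go
// binding, err := healthcare.GetDicomStoreIamBinding(ctx, "existing-binding",
// 	pulumi.ID("your-project-id/location-name/dataset-name/dicom-store-name roles/viewer"), nil)
// if err != nil {
// 	return err
// }
// _ = binding
// ```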
func GetDicomStoreIamBinding(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *DicomStoreIamBindingState, opts ...pulumi.ResourceOption) (*DicomStoreIamBinding, error) {
var resource DicomStoreIamBinding
err := ctx.ReadResource("gcp:healthcare/dicomStoreIamBinding:DicomStoreIamBinding", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering DicomStoreIamBinding resources.
type dicomStoreIamBindingState struct {
Condition *DicomStoreIamBindingCondition `pulumi:"condition"`
// The DICOM store ID, in the form
// `{project_id}/{location_name}/{dataset_name}/{dicom_store_name}` or
// `{location_name}/{dataset_name}/{dicom_store_name}`. In the second form, the provider's
// project setting will be used as a fallback.
DicomStoreId *string `pulumi:"dicomStoreId"`
// (Computed) The etag of the DICOM store's IAM policy.
Etag *string `pulumi:"etag"`
Members []string `pulumi:"members"`
// The role that should be applied. Only one
// `healthcare.DicomStoreIamBinding` can be used per role. Note that custom roles must be of the format
// `[projects|organizations]/{parent-name}/roles/{role-name}`.
Role *string `pulumi:"role"`
}
type DicomStoreIamBindingState struct {
Condition DicomStoreIamBindingConditionPtrInput
// The DICOM store ID, in the form
// `{project_id}/{location_name}/{dataset_name}/{dicom_store_name}` or
// `{location_name}/{dataset_name}/{dicom_store_name}`. In the second form, the provider's
// project setting will be used as a fallback.
DicomStoreId pulumi.StringPtrInput
// (Computed) The etag of the DICOM store's IAM policy.
Etag pulumi.StringPtrInput
Members pulumi.StringArrayInput
// The role that should be applied. Only one
// `healthcare.DicomStoreIamBinding` can be used per role. Note that custom roles must be of the format
// `[projects|organizations]/{parent-name}/roles/{role-name}`.
Role pulumi.StringPtrInput
}
func (DicomStoreIamBindingState) ElementType() reflect.Type {
return reflect.TypeOf((*dicomStoreIamBindingState)(nil)).Elem()
}
type dicomStoreIamBindingArgs struct {
Condition *DicomStoreIamBindingCondition `pulumi:"condition"`
// The DICOM store ID, in the form
// `{project_id}/{location_name}/{dataset_name}/{dicom_store_name}` or
// `{location_name}/{dataset_name}/{dicom_store_name}`. In the second form, the provider's
// project setting will be used as a fallback.
DicomStoreId string `pulumi:"dicomStoreId"`
Members []string `pulumi:"members"`
// The role that should be applied. Only one
// `healthcare.DicomStoreIamBinding` can be used per role. Note that custom roles must be of the format
// `[projects|organizations]/{parent-name}/roles/{role-name}`.
Role string `pulumi:"role"`
}
// The set of arguments for constructing a DicomStoreIamBinding resource.
type DicomStoreIamBindingArgs struct {
Condition DicomStoreIamBindingConditionPtrInput
// The DICOM store ID, in the form
// `{project_id}/{location_name}/{dataset_name}/{dicom_store_name}` or
// `{location_name}/{dataset_name}/{dicom_store_name}`. In the second form, the provider's
// project setting will be used as a fallback.
DicomStoreId pulumi.StringInput
Members pulumi.StringArrayInput
// The role that should be applied. Only one
// `healthcare.DicomStoreIamBinding` can be used per role. Note that custom roles must be of the format
// `[projects|organizations]/{parent-name}/roles/{role-name}`.
Role pulumi.StringInput
}
func (DicomStoreIamBindingArgs) ElementType() reflect.Type {
return reflect.TypeOf((*dicomStoreIamBindingArgs)(nil)).Elem()
}
type DicomStoreIamBindingInput interface {
pulumi.Input
ToDicomStoreIamBindingOutput() DicomStoreIamBindingOutput
ToDicomStoreIamBindingOutputWithContext(ctx context.Context) DicomStoreIamBindingOutput
}
func (*DicomStoreIamBinding) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreIamBinding)(nil))
}
func (i *DicomStoreIamBinding) ToDicomStoreIamBindingOutput() DicomStoreIamBindingOutput {
return i.ToDicomStoreIamBindingOutputWithContext(context.Background())
}
func (i *DicomStoreIamBinding) ToDicomStoreIamBindingOutputWithContext(ctx context.Context) DicomStoreIamBindingOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingOutput)
}
func (i *DicomStoreIamBinding) ToDicomStoreIamBindingPtrOutput() DicomStoreIamBindingPtrOutput {
return i.ToDicomStoreIamBindingPtrOutputWithContext(context.Background())
}
func (i *DicomStoreIamBinding) ToDicomStoreIamBindingPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingPtrOutput)
}
type DicomStoreIamBindingPtrInput interface {
pulumi.Input
ToDicomStoreIamBindingPtrOutput() DicomStoreIamBindingPtrOutput
ToDicomStoreIamBindingPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingPtrOutput
}
type dicomStoreIamBindingPtrType DicomStoreIamBindingArgs
func (*dicomStoreIamBindingPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreIamBinding)(nil))
}
func (i *dicomStoreIamBindingPtrType) ToDicomStoreIamBindingPtrOutput() DicomStoreIamBindingPtrOutput {
return i.ToDicomStoreIamBindingPtrOutputWithContext(context.Background())
}
func (i *dicomStoreIamBindingPtrType) ToDicomStoreIamBindingPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingPtrOutput)
}
// DicomStoreIamBindingArrayInput is an input type that accepts DicomStoreIamBindingArray and DicomStoreIamBindingArrayOutput values.
// You can construct a concrete instance of `DicomStoreIamBindingArrayInput` via:
//
// DicomStoreIamBindingArray{ DicomStoreIamBindingArgs{...} }
type DicomStoreIamBindingArrayInput interface {
pulumi.Input
ToDicomStoreIamBindingArrayOutput() DicomStoreIamBindingArrayOutput
ToDicomStoreIamBindingArrayOutputWithContext(context.Context) DicomStoreIamBindingArrayOutput
}
type DicomStoreIamBindingArray []DicomStoreIamBindingInput
func (DicomStoreIamBindingArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]*DicomStoreIamBinding)(nil)).Elem()
}
func (i DicomStoreIamBindingArray) ToDicomStoreIamBindingArrayOutput() DicomStoreIamBindingArrayOutput {
return i.ToDicomStoreIamBindingArrayOutputWithContext(context.Background())
}
func (i DicomStoreIamBindingArray) ToDicomStoreIamBindingArrayOutputWithContext(ctx context.Context) DicomStoreIamBindingArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingArrayOutput)
}
// DicomStoreIamBindingMapInput is an input type that accepts DicomStoreIamBindingMap and DicomStoreIamBindingMapOutput values.
// You can construct a concrete instance of `DicomStoreIamBindingMapInput` via:
//
// DicomStoreIamBindingMap{ "key": DicomStoreIamBindingArgs{...} }
type DicomStoreIamBindingMapInput interface {
pulumi.Input
ToDicomStoreIamBindingMapOutput() DicomStoreIamBindingMapOutput
ToDicomStoreIamBindingMapOutputWithContext(context.Context) DicomStoreIamBindingMapOutput
}
type DicomStoreIamBindingMap map[string]DicomStoreIamBindingInput
func (DicomStoreIamBindingMap) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]*DicomStoreIamBinding)(nil)).Elem()
}
func (i DicomStoreIamBindingMap) ToDicomStoreIamBindingMapOutput() DicomStoreIamBindingMapOutput {
return i.ToDicomStoreIamBindingMapOutputWithContext(context.Background())
}
func (i DicomStoreIamBindingMap) ToDicomStoreIamBindingMapOutputWithContext(ctx context.Context) DicomStoreIamBindingMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(DicomStoreIamBindingMapOutput)
}
type DicomStoreIamBindingOutput struct{ *pulumi.OutputState }
func (DicomStoreIamBindingOutput) ElementType() reflect.Type {
return reflect.TypeOf((*DicomStoreIamBinding)(nil))
}
func (o DicomStoreIamBindingOutput) ToDicomStoreIamBindingOutput() DicomStoreIamBindingOutput {
return o
}
func (o DicomStoreIamBindingOutput) ToDicomStoreIamBindingOutputWithContext(ctx context.Context) DicomStoreIamBindingOutput {
return o
}
func (o DicomStoreIamBindingOutput) ToDicomStoreIamBindingPtrOutput() DicomStoreIamBindingPtrOutput {
return o.ToDicomStoreIamBindingPtrOutputWithContext(context.Background())
}
func (o DicomStoreIamBindingOutput) ToDicomStoreIamBindingPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingPtrOutput {
return o.ApplyTWithContext(ctx, func(_ context.Context, v DicomStoreIamBinding) *DicomStoreIamBinding {
return &v
}).(DicomStoreIamBindingPtrOutput)
}
type DicomStoreIamBindingPtrOutput struct{ *pulumi.OutputState }
func (DicomStoreIamBindingPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**DicomStoreIamBinding)(nil))
}
func (o DicomStoreIamBindingPtrOutput) ToDicomStoreIamBindingPtrOutput() DicomStoreIamBindingPtrOutput {
return o
}
func (o DicomStoreIamBindingPtrOutput) ToDicomStoreIamBindingPtrOutputWithContext(ctx context.Context) DicomStoreIamBindingPtrOutput {
return o
}
func (o DicomStoreIamBindingPtrOutput) Elem() DicomStoreIamBindingOutput {
return o.ApplyT(func(v *DicomStoreIamBinding) DicomStoreIamBinding {
if v != nil {
return *v
}
var ret DicomStoreIamBinding
return ret
}).(DicomStoreIamBindingOutput)
}
type DicomStoreIamBindingArrayOutput struct{ *pulumi.OutputState }
func (DicomStoreIamBindingArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]DicomStoreIamBinding)(nil))
}
func (o DicomStoreIamBindingArrayOutput) ToDicomStoreIamBindingArrayOutput() DicomStoreIamBindingArrayOutput {
return o
}
func (o DicomStoreIamBindingArrayOutput) ToDicomStoreIamBindingArrayOutputWithContext(ctx context.Context) DicomStoreIamBindingArrayOutput {
return o
}
func (o DicomStoreIamBindingArrayOutput) Index(i pulumi.IntInput) DicomStoreIamBindingOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) DicomStoreIamBinding {
return vs[0].([]DicomStoreIamBinding)[vs[1].(int)]
}).(DicomStoreIamBindingOutput)
}
type DicomStoreIamBindingMapOutput struct{ *pulumi.OutputState }
func (DicomStoreIamBindingMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]DicomStoreIamBinding)(nil))
}
func (o DicomStoreIamBindingMapOutput) ToDicomStoreIamBindingMapOutput() DicomStoreIamBindingMapOutput {
return o
}
func (o DicomStoreIamBindingMapOutput) ToDicomStoreIamBindingMapOutputWithContext(ctx context.Context) DicomStoreIamBindingMapOutput {
return o
}
func (o DicomStoreIamBindingMapOutput) MapIndex(k pulumi.StringInput) DicomStoreIamBindingOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) DicomStoreIamBinding {
return vs[0].(map[string]DicomStoreIamBinding)[vs[1].(string)]
}).(DicomStoreIamBindingOutput)
}
func | () {
pulumi.RegisterInputType(reflect.TypeOf((*DicomStoreIamBindingInput)(nil)).Elem(), &DicomStoreIamBinding{})
pulumi.RegisterInputType(reflect.TypeOf((*DicomStoreIamBindingPtrInput)(nil)).Elem(), &DicomStoreIamBinding{})
pulumi.RegisterInputType(reflect.TypeOf((*DicomStoreIamBindingArrayInput)(nil)).Elem(), DicomStoreIamBindingArray{})
pulumi.RegisterInputType(reflect.TypeOf((*DicomStoreIamBindingMapInput)(nil)).Elem(), DicomStoreIamBindingMap{})
pulumi.RegisterOutputType(DicomStoreIamBindingOutput{})
pulumi.RegisterOutputType(DicomStoreIamBindingPtrOutput{})
pulumi.RegisterOutputType(DicomStoreIamBindingArrayOutput{})
pulumi.RegisterOutputType(DicomStoreIamBindingMapOutput{})
}
| init |
UserinfoCommand.ts | import { Command } from "../../../client/structures/extensions";
import { ApplyOptions } from "@sapphire/decorators";
import type { Message } from "discord.js";
import axios from "axios";
import moment from "moment";
@ApplyOptions<Command.Options>({
name: "info",
aliases: ["userinfo", "uinfo"],
description: "Shows you the information about a user",
usage: "[user]",
requiredClientPermissions: ["EMBED_LINKS"]
})
export default class | extends Command {
public async messageRun(message: Message, args: Command.Args): Promise<void> {
const msg = await message.reply(`>>> ${this.client.constants.emojis.loading} | Getting user information...`);
let { value: user } = await args.pickResult("user");
if (!user) user = message.author;
const embed = this.client.utils.embed().setAuthor(`${user.tag} - user info`, user.displayAvatarURL({ dynamic: true, size: 4096 }));
const { data: rep } = await axios
.get<Reputation>(`https://discordrep.com/api/v3/rep/${user.id}`, this.getHeaders(true, "DREP_TOKEN"))
.catch(() => ({
data: {
upvotes: 0,
downvotes: 0,
reputation: 0,
rank: 0,
xp: 0,
staff: false
}
}));
const { data: banned } = await axios
.get<KsoftBan>(`https://api.ksoft.si/bans/check?user=${user.id}`, this.getHeaders(true, "KSOFT_TOKEN"))
.catch(() => ({
data: {
is_banned: false
}
}));
const roblox = await this.client.utils.robloxUser(user.id);
embed
.setThumbnail(user.displayAvatarURL({ dynamic: true, size: 4096 }))
.addField(
"• Global User Statistics",
[
`> 🤔 | **Reputation**: ${rep.upvotes - rep.downvotes < 0 ? "bad" : "good"}`,
`> 🔨 | **Globally banned**: ${banned.is_banned ? "🔨" : this.client.constants.emojis.redcross}`,
`> ⚖ | **Conclusion**: ${rep.upvotes - rep.downvotes < 0 || banned.is_banned ? "untrustworthy" : "trustworthy"}`
].join("\n")
)
.addField(
"• General Information",
[
`> 👤 | **User**: ${user.tag} (${user.toString()})`,
`> 🥽 | **User ID**: \`${user.id}\``,
`> 📆 | **Created at**: ${this.client.utils.formatTime(
moment(user.createdTimestamp).unix(),
"f"
)} | ${this.client.utils.formatTime(moment(user.createdTimestamp).unix(), "R")}`
].join("\n")
)
.addField(
"• Roblox Information",
[`>>> 🎮 | **Rover**: ${roblox.rover || "-"}`, `🕹 | **Bloxlink**: ${roblox.bloxlink || "-"}`].join("\n")
)
.setFooter("The global stats are fetched from an api - discordrep & KSoft Ban");
if (message.guild) {
const member = await this.client.utils.fetchMember(user.id, message.guild);
if (member) {
const r = member.roles.cache
.sort((a, b) => b.position - a.position)
.map((role) => role.toString())
.slice(0, -1);
const roles =
r.length === 0 ? "none" : r.length <= 10 ? r.join(", ") : this.client.utils.trimArray(r).join(", ");
embed.setColor(member.displayHexColor || process.env.COLOUR);
embed.addField(
"• Member Information",
[
`> 📆 | **Joined at**: ${this.client.utils.formatTime(
moment(member.joinedTimestamp ?? 0).unix(),
"f"
)} | ${this.client.utils.formatTime(moment(member.joinedTimestamp ?? 0).unix(), "R")}`,
`> 📂 | **Roles**: ${roles}`
].join("\n")
);
}
}
await msg.edit({
embeds: [embed],
content: null
});
}
private getHeaders(bearer: boolean, key: string) {
return {
headers: {
Authorization: `${bearer ? "Bearer " : ""}${process.env[key]}`
}
};
}
}
interface Reputation {
upvotes: number;
downvotes: number;
reputation: number;
rank: string;
xp: number;
staff: boolean;
}
export interface KsoftBan {
is_banned: boolean;
}
| ServerinfoCommand |
custom_tls_connection.rs | use lapin::{
message::DeliveryResult,
options::*,
publisher_confirm::Confirmation,
tcp::{AMQPUriTcpExt, NativeTlsConnector},
types::FieldTable,
uri::AMQPUri,
BasicProperties, Connection, ConnectionProperties, ConsumerDelegate, Result,
};
use std::{future::Future, pin::Pin};
use tracing::info;
#[derive(Clone, Debug, PartialEq)]
struct Subscriber;
impl ConsumerDelegate for Subscriber {
fn on_new_delivery(
&self,
delivery: DeliveryResult,
) -> Pin<Box<dyn Future<Output = ()> + Send>> {
Box::pin(async move {
info!("received message: {:?}", delivery);
})
}
}
async fn connect() -> Result<Connection> {
// You need to use amqp:// scheme here to handle the TLS part manually as it's automatic when you use amqps://
let uri = std::env::var("AMQP_ADDR")
.unwrap_or_else(|_| "amqp://127.0.0.1:5672/%2f".into())
.parse::<AMQPUri>()
.unwrap();
let connect = move |uri: &AMQPUri| {
uri.connect().and_then(|stream| {
let tls_builder = NativeTlsConnector::builder();
// Perform here your custom TLS setup, with tls_builder.identity or whatever else you need
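// For example, a client-certificate sketch (illustrative only: the PKCS#12 path and
// password are assumptions, `tls_builder` would need to be declared `mut`, and the
// native-tls crate is assumed to be a direct dependency of your project):
//
// let der = std::fs::read("client.p12").expect("read PKCS#12 bundle");
// let identity = native_tls::Identity::from_pkcs12(&der, "p12-password").expect("parse identity");
// tls_builder.identity(identity);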
stream.into_native_tls(
tls_builder.build().expect("TLS configuration failed"),
&uri.authority.host,
)
})
};
Connection::connector(uri, Box::new(connect), ConnectionProperties::default()).await
}
fn main() {
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", "info");
}
tracing_subscriber::fmt::init();
async_global_executor::block_on(async {
let conn = connect().await.expect("connection error");
info!("CONNECTED");
//send channel
let channel_a = conn.create_channel().await.expect("create_channel");
//receive channel
let channel_b = conn.create_channel().await.expect("create_channel");
info!("[{}] state: {:?}", line!(), conn.status().state());
//create the hello queue
let queue = channel_a
.queue_declare(
"hello",
QueueDeclareOptions::default(),
FieldTable::default(),
)
.await
.expect("queue_declare");
info!("[{}] state: {:?}", line!(), conn.status().state());
info!("[{}] declared queue: {:?}", line!(), queue);
info!("will consume");
channel_b
.basic_consume(
"hello",
"my_consumer",
BasicConsumeOptions::default(),
FieldTable::default(),
)
.await
.expect("basic_consume")
.set_delegate(Subscriber);
info!("[{}] state: {:?}", line!(), conn.status().state());
info!("will publish");
let payload = b"Hello world!";
let confirm = channel_a
.basic_publish(
"",
"hello",
BasicPublishOptions::default(),
payload.to_vec(),
BasicProperties::default(), | )
.await
.expect("basic_publish")
.await
.expect("publisher-confirms");
assert_eq!(confirm, Confirmation::NotRequested);
info!("[{}] state: {:?}", line!(), conn.status().state());
})
} | |
stream_hasher.rs | // Example program that reads from stdin and outputs the hash in hex.
//
// If running interactively, press CTRL+D to stop input or CTRL+C to exit.
use std::{hash::Hasher, io::Read};
use mx3::Mx3Hasher;
fn | () -> Result<(), std::io::Error> {
let mut hasher = Mx3Hasher::default();
let mut input_buffer = [0u8; 4096];
let mut stdin = std::io::stdin();
loop {
let bytes_read = stdin.read(&mut input_buffer)?;
if bytes_read == 0 {
break;
}
hasher.write(&input_buffer[0..bytes_read]);
}
println!("{:x}", hasher.finish());
Ok(())
}
| main |
connection.py | import socket
def is_connected_to_internet(host="8.8.8.8", port=53, timeout=3):
| """
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
try:
socket.setdefaulttimeout(timeout)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
return True
except socket.error:
return False |
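# Illustrative usage, assuming this module is executed directly (not part of the original file):
if __name__ == "__main__":
    print("online" if is_connected_to_internet() else "offline")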
|
main.go | package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"os"
"time"
ets2 "github.com/cyleriggs/go-ets2-telemetry-client"
serial "github.com/goburrow/serial"
)
var baseUrl = ""
var updateFreq = 0
var serialPort = ""
var speedUnits = ""
func init() {
flag.StringVar(&baseUrl, "baseUrl", "http://localhost:25555", "HTTP url for telemetry server")
flag.IntVar(&updateFreq, "updateFreq", 1000/24, "Update frequency in milliseconds")
flag.StringVar(&serialPort, "serialPort", "COM3", "A serial port to write updates to, one var at a time")
flag.StringVar(&speedUnits, "speedUnits", "kmh", "Interpret speed as [mph|kmh] (default: kmh)")
}
func main() {
flag.Parse()
var err error
// Open serial port (optional)
fmt.Printf("Connecting to serial...")
serialConfig := &serial.Config{
Address: serialPort, BaudRate: 115200, StopBits: 1,
Timeout: 100 * time.Millisecond, Parity: "N"}
fSerial, err := serial.Open(serialConfig)
if err != nil {
log.Fatalf("Failed to open serial port: %v", err)
os.Exit(1)
}
defer fSerial.Close()
time.Sleep(5 * time.Second)
fmt.Printf("OK\n")
if _, err := io.WriteString(fSerial, "\n"); err != nil {
log.Fatalf("Error writing to serial: %v", err)
os.Exit(-1)
}
monitor(fSerial, "\n", "")
}
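// monitor polls the telemetry endpoint in a loop and writes one "key=value" command per
// update to the serial device, e.g. "rpm=1450.000000\n", "kmh=62.500000\n" and
// "fuel=75.000000\n" (example values only), then sleeps for updateFreq milliseconds.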
func monitor(fh io.ReadWriteCloser, valueSep string, groupSep string) {
c := ets2.NewClient(baseUrl)
| for {
t, err := c.GetTelemetry()
if err != nil {
log.Fatalf("Error reading telemetry data: %v", err)
os.Exit(-1)
}
sendCmd(fh, fmt.Sprintf("rpm=%f%s", t.Truck.EngineRpm, valueSep))
sendCmd(fh, fmt.Sprintf("%s=%f%s", speedUnits, t.Truck.Speed, valueSep))
sendCmd(fh, fmt.Sprintf("fuel=%f%s", 100 * (t.Truck.Fuel/t.Truck.FuelCapacity), valueSep))
time.Sleep(time.Duration(updateFreq * int(time.Millisecond)))
}
}
func sendCmd(fh io.ReadWriter, cmd string) {
if _, err := io.WriteString(fh, cmd); err != nil {
log.Fatalf("Failed to write cmd: %v", err)
os.Exit(-1)
} else {
scanner := bufio.NewScanner(fh)
scanner.Scan()
log.Printf("serial: %v", scanner.Text())
scanner.Scan()
log.Printf("serial: %v", scanner.Text())
}
} | // Do the monitoring |
main.rs | // Copyright 2020. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#![cfg_attr(not(debug_assertions), deny(unused_variables))]
#![cfg_attr(not(debug_assertions), deny(unused_imports))]
#![cfg_attr(not(debug_assertions), deny(dead_code))]
#![cfg_attr(not(debug_assertions), deny(unused_extern_crates))]
#![deny(unused_must_use)]
#![deny(unreachable_patterns)]
#![deny(unknown_lints)]
mod common;
mod error;
mod proxy;
use crate::error::StratumTranscoderProxyError;
use futures::future;
use hyper::{service::make_service_fn, Server};
use proxy::{StratumTranscoderProxyConfig, StratumTranscoderProxyService};
use std::convert::Infallible;
use structopt::StructOpt;
use tari_app_grpc::tari_rpc as grpc;
use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap, GlobalConfig};
use tokio::time::Duration;
#[tokio::main]
async fn main() -> Result<(), StratumTranscoderProxyError> |
/// Loads the configuration and sets up logging
fn initialize() -> Result<GlobalConfig, StratumTranscoderProxyError> {
// Parse and validate command-line arguments
let mut bootstrap = ConfigBootstrap::from_args();
// Check and initialize configuration files
let application_type = ApplicationType::StratumTranscoder;
bootstrap.init_dirs(application_type)?;
// Load and apply configuration file
let cfg = bootstrap.load_configuration()?;
#[cfg(feature = "envlog")]
let _ = env_logger::try_init();
// Initialise the logger
#[cfg(not(feature = "envlog"))]
bootstrap.initialize_logging()?;
let cfg = GlobalConfig::convert_from(application_type, cfg)?;
Ok(cfg)
}
| {
let config = initialize()?;
let config = StratumTranscoderProxyConfig::from(config);
let addr = config.transcoder_host_address;
let client = reqwest::Client::builder()
.connect_timeout(Duration::from_secs(5))
.timeout(Duration::from_secs(10))
.pool_max_idle_per_host(25)
.build()
.map_err(StratumTranscoderProxyError::ReqwestError)?;
let base_node_client =
grpc::base_node_client::BaseNodeClient::connect(format!("http://{}", config.grpc_base_node_address)).await?;
let wallet_client =
grpc::wallet_client::WalletClient::connect(format!("http://{}", config.grpc_console_wallet_address)).await?;
let miningcore_service = StratumTranscoderProxyService::new(config, client, base_node_client, wallet_client);
let service = make_service_fn(|_conn| future::ready(Result::<_, Infallible>::Ok(miningcore_service.clone())));
match Server::try_bind(&addr) {
Ok(builder) => {
println!("Listening on {}...", addr);
builder.serve(service).await?;
Ok(())
},
Err(err) => {
println!("Fatal: Cannot bind to '{}'.", addr);
println!("It may be part of a Port Exclusion Range. Please try to use another port for the");
println!("'proxy_host_address' in 'config/config.toml' and for the applicable XMRig '[pools][url]' or");
println!("[pools][self-select]' config setting that can be found in 'config/xmrig_config_***.json' or");
println!("'<xmrig folder>/config.json'.");
println!();
Err(err.into())
},
}
} |
agent.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { schema } from '@kbn/config-schema';
import { AGENT_TYPE_EPHEMERAL, AGENT_TYPE_PERMANENT, AGENT_TYPE_TEMPORARY } from '../../../common';
export const AgentTypeSchema = schema.oneOf([
schema.literal(AGENT_TYPE_EPHEMERAL),
schema.literal(AGENT_TYPE_PERMANENT),
schema.literal(AGENT_TYPE_TEMPORARY),
]);
const AgentEventBase = {
type: schema.oneOf([
schema.literal('STATE'),
schema.literal('ERROR'),
schema.literal('ACTION_RESULT'),
schema.literal('ACTION'),
]),
subtype: schema.oneOf([
// State
schema.literal('RUNNING'),
schema.literal('STARTING'),
schema.literal('IN_PROGRESS'), | schema.literal('STOPPING'),
schema.literal('STOPPED'),
// Action results
schema.literal('DATA_DUMP'),
// Actions
schema.literal('ACKNOWLEDGED'),
schema.literal('UNKNOWN'),
]),
timestamp: schema.string(),
message: schema.string(),
payload: schema.maybe(schema.any()),
agent_id: schema.string(),
action_id: schema.maybe(schema.string()),
config_id: schema.maybe(schema.string()),
stream_id: schema.maybe(schema.string()),
};
export const AckEventSchema = schema.object({
...AgentEventBase,
...{ action_id: schema.string() },
});
export const AgentEventSchema = schema.object({
...AgentEventBase,
});
export const NewAgentActionSchema = schema.object({
type: schema.oneOf([
schema.literal('CONFIG_CHANGE'),
schema.literal('DATA_DUMP'),
schema.literal('RESUME'),
schema.literal('PAUSE'),
]),
data: schema.maybe(schema.string()),
sent_at: schema.maybe(schema.string()),
}); | schema.literal('CONFIG'),
schema.literal('FAILED'), |
main.py | import requests, json
from sys import argv
from modify import write
country = "czech_republic"
url = f"https://top-ghusers.vercel.app/api?c={country}"
response = requests.get(url)
resJson = json.loads(response.text)
| print(user['rank']) | for user in resJson['users']:
if user['user']['username'] == argv[1]:
write(user['rank']) |
ippusb_ppd_copies_unsupported.go | // Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package printer
import (
"context"
"chromiumos/tast/local/bundles/cros/printer/usbprintertests"
"chromiumos/tast/local/chrome"
"chromiumos/tast/testing"
)
func init() {
testing.AddTest(&testing.Test{
Func: IPPUSBPPDCopiesUnsupported,
Desc: "Verifies that the 'copies-supported' attribute of the printer is used to populate the cupsManualCopies and cupsMaxCopies values in the corresponding generated PPD",
Contacts: []string{"[email protected]", "[email protected]"},
Attr: []string{"group:mainline"},
SoftwareDeps: []string{"chrome", "cros_internal", "cups", "virtual_usb_printer"},
Data: []string{"ippusb_copies_unsupported.json"},
Pre: chrome.LoggedIn(), |
// IPPUSBPPDCopiesUnsupported tests that the "cupsManualCopies" and
// "cupsMaxCopies" PPD fields will be correctly populated when configuring an
// IPP-over-USB printer whose "copies-supported" IPP attribute has an upper
// limit of 1 (i.e., it does not support copies).
func IPPUSBPPDCopiesUnsupported(ctx context.Context, s *testing.State) {
const descriptors = "/usr/local/etc/virtual-usb-printer/ippusb_printer.json"
usbprintertests.RunIPPUSBPPDTest(ctx, s, descriptors, s.DataPath("ippusb_copies_unsupported.json"), map[string]string{
"*cupsManualCopies": "True",
"*cupsMaxCopies": "1",
})
} | })
} |
robustPipelineSizing.py | """
Last edited: January 20 2020
|br| @author: FINE Developer Team (FZJ IEK-3) \n\n
The approaches used are described in
Robinius et al. (2019) "Robust Optimal Discrete Arc Sizing for Tree-Shaped Potential Networks"
and they are further developed with the help of
Theorem 10 of Labbé et al. (2019) "Bookings in the European gas market: characterisation of feasibility and
computational complexity results"
and Lemma 3.4 and 3.5 of Schewe et al. (preprint 2020) "Computing Technical Capacities in the European Entry-Exit
Gas Market is NP-Hard"
"""
import pandas as pd
from FINE import utils
import networkx as nx
import math
import pyomo.environ as py
import warnings
from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition
import numpy as np
import copy
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
import matplotlib as mpl
import shapely as shp
import time
from multiprocessing import Pool
import sys
from functools import partial
try:
import geopandas as gpd
except ImportError:
warnings.warn('The GeoPandas python package could not be imported.')
# local type und value checker
def isPandasDataFrameNumber(dataframe):
# check if dataframe is a pandas dataframe and if each value is float or int
if not isinstance(dataframe, pd.DataFrame):
raise TypeError("The input argument has to be a pandas DataFrame")
else:
if not dataframe.select_dtypes(exclude=["float", "int"]).empty:
raise ValueError("The input pandas DataFrame has to contain only floats or ints")
def isPandasSeriesPositiveNumber(pandasSeries):
# Check if the input argument is a pandas series and it contains only positive numbers
if not isinstance(pandasSeries, pd.Series):
raise TypeError("The input argument has to be a pandas series")
else:
for index in pandasSeries.index:
utils.isPositiveNumber(pandasSeries[index])
def isNetworkxGraph(graph):
# Check if the input argument is a networkx graph
if not isinstance(graph, nx.Graph):
raise TypeError("The input argument has to be a networkx graph")
def isDictionaryPositiveNumber(dictionary):
# Check if the input argument is a dictionary with positive numbers as values
if not isinstance(dictionary, dict):
raise TypeError("The input argument has to be a dictionary")
else:
for key in dictionary.keys():
utils.isPositiveNumber(dictionary[key])
def checkLowerUpperBoundsOfDicts(lowerDict, upperDict):
# check if lowerDict and upperDict have the same keys and if lowerDict[key] <= upperDict[key] holds
if not (lowerDict.keys() == upperDict.keys()):
raise ValueError("The input arguments have to have the same keys")
else:
for key in lowerDict.keys():
if lowerDict[key] > upperDict[key]:
raise ValueError("The lower bound has to be the smaller than the upper bound")
def isListOfStrings(strings):
# check if strings is list of strings
if not isinstance(strings, list):
raise TypeError("The input argument has to be a list")
else:
for string in strings:
utils.isString(string)
def isBool(boolean):
# check if boolean is a bool
if not isinstance(boolean, bool):
raise TypeError("The input argument has to be a bool")
# End utils checks
def getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None):
"""
Determines the injection and withdrawal rates into a network from a component in an
EnergySystemModel object or based on the fluid flow data.
:param componentName: name of the network component in the EnergySystemModel class
(only required if the fluid flows are to be obtained from the EnergySystemModel class)
|br| * the default value is ''
:type componentName: string
:param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
specified if the operationVariablesOptimumData are to be obtained from the
EnergySystemModel object)
|br| * the default value is None
:type esM: FINE EnergySystemModel
:param operationVariablesOptimumData: the injection and withdrawal rates into and out of the
network can either be obtained from a DataFrame with the original fluid flows or an
EnergySystemModel with an optimized Pyomo instance.
In the former case, the argument is a pandas DataFrame with two index columns (specifying
the names of the start and end node of a pipeline) and one index row (for the time steps).
The data in the DataFrame denotes the flow coming from the start node and going to the end
node [e.g. in kWh or Nm^3]. Example:
0 1 ... 8759
node1 node2 0.1 0.0 ... 0.9
node2 node3 0.0 0.3 ... 0.4
node2 node1 0.9 0.9 ... 0.2
node3 node2 1.1 0.2 ... 0.9
|br| * the default value is None
:type operationVariablesOptimumData: pandas DataFrame with non-negative floats
:return: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative)
:rtype: pandas DataFrame
"""
#TODO check type and value correctness
# Get the original optimal operation variables
if operationVariablesOptimumData is not None:
op = operationVariablesOptimumData
else:
op = esM.componentModelingDict[esM.componentNames[componentName]]. \
getOptimalValues('operationVariablesOptimum')['values'].loc[componentName]
# Get a map of the component's network
if esM is None:
mapN = {}
for conn in operationVariablesOptimumData.index:
loc, loc_ = conn
mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_})
mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc})
else:
mapN = esM.getComponent(componentName)._mapL
# Initialize list for nodal injection and withdrawal time series data
injectionWithdrawalRates, nodeIx = [], []
# Reset connections set (not all indices might be in the operationVariablesOptimumData data)
connections = set()
# For each node loc, compute the injection and withdrawal rates
for loc, locConn in mapN.items():
# As in a few cases zero columns/ rows are dropped from data frames, two lists
# of eligible connection indices are created.
ixIn, ixOut = [], []
for loc_, conn in locConn.items():
if (loc, loc_) in op.index:
ixOut.append((loc, loc_)), connections.add((loc, loc_))
if (loc_, loc) in op.index:
ixIn.append((loc_, loc)), connections.add((loc_, loc))
# If either list has at least one entry, the incoming and outgoing flows are selected
# from the original optimal flow variables and aggregated. The resulting commodity
# withdrawals from the network are positive while injections are negative.
if (len(ixIn) != 0) | (len(ixOut) != 0):
injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum())
nodeIx.append(loc)
# Concat data to a pandas dataframe
injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1)
return injectionWithdrawalRates
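# Hedged usage sketch (node names and flow values are illustrative): given a DataFrame of pipe flows with a
# two-level index (startNode, endNode) and the time steps as columns,
#   op = pd.DataFrame([[0.1, 0.0], [0.0, 0.3]],
#                     index=pd.MultiIndex.from_tuples([('node1', 'node2'), ('node2', 'node3')]))
#   rates = getInjectionWithdrawalRates(operationVariablesOptimumData=op)
# returns one column per node, with withdrawals from the network positive and injections negative.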
def getNetworkLengthsFromESM(componentName, esM):
"""
Obtains the pipeline lengths of a transmission component in an EnergySystemModel class.
:param componentName: name of the network component in the EnergySystemModel class
(only required if the fluid flows are to be obtained from the EnergySystemModel class)
|br| * the default value is ''
:type componentName: string
:param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
specified if the operationVariablesOptimumData are to be obtained from the
EnergySystemModel object)
|br| * the default value is None
:type esM: FINE EnergySystemModel
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
"""
utils.isString(componentName)
utils.isEnergySystemModelInstance(esM)
distances = esM.getComponent(componentName).distances.copy()
indexMap = esM.getComponent(componentName)._mapC
distances.index = [indexMap[ix] for ix in distances.index]
return distances
def getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength):
"""
    If a pipe is longer than maxPipeLength, then it will be split into several pipes of equal length,
i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1
:param shapeFilePath: path to a shape file which connects the gas injection/ withdrawal nodes with each other. The rows of the
file describe connections between the injection/ withdrawal nodes. The required geometry of these connections is a shapely
LineString. Additionally, the file has two columns holding the names of the two injection/ withdrawal nodes (start and end
point of the LineString).
:type shapeFilePath: string
:param regColumn1: name of the column which holds the name of the injection/ withdrawal node at the beginning of the line
:type regColumn1: string
:param regColumn2: name of the column which holds the name of the injection/ withdrawal node at the end of the line
:type regColumn2: string
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar].
It holds: dic_node_minPress[index] <= dic_node_maxPress[index].
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
:param minPipeLength: desired minimum length of a pipe in [m], note: not always possible to achieve.
:type minPipeLength: positive number
:param maxPipeLength: determines the maximal length of a pipe in [m].
:type maxPipeLength: positive number
:return: distances_new - pipeline distances in m
:rtype: pandas series
:return: dic_node_minPress_new - dictionary that contains for every node of the network its lower pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return: dic_node_maxPress_new - dictionary that contains for every node of the network its upper pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return: gdfNodes - GeoDataFrame with the nodes of the network and their names
:rtype: geopandas GeoDataFrame
:return: gdfEdges - GeoDataFrame with the edges of the network and the names of their start and end nodes
:rtype: geopandas GeoDataFrame
"""
# type and value check
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
utils.isString(regColumn1), utils.isString(regColumn2)
utils.isStrictlyPositiveNumber(maxPipeLength)
utils.isStrictlyPositiveNumber(minPipeLength)
# Read shape file with linestrings connecting the entry/ exit nodes of the gas
gdf=gpd.read_file(shapeFilePath)
if not (gdf.geometry.type == 'LineString').all():
raise ValueError("Geometries of the shape file have to be LineStrings")
print('Number of edges before segmentation:', len(gdf))
originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2])
print('Number of nodes before segmentation:', len(originalNodesSet))
# Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates
coordNames, coords = [], []
pMin, pMax = [], []
lines = []
# Break linestrings into linear pieces
for i, row in gdf.iterrows():
# Simplify linestring (to increase the minimum length of pipeline connections wherever possible)
line = row.geometry.simplify(minPipeLength)
lines.append(line)
row.geometry = line
# Get new nodes
coords_ = [i for i in line.coords]
coords.extend(coords_)
coordNames_ = [row[regColumn1]]
coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j)
for j in range(len(coords_)-2)])
coordNames_.append(row[regColumn2])
coordNames.extend(coordNames_)
# Get averaged lower and upper pressure levels
pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) +
dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])
pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) +
dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))])
gdf['geometry'] = lines
# Create DataFrame of old and new nodes and drop duplicates
dfNodes = pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T
dfNodes = dfNodes.drop_duplicates(subset='lon_lat')
dfNodes = dfNodes.drop_duplicates(subset='nodeName')
# Obtain edges from shape file, assign names to them, delete duplicates
nodesIn_nodesOut = []
nodesIn = []
nodesOut = []
lineStrings = []
for i, row in gdf.iterrows():
coords_ = [i for i in row.geometry.coords]
for j in range(len(coords_)-1):
nodeIn = dfNodes.loc[dfNodes['lon_lat'] == coords_[j],'nodeName'].iloc[0]
nodeOut = dfNodes.loc[dfNodes['lon_lat'] == coords_[j+1],'nodeName'].iloc[0]
nodesIn.append(nodeIn), nodesOut.append(nodeOut)
nodes = [nodeIn,nodeOut]
nodes.sort()
nodesIn_nodesOut.append('edge_' + nodes[0] + '_' + nodes[1])
lineStrings.append(shp.geometry.LineString([coords_[j],coords_[j+1]]))
dfEdges = pd.DataFrame([nodesIn, nodesOut, nodesIn_nodesOut, lineStrings],
index=['nodeIn', 'nodeOut','edgeName','geometry']).T
dfEdges = dfEdges.drop_duplicates(subset='edgeName')
gdfEdges = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})
print('Number of edges after 1. segmentation:', len(gdfEdges))
print('Number of nodes after 1. segmentation:', len(dfNodes))
# Add nodes when line distances are too long
newNodes, newLines, newNodesName, newLinesName = [], [], [], []
nodesIn, nodesOut, coords = [], [], []
pMin, pMax = [], []
for i, row in gdfEdges.iterrows():
        # If lines are too long, segment them
if np.round(row['geometry'].length,2) > maxPipeLength:
nbNewNodes = int(np.floor(row['geometry'].length/maxPipeLength))
line = row.geometry
newNodes_, newLines_, newNodesName_, newLinesName_ = [], [], [], []
nodesIn_, nodesOut_, coords_ = [], [], []
pMin_, pMax_ = [], []
nodeStart, nodeEnd = line.interpolate(0), line.interpolate(line.length)
nodeStartName = row['nodeIn']
pMinIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMin'].iloc[0]
pMinOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMin'].iloc[0]
pMaxIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMax'].iloc[0]
pMaxOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMax'].iloc[0]
spacing = row['geometry'].length/(nbNewNodes+1)
for j in range(1,nbNewNodes+1):
newNode = line.interpolate(j*spacing)
newNodes_.append(newNode)
coords_.append((newNode.x, newNode.y))
newNodeName = row['nodeIn'] + '_' + row['nodeOut'] + '_a_' + str(j)
newNodesName_.append(newNodeName)
newLine = shp.geometry.LineString([nodeStart,newNode])
newLines_.append(newLine)
newLinesName_.append('temp'), nodesIn_.append(nodeStartName), nodesOut_.append(newNodeName)
pMin_.append((pMinIn*(nbNewNodes-j+1) + pMinOut*j)/(nbNewNodes+1))
pMax_.append((pMaxIn*(nbNewNodes-j+1) + pMaxOut*j)/(nbNewNodes+1))
nodeStart, nodeStartName = newNode, newNodeName
newLines_.append(shp.geometry.LineString([newNode,nodeEnd]))
newLinesName_.append('temp')
nodesIn_.append(newNodeName), nodesOut_.append(row['nodeOut'])
newNodes.extend(newNodes_), newLines.extend(newLines_), newNodesName.extend(newNodesName_)
newLinesName.extend(newLinesName_), pMin.extend(pMin_), pMax.extend(pMax_)
nodesIn.extend(nodesIn_), nodesOut.extend(nodesOut_), coords.extend(coords_)
if len(newNodes) > 0:
dfNodes = dfNodes.append(pd.DataFrame([newNodesName, pMin, pMax, coords],
index=['nodeName','pMin','pMax','lon_lat']).T)
dfEdges = pd.DataFrame([nodesIn, nodesOut, newLinesName, newLines],
index=['nodeIn', 'nodeOut','edgeName','geometry']).T
gdfEdgesNew = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'})
gdfEdges = gdfEdges.append(gdfEdgesNew)
gdfEdges = gdfEdges[gdfEdges.geometry.length.round(2) <= maxPipeLength]
del gdfEdges['edgeName']
renameDict = {name: 'auxNode' + str(i) for i, name in enumerate(dfNodes.nodeName.values)
if name not in originalNodesSet}
for node in originalNodesSet:
renameDict.update({node:node})
gdfEdges['nodeIn'] = gdfEdges.apply(lambda x: renameDict[x['nodeIn']], axis=1)
gdfEdges['nodeOut'] = gdfEdges.apply(lambda x: renameDict[x['nodeOut']], axis=1)
gdfEdges['distances'] = gdfEdges['geometry'].length
print('Number of edges after 2. segmentation:', len(gdfEdges))
dfNodes['nodeName'] = dfNodes.apply(lambda x: renameDict[x['nodeName']], axis=1)
dfNodes['geometry'] = dfNodes.apply(lambda x: shp.geometry.Point(x['lon_lat']), axis=1)
del dfNodes['lon_lat']
gdfNodes = gpd.GeoDataFrame(dfNodes,crs=gdf.crs).to_crs({'init': 'epsg:3035'})
print('Number of nodes after 2. segmentation:', len(gdfNodes))
print('Minimum length [m]:', gdfEdges.distances.min(), 'Maximum length [m]:', gdfEdges.distances.max())
distances_new = pd.Series(gdfEdges['distances'].values,
index = [(n1, n2) for n1, n2 in zip(gdfEdges['nodeIn'],gdfEdges['nodeOut'])])
dic_node_minPress_new = {n:pMin for n, pMin in zip(gdfNodes['nodeName'], gdfNodes['pMin'])}
dic_node_maxPress_new = {n:pMax for n, pMax in zip(gdfNodes['nodeName'], gdfNodes['pMax'])}
return distances_new, dic_node_minPress_new, dic_node_maxPress_new, gdfNodes, gdfEdges
def createNetwork(distances):
"""
Creates undirected network/graph from given distances; updates distances such that
either (u,v) or (v,u) are contained
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:return: graph of the network corresponding to the distances
:rtype: graph object of networkx
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
"""
# type and value check
isPandasSeriesPositiveNumber(distances)
for index in distances.index:
if not isinstance(index, tuple):
raise TypeError("Index of pandas series has to be a tuple")
# first check if distances are consistent, i.e. if (u,v) and (v,u) are in distances they have to have the same
# length and we will delete one of them
    # tmp list for reversed edges that will be deleted
tmp_edges = []
for edge in distances.index:
if (edge[1], edge[0]) in distances.index and (edge[1], edge[0]) not in tmp_edges:
assert (distances[edge] == distances[(edge[1], edge[0])])
tmp_edges.append(edge)
# delete tmp_edges because reversed edges are already contained and we consider an undirected graph
distances = distances.drop(tmp_edges)
# get edges for graph
edges = distances.index
# create empty graph
G = nx.Graph()
# create graph from given edges and add length as edge attribute
for edge in edges:
G.add_edge(edge[0], edge[1], length=distances[edge])
return G, distances
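# Hedged usage sketch (node names and lengths are illustrative): a pandas Series indexed by node tuples
# defines the edges, e.g.
#   distances = pd.Series([1000.0, 2000.0], index=[('node1', 'node2'), ('node2', 'node3')])
#   G, distances = createNetwork(distances)
# yields an undirected graph whose edges carry the attribute 'length' in [m].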
def createSteinerTree(graph, distances, inner_nodes):
"""
Computes a steiner tree with minimal sum of pipeline lengths;
updates distances such that only arcs of the spanning tree are contained with corresponding length
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
    :param inner_nodes: nodes that have to be contained in the Steiner tree (terminal nodes)
    :type inner_nodes: list
    :return: Steiner tree with minimal sum of pipeline lengths and the updated distances
    :rtype: networkx graph object, pandas series
"""
from networkx.algorithms import approximation
# type and value check
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
# compute spanning tree with minimal sum of pipeline lengths
S = approximation.steiner_tree(graph, terminal_nodes=inner_nodes, weight='length')
# TODO check why function fails when MST function is not called here
S = nx.minimum_spanning_tree(S, weight='length')
# delete edges that are in graph but not in the tree from the distance matrix
edgesToDelete = []
for edge in distances.index:
# check if edge or its reversed edge are contained in the tree
# you have to check both directions because we have an undirected graph
if edge not in S.edges and (edge[1], edge[0]) not in S.edges:
edgesToDelete.append(edge)
distances = distances.drop(edgesToDelete)
return S, distances
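# Hedged usage sketch: with G and distances from createNetwork, a Steiner tree spanning the
# injection/withdrawal (terminal) nodes can be obtained via
#   S, distances = createSteinerTree(G, distances, inner_nodes=['node1', 'node3'])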
def _generateRobustScenarios(startNode_endNode, **kwargs):
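    # multiprocessing helper: unpack the (startNode, endNode) pair and delegate to
    # computeSingleSpecialScenario; the remaining keyword arguments are passed through unchanged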
startNode = startNode_endNode[0]
endNode = startNode_endNode[1]
return startNode_endNode, computeSingleSpecialScenario(startNode=startNode, endNode=endNode, **kwargs)
def generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress,
solver='glpk', threads=1, verbose=0):
"""
Compute for every node combination a special robust scenario according to Robinius et. al. (2019)
and Labbé et. al. (2019)
:param injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative) for every time step and node; unit [kg/s]
:type: pandas dataframe
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param threads: number of threads used for parallelization
:type threads: positive integer
    :param verbose: if 0, parallelization progress is displayed
:type verbose: int
:return dictionary that contains for every node pair a dictionary containing all arc flows of the corresponding
special scenario
:rtype: dictionary key: (node1,node2), value: dictionary: key: arc, value: arc flow in [kg/s]
:return list of entry node
:rtype: list of strings
:return list of exit node
:rtype: list of strings
"""
# Type and value checks
isPandasDataFrameNumber(injectionWithdrawalRates)
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
# get for every entry/exit node the minimal and maximal injection rate and save it in a
# dictionary: key: node, value: min Rate; respectively max Rate in [kg/s]
    # we note that inner nodes are handled separately in the computation of the special scenario
dic_nodes_MinCapacity = {}
dic_nodes_MaxCapacity = {}
    # list of entry nodes and exit nodes; note that a node can be in both, for example storages
entries = []
exits = []
inners = []
for node in list(injectionWithdrawalRates.columns.values):
minRate = injectionWithdrawalRates[node].min()
maxRate = injectionWithdrawalRates[node].max()
assert (minRate <= maxRate)
dic_nodes_MinCapacity[node] = minRate
dic_nodes_MaxCapacity[node] = maxRate
# if minRate is negative, then node is an entry; if maxRate is positive, then node is an exit
if minRate < 0.0:
entries.append(node)
if maxRate > 0.0:
exits.append(node)
elif maxRate > 0:
exits.append(node)
else:
inners.append(node)
    maxPressuresAreEqual = len(set(dic_node_maxPress.values())) == 1
p_exits = [dic_node_minPress[exit] for exit in exits]
p_entries_inners = [dic_node_minPress[node] for node in entries]
p_inners = [dic_node_minPress[node] for node in inners]
p_entries_inners.extend(p_inners)
    minPressureExitsIsLarger = min(p_exits) >= max(p_entries_inners)
# compute special scenario for each node combination; see Paper Robinius et. al.(2019); Labbé et. al. (2019)
# save arc flows of special scenarios for each node combination;
# dictionary: key: node pair, value: dictionary: key: arc, value: arc flow
dic_nodePair_flows = {}
if maxPressuresAreEqual and minPressureExitsIsLarger:
if verbose == 0:
print('Reduced robust scenario set can be generated' +
' (pMax is equal at all nodes & pMin at exits is >= at inner and entry nodes).')
nodes = [(startNode, endNode) for startNode in entries for endNode in exits if startNode != endNode]
else:
nodes = [(startNode, endNode) for startNode in graph.nodes for endNode in graph.nodes if startNode != endNode]
pool = Pool(threads)
for i, values in enumerate(pool.imap(partial(_generateRobustScenarios, graph=graph, distances=distances,
entries=entries, exits=exits, dic_nodes_MinCapacity=dic_nodes_MinCapacity,
dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, solver=solver),
nodes), 1):
if verbose == 0:
sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(nodes) * 100)))
dic_nodePair_flows[values[0]] = values[1]
pool.close()
pool.join()
return dic_nodePair_flows, entries, exits
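# Hedged sketch of a typical call (solver and thread count are assumptions):
#   dic_nodePair_flows, entries, exits = generateRobustScenarios(
#       injectionWithdrawalRates, G, distances, dic_node_minPress, dic_node_maxPress,
#       solver='glpk', threads=4)
# Each dictionary value holds the arc flows in [kg/s] of one special robust scenario.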
def computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity,
dic_nodes_MaxCapacity, specialScenario=True, solver='glpk'):
"""
Compute special robust scenario for given node combination according to Robinius et. al. (2019)
and Labbé et. al. (2019)
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param entries: list of entry nodes of the network
:type entries: list of strings
:param exits: list of exit nodes of the network
:type exits: list of strings
:param startNode: node of the network (starting node of the special scenario)
:type startNode: string
:param endNode: node of the network (end node of special scenario)
:type endNode: string
:param dic_nodes_MinCapacity: dictionary containing minimal capacity for each node
:type dic_nodes_MinCapacity: dictionary: key: node of the network, value: float
:param dic_nodes_MaxCapacity: dictionary containing maximal capacity for each node
:type dic_nodes_MaxCapacity: dictionary: key: node of the network, value: float
:param specialScenario: bool: True if we compute special robust scenario; False if we compute scenario for fixed
demand vector, e.g., for scenario of a time step
:type specialScenario: bool
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:return dictionary that contains for every arc the corresponding arc flows of the (special) scenario
:rtype: dictionary key: arc, value: arc flow
"""
# Type and value check
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
isListOfStrings(entries)
isListOfStrings(exits)
utils.isString(startNode)
utils.isString(endNode)
if isinstance(dic_nodes_MinCapacity, dict) and isinstance(dic_nodes_MaxCapacity, dict):
if not (dic_nodes_MinCapacity.keys() == dic_nodes_MaxCapacity.keys()):
raise TypeError("Dictionaries for min and max capacity need same keys")
for node in dic_nodes_MinCapacity.keys():
if not (isinstance(dic_nodes_MinCapacity[node], float) or isinstance(dic_nodes_MinCapacity[node], int)):
raise TypeError("The input argument has to be an number")
if not (isinstance(dic_nodes_MaxCapacity[node], float) or isinstance(dic_nodes_MaxCapacity[node], int)):
raise TypeError("The input argument has to be an number")
if dic_nodes_MaxCapacity[node] < dic_nodes_MinCapacity[node]:
raise ValueError("minimal node capacity has to be equal or smaller than maximal node capacity")
else:
raise TypeError("dic_nodes_MinCapacity and dic_nodes_MinCapacity have to be dictionaries")
isBool(specialScenario)
# we build concrete Pyomo Model
model = py.ConcreteModel()
# Description model: we have a simple directed graph. We allow negative flows because a pipe can be used in both
# directions by the flows
model.Nodes = py.Set(initialize=graph.nodes)
# important to use distances.keys() instead of graph.edges such that we do not have key errors later on because
# the edges in graph are undirected and in distances.keys() directed
model.Arcs = py.Set(initialize=distances.keys(), dimen=2)
# create demand variables for every node;
# if specialScenario is true, then we compute special scenario, i.e. entry/exit demand variables are bounded by
# min(0,minimal_capacity) <= demandVariable <= max(0, maximal_capacity)
# demand variables for inner nodes are set to zero
# if specialScenario is false, the demand variable is just bounded by the minimal and maximal capacity
if specialScenario:
def demandCapacities(model, node):
if node in entries or node in exits:
return min(0, dic_nodes_MinCapacity[node]), max(0, dic_nodes_MaxCapacity[node])
else:
return 0, 0
model.Demand = py.Var(model.Nodes, bounds=demandCapacities)
else:
# we do not compute special scenarios; we just compute flows for given, possibly fixed, demands
def demandCapacities(model, node):
return dic_nodes_MinCapacity[node], dic_nodes_MaxCapacity[node]
model.Demand = py.Var(model.Nodes, bounds=demandCapacities)
# create arc flow variables for every arc of the network
model.Flow = py.Var(model.Arcs)
# compute NodesOut, i.e., set of nodes that are connected to considered node by outgoing arc
def nodes_out_init(model, node):
retval = []
for (i, j) in model.Arcs:
if i == node:
retval.append(j)
return retval
model.NodesOut = py.Set(model.Nodes, initialize=nodes_out_init)
# compute NodesIn, i.e., set of nodes connected to considered node by ingoing arc
def nodes_in_init(model, node):
retval = []
for (i, j) in model.Arcs:
if j == node:
retval.append(i)
return retval
model.NodesIn = py.Set(model.Nodes, initialize=nodes_in_init)
# add flow balance constraints corresponding to the node demands
def flow_balance_rule(model, node):
return sum(model.Flow[i, node] for i in model.NodesIn[node]) \
- sum(model.Flow[node, j] for j in model.NodesOut[node]) \
== model.Demand[node]
model.FlowBalance_cons = py.Constraint(model.Nodes, rule=flow_balance_rule)
# compute unique flow-path P(startNode,endNode) from entry to exit; given by list of nodes of the path
pathNodes = nx.shortest_path(graph, source=startNode, target=endNode)
# non zero coefficients of objective function
dic_arc_coef = {}
# determine coefficients for objective function
# if for an arc (u,v), u, respectively v, are not in pathNodes, then the coefficient is 0
# if arc (u,v) of pathNodes satisfies P(startNode, u) subset P(startNode,v), then coefficient is 1, otherwise -1
for index in range(0, len(pathNodes) - 1):
# check which direction of the arc is contained in the graph
if (pathNodes[index], pathNodes[index + 1]) in model.Arcs:
dic_arc_coef[(pathNodes[index], pathNodes[index + 1])] = 1
else:
dic_arc_coef[(pathNodes[index + 1], pathNodes[index])] = -1
# we set objective
def obj_rule(model):
return sum(dic_arc_coef[arc] * model.Flow[arc] for arc in dic_arc_coef.keys())
model.Obj = py.Objective(rule=obj_rule, sense=py.maximize)
# Create a solver
opt = SolverFactory(solver)
# Solve optimization model
results = opt.solve(model)
# status of solver
status = results.solver.status
# termination condition
termCondition = results.solver.termination_condition
# save the solution of the flows in a dictionary key: arcs, values: flow
dic_scenario_flow = {}
if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:
utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +
'. No output is generated.', 0, 0)
elif termCondition == TerminationCondition.infeasibleOrUnbounded or \
termCondition == TerminationCondition.infeasible or \
termCondition == TerminationCondition.unbounded:
utils.output('Optimization problem is ' + str(termCondition) +
'. No output is generated.', 0, 0)
else:
# If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown
# status), show a warning message.
if not termCondition == TerminationCondition.optimal:
warnings.warn('Output is generated for a non-optimal solution.')
        # save the arc flows of the scenario in dic_scenario_flow; they will be needed for the MIP
for arc in model.Arcs:
dic_scenario_flow[arc] = model.Flow[arc].value
return dic_scenario_flow
def computeLargeMergedDiameters(dic_subSetDiam_costs, nDigits=6):
"""
Compute merged diameters, i.e. compute equivalent single diameter for two looped pipes.
:param dic_subSetDiam_costs: dictionary containing diameters in [m] and costs in [Euro/m]
:type: dictionary: key: diameter, value: costs
:param nDigits: number of digits used in the round function
|br| * the default value is 6
:type nDigits: positive int
:return dic_newDiam_costs: dictionary containing merged diameters in [m] and costs in [Euro/m]
:rtype: dictionary: key: diameter, value: costs
:return dic_newDiam_oldDiam: dictionary matching new diameters to old diameters
:rtype: dictionary: key: new diameter, value: corresponding old diameter, which will be used in the looped pipe
"""
# Type and value check
if isinstance(dic_subSetDiam_costs, dict):
for diam in dic_subSetDiam_costs.keys():
utils.isStrictlyPositiveNumber(diam)
utils.isStrictlyPositiveNumber(dic_subSetDiam_costs[diam])
else:
raise TypeError("The input has to be a dictionary")
utils.isStrictlyPositiveInt(nDigits)
dic_newDiam_costs = {}
dic_newDiam_oldDiam = {}
for diam in dic_subSetDiam_costs.keys():
# compute new diameter in [m] and its costs in [Euro/m]
        # for the formula see (1) in the paper by Reuß et al.
        # since both pipes of the loop have the same diameter, the formula reduces to 2^(2/5) * diam
        # and thus diam does not have to be transformed from [m] to [mm]
        newDiam = round((diam ** (5 / 2) + diam ** (5 / 2)) ** (2 / 5), nDigits)
# costs are two times costs of diam because newDiam represents two looped pipe with diameter diam
newCosts = 2 * dic_subSetDiam_costs[diam]
dic_newDiam_costs[newDiam] = newCosts
dic_newDiam_oldDiam[newDiam] = diam
return dic_newDiam_costs, dic_newDiam_oldDiam
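# Worked example of the merging formula above (values rounded, purely illustrative): for diam = 0.5 m,
# newDiam = (0.5**2.5 + 0.5**2.5)**0.4 = 2**0.4 * 0.5 ≈ 0.6598 m, i.e. a single pipe of this diameter is
# treated as hydraulically equivalent to two looped pipes of 0.5 m, at twice the costs per metre.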
def determinePressureDropCoef(dic_scenario_flows, distances, dic_node_minPress, dic_node_maxPress,
diameters, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965, nDigits=6):
"""
Compute for each scenario, diameter, and each arc the corresponding pressure drop
:param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all
arc flows in [kg/s] of the corresponding (special) scenario
:type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param diameters: list of diameters in [m]
:type: list of strictly positive numbers
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float; optional
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float; optional
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float; optional
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float; optional
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float; optional
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float; optional
:param nDigits: number of digits used in the round function
|br| * the default value is 6
:type nDigits: positive int; optional
:return dictionary that contains for every scenario and diameter the corresponding pressure drops
:rtype: dictionary key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop
"""
# check type and value
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
isPandasSeriesPositiveNumber(distances)
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
if isinstance(diameters, list):
for diam in diameters:
utils.isPositiveNumber(diam)
else:
raise TypeError("Diameters has to be a list")
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
    if not isinstance(T_m, float):
        raise TypeError("The input argument has to be a number")
    if not isinstance(T_n, float):
        raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
utils.isStrictlyPositiveInt(nDigits)
# compute for each diameter, scenario, and arc its pressure drop
# save results in dic: key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop
dic_pressureDropCoef = {}
for diameter in diameters:
for nodePair in dic_scenario_flows.keys():
# initialize dictionary
dic_pressureDropCoef[(diameter, nodePair)] = {}
# compute cross section of considered pipe and diameter
tmpvalue_A = 0.25 * np.pi * diameter ** 2
for arc in dic_scenario_flows[nodePair].keys():
# check if flow is unequal to zero
if dic_scenario_flows[nodePair][arc] != 0.0:
                    # Compute an approximation of the average pressure of the flow in pipe (u,v):
                    # if flow((u,v)) is positive, set p_min to the lower pressure bound of v and p_max to the
                    # upper pressure bound of u
                    # if flow((u,v)) is negative, set p_min to the lower pressure bound of u and p_max to the
                    # upper pressure bound of v
if dic_scenario_flows[nodePair][arc] > 0:
p_min = dic_node_minPress[arc[1]]
p_max = dic_node_maxPress[arc[0]]
else:
p_min = dic_node_minPress[arc[0]]
p_max = dic_node_maxPress[arc[1]]
# compute approximation of average pressure
p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min))
# approximation for density
rho = 0.11922 * p_m ** 0.91192 - 0.17264
# approximation of the realgasfactor
Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050
K_m = Z_m / Z_n
# approximation of the dynamic viscosity
eta = 1.04298 * 10 ** (-10) * p_m ** 1.53560 + 8.79987 * 10 ** (-6)
nue = eta / rho
# compute velocity
tmpvalue_w = (abs(dic_scenario_flows[nodePair][arc]) / rho) / tmpvalue_A
# compute reynolds number
tmpvalue_Re = tmpvalue_w * (diameter / nue)
tmpvalue_alpha = np.exp(-np.exp(6.75 - 0.0025 * tmpvalue_Re))
tmpvalue_Lambda = (64 / tmpvalue_Re) * (1 - tmpvalue_alpha) + tmpvalue_alpha * (
-2 * np.log10(2.7 * (np.log10(tmpvalue_Re) ** 1.2 / tmpvalue_Re) + ir / (3.71 * 1000 *
diameter))) ** (-2)
# note p_n is in [bar] instead of [PA], thus we divide tmpvalue_C by 10**5
# explanation: we have p_i^2-p_j^2=C. If p_i is in [PA] and we want p_i in [bar] then this leads to
# (p_i/10^5)^2-(p_j/10^5)^2=C/10^10
# but we changed p_n in computation C from [PA] to [bar] hence we only divide C by 10^5
tmpvalue_C_bar = tmpvalue_Lambda * 16 * rho_n * T_m * p_n * K_m / (np.pi ** 2 * T_n * 10 ** 5)
# compute final pressure drop coefficient depending on the flow
tmp_value_C_coef = (distances[arc] / rho_n ** 2) * \
(tmpvalue_C_bar * dic_scenario_flows[nodePair][arc] *
abs(dic_scenario_flows[nodePair][arc]) / diameter ** 5)
# save pressure drop for considered diameter, scenario, and arc
dic_pressureDropCoef[(diameter, nodePair)][arc] = tmp_value_C_coef
else:
dic_pressureDropCoef[(diameter, nodePair)][arc] = 0
return dic_pressureDropCoef
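# Hedged usage sketch (diameters are illustrative): with the robust scenario flows from above,
#   dic_pressureDropCoef = determinePressureDropCoef(dic_nodePair_flows, distances,
#                                                    dic_node_minPress, dic_node_maxPress,
#                                                    diameters=[0.3, 0.5])
# maps every (diameter, scenarioName) pair to a dictionary of per-arc pressure drop coefficients.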
def determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureDropCoef, specialScenarioNames,
dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True,
solver='glpk', threads=4, verbose=0):
"""
Model of optimal pipeline sizing (diameter selection) w.r.t. to the given scenarios
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_pressureDropCoef: dictionary that contains for every scenario and diameter the
corresponding pressure drops in [bar]
:type dic_pressureDropCoef: dictionary: keys: scenarioName; value: dict: key: arc, value: pressure drop in [bar]
:param specialScenarioNames: list of names of scenarios. In robust case tuples (startNode, endNode).
:type specialScenarioNames: list of tuples in the robust case, otherwise list of time Steps
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param dic_diam_costs: dictionary that contains for every diameter in [m] its costs [Euro/m]
:type dic_diam_costs: dictionary key: diameter, value: non-negative float
:param robust: Bool that is true, if we optimize w.r.t. robust scenarios, otherwise False.
:type robust: bool
:return dictionary that contains for every arc the optimal diameter in [m]
:rtype dictionary: key: arc, value: optimal diameter
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:param threads: number of threads used for optimization (if gurobi is used)
:type threads: positive integer
    :param verbose: if 0, parallelization progress is displayed
:type verbose: int
:return dictionary that contains for every scenario the corresponding pressure levels
:rtype dictionary: key: scenarioName, value: dict: key: node, value: pressure level of node
"""
# type and value checks
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
if not isinstance(dic_pressureDropCoef, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(specialScenarioNames, list):
if robust:
for scenario in specialScenarioNames:
                if not isinstance(scenario, tuple):
                    raise TypeError("The scenario names have to be tuples in the robust case")
else:
raise TypeError("The input argument has to be a list")
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
if isinstance(dic_diam_costs, dict):
for diam in dic_diam_costs.keys():
utils.isStrictlyPositiveNumber(diam)
utils.isStrictlyPositiveNumber(dic_diam_costs[diam])
else:
raise TypeError("The input has to be a dictionary")
if not isinstance(robust, bool):
raise TypeError("The input has to be a bool")
utils.isString(solver)
utils.isPositiveNumber(verbose)
# set list of available diameters
diameters = dic_diam_costs.keys()
# build concrete pyomo model
model = py.ConcreteModel()
# sets for nodes, arcs, diameters, scenarios
model.nodes = py.Set(initialize=graph.nodes)
model.arcs = py.Set(initialize=list(distances.keys()), dimen=2)
# diameters assuming that each pipe has the same diameter options
model.diameters = py.Set(initialize=diameters)
# if we have special scenarios, scenario names are tuples, otherwise not
if robust:
# set indices for each scenario by its nodePair = (startnode, endnode)
model.scenarios = py.Set(initialize=specialScenarioNames, dimen=2)
else:
# set indices for each timeStep number
model.scenarios = py.Set(initialize=specialScenarioNames, dimen=1)
# create variables binaries x are the same for each scenario
# pressure variables are different for each scenario
model.x = py.Var(model.arcs, model.diameters, domain=py.Binary)
if robust:
def pressureBounds(model, node, startnode, endnode):
return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2
model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)
else:
def pressureBounds(model, node, timeStep):
return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2
model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds)
# objective: minimize the costs
def obj_rule(model):
return sum(
sum(dic_diam_costs[diam] * distances[arc] * model.x[arc, diam] for diam in model.diameters)
for arc in model.arcs)
model.Obj = py.Objective(rule=obj_rule)
# pressure drop for each cons and each scenario
if robust:
def pressure_drop(model, arc0, arc1, scenarioStart, scenarioEnd):
return model.pi[arc1, (scenarioStart, scenarioEnd)] - model.pi[arc0, (scenarioStart, scenarioEnd)] == \
-sum(dic_pressureDropCoef[(diam, (scenarioStart, scenarioEnd))][(arc0, arc1)] *
model.x[arc0, arc1, diam] for diam in model.diameters)
model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_drop)
else:
def pressure_dropNotRobust(model, arc0, arc1, timeStep):
return model.pi[arc1, timeStep] - model.pi[arc0, timeStep] == \
-sum(dic_pressureDropCoef[(diam, timeStep)][(arc0, arc1)] *
model.x[arc0, arc1, diam] for diam in model.diameters)
model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_dropNotRobust)
# ensure that a single diameter per arc is chosen
def selection_diameter(model, arc0, arc1):
return sum(model.x[arc0, arc1, diam] for diam in model.diameters) == 1
model.SelectionDiameter_cons = py.Constraint(model.arcs, rule=selection_diameter)
# Create a solver
opt = SolverFactory(solver)
# Set the specified solver options
# Solve optimization problem. The optimization solve time is stored and the solver information is printed.
if (verbose == 2) & (solver == 'gurobi'):
optimizationSpecs = ' LogToConsole=0'
opt.set_options('Threads=' + str(threads) + optimizationSpecs)
results = opt.solve(model, tee=True, keepfiles=False)
else:
results = opt.solve(model, tee=True, report_timing=True, keepfiles=False)
# status of solver
status = results.solver.status
# termination condition
termCondition = results.solver.termination_condition
# write diameter solution to dictionary: key: arc, value: optimal diameter
# write pressure solutions to dictionary; key: scenarioName, value: dict: key: node, value: pressure level in [bar]
dic_arc_diam = {}
dic_scen_node_press = {}
if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown:
utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) +
'. No output is generated.', 0, 0)
elif termCondition == TerminationCondition.infeasibleOrUnbounded or \
termCondition == TerminationCondition.infeasible or \
termCondition == TerminationCondition.unbounded:
utils.output('Optimization problem is ' + str(termCondition) +
'. No output is generated.', 0, 0)
else:
# If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown
# status), show a warning message.
if not termCondition == TerminationCondition.optimal:
warnings.warn('Output is generated for a non-optimal solution.')
# initialize dict with empty dict
for scenario in specialScenarioNames:
dic_scen_node_press[scenario] = {}
for v in model.component_objects(py.Var, active=True):
varobject = getattr(model, str(v))
for index in varobject:
                # round because the binary variable value can be numerically close to, but not exactly, one
if str(varobject) == 'x' and round(varobject[index].value) == 1:
dic_arc_diam.update({(index[0], index[1]): index[2]})
elif str(varobject) == 'pi':
if robust:
# need sqrt() because in model pressure is quadratic because of the transformation
dic_scen_node_press[(index[1], index[2])].update({index[0]: np.sqrt(varobject[index].value)})
else:
# need sqrt() because in model pressure is quadratic because of the transformation
dic_scen_node_press[(index[1])].update({index[0]: np.sqrt(varobject[index].value)})
return dic_arc_diam, dic_scen_node_press
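# Hedged sketch of the MIP call (diameter and cost values are illustrative):
#   dic_diam_costs = {0.3: 800.0, 0.5: 1400.0}   # diameter [m] -> costs [Euro/m]
#   dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(
#       G, distances, dic_pressureDropCoef, list(dic_nodePair_flows.keys()),
#       dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True, solver='glpk')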
def _postprocessing(scenario, dic_scenario_flows, graph, **kwargs):
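    # multiprocessing helper: for a single scenario, try to fix the pressure of some node to its upper
    # bound and propagate the pressure levels through the tree; keep the assignment with the smallest
    # maximal pressure bound violation (zero if a feasible assignment is found)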
dic_scen_PressLevel = {}
dic_scen_MaxViolPress = math.inf
# copy a list of nodes
tmp_nodes = copy.deepcopy(list(graph.nodes))
# we now set iteratively the pressure level of a single node to its upper pressure bound and then compute the
# unique pressure levels until we find valid pressure levels or have tested all nodes
while tmp_nodes:
# we have not found valid pressure levels for this scenario
# temporary pressure levels
dic_tmp_pressure = {}
for node in list(graph.nodes):
dic_tmp_pressure[node] = None
        # choose the node whose pressure level is fixed to the upper pressure bound
current_node = tmp_nodes[0]
validation, tmp_viol = computePressureAtNode(graph=graph, node=current_node, nodeUpperBound=current_node,
dic_scenario_flows=dic_scenario_flows[scenario], dic_node_pressure=dic_tmp_pressure, **kwargs)
# if validation true, then we have feasible pressure levels; empty list of nodes that have to be
# considered
if validation:
tmp_nodes = []
# we have feasible pressure level and save them
dic_scen_PressLevel = dic_tmp_pressure
dic_scen_MaxViolPress = tmp_viol
else:
# remove considered entry from list of nodes that will be considered for fixing the pressure level
tmp_nodes.remove(tmp_nodes[0])
# we update the maximal pressure level violation
if tmp_viol < dic_scen_MaxViolPress:
# save currently best pressure levels
dic_scen_PressLevel = copy.deepcopy(dic_tmp_pressure)
dic_scen_MaxViolPress = tmp_viol
return scenario, dic_scen_PressLevel, dic_scen_MaxViolPress
def postprocessing(graph, distances, dic_arc_diam, dic_scenario_flows, dic_node_minPress, dic_node_maxPress,
threads=1, verbose=0):
""""
Compute "more" accurate pressure levels for the considered scenarios in the network with optimal diameters
Apply postprocessing of Master's thesis with adaption that we possibly consider every node for fixing its
pressure level to the upper pressure bound.
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all
arc flows in [kg/s] of the corresponding (special) scenario
:type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
:param threads: number of threads used for parallelization
:type threads: positive integer
    :param verbose: if 0, parallelization progress is displayed
:type verbose: int
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:return: dictionary that contains for every scenario the corresponding pressure levels in [bar]
:rtype: dictionary key: scenarioName, value: dic: key: arc, value pressure level
:return: dictionary that contains for every scenario the maximal pressure bound violation in [bar]
:rtype: dictionary key: scenarioName, value: float = maximal pressure bound violation
"""
# Type and value check
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
        for diam in dic_arc_diam.keys():
            utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
    else:
raise TypeError("The input has to be a dictionary")
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
# best found pressure levels for scenarios; dic key: scenario, value: dic: key: node, value: pressure level in [bar]
dic_scen_PressLevel = {}
# maximal violation of pressure bounds; zero if no violation exists; dic: key: scenario, value: pressure violation
dic_scen_MaxViolPress = {}
    # we compute "precise" pressure levels for every scenario
pool = Pool(threads)
scenarios = [scenario for scenario in dic_scenario_flows.keys()]
for i, values in enumerate(pool.imap(partial(_postprocessing, validation=True, graph=graph, dic_arc_diam=dic_arc_diam,
distances=distances, dic_node_minPress=dic_node_minPress, dic_node_maxPress=dic_node_maxPress, tmp_violation=0,
dic_scenario_flows=dic_scenario_flows), scenarios), 1):
if verbose == 0:
sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(scenarios) * 100)))
dic_scen_PressLevel[values[0]] = values[1]
dic_scen_MaxViolPress[values[0]] = values[2]
pool.close()
pool.join()
return dic_scen_PressLevel, dic_scen_MaxViolPress
def computePressureAtNode(validation, node, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows,
dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure,
ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965, nDigits=6):
""""
Compute pressure levels recursive for given scenario and node that is fixed to its upper pressure level
:param validation: boolean that is False, if the computed pressure levels are infeasible
:rtype validation: bool
:param node: node of the network for which we currently consider for computing the pressure levels
:type node: str
:param nodeUpperBound: node which pressure level is fixed to the upper bound
:type node: str
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]
:type: dictionary: key: arc, value: arc flow
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param tmp_violation: violation of the current pressure bounds in [bar]
:type tmp_violation: float
:param dic_node_pressure: dictionary that contains node pressure levels in [bar]
:type dic_node_pressure: dictionary key: node of the network, value: non-negative float
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
:param nDigits: number of digits used in the pandas round function. Is applied to the
specified or determined injection and withdrawal rates.
|br| * the default value is 6
:type nDigits: positive int
:return validation: boolean that is true, if the computed pressure levels are feasible
:rtype: bool
:return maximal violation of the pressure bounds w.r.t. the computed pressure levels in [bar]
:rtype: float
"""
# Type and value check
isBool(validation)
utils.isString(node)
utils.isString(nodeUpperBound)
isNetworkxGraph(graph)
isPandasSeriesPositiveNumber(distances)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
else:
raise TypeError("The input has to be a dictionary")
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
utils.isPositiveNumber(tmp_violation)
if not isinstance(dic_node_pressure, dict):
raise TypeError("The Input has to a dictionary")
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
    if not isinstance(T_m, float):
        raise TypeError("The input argument has to be a number")
    if not isinstance(T_n, float):
        raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
utils.isStrictlyPositiveInt(nDigits)
# if node is equal to nodeUpperBound, we fix its pressure level to the upper bound; base case in recursion
if node == nodeUpperBound:
dic_node_pressure[node] = dic_node_maxPress[node]
# list of arcs
arcs = list(distances.keys())
# we now compute the neighbors of the considered node
neighbors = graph.neighbors(node)
# compute pressure levels for neighbor nodes
for neighbor in neighbors:
# check if pressure is already computed
if dic_node_pressure[neighbor] is None:
# check if (node,neighbor) or (neighbor,node) is in graph
if (node, neighbor) in arcs:
# check flow direction for arc (node,neighbor)
if dic_scenario_flows[(node, neighbor)] >= 0.0:
# we know pressure level of beginning node of arc; compute pressure level for end node of arc
dic_node_pressure[neighbor] = computePressureEndnodeArc((node, neighbor), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam, distances,
ir, rho_n, T_m, T_n, p_n, Z_n)
else:
# we know pressure level of endnode
dic_node_pressure[neighbor] = computePressureStartnodeArc((node, neighbor), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam,
distances,
ir, rho_n, T_m, T_n, p_n, Z_n,
tol=10 ** (- nDigits))
else:
# we know that arc (neighbor,node) is contained in the graph
# check flow direction
if dic_scenario_flows[(neighbor, node)] <= 0.0:
# we know pressure of start node
dic_node_pressure[neighbor] = computePressureEndnodeArc((neighbor, node), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam, distances,
ir, rho_n, T_m, T_n, p_n, Z_n)
else:
# we know pressure level of end node
dic_node_pressure[neighbor] = computePressureStartnodeArc((neighbor, node), dic_node_pressure[node],
dic_scenario_flows, dic_arc_diam,
distances,
ir, rho_n, T_m, T_n, p_n, Z_n,
tol=10 ** (- nDigits))
# check if new computed pressure level is feasible
if dic_node_pressure[neighbor] == - math.inf:
# pressure violation is really high
tmp_violation = math.inf
return False, tmp_violation
# check if we violate pressure bounds for neighbor node
if dic_node_pressure[neighbor] < dic_node_minPress[neighbor] \
or dic_node_pressure[neighbor] > dic_node_maxPress[neighbor]:
# pressure level is not valid
validation = False
# update pressure bound violation
if dic_node_pressure[neighbor] < dic_node_minPress[neighbor]:
# update violation and violation node if it is bigger
if tmp_violation is None or \
abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) > tmp_violation:
tmp_violation = abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor])
else:
if tmp_violation is None or \
abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) > tmp_violation:
tmp_violation = abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor])
            # recurse on the neighbor to propagate the pressure computation through the tree
validation, tmp_violation = computePressureAtNode(validation, neighbor, nodeUpperBound, graph, dic_arc_diam,
distances,
dic_scenario_flows, dic_node_minPress, dic_node_maxPress,
tmp_violation, dic_node_pressure)
return validation, tmp_violation
def computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2,
rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965, tol=10 ** (-4)):
""""
For given arc and pressure level of endNode compute the pressure of the startNode by solving the corresponding
equation system
:param arc: arc of the network for which we know the pressure at the endNode, i.e. the node which receives gas
:type arc: tuple
:param pressureEndNode: pressure level of endNode
:type pressureEndNode: non-negative float
:param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]; note arc flow of arc has to be
positive
:type: dictionary: key: arc, value: arc flow
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
:param Z_n: realgasfactor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
    :param tol: tolerance with which we solve the equation system
|br| * the default value is 10^-4
:type tol: non-negative float
:return: pressure level of startNode in [bar]
:rtype: float
"""
# Type and Value check
if not isinstance(arc, tuple):
raise TypeError("The input has to be a tuple")
utils.isStrictlyPositiveNumber(pressureEndNode)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
isPandasSeriesPositiveNumber(distances)
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
    if not isinstance(T_m, float):
        raise TypeError("The input argument has to be a number")
    if not isinstance(T_n, float):
        raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
utils.isStrictlyPositiveNumber(tol)
if dic_scenario_flows[arc] == 0.0:
return pressureEndNode
    # define the function of the nonlinear equation system f(x) = pressure_start^2 - pressure_end^2 - C;
    # its root is the valid start-node pressure level, since pressure_end is known
def f(pressure_start):
d = dic_arc_diam[arc]
A = 0.25 * math.pi * d ** 2
rho_in = 0.11922 * pressure_start ** 0.91192 - 0.17264
V_in = abs(dic_scenario_flows[arc]) / rho_in
w_in = V_in / A
eta_in = 1.04298 * 10 ** (-10) * pressure_start ** 1.53560 + 8.79987 * 10 ** (-6)
nue_in = eta_in / rho_in
Re_in = w_in * (d / nue_in)
alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))
Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(
(2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +
ir / (3.71 * 1000 * d))) ** (-2)
C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)
# note pressure_start is in bar
p_m = pressure_start - C_tilde / 10 ** 5
if p_m < 0.0:
# pressure drop too large no valid pressure assignment possible
return -math.inf
Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050
K_m = Z_m / Z_n
# note flow direction is given by startnode endnode so we square the arcflow
C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (
math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * dic_scenario_flows[arc] ** 2
return pressure_start ** 2 - pressureEndNode ** 2 - C
    # find the root of f; the start value is pressureEndNode plus a flow-dependent guess for the pressure drop
    # x = fsolve(f, pressureEndNode + 0.5)
    # you can replace this guess by the approximation of the pressure drop of the MIP to probably achieve
    # better results
x = fsolve(f, pressureEndNode + 0.5 * (dic_scenario_flows[arc] ** 2) / (dic_arc_diam[arc] ** 5))
    # check that the residual of the computed solution is within the tolerance
    assert isinstance(tol, float)
    if abs(f(x[0])) <= tol:
        # solution is acceptable; x is an array with a single entry, so return its first element
        return x[0]
    else:
        # we could not solve the system; this can happen if the pressure drop is too large
        # or if the start value for the nonlinear equation solver is too far away from the solution
        print("Nonlinear equation system in Postprocessing failed. Try another node whose pressure level is"
              " set to the upper bound")
return -math.inf
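# Illustrative usage sketch (not part of the original module): recovering the start-node pressure
# of a single arc from a known end-node pressure. The arc name, flow, diameter and length below
# are made-up demo values; only the signature documented above is assumed.
def _example_computePressureStartnodeArc():
    import pandas as pd
    arc = ('n1', 'n2')
    flows = {arc: 2.0}                  # arc flow in [kg/s], gas flows from n1 to n2
    diams = {arc: 0.5}                  # chosen diameter in [m]
    dists = pd.Series({arc: 10000.0})   # pipeline length in [m]
    # the end-node pressure is known (60 bar); solve the nonlinear system for the start node
    p_start = computePressureStartnodeArc(arc, 60.0, flows, diams, dists)
    # a return value of -math.inf signals that no feasible pressure assignment exists
    return p_start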
def computePressureEndnodeArc(arc, pressureStartNode, dic_scenario_flows, dic_arc_diam, distances,
ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325,
Z_n=1.00062387922965):
""""
For given arc and pressure level of startNode compute the pressure of the endNode
:param arc: arc of the network for which we know the pressure at the endNode, i.e. the node which receives gas
:type arc: tuple
:param pressureStartNode: pressure level of endNode
:type pressureStartNode: non-negative float
:param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]
:type: dictionary: key: arc, value: arc flow
:param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m]
:type: dictionary: key: arc, value: optimal diameter
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
    :param Z_n: real gas factor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
:return: pressure level of endNode in [bar]
:rtype: float
"""
# Type and Value check
if not isinstance(arc, tuple):
raise TypeError("The input has to be a tuple")
utils.isStrictlyPositiveNumber(pressureStartNode)
if not isinstance(dic_scenario_flows, dict):
raise TypeError("The input has to be a dictionary")
if isinstance(dic_arc_diam, dict):
for diam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
isPandasSeriesPositiveNumber(distances)
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
    if not isinstance(T_m, float):
        raise TypeError("The input argument has to be a number")
    if not isinstance(T_n, float):
        raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
arcFlow = dic_scenario_flows[arc]
if arcFlow != 0:
d = dic_arc_diam[arc]
A = 0.25 * math.pi * d ** 2
rho_in = 0.11922 * pressureStartNode ** 0.91192 - 0.17264
V_in = abs(arcFlow) / rho_in
w_in = V_in / A
eta_in = 1.04298 * 10 ** (-10) * pressureStartNode ** 1.53560 + 8.79987 * 10 ** (-6)
nue_in = eta_in / rho_in
Re_in = w_in * (d / nue_in)
alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in))
Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10(
(2.7 * (math.log10(Re_in)) ** 1.2) / Re_in +
ir / (3.71 * 1000 * d))) ** (-2)
C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d)
# note pressure_start is in bar
p_m = pressureStartNode - C_tilde / 10 ** 5
if p_m < 0.0:
# pressure drop too large no valid pressure assignment possible
return -math.inf
Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050
K_m = Z_m / Z_n
# note flow direction is given by startnode endnode so we square the arcflow
C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (math.pi ** 2 * T_n * rho_n * 10 ** 5 *
dic_arc_diam[arc] ** 5) * arcFlow ** 2
else:
# flow is zero therefore pressure drop is zero
C = 0
if pressureStartNode ** 2 - C >= 0:
return math.sqrt(pressureStartNode ** 2 - C)
else:
        # pressure drop is too big; return a negative value, which is an invalid pressure value
return -math.inf
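# Illustrative usage sketch (not part of the original module): the forward direction, i.e.
# propagating a known start-node pressure to the end node of an arc. Demo values only;
# only the signature documented above is assumed.
def _example_computePressureEndnodeArc():
    import pandas as pd
    arc = ('n1', 'n2')
    flows = {arc: 2.0}                  # arc flow in [kg/s]
    diams = {arc: 0.5}                  # chosen diameter in [m]
    dists = pd.Series({arc: 10000.0})   # pipeline length in [m]
    # the start-node pressure is known (65 bar); compute the resulting end-node pressure
    p_end = computePressureEndnodeArc(arc, 65.0, flows, diams, dists)
    # a return value of -math.inf signals that the pressure drop along the arc is too large
    return p_end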
def _computeTimeStepFlows(index, injectionWithdrawalRates, graph, **kwargs):
# compute flows corresponding to demand by fixing demand for every node to given value and then compute
# flows by LP
dic_nodes_MinCapacity = {}
dic_nodes_MaxCapacity = {}
activeNodes = injectionWithdrawalRates.columns
for node in graph.nodes:
if node in activeNodes:
dic_nodes_MinCapacity[node] = injectionWithdrawalRates.at[index, node]
dic_nodes_MaxCapacity[node] = injectionWithdrawalRates.at[index, node]
else:
dic_nodes_MinCapacity[node] = 0
dic_nodes_MaxCapacity[node] = 0
# compute flows
return index, computeSingleSpecialScenario(dic_nodes_MinCapacity=dic_nodes_MinCapacity,
dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, graph=graph, **kwargs)
def computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, threads=1, verbose=0, solver='glpk'):
""""
Compute for each timeStep and demands given by injectionWithdrawalRates the corresponding flow values
:param: injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while
injections are negative) in [kg^3/s]
:type injectionWithdrawalRates: pandas DataFrame
:param distances: pipeline distances in the length unit specified in the esM object ([m])
:type distances: pandas series
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m]
:type graph: networkx graph object
:param entries: list of entry nodes of the network
:type entries: list of str
:param exits: list of exit nodes of the network
:type exits: list of str
:param threads: number of threads used for parallelization
:type threads: positive integer
    :param verbose: if 0, parallelization progress is displayed
:type verbose: int
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:return: dictionary that contains for every time step the corresponding flows in [kg/s]
:rtype: dictionary key: timeStep, value: dict: key: arc, value: arc flow
"""
# Type and value check
isPandasDataFrameNumber(injectionWithdrawalRates)
isPandasSeriesPositiveNumber(distances)
isNetworkxGraph(graph)
isListOfStrings(entries)
isListOfStrings(exits)
# compute for every time step the corresponding flows; dict: key: timeStep, value: dict: key: arc, value: flow
dic_timeStep_flows = {}
# nodes with nonzero demand are given by columns of dataframe
activeNodes = injectionWithdrawalRates.columns
pool = Pool(threads)
indexList = list(injectionWithdrawalRates.index)
for i, values in enumerate(pool.imap(partial(_computeTimeStepFlows, graph=graph, distances=distances,
entries=entries, exits=exits, startNode=activeNodes[0],
endNode=activeNodes[1], specialScenario=False,
injectionWithdrawalRates=injectionWithdrawalRates,
solver=solver),
indexList), 1):
if verbose == 0:
sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(indexList) * 100)))
dic_timeStep_flows[values[0]] = values[1]
pool.close()
pool.join()
return dic_timeStep_flows
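# Illustrative usage sketch (not part of the original module): per-timestep arc flows for a tiny
# three-node path network. Node names, demands and lengths are made-up demo values; GLPK (the
# documented default solver) is assumed to be installed. Run it under a __main__ guard, since
# computeTimeStepFlows uses a multiprocessing pool internally.
def _example_computeTimeStepFlows():
    import pandas as pd
    import networkx as nx
    # two timesteps; withdrawals are positive, injections negative, and each row sums to zero
    rates = pd.DataFrame({'n1': [-3.0, -1.0], 'n2': [1.0, 0.5], 'n3': [2.0, 0.5]})
    dists = pd.Series({('n1', 'n2'): 5000.0, ('n2', 'n3'): 8000.0})
    graph = nx.Graph()
    for (u, v), length in dists.items():
        graph.add_edge(u, v, length=length)  # edges carry the pipeline length in [m]
    flows = computeTimeStepFlows(rates, dists, graph, entries=['n1'], exits=['n2', 'n3'],
                                 threads=1, verbose=1, solver='glpk')
    # flows[t] maps every arc to its flow in [kg/s] for timestep t
    return flows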
def networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress):
"""
    If a pipe is longer than maxPipeLength, then it will be split into several pipes of equal length,
i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1
# TODO this function is only used for testing
:param distances: pipeline distances in the length unit specified in the esM object
:type distances: pandas series
:param maxPipeLength: determines the maximal length of a pipe in [m].
:type maxPipeLength: positive number
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:return: graph of the network corresponding to the distances
:rtype: graph object of networkx
:return: pipeline distances in the length unit specified in the esM object
:rtype: pandas series
:return: dic_node_minPress dictionary that contains for every node of the network its lower pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
:return dic_node_maxPress dictionary that contains for every node of the network its upper pressure bound in [bar]
:rtype: dictionary key: node of the network, value: non-negative float
"""
# type and value check
isPandasSeriesPositiveNumber(distances)
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
if maxPipeLength is not None:
utils.isStrictlyPositiveNumber(maxPipeLength)
# if maximal pipeline length is a positive number we apply the refinement
if maxPipeLength is not None:
# we have to check if pipes satisfy maximal pipeline length
# list of new arcs that will be added
newPipes = []
# list of lengths of new added pipes
newPipesLengths = []
# list of split original pipes
splitEdges = []
for edge in distances.index:
# get length of pipeline
pipeLength = distances[edge]
if pipeLength > maxPipeLength:
# compute number of necessary artificial nodes
nArtificialNodes = math.ceil(pipeLength / maxPipeLength) - 1
# compute length of new pipelines
newPipeLength = float(pipeLength / (math.ceil(pipeLength / maxPipeLength)))
# lower and upper pressure bound for new nodes computed by average of nodes of original edge
lowPress = (dic_node_minPress[edge[0]] + dic_node_minPress[edge[1]]) / 2
maxPress = (dic_node_maxPress[edge[0]] + dic_node_maxPress[edge[1]]) / 2
# add first new pipe and its length
newPipes.append((edge[0], "v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])))
# add length of first new pipe
newPipesLengths.append(newPipeLength)
# add lower and upper bound for new artificial node
dic_node_minPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress
dic_node_maxPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress
# add intermediate artificial pipes, its length, and lower/upper pressure bounds
for index in range(1, nArtificialNodes):
newPipes.append(("v" + str(index) + "_" + str(edge[0]) + "_" + str(edge[1]),
"v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])))
newPipesLengths.append(newPipeLength)
dic_node_minPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress
dic_node_maxPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress
# add last new pipe and its length
newPipes.append(("v" + str(nArtificialNodes) + "_" + str(edge[0]) + "_" + str(edge[1]),
edge[1]))
newPipesLengths.append(newPipeLength)
# add edge to split edges
splitEdges.append(edge)
# Now delete edges that have been split
distances = distances.drop(splitEdges)
# Add new edges
distances = distances.append(pd.Series(newPipesLengths, index=newPipes))
# get edges for graph
edges = distances.index
# create empty graph
G = nx.Graph()
# create graph from given edges and add length as edge attribute
for edge in edges:
G.add_edge(edge[0], edge[1], length=distances[edge])
return G, distances, dic_node_minPress, dic_node_maxPress
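# Illustrative usage sketch (not part of the original module): splitting one long pipe with
# networkRefinement. A 12 km pipe and a 5 km limit are made-up demo values; the pipe is cut
# into three 4 km segments by inserting two artificial nodes.
def _example_networkRefinement():
    import pandas as pd
    dists = pd.Series({('a', 'b'): 12000.0})
    min_p = {'a': 50.0, 'b': 60.0}
    max_p = {'a': 100.0, 'b': 90.0}
    graph, new_dists, new_min_p, new_max_p = networkRefinement(dists, 5000.0, min_p, max_p)
    # new_dists now contains ('a', 'v1_a_b'), ('v1_a_b', 'v2_a_b') and ('v2_a_b', 'b'), each
    # 4000 m long; the artificial nodes get averaged pressure bounds (55 and 95 bar)
    return graph, new_dists, new_min_p, new_max_p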
def determineDiscretePipelineDesign(robust, injectionWithdrawalRates, distances, dic_node_minPress, dic_node_maxPress,
dic_diameter_costs=None, dic_candidateMergedDiam_costs=None,
gdfEdges=None, regColumn1='nodeIn', regColumn2='nodeOut', solver='glpk',
opexForDiameters=None, economicLifetime=30, interestRate=0.08, costUnit='€', ir=0.2,
rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965,
originalFluidFlows=None, nDigits=6, verbose=0, threads=1):
"""
We compute a robust (depending on parameter robust) optimal pipeline design,
i.e. for a given network, we compute a minimal spanning tree w.r.t. its total length.
    Afterwards, we compute our robust (special) scenarios, see Robinius et al..
    We also compute the corresponding flows for every timeStep of injectionWithdrawalRates.
    We compute merged diameters according to the list candidatesMergedDiameter, i.e. we compute an equivalent single diameter
for two parallel pipes with the same diameter
If robust is True, then we compute the corresponding pressure drops for every diameter and robust scenario.
If robust is False, then we compute for every timeStep the corresponding pressure drops for every diameter and
timeStep.
If robust is True, then we compute optimal diameters by a MIP for the robust scenarios.
If robust is False, then we compute optimal diameters by a MIP for the timeStep scenarios. Not Robust Version!
In a postprocessing step, we compute "precise" pressure levels for the robust scenarios and the timeStep scenarios.
Note that if robust is False, then the network may be infeasible for robust scenarios
which can occur in the network!
    :param robust: Bool; if True, we build a robust pipeline network, otherwise not
:type robust: bool
    :param injectionWithdrawalRates: the argument is a pandas DataFrame with the index denoting the timesteps and the
        columns denoting the names of the network's nodes.
        Injections are denoted with negative floats and withdrawals with positive floats
        in [kg/s]. Example:
node1 node2 node3
0 -4 2 2
1 3 -1.5 -1.5
... ... ... ...
8759 0 -1 1.
:type injectionWithdrawalRates: pandas DataFrame with floats
:param distances: the parameter is a pandas Series with the indices being tuples of the
network's nodes and the values being the lengths of the pipelines in [m]. Example:
(node1, node2) 1000
(node2, node3) 50000
(node2, node1) 1000
(node3, node2) 50000
:type distances: pandas Series
:param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar]
:type dic_node_minPress: dictionary: key: node of the network, value: non-negative float
:param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]
:type dic_node_maxPress: dictionary key: node of the network, value: non-negative float
It holds dic_node_minPress[index] <= dic_node_maxPress[index]
:param dic_diameter_costs: dictionary that contains all diameters in [m] as keys and the values are the
        corresponding costs in [Euro/m]. Default value is a preselection of diameters and their costs.
        If None, then we choose the following preselection of diameters and costs
dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,
0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,
0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,
0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,
1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}
:type dic_diameter_costs: dict with keys: diameters, values: cost for pipeline; optional
:param dic_candidateMergedDiam_costs: dictionary that contains a set of diameters in [m] as keys and
        the values are the corresponding costs in [Euro/m]. These diameters are then used to compute a single equivalent
        diameter for two looped (parallel) pipes with the considered diameter.
|br| * the default value is empty dictionary {}
:type dic_candidateMergedDiam_costs: dict with keys: diameters, values: cost for pipeline; optional
:param gdfEdges: GeoDataFrame with the edges of the network and the names of their start and end nodes.
Required for geo-referenced result visualization. Should be obtained from the getRefinedShapeFile
function.
:type gdfEdges: GeoDataFrame or None: optional, default is None
:param regColumn1: name of the column in gdfEdges which holds the name of the injection/ withdrawal node
at the beginning of the line. Required if gdfEdges is specified.
:type regColumn1: string, optional, default is 'nodeIn'
:param regColumn2: name of the column in gdfEdges which holds the name of the injection/ withdrawal node
at the end of the line. Required if gdfEdges is specified.
:type regColumn2: string, optional, default is 'nodeOut'
:param solver: name of the optimization solver to use
:type solver: string, default 'glpk'
:param ir: integral roughness of pipe in [mm]
|br| * the default value is 0.2 (hydrogen, this value can also be used for methane)
:type ir: positive float
:param rho_n: density at standard state in [kg/m^3]
|br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane)
:type rho_n: positive float
:param T_m: constant temperature in [kelvin]
|br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane)
:type T_m: float
:param T_n: temperature in standard state in [kelvin]
|br| * the default value is 273.15 (hydrogen, this value can also be used for methane)
:type T_n: float
:param p_n: pressure at standard state in [bar]
|br| * the default value is 1.01325 (hydrogen, this value can also be used for methane)
:type p_n: non-negative float
    :param Z_n: real gas factor of hydrogen at standard state
|br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane)
:type Z_n: non-negative float
    # TODO @Juelich where to use
    :param originalFluidFlows: string that specifies the considered fluid
|br| * the default value is None
:type originalFluidFlows: str; optional
:param nDigits: number of digits used in the round function
|br| * the default value is 6
:type nDigits: positive int
:param verbose: defines how verbose the console logging is:\n
- 0: general model logging, warnings and optimization solver logging are displayed.
- 1: warnings are displayed.
- 2: no general model logging or warnings are displayed, the optimization solver logging is set to a
minimum.\n
Note: if required, the optimization solver logging can be separately enabled in the optimizationSpecs
of the optimize function.
|br| * the default value is 0
:type verbose: integer (0, 1 or 2)
:return: tuple (dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels,
dic_timeStep_MaxViolPress, gdfEdges), with:
- dic_arc_optimalDiameters dictionary
- pressure levels of postprocessing of robust scenarios dic_scen_PressLevels
- violation of pressure bounds of robust scenarios in optimized network determined by postprocessing
- dic_scen_MaxViolPress: maximum pressure violation in robust scenarios
- pressure levels of postprocessing of timeSteps dic_timeStep_PressLevels
- violation of pressure bounds of timeStep scenarios in optimized network determined by postprocessing
- dic_timeStep_MaxViolPress: maximum pressure violation in timestep scenarios
- geopandas GeoDataFrame (information about diameters in 'diam' column and number of pipelines in
          'nbPipes'); None if the kwarg gdfEdges was specified as None
:rtype: return types:
- dic_arc_optimalDiameters: dictionary, key: arcs, values: (numberOfPipes, diameter) note usually numberOfPipes
is 1, but if we have chosen a merged diameter, then we have two parallel pipes with the same diameter,
i.e. numberOfPipes is 2.
- dic_scen_PressLevels: dictionary, key: nodePair, value: dict: key: arc, value: pressure level in [bar]
        - dic_scen_MaxViolPress: dictionary, key: nodePair, value: non-negative float
          (zero means no pressure violation)
- dic_timeStep_PressLevels: dictionary, key: timeStep, value: dict: key: arc, value: pressure level in [bar]
        - dic_timeStep_MaxViolPress: dictionary, key: timeStep, value: non-negative float
          (zero means no pressure violation)
        - gdfEdges: geopandas GeoDataFrame; None if the kwarg gdfEdges was specified as None
"""
# Do type and value check of input data:
isBool(robust)
isPandasDataFrameNumber(injectionWithdrawalRates)
isPandasSeriesPositiveNumber(distances)
isDictionaryPositiveNumber(dic_node_minPress)
isDictionaryPositiveNumber(dic_node_maxPress)
checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress)
# extract diameters for the optimization
if dic_diameter_costs is not None:
if isinstance(dic_diameter_costs, dict):
diameters = list(dic_diameter_costs.keys())
if isinstance(diameters, list):
for diam in diameters:
utils.isStrictlyPositiveNumber(diam)
else:
raise TypeError("The input argument has to be a list")
isDictionaryPositiveNumber(dic_diameter_costs)
if dic_candidateMergedDiam_costs is not None:
if isinstance(dic_candidateMergedDiam_costs, dict):
for diam in dic_candidateMergedDiam_costs.keys():
utils.isStrictlyPositiveNumber(diam)
utils.isPositiveNumber(dic_candidateMergedDiam_costs[diam])
else:
raise TypeError("The input argument has to be a list")
utils.isString(regColumn1), utils.isString(regColumn2)
if gdfEdges is not None:
if isinstance(gdfEdges, gpd.GeoDataFrame):
if (not regColumn1 in gdfEdges.columns) | (not regColumn2 in gdfEdges.columns):
raise ValueError("regColumn1 or regColumn2 not in columns of gdfEdges")
else:
                gdfEdges['nodes'] = gdfEdges.apply(lambda x: (x[regColumn1], x[regColumn2]), axis=1)
else:
raise TypeError("gdfEdges has to be a geopandas GeoDataFrame.")
if opexForDiameters is not None:
if isinstance(opexForDiameters, list):
for opex in opexForDiameters:
utils.isPositiveNumber(opex)
else:
raise TypeError("The input argument has to be a list")
utils.isPositiveNumber(interestRate)
utils.isStrictlyPositiveNumber(economicLifetime)
utils.isString(costUnit)
utils.isStrictlyPositiveNumber(ir)
utils.isStrictlyPositiveNumber(rho_n)
    if not isinstance(T_m, float):
        raise TypeError("The input argument has to be a number")
    if not isinstance(T_n, float):
        raise TypeError("The input argument has to be a number")
utils.isPositiveNumber(p_n)
utils.isPositiveNumber(Z_n)
if originalFluidFlows is not None:
utils.isString(originalFluidFlows)
utils.isStrictlyPositiveInt(nDigits)
if dic_diameter_costs is None:
print("There are no diameters to choose in the optimization. Thus, we consider the diameters and costs:")
dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69,
0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61,
0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28,
0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67,
1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78}
print(dic_diameter_costs)
# create graph with respect to distances
utils.output('Creating graph with respect to given distances', verbose, 0)
graph, distances = createNetwork(distances)
# plot graph
if verbose < 1:
if gdfEdges is not None:
gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]
fig, ax = plt.subplots(figsize=(4,4))
gdfEdges.plot(ax=ax, color='k'), ax.axis('off')
else:
utils.output("Original Network Graph:", verbose, 0)
nx.draw(graph, with_labels=True)
plt.show()
    # Create a Steiner tree of the network connecting the injection/withdrawal nodes
    utils.output('Creating a Steiner tree', verbose, 0)
inner_nodes = list(injectionWithdrawalRates.columns)
graph, distances = createSteinerTree(graph, distances, inner_nodes)
utils.output("Steiner tree:", verbose, 0)
if verbose < 1:
if gdfEdges is not None:
gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)]
fig, ax = plt.subplots(figsize=(4,4))
gdfEdges.plot(ax=ax, color='k'), ax.axis('off')
else:
nx.draw(graph, with_labels=True)
plt.show()
# Compute robust scenarios for spanning tree network
utils.output("Compute robust scenario set for tree network (based on " +
str(len(graph.nodes)*len(graph.nodes)-len(graph.nodes)) +
' node combinations). Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_nodePair_flows, entries, exits = generateRobustScenarios(injectionWithdrawalRates, graph, distances,
dic_node_minPress, dic_node_maxPress, solver=solver, threads=threads, verbose=verbose)
utils.output("Number of robust scenarios: " + str(len(dic_nodePair_flows.keys())) , verbose, 0)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
# Compute scenarios for timeSteps
utils.output("Compute scenarios for each timestep. Number of timestep scenarios: "
+ str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_timeStep_flows = computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits,
solver=solver, threads=threads, verbose=verbose)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
# Compute equivalent single diameters for looped (parallel) pipes
utils.output("Compute equivalent single diameters for looped (parallel) pipes", verbose, 0)
# dic_LoopedDiam_costs contains the new computed diameters and its costs
dic_LoopedDiam_costs = None
# dic_newDiam_oldDiam merges new and old diameters
dic_newDiam_oldDiam = None
if dic_candidateMergedDiam_costs is not None:
dic_LoopedDiam_costs, dic_newDiam_oldDiam = computeLargeMergedDiameters(dic_candidateMergedDiam_costs)
# merge all diameters to one dictionary for the optimization model
dic_diameter_costs.update(dic_LoopedDiam_costs)
# Compute pressure drops for each scenario and diameter and the compute optimal diameters
# depending on robust, we do this w.r.t. robust scenarios or every timeStep
# dictionary for the pressure coefficients
dic_pressureCoef = {}
# dictionary for the optimal diameters
dic_arc_diam = {}
if robust:
# we compute the pressure drops for the robust scenarios
utils.output("Pressure drop coefficients for diameters with respect to robust scenarios", verbose, 0)
dic_pressureCoef = determinePressureDropCoef(dic_nodePair_flows, distances, dic_node_minPress,
dic_node_maxPress, list(dic_diameter_costs.keys()))
specialScenarionames = list(dic_nodePair_flows.keys())
# Determine optimal discrete pipeline selection by solving a MIP w.r.t. the robust scenarios
utils.output('Determining optimal robust pipeline design under the consideration of pressure ' +
'losses and robust scenarios', verbose, 0)
# returns dict: key: arc, value: optimal diameter
# returns dict: key: nodePair, value: dic: key: node, value: pressure level
dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,
specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, robust, verbose=verbose,
solver=solver, threads=threads)
else:
# we compute pressure drops for every timeStep scenario. Not robust version!
        # we compute the pressure drops for the timeStep scenarios and then optimize
        utils.output("Pressure drop coefficients for diameters with respect to timeStep scenarios", verbose, 0)
dic_pressureCoef = determinePressureDropCoef(dic_timeStep_flows, distances, dic_node_minPress,
dic_node_maxPress, list(dic_diameter_costs.keys()))
timeSteps = list(dic_timeStep_flows.keys())
# Determine optimal discrete pipeline selection by solving a MIP w.r.t. the timeStep scenarios
utils.output('Determining optimal pipeline design under the consideration of pressure losses and every time step',
verbose, 0)
        utils.output('This network design is not necessarily robust!', verbose, 0)
# returns dict: key: arc, value: optimal diameter
# returns dict: key: timeStep, value: dic: key: node, value: pressure level
dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef,
timeSteps, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, False, verbose=verbose,
solver=solver, threads=threads)
if not dic_arc_diam:
utils.output("No feasible diameter selections exits", verbose, 0)
return None
# Do postprocessing: Use a "more" accurate pressure model and apply Postprocessing of master's thesis:
# first do postprocessing for special scenarios
utils.output("Do postprocessing for robust (special) scenarios. Number of scenarios: " + str(len(dic_nodePair_flows)) +
'. Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_scen_PressLevels, dic_scen_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_nodePair_flows,
dic_node_minPress, dic_node_maxPress,
threads=threads, verbose=verbose)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
# print if some of these scenarios are not feasible for the "more" precise pressure model
for scenario in dic_scen_MaxViolPress.keys():
if dic_scen_MaxViolPress[scenario] > 0:
utils.output("Robust Scenario " + str(scenario) + " violates pressure bounds by " +
str(dic_scen_MaxViolPress[scenario]), verbose, 0)
# compute pressure levels for each time step
utils.output("Do postprocessing for each timestep scenarios. Number of scenarios: " +
str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0)
timeStart = time.time()
dic_timeStep_PressLevels, dic_timeStep_MaxViolPress = postprocessing(graph, distances, dic_arc_diam,
dic_timeStep_flows, dic_node_minPress,
dic_node_maxPress,
threads=threads, verbose=verbose)
utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0)
for timeStep in dic_timeStep_MaxViolPress.keys():
if dic_timeStep_MaxViolPress[timeStep] > 0:
utils.output("Time Step " + str(timeStep) + " violates pressure bounds by " +
str(dic_timeStep_MaxViolPress[timeStep]), verbose, 0)
# now determine final output, i.e. dictionary: key: arcs, values: (numberOfPipes, diameter)
# note usually numberOfPipes is 1, but if we have chosen a merged diameter, then we have two parallel pipes with
# the same diameter, i.e. numberOfPipes is 2.
dic_arc_optimalDiameters = {}
for arc in dic_arc_diam.keys():
if dic_LoopedDiam_costs is not None:
if dic_arc_diam[arc] in dic_LoopedDiam_costs.keys():
dic_arc_optimalDiameters[arc] = (2, dic_newDiam_oldDiam[dic_arc_diam[arc]])
else:
dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])
else:
dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc])
if verbose < 1:
if gdfEdges is not None:
gdfEdges = gdfEdges[gdfEdges.nodes.isin(dic_arc_optimalDiameters)]
gdfEdges['diam'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][1], axis=1)
gdfEdges['nbPipes'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][0], axis=1)
plotOptimizedNetwork(gdfEdges)
else:
# plot network with new diameters
utils.output("Network with optimized diameters, looped pipes are indicated by two colored edges, " +
"Thicker edge means larger diameter", verbose, 0)
finalG = nx.MultiGraph()
for arc in dic_arc_optimalDiameters.keys():
if dic_arc_optimalDiameters[arc][0] == 1:
# we have a single not looped pipe
finalG.add_edge(arc[0], arc[1], color='black', weight=5 * dic_arc_optimalDiameters[arc][1])
else:
# we have a looped pipe
finalG.add_edge(arc[0], arc[1], color='r',
weight=10 * dic_arc_optimalDiameters[arc][1])
finalG.add_edge(arc[0], arc[1], color='b',
weight=5 * dic_arc_optimalDiameters[arc][1])
# pos = nx.circular_layout(finalG)
edges = finalG.edges()
colors = []
weight = []
for (u, v, attrib_dict) in list(finalG.edges.data()):
colors.append(attrib_dict['color'])
weight.append(attrib_dict['weight'])
nx.draw(finalG, edges=edges, edge_color=colors, width=weight, with_labels=True)
plt.show()
# Add some output which somehow quantifies the difference between the original and the new
# pipeline design (for this additional input argument are required)
# TODO @ Juelich just compare original solution to solution dic_arc_optimalDiameters
return dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, \
dic_timeStep_MaxViolPress, gdfEdges
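# Illustrative usage sketch (not part of the original module): an end-to-end call of
# determineDiscretePipelineDesign for a toy two-node network with balanced injection and
# withdrawal. All node names, rates, lengths and pressure bounds are made-up demo values;
# the default diameter/cost catalogue and the documented default solver (GLPK) are assumed.
def _example_determineDiscretePipelineDesign():
    import pandas as pd
    rates = pd.DataFrame({'n1': [-2.0, -1.0], 'n2': [2.0, 1.0]})
    dists = pd.Series({('n1', 'n2'): 20000.0, ('n2', 'n1'): 20000.0})
    min_p = {'n1': 50.0, 'n2': 50.0}
    max_p = {'n1': 100.0, 'n2': 100.0}
    result = determineDiscretePipelineDesign(robust=True, injectionWithdrawalRates=rates,
                                             distances=dists, dic_node_minPress=min_p,
                                             dic_node_maxPress=max_p, solver='glpk',
                                             threads=1, verbose=2)
    # result[0] maps every arc to (numberOfPipes, diameter); the remaining entries hold the
    # postprocessed pressure levels and the pressure bound violations per scenario/timestep
    return result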
def plotOptimizedNetwork(gdf_pipes, figsize=(4,4), nodesColumn='nodes', diamColumn='diam',
nbPipesColumn='nbPipes', line_scaling=1, gdf_regions=None, pressureLevels=None, pMin=50, pMax=100,
cmap='Spectral_r', cbxShift=0.32, cbyShift=0.08, cbWidth=0.4, fontsize=10, cbTitle='Pressure [bar]'):
"""Plot optimized network, visualizing chosen pipe diameters and, if selected, pressure levels of
a scenario.
:param gdf_pipes: GeoDataFrame, containing information about the diameters, number of pipes and
routes of the pipeline network
:type gdf_pipes: geopandas GeoDataFrame
:param figsize: figure size, defaults to (4,4)
:type figsize: tuple, optional
:param nodesColumn: name of the column in gdf_pipes containing a tuple (startNode, endNode) with the
name of the nodes being strings, defaults to 'nodes'
:type nodesColumn: str, optional
:param diamColumn: name of the column in gdf_pipes containing the diameters of the pipelines in m,
defaults to 'diam'
:type diamColumn: str, optional
:param nbPipesColumn: name of the column in gdf_pipes containing the number of parallel pipes along
a connection (maximum parallel pipes: 2),
defaults to 'nbPipes'
:type nbPipesColumn: str, optional
:param line_scaling: scaling factor for line width, defaults to 1
:type line_scaling: int, optional
:param gdf_regions: GeoDataFrame for background plotting, defaults to None
:type gdf_regions: geopandas GeoDataFrame, optional
:param pressureLevels: pressure levels at each node for one scenario/ timestep, defaults to None
:type pressureLevels: dictionary or series with keys/ indices being the nodes of the network, optional
:param pMin: minimum pressure of colorbar, defaults to 50
:type pMin: int, optional
:param pMax: maximum pressure of colorbar, defaults to 100
:type pMax: int, optional
:param cmap: colormap name, defaults to 'Spectral_r'
:type cmap: str, optional
:param cbxShift: colorbar x shift, defaults to 0.32
:type cbxShift: float, optional
:param cbyShift: colorbar y shift, defaults to 0.08
:type cbyShift: float, optional
:param cbWidth: colorbar width, defaults to 0.4
:type cbWidth: float, optional
:param fontsize: fontsize of legend and colorbar, defaults to 10
:type fontsize: int, optional
:param cbTitle: colorbar title, defaults to 'Pressure [bar]'
:type cbTitle: str, optional
:return: tuple (fig, ax)
:rtype:
- fig: matplotlib figure
- ax: matplotlib axis
"""
fig, ax = plt.subplots(figsize=figsize)
cmap = mpl.cm.get_cmap(cmap)
if gdf_regions is not None:
gdf_regions.plot(ax=ax, facecolor='lightgrey', edgecolor='lightgrey')
diamMin = gdf_pipes[gdf_pipes[diamColumn] > 0][diamColumn].min()
for i, row in gdf_pipes.iterrows():
lw = row[diamColumn]/diamMin*line_scaling
if pressureLevels is not None:
p = (pressureLevels[row[nodesColumn][0]] + pressureLevels[row[nodesColumn][1]])/2
color = cmap((p-pMin)/(pMax-pMin))
else:
color='k'
if (row[nbPipesColumn] == 1):
gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw, capstyle='round')
else:
gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw*3, capstyle='round')
gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color='white', linewidth=lw)
ax.axis('off')
lines = []
for diam in sorted(gdf_pipes[diamColumn].unique()):
line = plt.Line2D(range(1), range(1), linewidth=diam/diamMin*line_scaling, color='k', marker='_',
label="{:>1.5}".format(str(diam)) + ' m')
lines.append(line)
leg = ax.legend(handles=lines, prop={'size': fontsize}, loc=6, bbox_to_anchor=(1,0.5), title='Diameters')
leg.get_frame().set_edgecolor('white')
if pressureLevels is not None:
sm1 = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=pMin, vmax=pMax))
sm1._A = []
cax = fig.add_axes([cbxShift, cbyShift, cbWidth, 0.03])
cb1 = fig.colorbar(sm1, cax=cax, pad=0.05, aspect=7, fraction=0.07, orientation='horizontal')
cax.tick_params(labelsize=fontsize)
cax.set_xlabel(cbTitle, size=fontsize)
cb1.ax.xaxis.set_label_position('top')
plt.show()
return fig, ax
| iam in dic_arc_diam.keys():
utils.isStrictlyPositiveNumber(dic_arc_diam[diam])
|
interval.py | """ define the IntervalIndex """
import textwrap
import warnings
import numpy as np
from pandas.compat import add_metaclass
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like,
is_datetime_or_timedelta_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_float_dtype,
is_interval_dtype,
is_object_dtype,
is_scalar,
is_float,
is_number,
is_integer)
from pandas.core.indexes.base import (
Index, ensure_index,
default_pprint, _index_shared_docs)
from pandas._libs import Timestamp, Timedelta
from pandas._libs.interval import (
Interval, IntervalMixin, IntervalTree,
)
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.core.indexes.multi import MultiIndex
import pandas.core.common as com
from pandas.util._decorators import cache_readonly, Appender
from pandas.util._doctools import _WritableDoc
from pandas.util._exceptions import rewrite_exception
from pandas.core.config import get_option
from pandas.tseries.frequencies import to_offset
from pandas.tseries.offsets import DateOffset
import pandas.core.indexes.base as ibase
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
target_klass='IntervalIndex or list of Intervals',
name=textwrap.dedent("""\
name : object, optional
to be stored in the index.
"""),
))
def _get_next_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_prev_label(label):
dtype = getattr(label, 'dtype', type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = 'datetime64'
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, 'ns')
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError('cannot determine next label for type {typ!r}'
.format(typ=type(label)))
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
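# Illustrative sketch (not part of the original pandas source): how the helpers above behave for
# common endpoint types; the values are demo assumptions.
#
#   _get_next_label(5)                                            -> 6
#   _get_prev_label(5)                                            -> 4
#   _get_next_label(Timestamp('2018-01-01'))                      -> Timestamp('2018-01-01 00:00:00.000000001')
#   _get_interval_closed_bounds(Interval(0, 5, closed='right'))   -> (1, 5)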
def _new_IntervalIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't have
arguments and breaks __new__
"""
return cls.from_arrays(**d)
@Appender(_interval_shared_docs['class'] % dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs['name'],
versionadded="0.20.0",
extra_methods="contains\n",
examples=textwrap.dedent("""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""),
))
@add_metaclass(_WritableDoc)
class IntervalIndex(IntervalMixin, Index):
_typ = 'intervalindex'
_comparables = ['name']
_attributes = ['name', 'closed']
# we would like our indexing holder to defer to us
_defer_to_indexing = True
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
def __new__(cls, data, closed=None, dtype=None, copy=False,
name=None, verify_integrity=True):
if name is None and hasattr(data, 'name'):
name = data.name
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype,
verify_integrity=verify_integrity)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array, name, closed=None):
"""
Construct from an IntervalArray
Parameters
----------
array : IntervalArray
name : str
Attached as result.name
closed : Any
Ignored.
"""
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, left=None, right=None, **kwargs):
result = self._data._shallow_copy(left=left, right=right)
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(result, **attributes)
@cache_readonly
def _isnan(self):
"""Return a mask indicating if each value is NA"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
return IntervalTree(self.left, self.right, closed=self.closed)
def __contains__(self, key):
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
boolean
"""
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
def contains(self, key):
"""
Return a boolean indicating if the key is IN the index
We accept / allow keys to be not *just* actual
objects.
Parameters
----------
key : int, float, Interval
Returns
-------
boolean
"""
try:
self.get_loc(key)
return True
except KeyError:
return False
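    # Illustrative sketch (not part of the original pandas source): the difference between
    # __contains__ and contains() in the version shown here, using a small demo index.
    #
    #   idx = pd.IntervalIndex.from_breaks([0, 1, 2])   # (0, 1], (1, 2]
    #   pd.Interval(0, 1) in idx    -> True   (__contains__ only accepts Interval objects)
    #   0.5 in idx                  -> False  (a scalar is not an Interval)
    #   idx.contains(0.5)           -> True   (contains() also accepts scalar keys)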
@classmethod
@Appender(_interval_shared_docs['from_breaks'] % _index_doc_kwargs)
def from_breaks(cls, breaks, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_arrays'] % _index_doc_kwargs)
def from_arrays(cls, left, right, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(left, right, closed, copy=copy,
dtype=dtype)
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_intervals'] % _index_doc_kwargs)
def from_intervals(cls, data, closed=None, name=None, copy=False,
dtype=None):
msg = ('IntervalIndex.from_intervals is deprecated and will be '
'removed in a future version; Use IntervalIndex(...) instead')
warnings.warn(msg, FutureWarning, stacklevel=2)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype)
if name is None and isinstance(data, cls):
name = data.name
return cls._simple_new(array, name=name)
@classmethod
@Appender(_interval_shared_docs['from_tuples'] % _index_doc_kwargs)
def from_tuples(cls, data, closed='right', name=None, copy=False,
dtype=None):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy,
dtype=dtype)
return cls._simple_new(arr, name=name)
@Appender(_interval_shared_docs['to_tuples'] % dict(
return_type="Index",
examples="""
Examples
--------
>>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])
>>> idx.to_tuples()
Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')
>>> idx.to_tuples(na_tuple=False)
Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""",
))
def to_tuples(self, na_tuple=True):
tuples = self._data.to_tuples(na_tuple=na_tuple)
return Index(tuples)
@cache_readonly
def _multiindex(self):
return MultiIndex.from_arrays([self.left, self.right],
names=['left', 'right'])
@property
def left(self):
"""
Return the left endpoints of each Interval in the IntervalIndex as
an Index
"""
return self._data._left
@property
def right(self):
"""
Return the right endpoints of each Interval in the IntervalIndex as
an Index
"""
return self._data._right
@property
def closed(self):
"""
Whether the intervals are closed on the left-side, right-side, both or
neither
"""
return self._data._closed
@Appender(_interval_shared_docs['set_closed'] % _index_doc_kwargs)
def set_closed(self, closed):
if closed not in _VALID_CLOSED:
msg = "invalid option for 'closed': {closed}"
raise ValueError(msg.format(closed=closed))
# return self._shallow_copy(closed=closed)
array = self._data.set_closed(closed)
return self._simple_new(array, self.name)
@property
def length(self):
"""
Return an Index with entries denoting the length of each Interval in
the IntervalIndex
"""
return self._data.length
@property
def size(self):
# Avoid materializing ndarray[Interval]
return self._data.size
@property
def shape(self):
# Avoid materializing ndarray[Interval]
return self._data.shape
@property
def itemsize(self):
msg = ('IntervalIndex.itemsize is deprecated and will be removed in '
'a future version')
warnings.warn(msg, FutureWarning, stacklevel=2)
        # suppress the warning from the underlying left/right itemsize
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return self.left.itemsize + self.right.itemsize
def __len__(self):
return len(self.left)
@cache_readonly
def values(self):
|
@cache_readonly
def _values(self):
return self._data
@cache_readonly
def _ndarray_values(self):
return np.array(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
return self._ndarray_values
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def __reduce__(self):
d = dict(left=self.left,
right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (self.__class__, d), None
@Appender(_index_shared_docs['copy'])
def copy(self, deep=False, name=None):
array = self._data.copy(deep=deep)
attributes = self._get_attributes_dict()
if name is not None:
attributes.update(name=name)
return self._simple_new(array, **attributes)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
with rewrite_exception('IntervalArray', self.__class__.__name__):
new_values = self.values.astype(dtype, copy=copy)
if is_interval_dtype(new_values):
return self._shallow_copy(new_values.left, new_values.right)
return super(IntervalIndex, self).astype(dtype, copy=copy)
@cache_readonly
def dtype(self):
"""Return the dtype object of the underlying data"""
return self._data.dtype
@property
def inferred_type(self):
"""Return a string of the type inferred from the values"""
return 'interval'
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we don't use an explicit engine
# so return the bytes here
return (self.left.memory_usage(deep=deep) +
self.right.memory_usage(deep=deep))
@cache_readonly
def mid(self):
"""
Return the midpoint of each Interval in the IntervalIndex as an Index
"""
return self._data.mid
@cache_readonly
def is_monotonic(self):
"""
Return True if the IntervalIndex is monotonic increasing (only equal or
increasing values), else False
"""
return self._multiindex.is_monotonic
@cache_readonly
def is_monotonic_increasing(self):
"""
Return True if the IntervalIndex is monotonic increasing (only equal or
increasing values), else False
"""
return self._multiindex.is_monotonic_increasing
@cache_readonly
def is_monotonic_decreasing(self):
"""
Return True if the IntervalIndex is monotonic decreasing (only equal or
decreasing values), else False
"""
return self._multiindex.is_monotonic_decreasing
@cache_readonly
def is_unique(self):
"""
Return True if the IntervalIndex contains unique elements, else False
"""
return self._multiindex.is_unique
@cache_readonly
def is_non_overlapping_monotonic(self):
return self._data.is_non_overlapping_monotonic
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(IntervalIndex, self)._convert_scalar_indexer(
key, kind=kind)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _maybe_cast_indexed(self, key):
"""
we need to cast the key, which could be a scalar
or an array-like to the type of our subtype
"""
if isinstance(key, IntervalIndex):
return key
subtype = self.dtype.subtype
if is_float_dtype(subtype):
if is_integer(key):
key = float(key)
elif isinstance(key, (np.ndarray, Index)):
key = key.astype('float64')
elif is_integer_dtype(subtype):
if is_integer(key):
key = int(key)
return key
def _check_method(self, method):
if method is None:
return
if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']:
msg = 'method {method} not yet implemented for IntervalIndex'
raise NotImplementedError(msg.format(method=method))
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError('can only get slices from an IntervalIndex if '
'bounds are non-overlapping and all monotonic '
'increasing or decreasing')
if isinstance(label, IntervalMixin):
raise NotImplementedError
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
# indexes being both increasing and decreasing
if ((side == 'left' and self.left.is_monotonic_increasing) or
(side == 'right' and not self.left.is_monotonic_increasing)):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def _get_loc_only_exact_matches(self, key):
if isinstance(key, Interval):
if not self.is_unique:
raise ValueError("cannot index with a slice Interval"
" and a non-unique index")
# TODO: this expands to a tuple index, see if we can
# do better
return Index(self._multiindex.values).get_loc(key)
raise KeyError
def _find_non_overlapping_monotonic_bounds(self, key):
if isinstance(key, IntervalMixin):
start = self._searchsorted_monotonic(
key.left, 'left', exclude_label=key.open_left)
stop = self._searchsorted_monotonic(
key.right, 'right', exclude_label=key.open_right)
elif isinstance(key, slice):
# slice
start, stop = key.start, key.stop
if (key.step or 1) != 1:
raise NotImplementedError("cannot slice with a slice step")
if start is None:
start = 0
else:
start = self._searchsorted_monotonic(start, 'left')
if stop is None:
stop = len(self)
else:
stop = self._searchsorted_monotonic(stop, 'right')
else:
# scalar or index-like
start = self._searchsorted_monotonic(key, 'left')
stop = self._searchsorted_monotonic(key, 'right')
return start, stop
def get_loc(self, key, method=None):
"""Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}, optional
* default: matches where the label is within an interval only.
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
---------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
>>> index = pd.IntervalIndex([i1, i2])
>>> index.get_loc(1)
0
        You can also supply an interval or a location for a point inside an
interval.
>>> index.get_loc(pd.Interval(0, 2))
array([0, 1], dtype=int64)
>>> index.get_loc(1.5)
1
If a label is in several intervals, you get the locations of all the
relevant intervals.
>>> i3 = pd.Interval(0, 2)
>>> overlapping_index = pd.IntervalIndex([i2, i3])
>>> overlapping_index.get_loc(1.5)
array([0, 1], dtype=int64)
"""
self._check_method(method)
original_key = key
key = self._maybe_cast_indexed(key)
if self.is_non_overlapping_monotonic:
if isinstance(key, Interval):
left = self._maybe_cast_slice_bound(key.left, 'left', None)
right = self._maybe_cast_slice_bound(key.right, 'right', None)
key = Interval(left, right, key.closed)
else:
key = self._maybe_cast_slice_bound(key, 'left', None)
start, stop = self._find_non_overlapping_monotonic_bounds(key)
if start is None or stop is None:
return slice(start, stop)
elif start + 1 == stop:
return start
elif start < stop:
return slice(start, stop)
else:
raise KeyError(original_key)
else:
# use the interval tree
if isinstance(key, Interval):
left, right = _get_interval_closed_bounds(key)
return self._engine.get_loc_interval(left, right)
else:
return self._engine.get_loc(key)
def get_value(self, series, key):
if com.is_bool_indexer(key):
loc = key
elif is_list_like(key):
loc = self.get_indexer(key)
elif isinstance(key, slice):
if not (key.step is None or key.step == 1):
raise ValueError("cannot support not-default step in a slice")
try:
loc = self.get_loc(key)
except TypeError:
# we didn't find exact intervals or are non-unique
msg = "unable to slice with this key: {key}".format(key=key)
raise ValueError(msg)
else:
loc = self.get_loc(key)
return series.iloc[loc]
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
self._check_method(method)
target = ensure_index(target)
target = self._maybe_cast_indexed(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if self.is_non_overlapping_monotonic:
start, stop = self._find_non_overlapping_monotonic_bounds(target)
start_plus_one = start + 1
if not ((start_plus_one < stop).any()):
return np.where(start_plus_one == stop, start, -1)
if not self.is_unique:
raise ValueError("cannot handle non-unique indices")
# IntervalIndex
if isinstance(target, IntervalIndex):
indexer = self._get_reindexer(target)
# non IntervalIndex
else:
indexer = np.concatenate([self.get_loc(i) for i in target])
return ensure_platform_int(indexer)
def _get_reindexer(self, target):
"""
Return an indexer for a target IntervalIndex with self
"""
# find the left and right indexers
lindexer = self._engine.get_indexer(target.left.values)
rindexer = self._engine.get_indexer(target.right.values)
# we want to return an indexer on the intervals
# however, our keys could provide overlapping of multiple
# intervals, so we iterate thru the indexers and construct
# a set of indexers
indexer = []
n = len(self)
for i, (lhs, rhs) in enumerate(zip(lindexer, rindexer)):
target_value = target[i]
# matching on the lhs bound
if (lhs != -1 and
self.closed == 'right' and
target_value.left == self[lhs].right):
lhs += 1
# matching on the lhs bound
if (rhs != -1 and
self.closed == 'left' and
target_value.right == self[rhs].left):
rhs -= 1
# not found
if lhs == -1 and rhs == -1:
indexer.append(np.array([-1]))
elif rhs == -1:
indexer.append(np.arange(lhs, n))
elif lhs == -1:
# care about left/right closed here
value = self[i]
# target.closed same as self.closed
if self.closed == target.closed:
if target_value.left < value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'left'
elif self.closed == 'right':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
# target.closed == 'right'
elif self.closed == 'left':
if target_value.left <= value.left:
indexer.append(np.array([-1]))
continue
indexer.append(np.arange(0, rhs + 1))
else:
indexer.append(np.arange(lhs, rhs + 1))
return np.concatenate(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = self._maybe_cast_indexed(ensure_index(target))
return super(IntervalIndex, self).get_indexer_non_unique(target)
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy(values)
def delete(self, loc):
"""
Return a new IntervalIndex with passed location(-s) deleted
Returns
-------
new_index : IntervalIndex
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
return self._shallow_copy(new_left, new_right)
def insert(self, loc, item):
"""
Return a new IntervalIndex inserting new item at location. Follows
Python list.append semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
----------
loc : int
item : object
Returns
-------
new_index : IntervalIndex
"""
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError('inserted item must be closed on the same '
'side as the index')
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError('can only insert Interval objects and NA into '
'an IntervalIndex')
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
return self._shallow_copy(new_left, new_right)
def _as_like_interval_index(self, other):
self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
msg = ('the other index needs to be an IntervalIndex too, but '
'was type {}').format(other.__class__.__name__)
raise TypeError(msg)
elif self.closed != other.closed:
msg = ('can only do set operations between two IntervalIndex '
'objects that are closed on the same side')
raise ValueError(msg)
return other
def _concat_same_dtype(self, to_concat, name):
"""
assert that we all have the same .closed
we allow a 0-len index here as well
"""
if not len({i.closed for i in to_concat if len(i)}) == 1:
msg = ('can only append two IntervalIndex objects '
'that are closed on the same side')
raise ValueError(msg)
return super(IntervalIndex, self)._concat_same_dtype(to_concat, name)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
result = self._data.take(indices, axis=axis, allow_fill=allow_fill,
fill_value=fill_value, **kwargs)
attributes = self._get_attributes_dict()
return self._simple_new(result, **attributes)
def __getitem__(self, value):
result = self._data[value]
if isinstance(result, IntervalArray):
return self._shallow_copy(result)
else:
# scalar
return result
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
from pandas.io.formats.format import IntervalArrayFormatter
return IntervalArrayFormatter(values=self,
na_rep=na_rep,
justify='all').get_result()
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option(
'display.max_seq_items') or n) // 10, 10)
formatter = str
if n == 0:
summary = '[]'
elif n == 1:
first = formatter(self[0])
summary = '[{first}]'.format(first=first)
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[{first}, {last}]'.format(first=first, last=last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))
return summary + ',' + self._format_space()
def _format_attrs(self):
attrs = [('closed', repr(self.closed))]
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
attrs.append(('dtype', "'{dtype}'".format(dtype=self.dtype)))
return attrs
def _format_space(self):
space = ' ' * (len(self.__class__.__name__) + 1)
return "\n{space}".format(space=space)
def argsort(self, *args, **kwargs):
return np.lexsort((self.right, self.left))
def equals(self, other):
"""
Determines if two IntervalIndex objects contain the same elements
"""
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(getattr(other, 'values', other))
return (self.left.equals(other.left) and
self.right.equals(other.right) and
self.closed == other.closed)
def _setop(op_name):
def func(self, other):
other = self._as_like_interval_index(other)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
msg = ('can only do {op} between two IntervalIndex '
'objects that have compatible dtypes')
raise TypeError(msg.format(op=op_name))
result = getattr(self._multiindex, op_name)(other._multiindex)
result_name = self.name if self.name == other.name else None
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result.values.astype(self.dtype.subtype)
else:
result = result.values
return type(self).from_tuples(result, closed=self.closed,
name=result_name)
return func
union = _setop('union')
intersection = _setop('intersection')
difference = _setop('difference')
symmetric_difference = _setop('symmetric_difference')
# TODO: arithmetic operations
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint):
"""helper for interval_range to check if start/end are valid types"""
return any([is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None])
def _is_type_compatible(a, b):
"""helper for interval_range to check type compat of start/end/freq"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return ((is_number(a) and is_number(b)) or
(is_ts_compat(a) and is_ts_compat(b)) or
(is_td_compat(a) and is_td_compat(b)) or
com._any_none(a, b))
def interval_range(start=None, end=None, periods=None, freq=None,
name=None, closed='right'):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals
end : numeric or datetime-like, default None
Right bound for generating intervals
periods : integer, default None
Number of periods to generate
freq : numeric, string, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : string, default None
Name of the resulting IntervalIndex
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Returns
-------
rng : IntervalIndex
Examples
--------
Numeric ``start`` and ``end`` is supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]]
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]]
closed='right', dtype='interval[datetime64[ns]]')
The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]]
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]]
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]]
closed='both', dtype='interval[int64]')
See Also
--------
IntervalIndex : an Index of intervals that are all closed on the same side.
"""
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com._any_none(periods, start, end):
freq = 1 if is_number(endpoint) else 'D'
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, and '
'freq, exactly three must be specified')
if not _is_valid_endpoint(start):
msg = 'start must be numeric or datetime-like, got {start}'
raise ValueError(msg.format(start=start))
elif not _is_valid_endpoint(end):
msg = 'end must be numeric or datetime-like, got {end}'
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError('freq must be numeric or convertible to '
'DateOffset, got {freq}'.format(freq=freq))
# verify type compatibility
if not all([_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq)]):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com._all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com._not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, 'int64')
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
| """
Return the IntervalIndex's data as an IntervalArray.
"""
return self._data |
git.go | // Copyright 2020 The Defold Foundation
// Licensed under the Defold License version 1.0 (the "License"); you may not use
// this file except in compliance with the License.
//
// You may obtain a copy of the License, together with FAQs at
// https://www.defold.com/license
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// +build !windows
/*
Git server library with support for the "smart protocol"
References:
http-backend.c in git
https://github.com/schacon/grack/blob/master/lib/grack.rb
Note: It's currently not required that http.receivepack is set to true.
*/
package git
import (
"bytes"
"compress/zlib"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
var (
defaultLog = log.New(os.Stderr, "", log.LstdFlags)
)
/*
Git server abstraction. Use NewServer() to create an instance.
The git server implements net.http.Handler.
*/
type Server struct {
root string
accessLog *log.Logger
errorLog *log.Logger
authorizor Authorizor
}
func decompress(name string, data string) |
func init() {
// Decompress and write embedded git executables
// We used to prefix these commands with go- but
// git-upload-pack runs "git pack-objects" and will fail
// if git is called go-git
decompress("git", gitCompressedBase64)
decompress("git-upload-pack", gitUploadPackCompressedBase64)
}
/*
Creates a new git server. Git repositories are located under root.
*/
func NewServer(root string, authorizor Authorizor, accessLog, errorLog *log.Logger) *Server {
if accessLog == nil {
accessLog = defaultLog
}
if errorLog == nil {
errorLog = defaultLog
}
return &Server{
root: root,
accessLog: accessLog,
errorLog: errorLog,
authorizor: authorizor,
}
}
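// Example (illustrative sketch, not part of the original file): serving repositories
// under /var/git over plain net/http. "allowAll" is a hypothetical Authorizor that
// accepts every request; a real deployment would validate credentials from the header.
//
//	type allowAll struct{}
//
//	func (allowAll) Authorize(header http.Header, repo string) error { return nil }
//
//	func main() {
//		srv := NewServer("/var/git", allowAll{}, nil, nil)
//		log.Fatal(http.ListenAndServe(":8080", srv))
//	}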
/*
Interface for pluggable authorization
*/
type Authorizor interface {
// Authorize user for repository. For successful authorization
// nil is returned, otherwise an error message.
Authorize(header http.Header, repo string) error
}
type session struct {
repo string
errorLog *log.Logger
}
type gitHandler func(*session, http.ResponseWriter, *http.Request)
type service struct {
method string
handler gitHandler
pattern *regexp.Regexp
}
var services = []service{
{"POST", (*session).rpcUploadPack, regexp.MustCompile("(.*?)/git-upload-pack$")},
{"POST", (*session).rpcReceivePack, regexp.MustCompile("(.*?)/git-receive-pack$")},
{"GET", (*session).getInfoRefs, regexp.MustCompile("(.*?)/info/refs$")},
}
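// writePacket writes s using git's pkt-line framing: a 4-digit hex length that
// includes the 4 length bytes themselves, followed by the payload.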
func writePacket(w io.Writer, s string) error {
_, e := io.WriteString(w, fmt.Sprintf("%04x%s", len(s)+4, s))
return e
}
func noCacheHeaders(rw http.ResponseWriter) {
rw.Header().Set("Expires", "Fri, 01 Jan 1980 00:00:00 GMT")
rw.Header().Set("Pragma", "no-cache")
rw.Header().Set("Cache-Control", "no-cache, max-age=0, must-revalidate")
}
// Used to redirect stderr from git to log
type logWriter struct {
logger *log.Logger
buf *bytes.Buffer
}
func (l *logWriter) Write(p []byte) (int, error) {
if l.buf.Len() < 1024 {
// Limit error output messages
l.buf.Write(p)
}
return len(p), nil
}
func (l *logWriter) Close() error {
if l.buf.Len() > 0 {
l.logger.Println(strings.TrimSpace(l.buf.String()))
}
return nil
}
func (s *session) send(w io.Writer, in io.ReadCloser, gitCmd string, args ...string) error {
// git-x-y -> x-y
gitSubCmd := strings.SplitN(gitCmd, "-", 2)[1]
var prefix []string
// upload-pack is a special case. In general, git embeds
// most commands but not upload-pack
if gitSubCmd == "upload-pack" {
prefix = []string{"./git-upload-pack"}
} else {
prefix = []string{"./git", gitSubCmd}
}
lw := &logWriter{s.errorLog, bytes.NewBuffer(nil)}
cmd := &exec.Cmd{
Path: prefix[0],
Args: append(prefix, args...),
Stderr: lw,
}
cmd.Stdin = in
cmd.Stdout = w
defer in.Close()
defer lw.Close()
err := cmd.Start()
if err != nil {
return err
}
if err = cmd.Wait(); err != nil {
return err
}
return nil
}
func (s *session) serviceRpc(serviceName string, rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", fmt.Sprintf("application/x-git-%s-result", serviceName))
gitCmd := fmt.Sprintf("git-%s", serviceName)
if err := s.send(rw, r.Body, gitCmd, "--stateless-rpc", s.repo); err != nil {
http.Error(rw, "Internal server error", http.StatusInternalServerError)
s.errorLog.Printf("%s failed: %v", gitCmd, err)
}
}
func (s *session) rpcUploadPack(rw http.ResponseWriter, r *http.Request) {
s.serviceRpc("upload-pack", rw, r)
}
func (s *session) rpcReceivePack(rw http.ResponseWriter, r *http.Request) {
s.serviceRpc("receive-pack", rw, r)
}
func (s *session) getInfoRefs(rw http.ResponseWriter, r *http.Request) {
service := r.URL.Query().Get("service")
if !(service == "git-upload-pack" || service == "git-receive-pack") {
rw.WriteHeader(http.StatusBadRequest)
io.WriteString(rw, fmt.Sprintf("Invalid service: '%s'\n", service))
return
}
serviceName := service[4:]
// Write info-refs to a buffer first in order to be able
// to set the http error code if an error occurs.
bw := bytes.NewBuffer(nil)
if err := s.send(bw, r.Body, service, "--stateless-rpc", "--advertise-refs", s.repo); err != nil {
http.Error(rw, "Internal server error", http.StatusInternalServerError)
s.errorLog.Printf("%s failed: %v", service, err)
} else {
rw.Header().Set("Content-Type", fmt.Sprintf("application/x-git-%s-advertisement", serviceName))
writePacket(rw, fmt.Sprintf("# service=git-%s\n", serviceName))
io.WriteString(rw, "0000")
rw.Write(bw.Bytes())
}
}
func (s *Server) serveHTTP(rw http.ResponseWriter, r *http.Request) {
// Currently caching is disabled for all requests
// Better be safe than sorry
noCacheHeaders(rw)
// TODO: We get broken pipe when pushing large data if connection isn't closed
// Potential bug: https://code.google.com/p/go/issues/detail?id=5660
rw.Header().Set("Connection", "close")
for _, srv := range services {
res := srv.pattern.FindStringSubmatch(r.URL.Path)
if r.Method == srv.method && len(res) > 0 {
if err := s.authorizor.Authorize(r.Header, res[1]); err != nil {
if authHeader := r.Header.Get("Authorization"); authHeader == "" {
// TODO: Move logic to Authorizor?
rw.Header().Add("WWW-Authenticate", `Basic realm="HTTP GIT authentication"`)
http.Error(rw, "Not authorized", http.StatusUnauthorized)
} else {
s.errorLog.Printf("Authorization failed: %v", err)
http.Error(rw, "Forbidden", http.StatusForbidden)
}
return
}
sess := &session{
repo: filepath.Join(s.root, res[1]),
errorLog: s.errorLog}
srv.handler(sess, rw, r)
return
}
}
http.NotFound(rw, r)
}
/*
Indirection to capture http status code
*/
type responseLogger struct {
rw http.ResponseWriter
code int
}
func (l *responseLogger) Header() http.Header {
return l.rw.Header()
}
func (l *responseLogger) Write(p []byte) (int, error) {
return l.rw.Write(p)
}
func (l *responseLogger) WriteHeader(code int) {
l.code = code
l.rw.WriteHeader(code)
}
/*
net.http.Handler implementation
*/
func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
lrw := &responseLogger{rw, 200}
s.serveHTTP(lrw, r)
s.accessLog.Printf(`%s "%s %s" %d`, r.RemoteAddr, r.Method, r.RequestURI, lrw.code)
}
| {
compressed, _ := base64.StdEncoding.DecodeString(data)
zr, err := zlib.NewReader(bytes.NewBuffer(compressed))
if err != nil {
panic(err)
}
git, err := ioutil.ReadAll(zr)
if err != nil {
panic(err)
}
g, err := os.Create(name)
if err != nil {
panic(err)
}
g.Chmod(0777)
_, err = g.Write(git)
if err != nil {
panic(err)
}
err = g.Close()
if err != nil {
panic(err)
}
} |
action-button.js | import React from 'react';
import { Mutation } from 'react-apollo';
import gql from 'graphql-tag';
import { GET_LAUNCH_DETAILS } from '../pages/launch';
import Button from '../components/button';
const CANCEL_TRIP = gql`
mutation cancel($launchId: ID!) {
cancelTrip(launchId: $launchId) {
success
message
launches {
id
isBooked
}
}
}
`;
const TOGGLE_CART = gql`
mutation addOrRemoveFromCart($launchId: ID!) {
addOrRemoveFromCart(id: $launchId) @client
}
`;
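// Note (explanatory, not part of the original file): the @client directive above
// marks addOrRemoveFromCart as a local-state mutation, so Apollo resolves it with
// client-side resolvers/cache instead of sending it to the GraphQL server.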
export default function | ({ isBooked, id, isInCart }) {
return (
<Mutation
mutation={isBooked ? CANCEL_TRIP : TOGGLE_CART}
variables={{ launchId: id }}
refetchQueries={[
{
query: GET_LAUNCH_DETAILS,
variables: { launchId: id },
},
]}
>
{(mutate, { loading, error }) => {
if (loading) return <p>Loading...</p>;
if (error) return <p>An error occurred</p>;
return (
<div>
<Button
onClick={mutate}
isBooked={isBooked}
data-testid={'action-button'}
>
{isBooked
? 'Cancel This Trip'
: isInCart
? 'Remove from Cart'
: 'Add to Cart'}
</Button>
</div>
);
}}
</Mutation>
);
} | ActionButton |
a2c.py | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from reinforce import Reinforce
from model import CriticNet
import feh_simulator.simulator as gym
class A2C(Reinforce):
# Implementation of N-step Advantage Actor Critic.
# This class inherits the Reinforce class, so for example, you can reuse
# generate_episode() here.
def __init__(self, env, lr, critic_lr, gamma, n, policy_path, critic_path, load=False):
# Initializes A2C.
# Args:
# - env: The environment to train in.
# - lr: Learning rate for the actor (policy) model.
# - critic_lr: Learning rate for the critic model.
# - gamma: Discount factor.
# - n: The value of N in N-step A2C.
# - policy_path / critic_path: Paths used to save and load the actor and critic.
# - load: Whether to load previously saved models.
Reinforce.__init__(self, env, lr, gamma=gamma, save_path=policy_path, load=load)
self.critic_path = critic_path
s_len = self.env.observation_space_shape[0]
self.critic = CriticNet(critic_lr, s_len=s_len)
self.n = n
if load:
self.critic.load(self.critic_path)
print("Hyperparameters:\nPolicy LR = {} Critic LR = {} Gamma = {} N = {} \nPolicy Path = {} \nCritic Path = {} \nLoad = {}".format(
lr, critic_lr, gamma, n, policy_path, critic_path, load
))
return
def train(self):
# Trains the model with N-step A2C; each iteration generates one episode and updates the actor and critic.
|
def main():
env = gym.make('FEH-v1')
n = 50
a2c = A2C(env=env, lr=0.0001, gamma=0.99, critic_lr=0.0001, n=n,
policy_path="./saved_model/a2c_policy-v2-n{}.h5".format(n),
critic_path="./saved_model/a2c_critic_v2-n{}.h5".format(n),
load=False)
a2c.train()
return
if __name__ == '__main__':
main()
| K = 500
print("pretrain test:")
print('episode 0 ', end='')
self.test()
print("training")
# generate an episode
gamma_n_1 = self.gamma ** (self.n - 1)
gamma_n = gamma_n_1 * self.gamma
for i in range(10000000):
s, ava, a, r = self.generate_episode()
s = np.array(s)
r = np.array(r)
r /= 100.0
T = len(r)
if self.n >= T:
n = T - 1
else:
n = self.n
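# Backward pass over the episode: sum_r[t] accumulates the discounted n-step
# reward sum r_t + gamma*r_{t+1} + ... + gamma^(n-1)*r_{t+n-1}; the critic value
# V(s_{t+n}) is bootstrapped later via V_end (zero once t+n runs past the end).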
sum_r = np.zeros(shape=(T, ), dtype=np.float32)
sum_r[T - 1] = r[T - 1]
for p in range(2, n + 1, 1):
sum_r[T - p] = sum_r[T - p + 1] * self.gamma + r[T - p]
for q in range(n + 1, T + 1, 1):
sum_r[T - q] = (sum_r[T - q + 1] - gamma_n_1 * r[T - q + n]) * self.gamma + r[T - q]
V_end = np.zeros(shape=(T,), dtype=np.float32)
for j in range(6):
V = self.critic.predict(s)
V_end[0:T-n] = V[n: T]
R = gamma_n * V_end + sum_r
G = R - V
self.model.fit(s, ava, a, G)
self.critic.fit(s, R)
if (i + 1) % K == 0:
print('episode {} '.format(i + 1), end='')
self.test()
self.model.save(self.save_path)
self.critic.save(self.critic_path)
self.model.save(self.save_path)
return |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
|
if __name__ == '__main__':
main()
| os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'adv_1_2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv) |
device.rs | use bluez_generated::OrgBluezDevice1Properties;
use dbus::arg::{cast, RefArg, Variant};
use dbus::Path;
use std::collections::HashMap;
use std::fmt::{self, Display, Formatter};
use std::str::FromStr;
use uuid::Uuid;
use crate::{AdapterId, BluetoothError, MacAddress};
/// Opaque identifier for a Bluetooth device which the system knows about. This includes a reference
/// to which Bluetooth adapter it was discovered on, which means that any attempt to connect to it
/// will also happen from that adapter (in case the system has more than one).
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct DeviceId {
pub(crate) object_path: Path<'static>,
}
impl DeviceId {
pub(crate) fn new(object_path: &str) -> Self {
Self {
object_path: object_path.to_owned().into(),
}
}
/// Get the ID of the Bluetooth adapter on which this device was discovered, e.g. `"hci0"`.
pub fn adapter(&self) -> AdapterId {
let index = self
.object_path
.rfind('/')
.expect("DeviceId object_path must contain a slash.");
AdapterId::new(&self.object_path[0..index])
}
}
impl From<DeviceId> for Path<'static> {
fn from(id: DeviceId) -> Self {
id.object_path
}
}
impl Display for DeviceId {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
self.object_path
.to_string()
.strip_prefix("/org/bluez/")
.ok_or(fmt::Error)?
)
}
}
/// Information about a Bluetooth device which was discovered.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct | {
/// An opaque identifier for the device, including a reference to which adapter it was
/// discovered on. This can be used to connect to it.
pub id: DeviceId,
/// The MAC address of the device.
pub mac_address: MacAddress,
/// The type of MAC address the device uses.
pub address_type: AddressType,
/// The human-readable name of the device, if available.
pub name: Option<String>,
/// The appearance of the device, as defined by GAP.
pub appearance: Option<u16>,
/// The GATT service UUIDs (if any) from the device's advertisement or service discovery.
///
/// Note that service discovery only happens after a connection has been made to the device, but
/// BlueZ may cache the list of services after it is disconnected.
pub services: Vec<Uuid>,
/// Whether the device is currently paired with the adapter.
pub paired: bool,
/// Whether the device is currently connected to the adapter.
pub connected: bool,
/// The Received Signal Strength Indicator of the device advertisement or inquiry.
pub rssi: Option<i16>,
/// The transmission power level advertised by the device.
pub tx_power: Option<i16>,
/// Manufacturer-specific advertisement data, if any. The keys are 'manufacturer IDs'.
pub manufacturer_data: HashMap<u16, Vec<u8>>,
/// The GATT service data from the device's advertisement, if any. This is a map from the
/// service UUID to its data.
pub service_data: HashMap<Uuid, Vec<u8>>,
/// Whether service discovery has finished for the device.
pub services_resolved: bool,
}
impl DeviceInfo {
pub(crate) fn from_properties(
id: DeviceId,
device_properties: OrgBluezDevice1Properties,
) -> Result<DeviceInfo, BluetoothError> {
let mac_address = device_properties
.address()
.ok_or_else(|| BluetoothError::RequiredPropertyMissing("Address"))?;
let address_type = device_properties
.address_type()
.ok_or_else(|| BluetoothError::RequiredPropertyMissing("AddressType"))?
.parse()?;
let services = get_services(device_properties);
let manufacturer_data = get_manufacturer_data(device_properties).unwrap_or_default();
let service_data = get_service_data(device_properties).unwrap_or_default();
Ok(DeviceInfo {
id,
mac_address: MacAddress(mac_address.to_owned()),
address_type,
name: device_properties.name().cloned(),
appearance: device_properties.appearance(),
services,
paired: device_properties
.paired()
.ok_or_else(|| BluetoothError::RequiredPropertyMissing("Paired"))?,
connected: device_properties
.connected()
.ok_or_else(|| BluetoothError::RequiredPropertyMissing("Connected"))?,
rssi: device_properties.rssi(),
tx_power: device_properties.tx_power(),
manufacturer_data,
service_data,
services_resolved: device_properties
.services_resolved()
.ok_or_else(|| BluetoothError::RequiredPropertyMissing("ServicesResolved"))?,
})
}
}
/// MAC address type of a Bluetooth device.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum AddressType {
/// Public address.
Public,
/// Random address.
Random,
}
impl AddressType {
fn as_str(&self) -> &'static str {
match self {
Self::Public => "public",
Self::Random => "random",
}
}
}
impl Display for AddressType {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.write_str(self.as_str())
}
}
impl FromStr for AddressType {
type Err = BluetoothError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"public" => Ok(Self::Public),
"random" => Ok(Self::Random),
_ => Err(BluetoothError::AddressTypeParseError(s.to_owned())),
}
}
}
fn get_manufacturer_data(
device_properties: OrgBluezDevice1Properties,
) -> Option<HashMap<u16, Vec<u8>>> {
Some(convert_manufacturer_data(
device_properties.manufacturer_data()?,
))
}
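/// Convert the D-Bus `ManufacturerData` map of `u16 -> Variant` values into a
/// plain `HashMap<u16, Vec<u8>>`, skipping (and logging) entries whose payload
/// is not a byte vector.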
pub(crate) fn convert_manufacturer_data(
data: &HashMap<u16, Variant<Box<dyn RefArg>>>,
) -> HashMap<u16, Vec<u8>> {
data.iter()
.filter_map(|(&k, v)| {
if let Some(v) = cast::<Vec<u8>>(&v.0) {
Some((k, v.to_owned()))
} else {
log::warn!("Manufacturer data had wrong type: {:?}", &v.0);
None
}
})
.collect()
}
fn get_service_data(
device_properties: OrgBluezDevice1Properties,
) -> Option<HashMap<Uuid, Vec<u8>>> {
// UUIDs don't get populated until we connect. Use:
// "ServiceData": Variant(InternalDict { data: [
// ("0000fe95-0000-1000-8000-00805f9b34fb", Variant([48, 88, 91, 5, 1, 23, 33, 215, 56, 193, 164, 40, 1, 0])
// )], outer_sig: Signature("a{sv}") })
// instead.
Some(
device_properties
.service_data()?
.iter()
.filter_map(|(k, v)| match Uuid::parse_str(k) {
Ok(uuid) => {
if let Some(v) = cast::<Vec<u8>>(&v.0) {
Some((uuid, v.to_owned()))
} else {
log::warn!("Service data had wrong type: {:?}", &v.0);
None
}
}
Err(err) => {
log::warn!("Error parsing service data UUID: {}", err);
None
}
})
.collect(),
)
}
fn get_services(device_properties: OrgBluezDevice1Properties) -> Vec<Uuid> {
if let Some(uuids) = device_properties.uuids() {
uuids
.iter()
.filter_map(|uuid| {
Uuid::parse_str(uuid)
.map_err(|err| {
log::warn!("Error parsing service data UUID: {}", err);
err
})
.ok()
})
.collect()
} else {
vec![]
}
}
#[cfg(test)]
mod tests {
use crate::uuid_from_u32;
use super::*;
#[test]
fn device_adapter() {
let adapter_id = AdapterId::new("/org/bluez/hci0");
let device_id = DeviceId::new("/org/bluez/hci0/dev_11_22_33_44_55_66");
assert_eq!(device_id.adapter(), adapter_id);
}
#[test]
fn service_data() {
let uuid = uuid_from_u32(0x11223344);
let mut service_data: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new();
service_data.insert(uuid.to_string(), Variant(Box::new(vec![1u8, 2, 3])));
let mut device_properties: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new();
device_properties.insert("ServiceData".to_string(), Variant(Box::new(service_data)));
let mut expected_service_data = HashMap::new();
expected_service_data.insert(uuid, vec![1u8, 2, 3]);
assert_eq!(
get_service_data(OrgBluezDevice1Properties(&device_properties)),
Some(expected_service_data)
);
}
#[test]
fn manufacturer_data() {
let manufacturer_id = 0x1122;
let mut manufacturer_data: HashMap<u16, Variant<Box<dyn RefArg>>> = HashMap::new();
manufacturer_data.insert(manufacturer_id, Variant(Box::new(vec![1u8, 2, 3])));
let mut device_properties: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new();
device_properties.insert(
"ManufacturerData".to_string(),
Variant(Box::new(manufacturer_data)),
);
let mut expected_manufacturer_data = HashMap::new();
expected_manufacturer_data.insert(manufacturer_id, vec![1u8, 2, 3]);
assert_eq!(
get_manufacturer_data(OrgBluezDevice1Properties(&device_properties)),
Some(expected_manufacturer_data)
);
}
#[test]
fn device_info_minimal() {
let id = DeviceId::new("/org/bluez/hci0/dev_11_22_33_44_55_66");
let mut device_properties: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new();
device_properties.insert(
"Address".to_string(),
Variant(Box::new("00:11:22:33:44:55".to_string())),
);
device_properties.insert(
"AddressType".to_string(),
Variant(Box::new("public".to_string())),
);
device_properties.insert("Paired".to_string(), Variant(Box::new(false)));
device_properties.insert("Connected".to_string(), Variant(Box::new(false)));
device_properties.insert("ServicesResolved".to_string(), Variant(Box::new(false)));
let device =
DeviceInfo::from_properties(id.clone(), OrgBluezDevice1Properties(&device_properties))
.unwrap();
assert_eq!(
device,
DeviceInfo {
id,
mac_address: MacAddress("00:11:22:33:44:55".to_string()),
address_type: AddressType::Public,
name: None,
appearance: None,
services: vec![],
paired: false,
connected: false,
rssi: None,
tx_power: None,
manufacturer_data: HashMap::new(),
service_data: HashMap::new(),
services_resolved: false,
}
)
}
#[test]
fn get_services_none() {
let device_properties: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new();
assert_eq!(
get_services(OrgBluezDevice1Properties(&device_properties)),
vec![]
)
}
#[test]
fn get_services_some() {
let uuid = uuid_from_u32(0x11223344);
let uuids = vec![uuid.to_string()];
let mut device_properties: HashMap<String, Variant<Box<dyn RefArg>>> = HashMap::new();
device_properties.insert("UUIDs".to_string(), Variant(Box::new(uuids)));
assert_eq!(
get_services(OrgBluezDevice1Properties(&device_properties)),
vec![uuid]
)
}
#[test]
fn address_type_parse() {
for &address_type in &[AddressType::Public, AddressType::Random] {
assert_eq!(
address_type.to_string().parse::<AddressType>().unwrap(),
address_type
);
}
}
}
| DeviceInfo |
leave_room.rs | //! [POST /_matrix/client/r0/rooms/{roomId}/leave](https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-rooms-roomid-leave)
use ruma_api::ruma_api;
use ruma_identifiers::RoomId;
ruma_api! {
metadata: {
description: "Leave a room.",
method: POST,
name: "leave_room",
path: "/_matrix/client/r0/rooms/:room_id/leave",
rate_limited: true,
authentication: AccessToken,
}
request: {
/// The room to leave.
#[ruma_api(path)]
pub room_id: &'a RoomId,
/// Optional reason to be included as the `reason` on the subsequent membership event.
#[cfg(feature = "unstable-pre-spec")]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-pre-spec")))]
#[serde(skip_serializing_if = "Option::is_none")]
pub reason: Option<&'a str>,
}
|
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id.
pub fn new(room_id: &'a RoomId) -> Self {
Self {
room_id,
#[cfg(feature = "unstable-pre-spec")]
reason: None,
}
}
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self {
Self {}
}
} | #[derive(Default)]
response: {} |
label.go | package widget
import (
"image/color"
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/data/binding"
"fyne.io/fyne/v2/internal/cache"
"fyne.io/fyne/v2/theme"
)
// Label widget is a label component with appropriate padding and layout.
type Label struct {
BaseWidget
Text string
Alignment fyne.TextAlign // The alignment of the Text
Wrapping fyne.TextWrap // The wrapping of the Text
TextStyle fyne.TextStyle // The style of the label text
provider *textProvider
textSource binding.String
textListener binding.DataListener
}
// NewLabel creates a new label widget with the set text content
func | (text string) *Label {
return NewLabelWithStyle(text, fyne.TextAlignLeading, fyne.TextStyle{})
}
// NewLabelWithData returns a Label widget connected to the specified data source.
//
// Since: 2.0
func NewLabelWithData(data binding.String) *Label {
label := NewLabel("")
label.Bind(data)
return label
}
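// Example (illustrative sketch, not part of the original file): binding a label
// to a string value so it refreshes whenever the value changes.
//
//	str := binding.NewString()
//	label := NewLabelWithData(str)
//	_ = str.Set("Hello") // the label picks up the new text via its listener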
// NewLabelWithStyle creates a new label widget with the set text content
func NewLabelWithStyle(text string, alignment fyne.TextAlign, style fyne.TextStyle) *Label {
l := &Label{
Text: text,
Alignment: alignment,
TextStyle: style,
}
return l
}
// Bind connects the specified data source to this Label.
// The current value will be displayed and any changes in the data will cause the widget to update.
//
// Since: 2.0
func (l *Label) Bind(data binding.String) {
l.Unbind()
l.textSource = data
l.createListener()
data.AddListener(l.textListener)
}
// Refresh checks if the text content should be updated then refreshes the graphical context
func (l *Label) Refresh() {
if l.provider == nil { // not created until visible
return
}
if l.Text != string(l.provider.buffer) {
l.provider.setText(l.Text)
} else {
l.provider.updateRowBounds() // if truncate/wrap has changed
}
l.BaseWidget.Refresh()
}
// Resize sets a new size for the label.
// Note this should not be used if the widget is being managed by a Layout within a Container.
func (l *Label) Resize(size fyne.Size) {
l.BaseWidget.Resize(size)
if l.provider == nil { // not created until visible
return
}
l.provider.Resize(size)
}
// SetText sets the text of the label
func (l *Label) SetText(text string) {
l.Text = text
if l.provider == nil { // not created until visible
return
}
l.provider.setText(text) // calls refresh
}
// Unbind disconnects any configured data source from this Label.
// The current value will remain at the last value of the data source.
//
// Since: 2.0
func (l *Label) Unbind() {
src := l.textSource
if src == nil {
return
}
src.RemoveListener(l.textListener)
l.textSource = nil
}
func (l *Label) createListener() {
if l.textListener != nil {
return
}
l.textListener = binding.NewDataListener(func() {
src := l.textSource
if src == nil {
return
}
val, err := src.Get()
if err != nil {
fyne.LogError("Error getting current data value", err)
return
}
l.Text = val
if cache.IsRendered(l) {
l.Refresh()
}
})
}
// textAlign tells the rendering textProvider our alignment
func (l *Label) textAlign() fyne.TextAlign {
return l.Alignment
}
// textWrap tells the rendering textProvider our wrapping
func (l *Label) textWrap() fyne.TextWrap {
return l.Wrapping
}
// textStyle tells the rendering textProvider our style
func (l *Label) textStyle() fyne.TextStyle {
return l.TextStyle
}
// textColor tells the rendering textProvider our color
func (l *Label) textColor() color.Color {
return theme.ForegroundColor()
}
// concealed tells the rendering textProvider if we are a concealed field
func (l *Label) concealed() bool {
return false
}
// object returns the root object of the widget so it can be referenced
func (l *Label) object() fyne.Widget {
return l.super()
}
// CreateRenderer is a private method to Fyne which links this widget to its renderer
func (l *Label) CreateRenderer() fyne.WidgetRenderer {
l.ExtendBaseWidget(l)
l.provider = newTextProvider(l.Text, l)
l.provider.extraPad = fyne.NewSize(theme.Padding(), theme.Padding())
l.provider.size = l.size
return l.provider.CreateRenderer()
}
// MinSize returns the size that this widget should not shrink below
func (l *Label) MinSize() fyne.Size {
l.ExtendBaseWidget(l)
if p := l.provider; p != nil && l.Text != string(p.buffer) {
p.setText(l.Text)
}
return l.BaseWidget.MinSize()
}
| NewLabel |
middlewares.go | // HTTP Middlewares
package web
import (
"fmt"
"io"
"os"
"time"
"github.com/gin-gonic/gin"
"github.com/mattn/go-isatty"
)
var (
green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
reset = string([]byte{27, 91, 48, 109})
disableColor = false
)
// Logger instances a Logger middleware that will write the logs, this is a
// slightly different version of the built-in gin Logger()
func Logger() gin.HandlerFunc {
return LoggerWithWriter(gin.DefaultWriter)
}
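// Example (illustrative sketch, not part of the original file): installing the
// middleware on a gin engine while skipping a hypothetical health-check path.
//
//	r := gin.New()
//	r.Use(LoggerWithWriter(os.Stdout, "/healthz"))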
// LoggerWithWriter instances a Logger middleware with the specified writer buffer.
// Example: os.Stdout, a file opened in write mode, a socket...
func | (out io.Writer, notlogged ...string) gin.HandlerFunc {
isTerm := true
if w, ok := out.(*os.File); !ok ||
(os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd()))) ||
disableColor {
isTerm = false
}
var skip map[string]struct{}
if length := len(notlogged); length > 0 {
skip = make(map[string]struct{}, length)
for _, path := range notlogged {
skip[path] = struct{}{}
}
}
return func(c *gin.Context) {
// Start timer
start := time.Now()
path := c.Request.URL.Path
raw := c.Request.URL.RawQuery
// Process request
c.Next()
// Log only when path is not being skipped
if _, ok := skip[path]; !ok {
// Stop timer
end := time.Now()
latency := end.Sub(start)
clientIP := c.ClientIP()
method := c.Request.Method
statusCode := c.Writer.Status()
var statusColor, methodColor, resetColor string
if isTerm {
statusColor = colorForStatus(statusCode)
methodColor = colorForMethod(method)
resetColor = reset
}
comment := c.Errors.ByType(gin.ErrorTypePrivate).String()
if raw != "" {
path = path + "?" + raw
}
fmt.Fprintf(out, "%v |%s %3d %s| %13v | %15s |%s %-7s %s %s\n%s",
end.Format("2006/01/02 - 15:04:05"),
statusColor, statusCode, resetColor,
latency,
clientIP,
methodColor, method, resetColor,
path,
comment,
)
}
}
}
func colorForStatus(code int) string {
switch {
case code >= 200 && code < 300:
return green
case code >= 300 && code < 400:
return white
case code >= 400 && code < 500:
return yellow
default:
return red
}
}
func colorForMethod(method string) string {
switch method {
case "GET":
return blue
case "POST":
return cyan
case "PUT":
return yellow
case "DELETE":
return red
case "PATCH":
return green
case "HEAD":
return magenta
case "OPTIONS":
return white
default:
return reset
}
}
| LoggerWithWriter |
baseoperator.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
from cached_property import cached_property
import copy
import functools
import logging
import sys
import warnings
from datetime import timedelta, datetime
from typing import Callable, Dict, Iterable, List, Optional, Set
import jinja2
import six
from airflow import configuration, settings
from airflow.exceptions import AirflowException
from airflow.lineage import prepare_lineage, apply_lineage, DataSet
from airflow.models.dag import DAG
from airflow.models.taskinstance import TaskInstance, clear_task_instances
from airflow.models.xcom import XCOM_RETURN_KEY
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.decorators import apply_defaults
from airflow.utils.helpers import validate_key
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.operator_resources import Resources
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
@functools.total_ordering
class BaseOperator(LoggingMixin):
"""
Abstract base class for all operators. Since operators create objects that
become nodes in the dag, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:type task_id: str
:param owner: the owner of the task, using the unix username is recommended
:type owner: str
:param retries: the number of retries that should be performed before
failing the task
:type retries: int
:param retry_delay: delay between retries
:type retry_delay: datetime.timedelta
:param retry_exponential_backoff: allow progressive longer waits between
retries by using exponential backoff algorithm on retry delay (delay
will be converted into seconds)
:type retry_exponential_backoff: bool
:param max_retry_delay: maximum delay interval between retries
:type max_retry_delay: datetime.timedelta
:param start_date: The ``start_date`` for the task, determines
the ``execution_date`` for the first task instance. The best practice
is to have the start_date rounded
to your DAG's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``execution_date`` and adds the ``schedule_interval`` to determine
the next ``execution_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_date are offset in a way that their execution_date don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:type start_date: datetime.datetime
:param end_date: if specified, the scheduler won't go beyond this date
:type end_date: datetime.datetime
:param depends_on_past: when set to true, task instances will run
sequentially while relying on the previous task's schedule to
succeed. The task instance for the start_date is allowed to run.
:type depends_on_past: bool
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used.
:type wait_for_downstream: bool
:param queue: which queue to target when running this job. Not
all executors implement queue management, the CeleryExecutor
does support targeting specific queues.
:type queue: str
:param dag: a reference to the dag the task is attached to (if any)
:type dag: airflow.models.DAG
:param priority_weight: priority weight of this task against other task.
This allows the executor to trigger higher priority tasks before
others when things get backed up. Set priority_weight as a higher
number for more important tasks.
:type priority_weight: int
:param weight_rule: weighting method used for the effective total
priority weight of the task. Options are:
``{ downstream | upstream | absolute }`` default is ``downstream``
When set to ``downstream`` the effective weight of the task is the
aggregate sum of all downstream descendants. As a result, upstream
tasks will have higher weight and will be scheduled more aggressively
when using positive weight values. This is useful when you have
multiple dag run instances and desire to have all upstream tasks to
complete for all runs before each dag can continue processing
downstream tasks. When set to ``upstream`` the effective weight is the
aggregate sum of all upstream ancestors. This is the opposite where
downstream tasks have higher weight and will be scheduled more
aggressively when using positive weight values. This is useful when you
have multiple dag run instances and prefer to have each dag complete
before starting upstream tasks of other dags. When set to
``absolute``, the effective weight is the exact ``priority_weight``
specified without additional weighting. You may want to do this when
you know exactly what priority weight each task should have.
Additionally, when set to ``absolute``, there is a bonus effect of
significantly speeding up the task creation process for very large
DAGs. Options can be set as string or using the constants defined in
the static class ``airflow.utils.WeightRule``
:type weight_rule: str
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:type pool: str
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
example if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
The scheduler pays special attention to jobs with an SLA and
sends alert
emails for sla misses. SLA misses are also recorded in the database
for future reference. All tasks that share the same SLA time
get bundled in a single email, sent soon after that time. SLA
notifications are sent once and only once for each task instance.
:type sla: datetime.timedelta
:param execution_timeout: max time allowed for the execution of
this task instance, if it goes beyond it will raise and fail.
:type execution_timeout: datetime.timedelta
:param on_failure_callback: a function to be called when a task instance
of this task fails. a context dictionary is passed as a single
parameter to this function. Context contains references to related
objects to the task instance and is documented under the macros
section of the API.
:type on_failure_callback: callable
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:type on_retry_callback: callable
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:type on_success_callback: callable
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | one_success |
one_failed | none_failed | none_skipped | dummy}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:type trigger_rule: str
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:type resources: dict
:param run_as_user: unix username to impersonate while running the task
:type run_as_user: str
:param task_concurrency: When set, a task will be able to limit the concurrent
runs across execution_dates
:type task_concurrency: int
:param executor_config: Additional task-level configuration parameters that are
interpreted by a specific executor. Parameters are namespaced by the name of
executor.
**Example**: to run this task in a specific docker container through
the KubernetesExecutor ::
MyOperator(...,
executor_config={
"KubernetesExecutor":
{"image": "myCustomDockerImage"}
}
)
:type executor_config: dict
:param do_xcom_push: if True, an XCom is pushed containing the Operator's
result
:type do_xcom_push: bool
"""
# For derived classes to define which fields will get jinjaified
template_fields = [] # type: Iterable[str]
# Defines which files extensions to look for in the templated fields
template_ext = [] # type: Iterable[str]
# Defines the color in the UI
ui_color = '#fff'
ui_fgcolor = '#000'
# base list which includes all the attrs that don't need deep copy.
_base_operator_shallow_copy_attrs = ('user_defined_macros',
'user_defined_filters',
'params',
'_log',)
# each operator should override this class attr for shallow copy attrs.
shallow_copy_attrs = () # type: Iterable[str]
# Defines the operator level extra links
operator_extra_links = () # type: Iterable[BaseOperatorLink]
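# Illustrative sketch (not part of the original file): the minimal contract for a
# concrete operator is to override execute(), e.g.
#
#   class HelloOperator(BaseOperator):
#       def execute(self, context):
#           self.log.info("Hello from task %s", self.task_id)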
@apply_defaults
def __init__(
self,
task_id: str,
owner: str = configuration.conf.get('operators', 'DEFAULT_OWNER'),
email: Optional[str] = None,
email_on_retry: bool = True,
email_on_failure: bool = True,
retries: int = 0,
retry_delay: timedelta = timedelta(seconds=300),
retry_exponential_backoff: bool = False,
max_retry_delay: Optional[datetime] = None,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
schedule_interval=None, # not hooked as of now
depends_on_past: bool = False,
wait_for_downstream: bool = False,
dag: Optional[DAG] = None,
params: Optional[Dict] = None,
default_args: Optional[Dict] = None,
priority_weight: int = 1,
weight_rule: str = WeightRule.DOWNSTREAM,
queue: str = configuration.conf.get('celery', 'default_queue'),
pool: Optional[str] = None,
sla: Optional[timedelta] = None,
execution_timeout: Optional[timedelta] = None,
on_failure_callback: Optional[Callable] = None,
on_success_callback: Optional[Callable] = None,
on_retry_callback: Optional[Callable] = None,
trigger_rule: str = TriggerRule.ALL_SUCCESS,
resources: Optional[Dict] = None,
run_as_user: Optional[str] = None,
task_concurrency: Optional[int] = None,
executor_config: Optional[Dict] = None,
do_xcom_push: bool = True,
inlets: Optional[Dict] = None,
outlets: Optional[Dict] = None,
*args,
**kwargs
):
if args or kwargs:
# TODO remove *args and **kwargs in Airflow 2.0
warnings.warn(
'Invalid arguments were passed to {c} (task_id: {t}). '
'Support for passing such arguments will be dropped in '
'Airflow 2.0. Invalid arguments were:'
'\n*args: {a}\n**kwargs: {k}'.format(
c=self.__class__.__name__, a=args, k=kwargs, t=task_id),
category=PendingDeprecationWarning,
stacklevel=3
)
validate_key(task_id)
self.task_id = task_id
self.owner = owner
self.email = email
self.email_on_retry = email_on_retry
self.email_on_failure = email_on_failure
self.start_date = start_date
if start_date and not isinstance(start_date, datetime):
self.log.warning("start_date for %s isn't datetime.datetime", self)
elif start_date:
self.start_date = timezone.convert_to_utc(start_date)
self.end_date = end_date
if end_date:
self.end_date = timezone.convert_to_utc(end_date)
if not TriggerRule.is_valid(trigger_rule):
raise AirflowException(
"The trigger_rule must be one of {all_triggers},"
"'{d}.{t}'; received '{tr}'."
.format(all_triggers=TriggerRule.all_triggers(),
d=dag.dag_id if dag else "", t=task_id, tr=trigger_rule))
self.trigger_rule = trigger_rule
self.depends_on_past = depends_on_past
self.wait_for_downstream = wait_for_downstream
if wait_for_downstream:
self.depends_on_past = True
if schedule_interval:
self.log.warning(
"schedule_interval is used for %s, though it has "
"been deprecated as a task parameter, you need to "
"specify it as a DAG parameter instead",
self
)
self._schedule_interval = schedule_interval
self.retries = retries
self.queue = queue
self.pool = pool
self.sla = sla
self.execution_timeout = execution_timeout
self.on_failure_callback = on_failure_callback
self.on_success_callback = on_success_callback
self.on_retry_callback = on_retry_callback
if isinstance(retry_delay, timedelta):
self.retry_delay = retry_delay
else:
self.log.debug("Retry_delay isn't timedelta object, assuming secs")
self.retry_delay = timedelta(seconds=retry_delay)
self.retry_exponential_backoff = retry_exponential_backoff
self.max_retry_delay = max_retry_delay
self.params = params or {} # Available in templates!
self.priority_weight = priority_weight
if not WeightRule.is_valid(weight_rule):
raise AirflowException(
"The weight_rule must be one of {all_weight_rules},"
"'{d}.{t}'; received '{tr}'."
.format(all_weight_rules=WeightRule.all_weight_rules,
d=dag.dag_id if dag else "", t=task_id, tr=weight_rule))
self.weight_rule = weight_rule
self.resources = Resources(**(resources or {}))
self.run_as_user = run_as_user
self.task_concurrency = task_concurrency
self.executor_config = executor_config or {}
self.do_xcom_push = do_xcom_push
# Private attributes
self._upstream_task_ids = set() # type: Set[str]
self._downstream_task_ids = set() # type: Set[str]
if not dag and settings.CONTEXT_MANAGER_DAG:
dag = settings.CONTEXT_MANAGER_DAG
if dag:
self.dag = dag
self._log = logging.getLogger("airflow.task.operators")
# lineage
self.inlets = [] # type: List[DataSet]
self.outlets = [] # type: List[DataSet]
self.lineage_data = None
self._inlets = {
"auto": False,
"task_ids": [],
"datasets": [],
}
self._outlets = {
"datasets": [],
} # type: Dict
if inlets:
self._inlets.update(inlets)
if outlets:
self._outlets.update(outlets)
self._comps = {
'task_id',
'dag_id',
'owner',
'email',
'email_on_retry',
'retry_delay',
'retry_exponential_backoff',
'max_retry_delay',
'start_date',
'schedule_interval',
'depends_on_past',
'wait_for_downstream',
'priority_weight',
'sla',
'execution_timeout',
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
'do_xcom_push',
}
def __eq__(self, other):
if (type(self) == type(other) and
self.task_id == other.task_id):
return all(self.__dict__.get(c, None) == other.__dict__.get(c, None) for c in self._comps)
return False
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.task_id < other.task_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# Composing Operators -----------------------------------------------
def __rshift__(self, other):
"""
Implements Self >> Other == self.set_downstream(other)
If "Other" is a DAG, the DAG is assigned to the Operator.
"""
if isinstance(other, DAG):
# if this dag is already assigned, do nothing
# otherwise, do normal dag assignment
if not (self.has_dag() and self.dag is other):
self.dag = other
else:
self.set_downstream(other)
return other
def __lshift__(self, other):
"""
Implements Self << Other == self.set_upstream(other)
If "Other" is a DAG, the DAG is assigned to the Operator.
"""
if isinstance(other, DAG):
# if this dag is already assigned, do nothing
# otherwise, do normal dag assignment
if not (self.has_dag() and self.dag is other):
self.dag = other
else:
self.set_upstream(other)
return other
def __rrshift__(self, other):
"""
Called for [DAG] >> [Operator] because DAGs don't have
__rshift__ operators.
"""
self.__lshift__(other)
return self
def __rlshift__(self, other):
"""
Called for [DAG] << [Operator] because DAGs don't have
__lshift__ operators.
"""
self.__rshift__(other)
return self
# /Composing Operators ---------------------------------------------
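# Usage sketch (illustrative only; assumes tasks t1, t2, t3 were instantiated
# elsewhere within a DAG):
#   t1 >> t2 >> t3   # same as t1.set_downstream(t2); t2.set_downstream(t3)
#   t3 << t1         # same as t3.set_upstream(t1)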
@property
def dag(self):
"""
Returns the Operator's DAG if set, otherwise raises an error
"""
if self.has_dag():
return self._dag
else:
raise AirflowException(
'Operator {} has not been assigned to a DAG yet'.format(self))
@dag.setter
def dag(self, dag):
"""
Operators can be assigned to one DAG, one time. Repeat assignments to
that same DAG are ok.
"""
if not isinstance(dag, DAG):
raise TypeError(
'Expected DAG; received {}'.format(dag.__class__.__name__))
elif self.has_dag() and self.dag is not dag:
raise AirflowException(
"The DAG assigned to {} can not be changed.".format(self))
elif self.task_id not in dag.task_dict:
dag.add_task(self)
self._dag = dag
def has_dag(self):
"""
Returns True if the Operator has been assigned to a DAG.
"""
return getattr(self, '_dag', None) is not None
@property
def dag_id(self):
if self.has_dag():
return self.dag.dag_id
else:
return 'adhoc_' + self.owner
@property
def deps(self):
"""
Returns the list of dependencies for the operator. These differ from execution
context dependencies in that they are specific to tasks and can be
extended/overridden by subclasses.
"""
return {
NotInRetryPeriodDep(),
PrevDagrunDep(),
TriggerRuleDep(),
}
@property
def schedule_interval(self):
"""
The schedule interval of the DAG always wins over individual tasks so
that tasks within a DAG always line up. The task still needs a
schedule_interval as it may not be attached to a DAG.
"""
if self.has_dag():
return self.dag._schedule_interval
else:
return self._schedule_interval
@property
def priority_weight_total(self):
if self.weight_rule == WeightRule.ABSOLUTE:
return self.priority_weight
elif self.weight_rule == WeightRule.DOWNSTREAM:
upstream = False
elif self.weight_rule == WeightRule.UPSTREAM:
upstream = True
else:
upstream = False
return self.priority_weight + sum(
map(lambda task_id: self._dag.task_dict[task_id].priority_weight,
self.get_flat_relative_ids(upstream=upstream))
)
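# Worked example (illustrative): for a linear chain t1 >> t2 >> t3 where every
# task has priority_weight=1, WeightRule.DOWNSTREAM yields totals t1=3, t2=2,
# t3=1; WeightRule.UPSTREAM reverses this to 1, 2, 3; WeightRule.ABSOLUTE keeps
# each task at its own value of 1.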
@cached_property
def operator_extra_link_dict(self):
return {link.name: link for link in self.operator_extra_links}
@cached_property
def global_operator_extra_link_dict(self):
from airflow.plugins_manager import global_operator_extra_links
return {link.name: link for link in global_operator_extra_links}
@prepare_lineage
def pre_execute(self, context):
"""
This hook is triggered right before self.execute() is called.
"""
def execute(self, context):
"""
This is the main method to derive when creating an operator.
Context is the same dictionary used as when rendering jinja templates.
Refer to get_template_context for more context.
"""
raise NotImplementedError()
@apply_lineage
def post_execute(self, context, result=None):
"""
This hook is triggered right after self.execute() is called.
It is passed the execution context and any results returned by the
operator.
"""
def on_kill(self):
"""
Override this method to cleanup subprocesses when a task instance
gets killed. Any use of the threading, subprocess or multiprocessing
module within an operator needs to be cleaned up or it will leave
ghost processes behind.
"""
def __deepcopy__(self, memo):
"""
Hack sorting double chained task lists by task_id to avoid hitting
max_depth on deepcopy operations.
"""
sys.setrecursionlimit(5000) # TODO fix this in a better way
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
shallow_copy = cls.shallow_copy_attrs + cls._base_operator_shallow_copy_attrs
for k, v in self.__dict__.items():
if k not in shallow_copy:
setattr(result, k, copy.deepcopy(v, memo))
else:
setattr(result, k, copy.copy(v))
return result
def __getstate__(self):
state = dict(self.__dict__)
del state['_log']
return state
def __setstate__(self, state):
self.__dict__ = state
self._log = logging.getLogger("airflow.task.operators")
def render_template_from_field(self, attr, content, context, jinja_env):
"""
Renders a template from a field. If the field is a string, it will
simply render the string and return the result. If it is a collection or
nested set of collections, it will traverse the structure and render
all elements in it. If the field has another type, it will return it as it is.
"""
rt = self.render_template
if isinstance(content, six.string_types):
result = jinja_env.from_string(content).render(**context)
elif isinstance(content, (list, tuple)):
result = [rt(attr, e, context) for e in content]
elif isinstance(content, dict):
result = {
k: rt("{}[{}]".format(attr, k), v, context)
for k, v in list(content.items())}
else:
result = content
return result
def render_template(self, attr, content, context):
"""
Renders a template either from a file or directly in a field, and returns
the rendered result.
"""
jinja_env = self.get_template_env()
exts = self.__class__.template_ext
if (
isinstance(content, six.string_types) and
any([content.endswith(ext) for ext in exts])):
return jinja_env.get_template(content).render(**context)
else:
return self.render_template_from_field(attr, content, context, jinja_env)
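# Illustrative example (not part of the class): for a plain templated string,
#   self.render_template('bash_command', 'echo {{ ds }}', {'ds': '2019-01-01'})
# returns 'echo 2019-01-01'; if the string ends with an extension listed in
# template_ext (e.g. '.sql'), the file is loaded and rendered instead.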
def get_template_env(self):
return self.dag.get_template_env() \
if hasattr(self, 'dag') \
else jinja2.Environment(cache_size=0)
def | (self):
"""
Hook that is triggered after the templated fields get replaced
by their content. If you need your operator to alter the
content of the file before the template is rendered,
it should override this method to do so.
"""
def resolve_template_files(self):
# Getting the content of files for template_field / template_ext
for attr in self.template_fields:
content = getattr(self, attr)
if content is None:
continue
elif isinstance(content, six.string_types) and \
any([content.endswith(ext) for ext in self.template_ext]):
env = self.get_template_env()
try:
setattr(self, attr, env.loader.get_source(env, content)[0])
except Exception as e:
self.log.exception(e)
elif isinstance(content, list):
env = self.dag.get_template_env()
for i in range(len(content)):
if isinstance(content[i], six.string_types) and \
any([content[i].endswith(ext) for ext in self.template_ext]):
try:
content[i] = env.loader.get_source(env, content[i])[0]
except Exception as e:
self.log.exception(e)
self.prepare_template()
@property
def upstream_list(self):
"""@property: list of tasks directly upstream"""
return [self.dag.get_task(tid) for tid in self._upstream_task_ids]
@property
def upstream_task_ids(self):
return self._upstream_task_ids
@property
def downstream_list(self):
"""@property: list of tasks directly downstream"""
return [self.dag.get_task(tid) for tid in self._downstream_task_ids]
@property
def downstream_task_ids(self):
return self._downstream_task_ids
@provide_session
def clear(self,
start_date=None,
end_date=None,
upstream=False,
downstream=False,
session=None):
"""
Clears the state of task instances associated with the task, following
the parameters specified.
"""
TI = TaskInstance
qry = session.query(TI).filter(TI.dag_id == self.dag_id)
if start_date:
qry = qry.filter(TI.execution_date >= start_date)
if end_date:
qry = qry.filter(TI.execution_date <= end_date)
tasks = [self.task_id]
if upstream:
tasks += [
t.task_id for t in self.get_flat_relatives(upstream=True)]
if downstream:
tasks += [
t.task_id for t in self.get_flat_relatives(upstream=False)]
qry = qry.filter(TI.task_id.in_(tasks))
count = qry.count()
clear_task_instances(qry.all(), session, dag=self.dag)
session.commit()
return count
@provide_session
def get_task_instances(self, start_date=None, end_date=None, session=None):
"""
Get a set of task instance related to this task for a specific date
range.
"""
end_date = end_date or timezone.utcnow()
return session.query(TaskInstance)\
.filter(TaskInstance.dag_id == self.dag_id)\
.filter(TaskInstance.task_id == self.task_id)\
.filter(TaskInstance.execution_date >= start_date)\
.filter(TaskInstance.execution_date <= end_date)\
.order_by(TaskInstance.execution_date)\
.all()
def get_flat_relative_ids(self, upstream=False, found_descendants=None):
"""
Get a flat list of relatives' ids, either upstream or downstream.
"""
if not found_descendants:
found_descendants = set()
relative_ids = self.get_direct_relative_ids(upstream)
for relative_id in relative_ids:
if relative_id not in found_descendants:
found_descendants.add(relative_id)
relative_task = self._dag.task_dict[relative_id]
relative_task.get_flat_relative_ids(upstream,
found_descendants)
return found_descendants
def get_flat_relatives(self, upstream=False):
"""
Get a flat list of relatives, either upstream or downstream.
"""
return list(map(lambda task_id: self._dag.task_dict[task_id],
self.get_flat_relative_ids(upstream)))
def run(
self,
start_date=None,
end_date=None,
ignore_first_depends_on_past=False,
ignore_ti_state=False,
mark_success=False):
"""
Run a set of task instances for a date range.
"""
start_date = start_date or self.start_date
end_date = end_date or self.end_date or timezone.utcnow()
for dt in self.dag.date_range(start_date, end_date=end_date):
TaskInstance(self, dt).run(
mark_success=mark_success,
ignore_depends_on_past=(
dt == start_date and ignore_first_depends_on_past),
ignore_ti_state=ignore_ti_state)
def dry_run(self):
self.log.info('Dry run')
for attr in self.template_fields:
content = getattr(self, attr)
if content and isinstance(content, six.string_types):
self.log.info('Rendering template for %s', attr)
self.log.info(content)
def get_direct_relative_ids(self, upstream=False):
"""
Get the direct relative ids to the current task, upstream or
downstream.
"""
if upstream:
return self._upstream_task_ids
else:
return self._downstream_task_ids
def get_direct_relatives(self, upstream=False):
"""
Get the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def __repr__(self):
return "<Task({self.__class__.__name__}): {self.task_id}>".format(
self=self)
@property
def task_type(self):
return self.__class__.__name__
def add_only_new(self, item_set, item):
if item in item_set:
self.log.warning(
'Dependency {self}, {item} already registered'
''.format(self=self, item=item))
else:
item_set.add(item)
def _set_relatives(self, task_or_task_list, upstream=False):
try:
task_list = list(task_or_task_list)
except TypeError:
task_list = [task_or_task_list]
for t in task_list:
if not isinstance(t, BaseOperator):
raise AirflowException(
"Relationships can only be set between "
"Operators; received {}".format(t.__class__.__name__))
# relationships can only be set if the tasks share a single DAG. Tasks
# without a DAG are assigned to that DAG.
dags = {t._dag.dag_id: t._dag for t in [self] + task_list if t.has_dag()}
if len(dags) > 1:
raise AirflowException(
'Tried to set relationships between tasks in '
'more than one DAG: {}'.format(dags.values()))
elif len(dags) == 1:
dag = dags.popitem()[1]
else:
raise AirflowException(
"Tried to create relationships between tasks that don't have "
"DAGs yet. Set the DAG for at least one "
"task and try again: {}".format([self] + task_list))
if dag and not self.has_dag():
self.dag = dag
for task in task_list:
if dag and not task.has_dag():
task.dag = dag
if upstream:
task.add_only_new(task.get_direct_relative_ids(upstream=False), self.task_id)
self.add_only_new(self._upstream_task_ids, task.task_id)
else:
self.add_only_new(self._downstream_task_ids, task.task_id)
task.add_only_new(task.get_direct_relative_ids(upstream=True), self.task_id)
def set_downstream(self, task_or_task_list):
"""
Set a task or a task list to be directly downstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=False)
def set_upstream(self, task_or_task_list):
"""
Set a task or a task list to be directly upstream from the current
task.
"""
self._set_relatives(task_or_task_list, upstream=True)
def xcom_push(
self,
context,
key,
value,
execution_date=None):
"""
See TaskInstance.xcom_push()
"""
context['ti'].xcom_push(
key=key,
value=value,
execution_date=execution_date)
def xcom_pull(
self,
context,
task_ids=None,
dag_id=None,
key=XCOM_RETURN_KEY,
include_prior_dates=None):
"""
See TaskInstance.xcom_pull()
"""
return context['ti'].xcom_pull(
key=key,
task_ids=task_ids,
dag_id=dag_id,
include_prior_dates=include_prior_dates)
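# Usage sketch inside an operator's execute() (key and task names are illustrative):
#   self.xcom_push(context, key='row_count', value=42)
#   count = self.xcom_pull(context, task_ids='upstream_task', key='row_count')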
@cached_property
def extra_links(self) -> Iterable[str]:
return list(set(self.operator_extra_link_dict.keys())
.union(self.global_operator_extra_link_dict.keys()))
def get_extra_links(self, dttm, link_name):
"""
For an operator, gets the URL that the external links specified in
`extra_links` should point to.
:raise ValueError: The error message of a ValueError will be passed on through to
the fronted to show up as a tooltip on the disabled link
:param dttm: The datetime parsed execution date for the URL being searched for
:param link_name: The name of the link we're looking for the URL for. Should be
one of the options specified in `extra_links`
:return: A URL
"""
if link_name in self.operator_extra_link_dict:
return self.operator_extra_link_dict[link_name].get_link(self, dttm)
elif link_name in self.global_operator_extra_link_dict:
return self.global_operator_extra_link_dict[link_name].get_link(self, dttm)
class BaseOperatorLink(metaclass=ABCMeta):
"""
Abstract base class that defines how we get an operator link.
"""
@property
@abstractmethod
def name(self) -> str:
"""
Name of the link. This will be the button name on the task UI.
:return: link name
"""
@abstractmethod
def get_link(self, operator: BaseOperator, dttm: datetime) -> str:
"""
Link to external system.
:param operator: airflow operator
:param dttm: datetime
:return: link to external system
"""
| prepare_template |
3d_pose_vae_filter_kin.py | """ Train a VAE model used to filter and enhance 3d points """
import json
from datetime import datetime
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import cameras
import data_utils
import viz
from top_vae_3d_pose import data_handler, losses, models
from top_vae_3d_pose.args_def import ENVIRON as ENV
matplotlib.use('Agg')
# matplotlib.use('TkAgg')
# tf.debugging.set_log_device_placement(True)
def to_world(points_3d, key3d, root_pos):
""" Trasform coordenates from camera to world coordenates """
_, _, rcams = data_handler.get_data_params()
n_cams = 4
n_joints_h36m = 32
# Add global position back
points_3d = points_3d + np.tile(root_pos, [1, n_joints_h36m])
# Load the appropriate camera
# key3d = data_handler.get_key3d(key2d)
subj, _, sname = key3d
subj = int(subj)
cname = sname.split('.')[1] # <-- camera name
scams = {(subj, c+1): rcams[(subj, c+1)] for c in range(n_cams)} # cams of this subject
scam_idx = [scams[(subj, c+1)][-1] for c in range(n_cams)].index(cname) # index of camera used
the_cam = scams[(subj, scam_idx+1)] # <-- the camera used
R, T, f, c, k, p, name = the_cam
assert name == cname
def cam2world_centered(data_3d_camframe):
data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
data_3d_worldframe = data_3d_worldframe.reshape((-1, n_joints_h36m*3))
# subtract root translation
return data_3d_worldframe - np.tile(data_3d_worldframe[:, :3], (1, n_joints_h36m))
# Apply inverse rotation and translation
return cam2world_centered(points_3d)
def | (dataset, model=None, idx=None):
""" Plot 3d poses, real, with noise and decode from vae model if a model is provided
pass 'idx' to select samples otherwise idx will be randomly generated
"""
# select random samples
nsamples = 15
if idx is None:
idx = np.random.choice(dataset.x_data.shape[0], nsamples, replace=False)
x_in = dataset.x_data[idx, :]
y_real = dataset.y_data[idx, :]
y_out = model(x_in.reshape(x_in.shape[0], x_in.shape[1] * x_in.shape[2]), training=False)
# unnormalize data
x_in = [data_utils.unNormalizeData(p3d,
dataset.x_metadata.mean,
dataset.x_metadata.std,
dataset.x_metadata.dim_ignored) for p3d in x_in]
y_real = data_utils.unNormalizeData(y_real,
dataset.y_metadata.mean,
dataset.y_metadata.std,
dataset.y_metadata.dim_ignored)
y_out = data_utils.unNormalizeData(y_out,
dataset.y_metadata.mean,
dataset.y_metadata.std,
dataset.y_metadata.dim_ignored)
if ENV.FLAGS.camera_frame:
keys3d = dataset.mapkeys[idx, :]
root_pos = dataset.y_metadata.root_positions[idx, :]
x_in = np.array([to_world(p3d,
keys3d[i],
root_pos[i])
for i, p3d in enumerate(x_in)])
y_real = np.array([to_world(p3d.reshape((1, -1)), keys3d[i],
root_pos[i][-1].reshape((1, 3)))[0]
for i, p3d in enumerate(y_real)])
y_out = np.array([to_world(p3d.reshape((1, -1)), keys3d[i],
root_pos[i][-1].reshape((1, 3)))[0]
for i, p3d in enumerate(y_out)])
# 1080p = 1,920 x 1,080
fig = plt.figure(figsize=(19.2, 10.8))
gs1 = gridspec.GridSpec(5, 6*3) # 5 rows, 18 columns
gs1.update(wspace=-0.00, hspace=0.05) # set the spacing between axes.
plt.axis('off')
subplot_idx, exidx = 0, 0
for _ in np.arange(nsamples):
# Sequence
for pt3d in x_in[exidx]:
# Plot 3d gt
ax2 = plt.subplot(gs1[subplot_idx], projection='3d')
p3d = pt3d
viz.show3Dpose(p3d, ax2)
subplot_idx += 1
# Plot 3d predictions
ax3 = plt.subplot(gs1[subplot_idx], projection='3d')
p3d = y_out[exidx, :]
viz.show3Dpose(p3d, ax3, lcolor="#9b59b6", rcolor="#2ecc71")
subplot_idx += 1
# Plot 3d real
ax4 = plt.subplot(gs1[subplot_idx], projection='3d')
p3d = y_real[exidx, :]
viz.show3Dpose(p3d, ax4, lcolor="#9b59b6", rcolor="#2ecc71")
subplot_idx += 1
exidx = exidx + 1
file_name = "imgs/vae_concat_seq/%s.png" % datetime.utcnow().isoformat()
plt.savefig(file_name)
print("Saved samples on: %s" % file_name)
# plt.show()
plt.close()
def get_optimizer():
""" Returns the optimizer required by flags """
if ENV.FLAGS.optimizer == 'adam':
return tf.keras.optimizers.Adam(ENV.FLAGS.learning_rate)
if ENV.FLAGS.optimizer == 'rmsprop':
return tf.keras.optimizers.RMSprop(ENV.FLAGS.learning_rate)
raise Exception('Optimizer not found: %s' % ENV.FLAGS.optimizer)
@tf.function
def train_step_vae(model, x_data, y_data, optimizer):
""" Define a train step """
with tf.GradientTape() as tape:
loss = losses.ELBO.compute_loss(model, x_data, y_data)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
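# Usage sketch (illustrative; assumes `model`, `optimizer` and one (x, y) batch are in scope):
#   step_loss = train_step_vae(model, x_batch, y_batch, optimizer)
#   print(float(step_loss))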
def train():
""" Train function """
data_train, data_test = data_handler.load_dataset_3d_seq(seq_len=3)
print("Dataset dims")
print(data_train.x_data.shape, data_train.y_data.shape)
print(data_test.x_data.shape, data_test.y_data.shape)
seq_len, size = data_train.x_data[0].shape
# The VAE model must process the sequence as a single concatenated input
model = models.VAE(seq_len*size,
latent_dim=ENV.FLAGS.latent_dim,
enc_dim=ENV.FLAGS.enc_dim,
dec_dim=ENV.FLAGS.dec_dim)
optimizer = get_optimizer()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, './experiments/vae_concat_seq/tf_ckpts', max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restaurado de {}".format(manager.latest_checkpoint))
else:
print("Inicializando desde cero.")
print("Trainable weights:", len(model.trainable_weights))
# Indexes for sampling
idx = np.random.choice(data_test.x_data.shape[0], 15, replace=False)
# Logs for errors and losses
loss_train_history = []
loss_test_history = []
pred_error_history = []
error_34_history = []
error_3_pred_history = []
for epoch in range(1, ENV.FLAGS.epochs + 1):
print("\nStarting epoch:", epoch)
loss_train = tf.keras.metrics.Mean()
# start_time = time.time()
for step, (x_train, y_train) in enumerate(tqdm(data_train, ascii=True)):
# x_train is a batch of sequences with dimensions (batch_size, seq_len, input_size)
batch_size, seq_len, size = x_train.shape
x_train = x_train.reshape(batch_size, seq_len * size)
x_train = data_handler.add_noise(x_train)
step_loss = train_step_vae(model, x_train, y_train, optimizer)
loss_train(step_loss)
if step % ENV.FLAGS.step_log == 0:
ltp = tf.math.reduce_mean(step_loss)
tqdm.write(" Training loss at step %d: %.4f" % (step, ltp))
tqdm.write(" Seen : %s samples" % ((step + 1) * ENV.FLAGS.batch_size))
# end_time = time.time()
loss_train_history.append(loss_train.result())
print("Evaluation on Test data...")
loss_test = tf.keras.metrics.Mean()
pred_error = tf.keras.metrics.Mean()
error_34 = tf.keras.metrics.Mean()
error_3_pred = tf.keras.metrics.Mean()
error_2_pred = tf.keras.metrics.Mean()
error_1_pred = tf.keras.metrics.Mean()
for x_test, y_test in tqdm(data_test, ascii=True):
# x_test is a batch of sequences with dimensions (batch_size, seq_len, input_size)
batch_size, seq_len, size = x_test.shape
y_test = x_test[:, 2, :]
x_test3 = x_test[:, 1, :]
x_test2 = x_test[:, 0, :]
# x_test1 = x_test[:, 0, :]
x_test = x_test.reshape(batch_size, seq_len * size)
loss_test(losses.ELBO.compute_loss(model, x_test, y_test))
preds = model(x_test, training=False)
pred_error(losses.ELBO.compute_pred_error(y_test, preds))
error_34(losses.ELBO.compute_pred_error(x_test3, y_test))
error_3_pred(losses.ELBO.compute_pred_error(x_test3, preds))
error_2_pred(losses.ELBO.compute_pred_error(x_test2, preds))
# error_1_pred(losses.ELBO.compute_pred_error(x_test1, preds))
loss_test_history.append(loss_test.result())
pred_error_history.append(pred_error.result())
error_34_history.append(error_34.result())
error_3_pred_history.append(error_3_pred.result())
print('Epoch: {}, Test set ELBO: {}'.format(epoch, loss_test_history[-1]))
print('Epoch: {}, Error frame 2 vs 3: {}'.format(epoch, error_34_history[-1]))
print('Epoch: {}, Prediction Error: {}'.format(epoch, pred_error_history[-1]))
print('Epoch: {}, Error frame 2 vs pred: {}'.format(epoch, error_3_pred_history[-1]))
print('Epoch: {}, Error frame 1 vs pred: {}'.format(epoch, error_2_pred.result()))
# print('Epoch: {}, Error frame 1 vs pred: {}'.format(epoch, error_1_pred.result()))
tf.print('\nSaving samples...')
gen_sample_img(data_test, model=model, idx=idx)
# Reset data for next epoch
data_train.on_epoch_end()
data_test.on_epoch_end(avoid_suffle=True)
ckpt.step.assign_add(1)
save_path = manager.save()
print("Checkpoint saved: {}".format(save_path))
data_handler.plot_history([('Train Loss', loss_train_history),
('Test Loss', loss_test_history)],
xlabel='Epochs',
ylabel='Loss',
fname='loss.png')
data_handler.plot_history([('Pred error', pred_error_history),
# ('Frame err 4vs5', error_34_history),
('Frame err 4vsPred', error_3_pred_history)],
xlabel='Epochs',
ylabel='Error',
fname='error.png')
# Save the weights of the last model and the config used to run and train
model.save_weights('./experiments/vae_concat_seq/last_model_weights')
with open('./experiments/vae_concat_seq/train.cfg', 'w') as cfg:
json.dump(vars(ENV.FLAGS), cfg)
data_handler.save_history(loss_train_history, 'train_loss.npy')
data_handler.save_history(loss_test_history, 'test_loss.npy')
def evaluate():
data2d, data3d = data_handler.load_2d_3d_data(return_raw=True)
model_2d23d = models.PoseBase()
# Dummy input to create the batch normalization weights
ainput = np.ones((10, 32), dtype=np.float32)
model_2d23d(ainput, training=False)
# Load weights for 2d to 3d prediction
model_2d23d.load_weights('pretrained_models/4874200_PoseBase/PoseBase')
# Load VAE Model
seq_len = 3
human_3d_size = 48
model_vae_kin = models.VAE(seq_len*human_3d_size,
latent_dim=ENV.FLAGS.latent_dim,
enc_dim=ENV.FLAGS.enc_dim,
dec_dim=ENV.FLAGS.dec_dim)
model_vae_kin.load_weights('experiments/vae_concat_seq/last_model_weights')
error_2d_3d = tf.keras.metrics.Mean()
error_vae_kin = tf.keras.metrics.Mean()
noise_log = []
for key2d in tqdm(data2d.test.keys(), ascii=True):
err23d = tf.keras.metrics.Mean()
errvk = tf.keras.metrics.Mean()
tqdm.write("Subject: {}, action: {}, fname: {}".format(*key2d))
key3d = data_handler.get_key3d(key2d)
x_in = data2d.test[key2d]
x_out = data3d.test[key3d]
# Split the data into single-sample batches to drive the sliding-window buffer
x_in = np.array_split(x_in, x_in.shape[0])
x_out = np.array_split(x_out, x_out.shape[0])
buffer = []
for x_2d, y_3d in tqdm(zip(x_in, x_out), total=len(x_in), ascii=True):
pred_3d = model_2d23d(x_2d, training=False)
if len(buffer) == 0:
# Start the buffer with copies of the same prediction
buffer = [pred_3d[0] for _ in range(seq_len)]
buffer.append(pred_3d[0])
buffer.pop(0)
# print(pred_3d.shape)
# print(buffer)
# print(len(buffer))
vin = np.array([np.concatenate(buffer)])
ref_3d = model_vae_kin(vin, training=False)
# Add the last ref to the buffer
buffer[-1] = ref_3d[0]
err1 = losses.ELBO.compute_pred_error(y_3d, pred_3d)
err2 = losses.ELBO.compute_pred_error(y_3d, ref_3d)
err23d(err1)
errvk(err2)
error_2d_3d(err1)
error_vae_kin(err2)
noise_log.append(err1)
tqdm.write("Err 2d-3d: {}, VAE: {}".format(err23d.result(), errvk.result()))
print("Pred error 2d to 3d:", error_2d_3d.result())
print("Pred error vae filter:", error_vae_kin.result())
print(tf.math.reduce_mean(noise_log))
print(tf.math.reduce_std(noise_log))
print(tf.math.reduce_min(noise_log))
print(tf.math.reduce_max(noise_log))
def main():
""" Main """
with tf.device('/device:GPU:%d' % ENV.FLAGS.gpu_device):
if ENV.FLAGS.evaluate:
evaluate()
else:
train()
if __name__ == "__main__":
ENV.setup()
main()
| gen_sample_img |
global_options.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated SignedSource<<53ed3297ff588bb049836726c15d009b>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
use arena_trait::TrivialDrop;
use ocamlrep_derive::FromOcamlRepIn;
use ocamlrep_derive::ToOcamlRep;
use serde::Serialize;
#[allow(unused_imports)]
use crate::*;
#[derive(
Clone,
Debug,
FromOcamlRepIn,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub struct | <'a> {
pub tco_experimental_features: s_set::SSet<'a>,
pub tco_migration_flags: s_set::SSet<'a>,
pub tco_dynamic_view: bool,
pub tco_num_local_workers: Option<isize>,
pub tco_parallel_type_checking_threshold: isize,
pub tco_max_typechecker_worker_memory_mb: Option<isize>,
pub tco_defer_class_declaration_threshold: Option<isize>,
pub tco_defer_class_memory_mb_threshold: Option<isize>,
pub tco_max_times_to_defer_type_checking: Option<isize>,
pub tco_prefetch_deferred_files: bool,
pub tco_remote_type_check_threshold: Option<isize>,
pub tco_remote_type_check: bool,
pub tco_remote_worker_key: Option<&'a str>,
pub tco_remote_check_id: Option<&'a str>,
pub tco_remote_max_batch_size: isize,
pub tco_remote_min_batch_size: isize,
pub tco_num_remote_workers: isize,
pub so_remote_version_specifier: Option<&'a str>,
pub so_remote_worker_vfs_checkout_threshold: isize,
pub so_naming_sqlite_path: Option<&'a str>,
pub po_auto_namespace_map: &'a [(&'a str, &'a str)],
pub po_codegen: bool,
pub po_deregister_php_stdlib: bool,
pub po_disallow_toplevel_requires: bool,
pub po_disable_nontoplevel_declarations: bool,
pub po_disable_static_closures: bool,
pub po_allow_unstable_features: bool,
pub tco_log_inference_constraints: bool,
pub tco_disallow_array_typehint: bool,
pub tco_disallow_array_literal: bool,
pub tco_language_feature_logging: bool,
pub tco_unsafe_rx: bool,
pub tco_disallow_scrutinee_case_value_type_mismatch: bool,
pub tco_timeout: isize,
pub tco_disallow_invalid_arraykey: bool,
pub tco_disallow_byref_dynamic_calls: bool,
pub tco_disallow_byref_calls: bool,
pub allowed_fixme_codes_strict: i_set::ISet<'a>,
pub allowed_fixme_codes_partial: i_set::ISet<'a>,
pub codes_not_raised_partial: i_set::ISet<'a>,
pub log_levels: s_map::SMap<'a, isize>,
pub po_disable_lval_as_an_expression: bool,
pub tco_shallow_class_decl: bool,
pub po_rust_parser_errors: bool,
pub profile_type_check_duration_threshold: f64,
pub profile_type_check_twice: bool,
pub profile_owner: Option<&'a str>,
pub profile_desc: &'a str,
pub tco_like_type_hints: bool,
pub tco_union_intersection_type_hints: bool,
pub tco_coeffects: bool,
pub tco_coeffects_local: bool,
pub tco_strict_contexts: bool,
pub tco_like_casts: bool,
pub tco_simple_pessimize: f64,
pub tco_complex_coercion: bool,
pub tco_disable_partially_abstract_typeconsts: bool,
pub error_codes_treated_strictly: i_set::ISet<'a>,
pub tco_check_xhp_attribute: bool,
pub tco_check_redundant_generics: bool,
pub tco_disallow_unresolved_type_variables: bool,
pub tco_disallow_trait_reuse: bool,
pub tco_disallow_invalid_arraykey_constraint: bool,
pub po_enable_class_level_where_clauses: bool,
pub po_disable_legacy_soft_typehints: bool,
pub po_allowed_decl_fixme_codes: i_set::ISet<'a>,
pub po_allow_new_attribute_syntax: bool,
pub tco_global_inference: bool,
pub tco_gi_reinfer_types: &'a [&'a str],
pub tco_ordered_solving: bool,
pub tco_const_static_props: bool,
pub po_disable_legacy_attribute_syntax: bool,
pub tco_const_attribute: bool,
pub po_const_default_func_args: bool,
pub po_const_default_lambda_args: bool,
pub po_disallow_silence: bool,
pub po_abstract_static_props: bool,
pub po_disable_unset_class_const: bool,
pub po_parser_errors_only: bool,
pub tco_check_attribute_locations: bool,
pub glean_service: &'a str,
pub glean_hostname: &'a str,
pub glean_port: isize,
pub glean_reponame: &'a str,
pub symbol_write_root_path: &'a str,
pub symbol_write_hhi_path: &'a str,
pub symbol_write_ignore_paths: &'a [&'a str],
pub symbol_write_index_paths: &'a [&'a str],
pub symbol_write_include_hhi: bool,
pub po_disallow_func_ptrs_in_constants: bool,
pub tco_error_php_lambdas: bool,
pub tco_disallow_discarded_nullable_awaitables: bool,
pub po_enable_xhp_class_modifier: bool,
pub po_disable_xhp_element_mangling: bool,
pub po_disable_xhp_children_declarations: bool,
pub po_enable_enum_classes: bool,
pub po_disable_modes: bool,
pub po_disable_hh_ignore_error: bool,
pub po_disable_array: bool,
pub po_disable_array_typehint: bool,
pub tco_enable_systemlib_annotations: bool,
pub tco_higher_kinded_types: bool,
pub tco_method_call_inference: bool,
pub tco_report_pos_from_reason: bool,
pub tco_typecheck_sample_rate: f64,
pub tco_enable_sound_dynamic: bool,
pub po_disallow_hash_comments: bool,
pub po_disallow_fun_and_cls_meth_pseudo_funcs: bool,
pub tco_use_direct_decl_parser: bool,
pub tco_ifc_enabled: &'a [&'a str],
pub po_enable_enum_supertyping: bool,
pub po_array_unification: bool,
pub po_interpret_soft_types_as_like_types: bool,
}
impl<'a> TrivialDrop for GlobalOptions<'a> {}
| GlobalOptions |
hello.py | from flask import Flask, render_template, redirect, flash
app = Flask(__name__)
# Used to prevent Cross-Site Request Forgery (CSRF)
app.secret_key = "mikalegall"
@app.route('/')
def index():
message = "Tämä viesti on etusivun Flashin jonopuskurista"
flash(message)
return render_template('index.html')
@app.route("/uudelleenohjaus")
def viestiKayttajalle():
flash('"Uudelleeohjaus" linkistä tallennettu Flash viesti käyttöliittymässä näytettäväksi, mutta sitä ei näytetä etusivulla')
return redirect("/")
@app.route("/vilauta")
def vilauta():
flash("KlikkaaMua kautta tulostettu viesti")
return render_template("vilauta.html")
if __name__ == "__main__":
app.run()
| ||
mod.rs | use indexmap::IndexMap;
use std::fmt::{self, Debug};
use std::ops::Deref;
mod builder;
mod cycle;
mod data;
mod label;
mod node;
mod sort;
mod weight;
pub use builder::GraphBuilder;
pub use cycle::GraphCycles;
pub use data::NodeData;
pub use label::NodeLabel;
pub use node::{Node, NodeRef};
pub use sort::{topological_sort, try_topological_sort};
pub use weight::{compute_max_weight_path, NodeWeight, WeightedGraph, WeightedPath};
/// Implements a `Graph`
///
/// * Each `Node` is associated with a unique identifier called a `label` (type `L`)
/// * Each `Node` has references to:
/// * `incoming Nodes` - forming together `incoming edges`
/// * `outgoing Nodes` - forming together `outgoing edges`
///
/// Note:
/// * There is no `Edge` type in the implementation and no data (such as weight) associated with a logical `edge`
/// * There can be only a single logical edge between any two nodes.
/// (this makes sense since, as stated above, no data is stored for an `edge` besides its `source` and `destination`)
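///
/// A brief usage sketch (illustrative; assumes a `graph: Graph<L, D>` was already
/// built, e.g. through the `GraphBuilder` re-exported above, and a valid `label`):
///
/// ```ignore
/// let sources = graph.source_nodes();   // nodes with no incoming edges
/// let node = graph.get_node(label);     // panics if the label is unknown
/// graph.remove_node(&node);             // drops all of the node's outgoing edges
/// ```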
pub struct Graph<L, D>
where
L: NodeLabel,
D: NodeData,
{
nodes: IndexMap<L, NodeRef<L, D>>,
}
impl<L, D> Graph<L, D>
where
L: NodeLabel,
D: NodeData,
{
/// Returns references to the `Graph`'s nodes
pub fn nodes(&self) -> Vec<NodeRef<L, D>> {
self.nodes.values().cloned().collect()
}
/// Removes a `Node` from the `Graph`
pub fn remove_node(&self, node: &NodeRef<L, D>) {
for neighbor in node.outgoing() {
let edge = (node.label(), neighbor.label());
self.remove_edge(edge);
}
}
/// Removes an `Edge` from the `Graph`
pub fn remove_edge(&self, (source, dest): (L, L)) {
let source = self.nodes.get(&source).unwrap();
let dest = self.nodes.get(&dest).unwrap();
source.as_mut().remove_out_edge(dest);
dest.as_mut().remove_in_edge(source);
}
/// Returns the number of `Node`s in the `Graph`
pub fn node_count(&self) -> usize {
self.nodes.len()
}
/// Returns references to `Nodes` which have no incoming edges
pub fn source_nodes(&self) -> Vec<NodeRef<L, D>> {
self.nodes()
.iter()
.filter(|node| node.is_source())
.cloned()
.collect()
}
/// Returns a reference to a `Node` given its `label` identifier
///
/// # Panics
///
/// Panics if there is no `Node` labeled with `label`
///
pub fn get_node(&self, label: L) -> NodeRef<L, D> {
self.try_get_node(label).unwrap()
}
/// Tries to return a reference to a `Node` given its `label` identifier
pub fn try_get_node(&self, label: L) -> Option<NodeRef<L, D>> {
let node_ref = self.nodes.get(&label);
node_ref.cloned()
}
}
impl<L, D> Debug for Graph<L, D>
where
L: NodeLabel,
D: NodeData,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for node in self.nodes() {
node.fmt(f);
}
| Ok(())
}
} |
|
chatbot-controller.js | const Util = require('../../utils/Utils');
const { GetView, GetAll, GetId, Store, Update, } = require('./chatbot-models');
const util = new Util();
/**
* The ChatbotController.
*
* @method view select all records
* @method index select all outputs by input
* @method show select a record by id
* @method store create record
* @method update update record
*/
class | {
async view(req, res) {
try {
const data = await GetView();
if (data) {
util.setSuccess(200, data);
} else {
util.setError(400, 'no data!');
}
return util.send(res);
} catch (err) {
util.setError(500, err.message);
return util.send(res);
}
}
async index(req, res) {
try {
const { input, activate } = req.query;
const data = await GetAll(activate, input);
if (data) {
util.setSuccess(200, data);
} else {
util.setError(400, 'no data!');
}
return util.send(res);
} catch (err) {
util.setError(500, err.message);
return util.send(res);
}
}
async show(req, res) {
try {
const { id } = req.query;
const data = await GetId(id);
if (data) {
util.setSuccess(200, data);
} else {
util.setError(400, 'no data!');
}
return util.send(res);
} catch (err) {
util.setError(500, err.message);
return util.send(res);
}
}
async store(req, res) {
try {
const { activate, input, output } = req.body;
const data = await Store(activate, input, output);
if (data) {
util.setSuccess(200, data);
}
req.io.emit('chatbot', data);
return util.send(res);
} catch (err) {
util.setError(500, err.message);
return util.send(res);
}
}
async update(req, res) {
try {
const { _id, activate, input, output } = req.body;
const data = await Update(_id, activate, input, output);
if (data) {
util.setSuccess(200, data);
}
req.io.emit('chatbot', data);
return util.send(res);
} catch (err) {
util.setError(500, err.message);
return util.send(res);
}
}
}
module.exports = ChatbotController;
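// Usage sketch (illustrative; the Express router wiring below is an assumption,
// not part of this module):
//   const router = require('express').Router();
//   const controller = new ChatbotController();
//   router.get('/chatbot', (req, res) => controller.index(req, res));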
| ChatbotController |
wallet_abandonconflict.py | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already abandoned.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
wait_until,
)
from test_framework.sbercoinconfig import *
class AbandonConflictTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[1].generate(COINBASE_MATURITY)
self.sync_blocks()
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.sync_mempools()
self.nodes[1].generate(1)
# Can not abandon non-wallet transaction
assert_raises_rpc_error(-5, 'Invalid or non-wallet transaction id', lambda: self.nodes[0].abandontransaction(txid='ff' * 32))
# Can not abandon confirmed transaction
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: self.nodes[0].abandontransaction(txid=txA))
self.sync_blocks()
newbalance = self.nodes[0].getbalance()
assert balance - newbalance < Decimal("0.01") #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10btc outputs
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txA)["details"] if tx_out["amount"] == Decimal("10"))
nB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txB)["details"] if tx_out["amount"] == Decimal("10"))
nC = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txC)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
# spend 10btc outputs from txA and txB
inputs.append({"txid": txA, "vout": nA})
inputs.append({"txid": txB, "vout": nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txAB1)["details"] if tx_out["amount"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid": txAB1, "vout": nAB})
inputs.append({"txid": txC, "vout": nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# Create a child tx spending ABC2
signed3_change = Decimal("24.999")
inputs = [{"txid": txABC2, "vout": 0}]
outputs = {self.nodes[0].getnewaddress(): signed3_change}
signed3 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs))
# note tx is never directly referenced, only abandoned as a child of the above
self.nodes[0].sendrawtransaction(signed3["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + signed3_change)
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - signed3_change)
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if it is received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so it is unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
wait_until(lambda: self.nodes[0].getmempoolinfo()['loaded'])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1 | tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransactionwithwallet(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
self.sync_blocks()
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main() | inputs = []
inputs.append({"txid": txA, "vout": nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.99") |
heap.rs | // Copyright (c) 2021, COSIC-KU Leuven, Kasteelpark Arenberg 10, bus 2452, B-3001 Leuven-Heverlee, Belgium.
// Copyright (c) 2021, Cosmian Tech SAS, 53-55 rue La Boétie, Paris, France.
use scale::alloc::GetAllocator;
use scale::{alloc::Allocate, LoadFromMem, StoreInMem};
/// A smart pointer data structure that allocates memory and deallocates it when it goes out of scope
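///
/// A usage sketch (illustrative; assumes a value whose type implements the
/// `GetAllocator`, `StoreInMem` and `LoadFromMem` traits used below):
///
/// ```ignore
/// let mut b = Box::new(value);   // allocates and stores `value`
/// let copy = b.get();            // loads the value back from memory
/// b.set(new_value);              // overwrites the stored value in place
/// // the backing memory is freed automatically when `b` is dropped
/// ```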
pub struct Box<T>
where
T: GetAllocator,
{
// Act like we contain an element of type T
phantom: core::marker::PhantomData<T>,
pub(crate) keep_alive: bool,
pub(crate) address: u64,
}
impl<T> Box<T>
where
T: GetAllocator,
{
pub unsafe fn offset(&self, offset: u64) -> Self {
Self {
address: self.address + offset,
keep_alive: self.keep_alive,
..*self
}
}
}
impl<T: GetAllocator> GetAllocator for Box<T> {
type Allocator = T::Allocator;
fn get_allocator() -> Self::Allocator {
T::get_allocator()
}
fn size_type() -> u64 {
T::size_type()
}
}
impl<T: StoreInMem<i64> + GetAllocator> Box<T> {
pub fn new(t: T) -> Self {
let mut this = Self::uninitialized();
this.set(t);
this
}
}
impl<T: GetAllocator> Box<T> {
/// Allocate a single element without a value
pub fn uninitialized() -> Self {
Self {
phantom: core::marker::PhantomData,
keep_alive: false,
address: T::get_allocator().allocate(T::size_type()),
}
}
/// Allocate multiple elements without values
pub fn uninitialized_multiple(length: u64) -> Self {
Self {
phantom: core::marker::PhantomData,
keep_alive: false,
address: T::get_allocator().allocate(length * T::size_type()),
}
}
// Use with caution
pub(crate) unsafe fn load_from_addr(address: u64) -> Self {
Self {
phantom: core::marker::PhantomData,
keep_alive: true,
address,
}
}
#[allow(dead_code)]
pub(crate) fn keep_alive(&mut self) {
self.keep_alive = true;
}
}
impl<T: StoreInMem<i64> + GetAllocator> Box<T> {
pub fn set(&mut self, val: T) {
unsafe {
val.store_in_mem(self.address as i64);
}
}
}
impl<T: LoadFromMem<i64> + GetAllocator + Clone> Box<T> {
pub fn get(&self) -> T {
T::load_from_mem(self.address as i64).clone()
}
}
impl<T: GetAllocator> Drop for Box<T> {
fn drop(&mut self) {
if !self.keep_alive { | }
}
|
unsafe {
Self::get_allocator().free(self.address);
}
}
|
lib.rs | //! A cross-platform library for opening OS pipes.
//!
//! The standard library uses pipes to read output from child processes,
//! but it doesn't expose a way to create them directly. This crate
//! fills that gap with the `pipe` function. It also includes some
//! helpers for passing pipes to the `std::process::Command` API.
//!
//! - [Docs](https://docs.rs/os_pipe)
//! - [Crate](https://crates.io/crates/os_pipe)
//! - [Repo](https://github.com/oconnor663/os_pipe.rs)
//!
//! Usage note: The main purpose of `os_pipe` is to support the
//! higher-level [`duct`](https://github.com/oconnor663/duct.rs)
//! library, which handles most of the same use cases with much less
//! code and no risk of deadlocks. `duct` can run the entire example
//! below in one line of code.
//!
//! # Example
//!
//! Join the stdout and stderr of a child process into a single stream,
//! and read it. To do that we open a pipe, duplicate its write end, and
//! pass those writers as the child's stdout and stderr. Then we can
//! read combined output from the read end of the pipe. We have to be
//! careful to close the write ends first though, or reading will block
//! waiting for EOF.
//!
//! ```rust
//! use os_pipe::pipe;
//! use std::io::prelude::*;
//! use std::process::{Command, Stdio};
//!
//! // This command prints "foo" to stdout and "bar" to stderr. It
//! // works on both Unix and Windows, though there are whitespace
//! // differences that we'll account for at the bottom.
//! let shell_command = "echo foo && echo bar >&2";
//!
//! // Ritual magic to run shell commands on different platforms.
//! let (shell, flag) = if cfg!(windows) { ("cmd.exe", "/C") } else { ("sh", "-c") };
//!
//! let mut child = Command::new(shell);
//! child.arg(flag);
//! child.arg(shell_command);
//!
//! // Here's the interesting part. Open a pipe, copy its write end, and
//! // give both copies to the child.
//! let (mut reader, writer) = pipe().unwrap();
//! let writer_clone = writer.try_clone().unwrap();
//! child.stdout(writer);
//! child.stderr(writer_clone);
//!
//! // Now start the child running.
//! let mut handle = child.spawn().unwrap();
//!
//! // Very important when using pipes: This parent process is still
//! // holding its copies of the write ends, and we have to close them
//! // before we read, otherwise the read end will never report EOF. The
//! // Command object owns the writers now, and dropping it closes them.
//! drop(child);
//!
//! // Finally we can read all the output and clean up the child.
//! let mut output = String::new();
//! reader.read_to_string(&mut output).unwrap();
//! handle.wait().unwrap();
//! assert!(output.split_whitespace().eq(vec!["foo", "bar"]));
//! ```
use std::fs::File;
use std::io;
use std::process::Stdio;
/// The reading end of a pipe, returned by [`pipe`](fn.pipe.html).
///
/// `PipeReader` implements `Into<Stdio>`, so you can pass it as an argument to
/// `Command::stdin` to spawn a child process that reads from the pipe.
#[derive(Debug)]
pub struct PipeReader(File);
impl PipeReader {
pub fn try_clone(&self) -> io::Result<PipeReader> {
// Do *not* use File::try_clone here. It's buggy on windows. See
// comments on windows.rs::dup().
sys::dup(&self.0).map(PipeReader)
}
}
impl io::Read for PipeReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
}
impl<'a> io::Read for &'a PipeReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut file_ref = &self.0;
file_ref.read(buf)
}
}
impl From<PipeReader> for Stdio {
fn from(p: PipeReader) -> Stdio {
p.0.into()
}
}
/// The writing end of a pipe, returned by [`pipe`](fn.pipe.html).
///
/// `PipeWriter` implements `Into<Stdio>`, so you can pass it as an argument to
/// `Command::stdout` or `Command::stderr` to spawn a child process that writes
/// to the pipe.
#[derive(Debug)]
pub struct PipeWriter(File);
impl PipeWriter {
pub fn try_clone(&self) -> io::Result<PipeWriter> {
// Do *not* use File::try_clone here. It's buggy on windows. See
// comments on windows.rs::dup().
sys::dup(&self.0).map(PipeWriter)
}
}
impl io::Write for PipeWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
}
impl<'a> io::Write for &'a PipeWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut file_ref = &self.0;
file_ref.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
let mut file_ref = &self.0;
file_ref.flush()
}
}
impl From<PipeWriter> for Stdio {
fn from(p: PipeWriter) -> Stdio {
p.0.into()
}
}
/// Open a new pipe and return a [`PipeReader`] and [`PipeWriter`] pair.
///
/// This corresponds to the `pipe2` library call on Posix and the
/// `CreatePipe` library call on Windows (though these implementation
/// details might change). Pipes are non-inheritable, so new child
/// processes won't receive a copy of them unless they're explicitly
/// passed as stdin/stdout/stderr.
///
/// [`PipeReader`]: struct.PipeReader.html
/// [`PipeWriter`]: struct.PipeWriter.html
pub fn | () -> io::Result<(PipeReader, PipeWriter)> {
sys::pipe()
}
/// Get a duplicated copy of the current process's standard input, as a
/// [`PipeReader`].
///
/// Reading directly from this pipe isn't recommended, because it's not
/// synchronized with [`std::io::stdin`]. [`PipeReader`] implements
/// [`Into<Stdio>`], so it can be passed directly to [`Command::stdin`]. This is
/// equivalent to [`Stdio::inherit`], though, so it's usually not necessary
/// unless you need a collection of different pipes.
///
/// [`std::io::stdin`]: https://doc.rust-lang.org/std/io/fn.stdin.html
/// [`PipeReader`]: struct.PipeReader.html
/// [`Into<Stdio>`]: https://doc.rust-lang.org/std/process/struct.Stdio.html
/// [`Command::stdin`]: https://doc.rust-lang.org/std/process/struct.Command.html#method.stdin
/// [`Stdio::inherit`]: https://doc.rust-lang.org/std/process/struct.Stdio.html#method.inherit
pub fn dup_stdin() -> io::Result<PipeReader> {
sys::dup(&io::stdin()).map(PipeReader)
}
/// Get a duplicated copy of the current process's standard output, as a
/// [`PipeWriter`](struct.PipeWriter.html).
///
/// Writing directly to this pipe isn't recommended, because it's not
/// synchronized with [`std::io::stdout`]. [`PipeWriter`] implements
/// [`Into<Stdio>`], so it can be passed directly to [`Command::stdout`] or
/// [`Command::stderr`]. This can be useful if you want the child's stderr to go
/// to the parent's stdout.
///
/// [`std::io::stdout`]: https://doc.rust-lang.org/std/io/fn.stdout.html
/// [`PipeWriter`]: struct.PipeWriter.html
/// [`Into<Stdio>`]: https://doc.rust-lang.org/std/process/struct.Stdio.html
/// [`Command::stdout`]: https://doc.rust-lang.org/std/process/struct.Command.html#method.stdout
/// [`Command::stderr`]: https://doc.rust-lang.org/std/process/struct.Command.html#method.stderr
/// [`Stdio::inherit`]: https://doc.rust-lang.org/std/process/struct.Stdio.html#method.inherit
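///
/// ```ignore
/// // Sketch: route a child's stderr to the parent's stdout ("some_tool" is illustrative).
/// let mut cmd = std::process::Command::new("some_tool");
/// cmd.stderr(os_pipe::dup_stdout()?);
/// ```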
pub fn dup_stdout() -> io::Result<PipeWriter> {
sys::dup(&io::stdout()).map(PipeWriter)
}
/// Get a duplicated copy of the current process's standard error, as a
/// [`PipeWriter`](struct.PipeWriter.html).
///
/// Writing directly to this pipe isn't recommended, because it's not
/// synchronized with [`std::io::stderr`]. [`PipeWriter`] implements
/// [`Into<Stdio>`], so it can be passed directly to [`Command::stdout`] or
/// [`Command::stderr`]. This can be useful if you want the child's stdout to go
/// to the parent's stderr.
///
/// [`std::io::stderr`]: https://doc.rust-lang.org/std/io/fn.stderr.html
/// [`PipeWriter`]: struct.PipeWriter.html
/// [`Into<Stdio>`]: https://doc.rust-lang.org/std/process/struct.Stdio.html
/// [`Command::stdout`]: https://doc.rust-lang.org/std/process/struct.Command.html#method.stdout
/// [`Command::stderr`]: https://doc.rust-lang.org/std/process/struct.Command.html#method.stderr
/// [`Stdio::inherit`]: https://doc.rust-lang.org/std/process/struct.Stdio.html#method.inherit
pub fn dup_stderr() -> io::Result<PipeWriter> {
sys::dup(&io::stderr()).map(PipeWriter)
}
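// Illustrative sketch (not part of the original file): the doc comments above
// note that `PipeWriter` implements `Into<Stdio>`, so a duplicated parent
// handle can be handed straight to a child process. The "my-tool" program name
// is a made-up placeholder and error handling is minimal.
//
//     use std::process::Command;
//
//     fn child_stderr_to_parent_stdout() -> std::io::Result<()> {
//         let writer = dup_stdout()?;
//         Command::new("my-tool")
//             .stderr(writer) // the child's stderr now goes to the parent's stdout
//             .status()?;
//         Ok(())
//     }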
#[cfg(not(windows))]
#[path = "unix.rs"]
mod sys;
#[cfg(windows)]
#[path = "windows.rs"]
mod sys;
#[cfg(test)]
mod tests {
use std::env::consts::EXE_EXTENSION;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::Once;
use std::thread;
fn path_to_exe(name: &str) -> PathBuf {
// This project defines some associated binaries for testing, and we shell out to them in
// these tests. `cargo test` doesn't automatically build associated binaries, so this
// function takes care of building them explicitly, with the right debug/release flavor.
static CARGO_BUILD_ONCE: Once = Once::new();
CARGO_BUILD_ONCE.call_once(|| {
let mut build_command = Command::new("cargo");
build_command.args(&["build", "--quiet"]);
if !cfg!(debug_assertions) {
build_command.arg("--release");
}
let build_status = build_command.status().unwrap();
assert!(
build_status.success(),
"Cargo failed to build associated binaries."
);
});
let flavor = if cfg!(debug_assertions) {
"debug"
} else {
"release"
};
Path::new("target")
.join(flavor)
.join(name)
.with_extension(EXE_EXTENSION)
}
#[test]
fn test_pipe_some_data() {
let (mut reader, mut writer) = crate::pipe().unwrap();
// A small write won't fill the pipe buffer, so it won't block this thread.
writer.write_all(b"some stuff").unwrap();
drop(writer);
let mut out = String::new();
reader.read_to_string(&mut out).unwrap();
assert_eq!(out, "some stuff");
}
#[test]
fn test_pipe_some_data_with_refs() {
// As with `File`, there's a second set of impls for shared
// refs. Test those.
let (reader, writer) = crate::pipe().unwrap();
let mut reader_ref = &reader;
{
let mut writer_ref = &writer;
// A small write won't fill the pipe buffer, so it won't block this thread.
writer_ref.write_all(b"some stuff").unwrap();
}
drop(writer);
let mut out = String::new();
reader_ref.read_to_string(&mut out).unwrap();
assert_eq!(out, "some stuff");
}
#[test]
fn test_pipe_no_data() {
let (mut reader, writer) = crate::pipe().unwrap();
drop(writer);
let mut out = String::new();
reader.read_to_string(&mut out).unwrap();
assert_eq!(out, "");
}
#[test]
fn test_pipe_a_megabyte_of_data_from_another_thread() {
let data = vec![0xff; 1_000_000];
let data_copy = data.clone();
let (mut reader, mut writer) = crate::pipe().unwrap();
let joiner = thread::spawn(move || {
writer.write_all(&data_copy).unwrap();
// This drop happens automatically, so writing it out here is mostly
// just for clarity. For what it's worth, it also guards against
// accidentally forgetting to drop if we switch to scoped threads or
// something like that and change this to a non-moving closure. The
// explicit drop forces `writer` to move.
drop(writer);
});
let mut out = Vec::new();
reader.read_to_end(&mut out).unwrap();
joiner.join().unwrap();
assert_eq!(out, data);
}
#[test]
fn test_pipes_are_not_inheritable() {
// Create pipes for a child process.
let (input_reader, mut input_writer) = crate::pipe().unwrap();
let (mut output_reader, output_writer) = crate::pipe().unwrap();
// Create a bunch of duplicated copies, which we'll close later. This
// tests that duplication preserves non-inheritability.
let ir_dup = input_reader.try_clone().unwrap();
let iw_dup = input_writer.try_clone().unwrap();
let or_dup = output_reader.try_clone().unwrap();
let ow_dup = output_writer.try_clone().unwrap();
// Spawn the child. Note that this temporary Command object takes
// ownership of our copies of the child's stdin and stdout, and then
// closes them immediately when it drops. That stops us from blocking
// our own read below. We use our own simple implementation of cat for
// compatibility with Windows.
let mut child = Command::new(path_to_exe("cat"))
.stdin(input_reader)
.stdout(output_writer)
.spawn()
.unwrap();
// Drop all the dups now that the child is spawned.
drop(ir_dup);
drop(iw_dup);
drop(or_dup);
drop(ow_dup);
// Write to the child's stdin. This is a small write, so it shouldn't
// block.
input_writer.write_all(b"hello").unwrap();
drop(input_writer);
// Read from the child's stdout. If this child has accidentally
// inherited the write end of its own stdin, then it will never exit,
// and this read will block forever. That's what this test is all
// about.
let mut output = Vec::new();
output_reader.read_to_end(&mut output).unwrap();
child.wait().unwrap();
// Confirm that we got the right bytes.
assert_eq!(b"hello", &*output);
}
#[test]
fn test_parent_handles() {
// This test invokes the `swap` test program, which uses parent_stdout() and
// parent_stderr() to swap the outputs for another child that it spawns.
// Create pipes for a child process.
let (reader, mut writer) = crate::pipe().unwrap();
// Write input. This shouldn't block because it's small. Then close the write end, or else
// the child will hang.
writer.write_all(b"quack").unwrap();
drop(writer);
// Use `swap` to run `cat_both`. `cat_both` will read "quack" from stdin
// and write it to stdout and stderr with different tags. But because we
// run it inside `swap`, the tags in the output should be backwards.
let output = Command::new(path_to_exe("swap"))
.arg(path_to_exe("cat_both"))
.stdin(reader)
.output()
.unwrap();
// Check for a clean exit.
assert!(
output.status.success(),
"child process returned {:#?}",
output
);
// Confirm that we got the right bytes.
assert_eq!(b"stderr: quack", &*output.stdout);
assert_eq!(b"stdout: quack", &*output.stderr);
}
#[test]
fn test_parent_handles_dont_close() {
// Open and close each parent pipe multiple times. If this closes the
// original, subsequent opens should fail.
let stdin = crate::dup_stdin().unwrap();
drop(stdin);
let stdin = crate::dup_stdin().unwrap();
drop(stdin);
let stdout = crate::dup_stdout().unwrap();
drop(stdout);
let stdout = crate::dup_stdout().unwrap();
drop(stdout);
let stderr = crate::dup_stderr().unwrap();
drop(stderr);
let stderr = crate::dup_stderr().unwrap();
drop(stderr);
}
#[test]
fn test_try_clone() {
let (reader, writer) = crate::pipe().unwrap();
let mut reader_clone = reader.try_clone().unwrap();
let mut writer_clone = writer.try_clone().unwrap();
// A small write won't fill the pipe buffer, so it won't block this thread.
writer_clone.write_all(b"some stuff").unwrap();
drop(writer);
drop(writer_clone);
let mut out = String::new();
reader_clone.read_to_string(&mut out).unwrap();
assert_eq!(out, "some stuff");
}
#[test]
fn test_debug() {
let (reader, writer) = crate::pipe().unwrap();
format!("{:?} {:?}", reader, writer);
}
}
| pipe |
is_between.rs | use language::operations::{make_param_doc, Operation, ParamInfo};
pub struct IsBetweenOp;
const DOC: &str = "Checks that lower_bound <= value < upper_bound";
pub const OP_CODE: u32 = 33;
pub const IDENT: &str = "is_between";
impl Operation for IsBetweenOp {
fn op_code(&self) -> u32 |
fn documentation(&self) -> &'static str {
DOC
}
fn identifier(&self) -> &'static str {
IDENT
}
fn param_info(&self) -> ParamInfo {
ParamInfo {
num_required: 3,
num_optional: 0,
param_docs: vec![
make_param_doc("<value>", ""),
make_param_doc("<lower_bound>", ""),
make_param_doc("<upper_bound>", ""),
],
}
}
}
| {
OP_CODE
} |
helper.go | // Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package oidc
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/goharbor/harbor/src/lib/config"
cfgModels "github.com/goharbor/harbor/src/lib/config/models"
"github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/pkg/usergroup"
"github.com/goharbor/harbor/src/pkg/usergroup/model"
gooidc "github.com/coreos/go-oidc/v3/oidc"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/lib/log"
"golang.org/x/oauth2"
)
const (
googleEndpoint = "https://accounts.google.com"
)
type claimsProvider interface {
Claims(v interface{}) error
}
type providerHelper struct {
sync.Mutex
instance atomic.Value
setting atomic.Value
creationTime time.Time
}
func (p *providerHelper) get() (*gooidc.Provider, error) {
if p.instance.Load() != nil {
if time.Since(p.creationTime) > 3*time.Second {
if err := p.create(); err != nil {
return nil, err
}
}
} else {
p.Lock()
defer p.Unlock()
if p.instance.Load() == nil {
if err := p.reloadSetting(); err != nil {
return nil, err
}
if err := p.create(); err != nil {
return nil, err
}
go func() {
for {
if err := p.reloadSetting(); err != nil {
log.Warningf("Failed to refresh configuration, error: %v", err)
}
time.Sleep(3 * time.Second)
}
}()
}
}
return p.instance.Load().(*gooidc.Provider), nil
}
func (p *providerHelper) reloadSetting() error {
conf, err := config.OIDCSetting(orm.Context())
if err != nil {
return fmt.Errorf("failed to load OIDC setting: %v", err)
}
p.setting.Store(*conf)
return nil
}
func (p *providerHelper) create() error {
if p.setting.Load() == nil {
return errors.New("the configuration is not loaded")
}
s := p.setting.Load().(cfgModels.OIDCSetting)
ctx := clientCtx(context.Background(), s.VerifyCert)
provider, err := gooidc.NewProvider(ctx, s.Endpoint)
if err != nil {
return fmt.Errorf("failed to create OIDC provider, error: %v", err)
}
p.instance.Store(provider)
p.creationTime = time.Now()
return nil
}
var provider = &providerHelper{}
var insecureTransport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
Proxy: http.ProxyFromEnvironment,
}
// Token wraps the attributes of an oauth2 token plus the raw ID token
type Token struct {
oauth2.Token
RawIDToken string `json:"id_token,omitempty"`
}
// UserInfo wraps the information that is extracted via the token. It will be transformed to a data object that is persisted
// in the DB
type UserInfo struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Username string `json:"name"`
Email string `json:"email"`
Groups []string `json:"groups"`
AdminGroupMember bool `json:"admin_group_member"`
autoOnboardUsername string
hasGroupClaim bool
}
func getOauthConf() (*oauth2.Config, error) {
p, err := provider.get()
if err != nil {
return nil, err
}
setting := provider.setting.Load().(cfgModels.OIDCSetting)
scopes := make([]string, 0)
for _, sc := range setting.Scope {
if strings.HasPrefix(p.Endpoint().AuthURL, googleEndpoint) && sc == gooidc.ScopeOfflineAccess {
log.Warningf("Dropped unsupported scope: %s ", sc)
continue
}
scopes = append(scopes, sc)
}
return &oauth2.Config{
ClientID: setting.ClientID,
ClientSecret: setting.ClientSecret,
Scopes: scopes,
RedirectURL: setting.RedirectURL,
Endpoint: p.Endpoint(),
}, nil
}
// AuthCodeURL returns the URL for OIDC provider's consent page. The state should be verified when user is redirected
// back to Harbor.
func AuthCodeURL(state string) (string, error) {
conf, err := getOauthConf()
if err != nil {
log.Errorf("Failed to get OAuth configuration, error: %v", err)
return "", err
}
var options []oauth2.AuthCodeOption
setting := provider.setting.Load().(cfgModels.OIDCSetting)
for k, v := range setting.ExtraRedirectParms {
options = append(options, oauth2.SetAuthURLParam(k, v))
}
if strings.HasPrefix(conf.Endpoint.AuthURL, googleEndpoint) { // make sure the refresh token will be returned
options = append(options, oauth2.AccessTypeOffline)
options = append(options, oauth2.SetAuthURLParam("prompt", "consent"))
}
return conf.AuthCodeURL(state, options...), nil
}
// ExchangeToken get the token from token provider via the code
func ExchangeToken(ctx context.Context, code string) (*Token, error) {
oauth, err := getOauthConf()
if err != nil {
log.Errorf("Failed to get OAuth configuration, error: %v", err)
return nil, err
}
setting := provider.setting.Load().(cfgModels.OIDCSetting)
ctx = clientCtx(ctx, setting.VerifyCert)
oauthToken, err := oauth.Exchange(ctx, code)
if err != nil {
return nil, err
}
return &Token{Token: *oauthToken, RawIDToken: oauthToken.Extra("id_token").(string)}, nil
}
func parseIDToken(ctx context.Context, rawIDToken string) (*gooidc.IDToken, error) {
conf := &gooidc.Config{SkipClientIDCheck: true, SkipExpiryCheck: true}
return verifyTokenWithConfig(ctx, rawIDToken, conf)
}
// VerifyToken verifies the ID token based on the OIDC settings
func VerifyToken(ctx context.Context, rawIDToken string) (*gooidc.IDToken, error) {
return verifyTokenWithConfig(ctx, rawIDToken, nil)
}
func verifyTokenWithConfig(ctx context.Context, rawIDToken string, conf *gooidc.Config) (*gooidc.IDToken, error) {
log.Debugf("Raw ID token for verification: %s", rawIDToken)
p, err := provider.get()
if err != nil {
return nil, err
}
settings := provider.setting.Load().(cfgModels.OIDCSetting)
if conf == nil {
conf = &gooidc.Config{ClientID: settings.ClientID}
}
verifier := p.Verifier(conf)
ctx = clientCtx(ctx, settings.VerifyCert)
return verifier.Verify(ctx, rawIDToken)
}
func clientCtx(ctx context.Context, verifyCert bool) context.Context {
var client *http.Client
if !verifyCert {
client = &http.Client{
Transport: insecureTransport,
}
} else {
client = &http.Client{}
}
return gooidc.ClientContext(ctx, client)
}
// refreshToken tries to refresh the token if it's expired; if it isn't, the
// original one will be returned.
func refreshToken(ctx context.Context, token *Token) (*Token, error) {
oauthCfg, err := getOauthConf()
if err != nil {
return nil, err
}
setting := provider.setting.Load().(cfgModels.OIDCSetting)
cctx := clientCtx(ctx, setting.VerifyCert)
ts := oauthCfg.TokenSource(cctx, &token.Token)
nt, err := ts.Token()
if err != nil {
return nil, err
}
it, ok := nt.Extra("id_token").(string)
if !ok {
log.Debug("id_token not exist in refresh response")
}
return &Token{Token: *nt, RawIDToken: it}, nil
}
// UserInfoFromToken tries to call the UserInfo endpoint of the OIDC provider, and consolidate with ID token
// to generate a UserInfo object, if the ID token is not in the input token struct, some attributes will be empty
func UserInfoFromToken(ctx context.Context, token *Token) (*UserInfo, error) {
// #10913: preload the configuration, in case it was not previously loaded by the UI
_, err := provider.get()
if err != nil {
return nil, err
}
setting := provider.setting.Load().(cfgModels.OIDCSetting)
local, err := UserInfoFromIDToken(ctx, token, setting)
if err != nil {
return nil, err
}
remote, err := userInfoFromRemote(ctx, token, setting) | if err != nil {
log.Warningf("Failed to get userInfo by calling remote userinfo endpoint, error: %v ", err)
}
if remote != nil && local != nil {
if remote.Subject != local.Subject {
return nil, fmt.Errorf("the subject from userinfo: %s does not match the subject from ID token: %s, probably a security attack happened", remote.Subject, local.Subject)
}
return mergeUserInfo(remote, local), nil
} else if remote != nil && local == nil {
return remote, nil
} else if local != nil && remote == nil {
log.Debugf("Fall back to user data from ID token.")
return local, nil
}
return nil, fmt.Errorf("failed to get userinfo from both remote and ID token")
}
func mergeUserInfo(remote, local *UserInfo) *UserInfo {
res := &UserInfo{
// data only contained in ID token
Subject: local.Subject,
Issuer: local.Issuer,
// Used data from userinfo
Email: remote.Email,
}
// priority for username (high to low):
// 1. Username based on the auto onboard claim from ID token
// 2. Username from response of userinfo endpoint
// 3. Username from the default "name" claim from ID token
if local.autoOnboardUsername != "" {
res.Username = local.autoOnboardUsername
} else if remote.Username != "" {
res.Username = remote.Username
} else {
res.Username = local.Username
}
if remote.hasGroupClaim {
res.Groups = remote.Groups
res.AdminGroupMember = remote.AdminGroupMember
res.hasGroupClaim = true
} else if local.hasGroupClaim {
res.Groups = local.Groups
res.AdminGroupMember = local.AdminGroupMember
res.hasGroupClaim = true
} else {
res.Groups = []string{}
}
return res
}
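// Illustrative sketch (not part of the original file): how the priority in
// mergeUserInfo above plays out. All field values below are made up for the
// example.
//
//	local := &UserInfo{Subject: "sub-1", Issuer: "https://idp.example.test",
//		Username: "name-claim", autoOnboardUsername: "custom-claim"}
//	remote := &UserInfo{Subject: "sub-1", Email: "user@example.test", Username: "userinfo-name"}
//	merged := mergeUserInfo(remote, local)
//	// merged.Username == "custom-claim" (the auto onboard claim wins), merged.Email
//	// comes from the userinfo response, and merged.Subject/Issuer come from the ID token.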
func userInfoFromRemote(ctx context.Context, token *Token, setting cfgModels.OIDCSetting) (*UserInfo, error) {
p, err := provider.get()
if err != nil {
return nil, err
}
cctx := clientCtx(ctx, setting.VerifyCert)
u, err := p.UserInfo(cctx, oauth2.StaticTokenSource(&token.Token))
if err != nil {
return nil, err
}
return userInfoFromClaims(u, setting)
}
// UserInfoFromIDToken extract user info from ID token
func UserInfoFromIDToken(ctx context.Context, token *Token, setting cfgModels.OIDCSetting) (*UserInfo, error) {
if token.RawIDToken == "" {
return nil, nil
}
idt, err := parseIDToken(ctx, token.RawIDToken)
if err != nil {
return nil, err
}
return userInfoFromClaims(idt, setting)
}
func userInfoFromClaims(c claimsProvider, setting cfgModels.OIDCSetting) (*UserInfo, error) {
res := &UserInfo{}
if err := c.Claims(res); err != nil {
return nil, err
}
if setting.UserClaim != "" {
allClaims := make(map[string]interface{})
if err := c.Claims(&allClaims); err != nil {
return nil, err
}
if username, ok := allClaims[setting.UserClaim].(string); ok {
// res.Username and autoOnboardUsername both need to be set to create a fallback when mergeUserInfo has not been successfully called.
// This can for example occur when remote fails and only a local token is available for onboarding.
// Otherwise the onboard flow only has a fallback when "name" is set in the token, which is not always the case as a custom Username Claim could be configured.
res.autoOnboardUsername, res.Username = username, username
} else {
log.Warningf("OIDC. Failed to recover Username from claim. Claim '%s' is invalid or not a string", setting.UserClaim)
}
}
res.Groups, res.hasGroupClaim = groupsFromClaims(c, setting.GroupsClaim)
if len(setting.AdminGroup) > 0 {
for _, g := range res.Groups {
if g == setting.AdminGroup {
res.AdminGroupMember = true
break
}
}
}
return res, nil
}
// groupsFromClaims fetches the group name list from a claims provider, such as a decoded ID token.
// If the claims do not contain the claim named k, the second return value will be false, otherwise true.
func groupsFromClaims(gp claimsProvider, k string) ([]string, bool) {
res := make([]string, 0)
claimMap := make(map[string]interface{})
if err := gp.Claims(&claimMap); err != nil {
log.Errorf("failed to fetch claims, error: %v", err)
return res, false
}
g, ok := claimMap[k].([]interface{})
if !ok {
if len(strings.TrimSpace(k)) > 0 {
log.Warningf("Unable to get groups from claims, claims: %+v, groups claims key: %s", claimMap, k)
}
return res, false
}
for _, e := range g {
s, ok := e.(string)
if !ok {
log.Warningf("Element in group list is not string: %v, list: %v", e, g)
continue
}
res = append(res, s)
}
return res, true
}
type populate func(groupNames []string) ([]int, error)
func populateGroupsDB(groupNames []string) ([]int, error) {
return usergroup.Mgr.Populate(orm.Context(), model.UserGroupsFromName(groupNames, common.OIDCGroupType))
}
// InjectGroupsToUser populates the groups to the DB and injects the group IDs into the user model.
// The third optional param is for UT only.
func InjectGroupsToUser(info *UserInfo, user *models.User, f ...populate) {
if info == nil || user == nil {
log.Warningf("user info or user model is nil, skip the func")
return
}
var populateGroups populate
if len(f) == 0 {
populateGroups = populateGroupsDB
} else {
populateGroups = f[0]
}
if gids, err := populateGroups(info.Groups); err != nil {
log.Warningf("failed to get group ID, error: %v, skip populating groups", err)
} else {
user.GroupIDs = gids
}
user.AdminRoleInAuth = info.AdminGroupMember
}
// Conn wraps connection info of an OIDC endpoint
type Conn struct {
URL string `json:"url"`
VerifyCert bool `json:"verify_cert"`
}
// TestEndpoint tests whether the endpoint is a valid OIDC endpoint.
// The nil return value indicates the success of the test
func TestEndpoint(conn Conn) error {
// gooidc will try to call the discovery api when creating the provider and that's all we need to check
ctx := clientCtx(context.Background(), conn.VerifyCert)
_, err := gooidc.NewProvider(ctx, conn.URL)
return err
} | |
forms-wizard.js | $(document).ready(function() {
/* =================================================================
Basic
================================================================= */
$('#exampleBasic').wizard({
templates: {
buttons: function(){
var options = this.options;
return '<div class="clearfix">' +
'<button class="btn btn-secondary" data-target="#'+this.id+'" data-wizard="back">'+options.buttonLabels.back+'</button>' +
'<button class="btn btn-primary float-right" data-target="#'+this.id+'" data-wizard="next">'+options.buttonLabels.next+'</button>' +
'<button class="btn btn-primary float-right" data-target="#'+this.id+'" data-wizard="finish">'+options.buttonLabels.finish+'</button>' +
'</div>';
}
},
});
/* =================================================================
Validation
================================================================= */
$('#exampleValidator').wizard({
templates: {
buttons: function(){
var options = this.options;
return '<div class="clearfix">' +
'<button class="btn btn-secondary" data-target="#'+this.id+'" data-wizard="back">'+options.buttonLabels.back+'</button>' +
'<button class="btn btn-primary float-right" data-target="#'+this.id+'" data-wizard="next">'+options.buttonLabels.next+'</button>' +
'<button class="btn btn-primary float-right" data-target="#'+this.id+'" data-wizard="finish">'+options.buttonLabels.finish+'</button>' +
'</div>';
}
},
onInit: function(){
$('#validation').formValidation({
framework: 'bootstrap',
fields: {
| validators: {
notEmpty: {
message: 'The username is required'
},
stringLength: {
min: 6,
max: 30,
message: 'The username must be more than 6 and less than 30 characters long'
},
regexp: {
regexp: /^[a-zA-Z0-9_\.]+$/,
message: 'The username can only consist of alphabetical, number, dot and underscore'
}
}
},
email: {
validators: {
notEmpty: {
message: 'The email address is required'
},
emailAddress: {
message: 'The input is not a valid email address'
}
}
},
password: {
validators: {
notEmpty: {
message: 'The password is required'
},
different: {
field: 'username',
message: 'The password cannot be the same as username'
}
}
}
}
});
},
validator: function(){
var fv = $('#validation').data('formValidation');
var $this = $(this);
// Validate the container
fv.validateContainer($this);
var isValidStep = fv.isValidContainer($this);
if (isValidStep === false || isValidStep === null) {
return false;
}
return true;
},
onFinish: function(){
$('#validation').submit();
alert('finish');
}
});
/* =================================================================
Tabs
================================================================= */
$('.wizard').wizard({
step: '> .nav > li > a',
templates: {
buttons: function(){
var options = this.options;
return '<div class="clearfix">' +
'<button class="btn btn-default" data-target="#'+this.id+'" data-wizard="back">'+options.buttonLabels.back+'</button>' +
'<button class="btn btn-primary float-right" data-target="#'+this.id+'" data-wizard="next">'+options.buttonLabels.next+'</button>' +
'<button class="btn btn-primary float-right" data-target="#'+this.id+'" data-wizard="finish">'+options.buttonLabels.finish+'</button>' +
'</div>';
}
},
onBeforeShow: function(step){
step.$element.tab('show');
},
onFinish: function(){
alert('finish');
}
});
}); | username: {
|
transform.go | package transform
import (
"strings"
"os"
)
type Type string
const (
TypeFileMove Type = "file_move"
TypeFileStringReplace Type = "file_string_replace"
TypeInput Type = "input"
TypeRunScript Type = "run_script"
TypeVariableStringReplace Type = "variable_string_replace"
)
type Transform interface {
Apply(variables Variables) error
}
type Options struct {
Arguments []string `json:"arguments,omitempty"`
InputPath string `json:"input_path,omitempty"`
InputVariable string `json:"input_variable,omitempty"`
OutputPath string `json:"output_path,omitempty"`
OutputVariable string `json:"output_variable,omitempty"`
SkipIfVariableExists bool `json:"skip_if_variable_exists,omitempty"`
StringPrefix string `json:"string_prefix,omitempty"`
StringReplace StringReplace `json:"string_replace,omitempty"`
StringSuffix string `json:"string_suffix,omitempty"`
Type Type `json:"type"`
}
type StringReplace struct {
Old string `json:"old"`
New string `json:"new"`
}
func (stringReplace *StringReplace) Replace(input string, variables Variables) string {
return strings.Replace(input,
stringReplace.oldString(variables),
stringReplace.newString(variables),
-1)
}
func (stringReplace *StringReplace) oldString(variables Variables) string {
if strings.HasPrefix(stringReplace.Old, "$") {
return variables[strings.TrimPrefix(stringReplace.Old, "$")]
} else {
return stringReplace.Old
}
}
func (stringReplace *StringReplace) newString(variables Variables) string {
if strings.HasPrefix(stringReplace.New, "$") {
return variables[strings.TrimPrefix(stringReplace.New, "$")]
} else {
return stringReplace.New
}
}
type Variables map[string]string
func | () Variables {
return make(Variables)
}
func (variables Variables) AddEnv() {
for _, env := range os.Environ() {
// Split on the first "=" only so values that themselves contain "=" stay intact.
parts := strings.SplitN(env, "=", 2)
key := parts[0]
value := parts[1]
variables[key] = value
}
}
func (variables Variables) AddProjectName(projectName string) {
variables["project_name"] = projectName
}
| NewVariables |
db_state_keeper_test.go | package messengerdb
import (
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"gorm.io/gorm"
"berty.tech/berty/v2/go/pkg/messengertypes"
)
func Test_keepUsername(t *testing.T) {
db, _, dispose := GetInMemoryTestDB(t, GetInMemoryTestDBOptsNoInit)
defer dispose()
log := zap.NewNop()
require.Equal(t, "", keepDisplayName(db.db, nil))
// table schema on 2020_10_13
require.NoError(t, db.db.Exec("CREATE TABLE accounts (public_key text, display_name text, link text, replicate_new_groups_automatically numeric DEFAULT true,PRIMARY KEY (public_key))").Error)
require.Equal(t, "", keepDisplayName(db.db, log))
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically) VALUES ("pk_1", "display_name_1", "http://display_name_1/", true)`).Error)
require.Equal(t, "display_name_1", keepDisplayName(db.db, log))
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically) VALUES ("pk_2", "display_name_2", "http://display_name_2/", true)`).Error)
require.Equal(t, "display_name_1", keepDisplayName(db.db, log))
}
func | (t *testing.T) {
db, _, dispose := GetInMemoryTestDB(t, GetInMemoryTestDBOptsNoInit)
defer dispose()
log := zap.NewNop()
require.Equal(t, true, keepAutoReplicateFlag(db.db, nil))
// table schema on 2020_10_13
require.NoError(t, db.db.Exec("CREATE TABLE accounts (public_key text, display_name text, link text, replicate_new_groups_automatically numeric DEFAULT true,PRIMARY KEY (public_key))").Error)
require.Equal(t, true, keepAutoReplicateFlag(db.db, log))
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically) VALUES ("pk_1", "display_name_1", "http://display_name_1/", false)`).Error)
require.Equal(t, false, keepAutoReplicateFlag(db.db, log))
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically) VALUES ("pk_2", "display_name_2", "http://display_name_2/", true)`).Error)
require.Equal(t, false, keepAutoReplicateFlag(db.db, log))
require.NoError(t, db.db.Exec(`UPDATE accounts SET replicate_new_groups_automatically = true WHERE public_key = "pk_1"`).Error)
require.Equal(t, true, keepAutoReplicateFlag(db.db, log))
}
func Test_keepConversationsUnreadCounts(t *testing.T) {
db, _, dispose := GetInMemoryTestDB(t, GetInMemoryTestDBOptsNoInit)
defer dispose()
log := zap.NewNop()
res := keepConversationsLocalData(db.db, nil)
require.Empty(t, res)
require.NoError(t, db.db.Exec("CREATE TABLE `conversations` (`public_key` text,`type` integer,`is_open` numeric,`display_name` text,`link` text,`unread_count` integer,`last_update` integer,`contact_public_key` text,`account_member_public_key` text,`local_device_public_key` text,`created_date` integer,`reply_options_cid` text,PRIMARY KEY (`public_key`))").Error)
res = keepConversationsLocalData(db.db, log)
require.Empty(t, res)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_1", true, 1000)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_2", false, 2000)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_3", true, 3000)`).Error)
res = keepConversationsLocalData(db.db, log)
expectedValues := map[string]*messengertypes.LocalConversationState{
"pk_1": {IsOpen: true, UnreadCount: 1000},
"pk_2": {IsOpen: false, UnreadCount: 2000},
"pk_3": {IsOpen: true, UnreadCount: 3000},
}
require.Len(t, res, len(expectedValues))
for _, found := range res {
expected, ok := expectedValues[found.PublicKey]
require.True(t, ok)
require.Equal(t, expected.UnreadCount, found.UnreadCount)
require.Equal(t, expected.IsOpen, found.IsOpen)
}
}
func Test_keepDatabaseState_restoreDatabaseState(t *testing.T) {
db, _, dispose := GetInMemoryTestDB(t, GetInMemoryTestDBOptsNoInit)
defer dispose()
log := zap.NewNop()
// Schema 2021_07_27
require.NoError(t, db.db.Exec("CREATE TABLE accounts (public_key text, display_name text, link text, replicate_new_groups_automatically numeric DEFAULT true,auto_share_push_token_flag numeric DEFAULT true,PRIMARY KEY (public_key))").Error)
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically, auto_share_push_token_flag) VALUES ("pk_1", "display_name_1", "http://display_name_1/", false, false)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically, auto_share_push_token_flag) VALUES ("pk_2", "display_name_2", "http://display_name_2/", true, true)`).Error)
require.NoError(t, db.db.Exec("CREATE TABLE `conversations` (`public_key` text,`type` integer,`is_open` numeric,`display_name` text,`link` text,`unread_count` integer,`last_update` integer,`contact_public_key` text,`account_member_public_key` text,`local_device_public_key` text,`created_date` integer,`reply_options_cid` text,PRIMARY KEY (`public_key`))").Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_1", true, 1000)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_2", false, 2000)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_3", true, 3000)`).Error)
state := keepDatabaseLocalState(db.db, log)
require.NoError(t, dropAllTables(db.db))
tables := []string(nil)
require.NoError(t, db.db.Raw("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'").Scan(&tables).Error)
require.Len(t, tables, 0)
// Schema 2020_10_13
require.NoError(t, db.db.Exec("CREATE TABLE accounts (public_key text, display_name text, link text, replicate_new_groups_automatically numeric DEFAULT true, auto_share_push_token_flag numeric DEFAULT true ,PRIMARY KEY (public_key))").Error)
require.NoError(t, db.db.Exec("CREATE TABLE `conversations` (`public_key` text,`type` integer,`is_open` numeric,`display_name` text,`link` text,`unread_count` integer,`last_update` integer,`contact_public_key` text,`account_member_public_key` text,`local_device_public_key` text,`created_date` integer,`reply_options_cid` text,PRIMARY KEY (`public_key`))").Error)
require.NoError(t, db.db.Exec("CREATE TABLE `conversation_replication_infos` (`cid` text,`conversation_public_key` text, PRIMARY KEY (`cid`))").Error)
require.NoError(t, db.db.Exec(`INSERT INTO accounts (public_key, display_name, link, replicate_new_groups_automatically, auto_share_push_token_flag) VALUES ("pk_1", "", "", true, true)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_1", false, 0)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_2", false, 0)`).Error)
require.NoError(t, db.db.Exec(`INSERT INTO conversations (public_key, is_open, unread_count) VALUES ("pk_3", false, 0)`).Error)
require.NoError(t, restoreDatabaseLocalState(NewDBWrapper(db.db, zap.NewNop()), state))
require.True(t, hasRecord(db.db.Table("accounts").Where("public_key = ? AND display_name = ? AND replicate_new_groups_automatically = ? AND auto_share_push_token_flag = ?", "pk_1", "display_name_1", false, false), log))
require.True(t, hasRecord(db.db.Table("conversations").Where("public_key = ? AND unread_count = ? AND is_open = ?", "pk_1", 1000, true), log))
require.True(t, hasRecord(db.db.Table("conversations").Where("public_key = ? AND unread_count = ? AND is_open = ?", "pk_2", 2000, false), log))
require.True(t, hasRecord(db.db.Table("conversations").Where("public_key = ? AND unread_count = ? AND is_open = ?", "pk_3", 3000, true), log))
}
func hasRecord(query *gorm.DB, logger *zap.Logger) bool {
count := int64(0)
if err := query.Count(&count).Error; err != nil {
logger.Error("unable to check if entry exists", zap.Error(err))
}
return count == 1
}
| Test_keepAutoReplicateFlag |
ericwburden.rs | use std::iter::zip;
#[allow(dead_code)]
fn hidden_digits(s: &str) -> String {
let time_in = s
.chars()
.filter(|c| *c != ':')
.map(|c| c.to_digit(10))
.collect::<Vec<_>>(); | }
if let Some(h) = time_in[0] {
if h < 2 { max_vals[1] = 9; }
}
let out: Vec<_> = zip(time_in, max_vals)
.map(|(a, b)| a.unwrap_or(b))
.collect();
format!("{}{}:{}{}", out[0], out[1], out[2], out[3])
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn example_tests() {
assert_eq!(hidden_digits("2?:?0"), String::from("23:50"));
assert_eq!(hidden_digits("0?:3?"), String::from("09:39"));
assert_eq!(hidden_digits("?7:?1"), String::from("17:51"));
assert_eq!(hidden_digits("1?:22"), String::from("19:22"));
assert_eq!(hidden_digits("00:00"), String::from("00:00"));
assert_eq!(hidden_digits("??:??"), String::from("23:59"));
}
} | let mut max_vals = vec![2, 3, 5, 9];
if let Some(h) = time_in[1] {
if h > 3 { max_vals[0] = 1; } |
engine.py | import json
from flask import Blueprint, request, current_app
from flask.ext.jsontools import jsonapi
from flask.ext.login import login_required
from dart.auth.required_roles import required_roles
from dart.message.trigger_proxy import TriggerProxy
from dart.model.action import ActionState
from dart.model.engine import Engine, ActionResult, ActionResultState, ActionContext
from dart.model.graph import SubGraphDefinition
from dart.service.action import ActionService
from dart.service.datastore import DatastoreService
from dart.service.engine import EngineService
from dart.service.filter import FilterService
from dart.service.trigger import TriggerService
from dart.service.workflow import WorkflowService
from dart.web.api.entity_lookup import fetch_model, accounting_track
from dart.model.exception import DartRequestException
api_engine_bp = Blueprint('api_engine', __name__)
@api_engine_bp.route('/engine', methods=['POST'])
@login_required
@accounting_track
@jsonapi
def post_engine():
engine = engine_service().save_engine(Engine.from_dict(request.get_json()))
return {'results': engine.to_dict()}
@api_engine_bp.route('/engine/<engine>', methods=['GET'])
@login_required
@fetch_model
@jsonapi
def get_engine(engine):
"""
This is the engine API
Call this API passing an engine id (the id column in the engine table) and get back its data column.
E.g. {"name": "no_op_engine", "tags": [], "description": "Helps engineering test dart", "ecs_task_definition": ...}
"""
return {'results': engine.to_dict()}
@api_engine_bp.route('/engine', methods=['GET'])
@login_required
@jsonapi
def | ():
"""
This is the engine API
Get back all existing engines.
"""
limit = int(request.args.get('limit', 20))
offset = int(request.args.get('offset', 0))
filters = [filter_service().from_string(f) for f in json.loads(request.args.get('filters', '[]'))]
engines = engine_service().query_engines(filters, limit, offset)
return {
'results': [d.to_dict() for d in engines],
'limit': limit,
'offset': offset,
'total': engine_service().query_engines_count(filters)
}
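# Illustrative sketch (not part of this module): how a client could page through
# engines. BASE_URL and the authenticated ``session`` are assumptions here; the
# endpoint itself is @login_required, and the response keys come from the dict
# returned above.
#
#   resp = session.get(f"{BASE_URL}/engine", params={"limit": 10, "offset": 20})
#   body = resp.json()
#   engines, total = body["results"], body["total"]
#
# The optional ``filters`` query parameter is a JSON-encoded list, parsed entry
# by entry via filter_service().from_string.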
@api_engine_bp.route('/engine/<engine>', methods=['PUT'])
@login_required
@fetch_model
@accounting_track
@jsonapi
def put_engine(engine):
js = request.get_json()
engineFromJS = Engine.from_dict(js)
engine = engine_service().update_engine(engine, engineFromJS)
return {'results': engine.to_dict()}
@api_engine_bp.route('/engine/action/<action>/checkout', methods=['PUT'])
@login_required
@fetch_model
@accounting_track
@jsonapi
def action_checkout(action):
""" :type action: dart.model.action.Action """
results = validate_engine_action(action, ActionState.PENDING)
if len(results) == 3:
return results
try:
action = workflow_service().action_checkout(action)
except DartRequestException:
raise
except Exception as err:
return {'results': 'FAILURE', 'error_message': str(err)}, 529
engine, datastore = results
return {'results': ActionContext(engine, action, datastore).to_dict()}
@api_engine_bp.route('/engine/action/<action>/checkin', methods=['PUT'])
@login_required
@fetch_model
@accounting_track
@jsonapi
def action_checkin(action):
""" :type action: dart.model.action.Action """
results = validate_engine_action(action, ActionState.RUNNING)
# (error_response, error_response_code, headers)
if len(results) == 3:
return results
action_result = ActionResult.from_dict(request.get_json())
assert isinstance(action_result, ActionResult)
action_state = ActionState.COMPLETED if action_result.state == ActionResultState.SUCCESS else ActionState.FAILED
action = workflow_service().action_checkin(action, action_state, action_result.consume_subscription_state)
error_message = action.data.error_message
if action_result.state == ActionResultState.FAILURE:
error_message = action_result.error_message
trigger_proxy().complete_action(action.id, action_state, error_message)
return {'results': 'OK'}
def validate_engine_action(action, state):
if action.data.state != state:
return {'results': 'ERROR', 'error_message': 'action is no longer %s: %s' % (state, action.id)}, 400, None
engine_name = action.data.engine_name
engine = engine_service().get_engine_by_name(engine_name, raise_when_missing=False)
if not engine:
return {'results': 'ERROR', 'error_message': 'engine not found: %s' % engine_name}, 404, None
datastore = datastore_service().get_datastore(action.data.datastore_id)
if not datastore:
return {'results': 'ERROR', 'error_message': 'datastore not found: %s' % action.data.datastore_id}, 404, None
return engine, datastore
@api_engine_bp.route('/engine/<engine>', methods=['DELETE'])
@login_required
@fetch_model
@accounting_track
@jsonapi
def delete_engine(engine):
engine_service().delete_engine(engine)
return {'results': 'OK'}
@api_engine_bp.route('/engine/<engine>/subgraph_definition', methods=['POST'])
@login_required
@fetch_model
@accounting_track
@jsonapi
def post_subgraph_definition(engine):
subgraph_definition = engine_service().save_subgraph_definition(
SubGraphDefinition.from_dict(request.get_json()), engine, trigger_service().trigger_schemas()
)
return {'results': subgraph_definition.to_dict()}
@api_engine_bp.route('/subgraph_definition/<subgraph_definition>', methods=['GET'])
@login_required
@fetch_model
@jsonapi
def get_subgraph_definition(subgraph_definition):
return {'results': subgraph_definition.to_dict()}
@api_engine_bp.route('/engine/<engine>/subgraph_definition', methods=['GET'])
@login_required
@fetch_model
@jsonapi
def get_subgraph_definitions(engine):
return {'results': engine_service().get_subgraph_definitions(engine.data.name)}
@api_engine_bp.route('/subgraph_definition/<subgraph_definition>', methods=['DELETE'])
@login_required
@fetch_model
@accounting_track
@jsonapi
def delete_subgraph_definition(subgraph_definition):
engine_service().delete_subgraph_definition(subgraph_definition.id)
return {'results': 'OK'}
def filter_service():
""" :rtype: dart.service.filter.FilterService """
return current_app.dart_context.get(FilterService)
def datastore_service():
""" :rtype: dart.service.datastore.DatastoreService """
return current_app.dart_context.get(DatastoreService)
def action_service():
""" :rtype: dart.service.action.ActionService """
return current_app.dart_context.get(ActionService)
def workflow_service():
""" :rtype: dart.service.workflow.WorkflowService """
return current_app.dart_context.get(WorkflowService)
def trigger_proxy():
""" :rtype: dart.message.trigger_proxy.TriggerProxy """
return current_app.dart_context.get(TriggerProxy)
def trigger_service():
""" :rtype: dart.service.trigger.TriggerService """
return current_app.dart_context.get(TriggerService)
def engine_service():
""" :rtype: dart.service.engine.EngineService """
return current_app.dart_context.get(EngineService)
| find_engines |
api.js | import {compose, contains} from "ramda"
import {domainStore} from "../stores"
const API_URL = "https://api.meetup.com/"
const buildRequestUrl = (uri, token) => {
const separator = contains("?", uri) ? "&" : "?"
return `${API_URL}${uri}${separator}${token}`
}
export const getApi = ({get}) => {
const getUrl = compose(get, buildRequestUrl)
return {
findGroups: ({postcode, offset}) => {
const PAGES_PER_REQUEST = 20
return getUrl(
`find/groups?photo-host=public&zip=${postcode}&page=${PAGES_PER_REQUEST}&offset=${offset}`,
domainStore.token
)
},
findGroupDetail: ({urlname}) => getUrl(`${urlname}`, domainStore.token),
findEventDetail: ({urlname, id}) => getUrl(`${urlname}/events/${id}`, domainStore.token), | } | getSelfProfile: () => getUrl("members/self", domainStore.token),
} |
0005_auto_20200822_1452.py | # Generated by Django 3.0.3 on 2020-08-22 06:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jenkins_tasks', '0004_auto_20200822_1439'),
] | name='code',
field=models.CharField(max_length=64, verbose_name='code'),
),
migrations.AlterField(
model_name='tasklog',
name='name',
field=models.CharField(max_length=64, verbose_name='名称'),
),
] |
operations = [
migrations.AlterField(
model_name='tasklog', |
base.py | from django.conf import settings
from .. import Tags, Warning, register
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5
W001 = Warning(
"You do not have 'django.middleware.security.SecurityMiddleware' "
"in your MIDDLEWARE_CLASSES so the SECURE_HSTS_SECONDS, "
"SECURE_CONTENT_TYPE_NOSNIFF, "
"SECURE_BROWSER_XSS_FILTER, and SECURE_SSL_REDIRECT settings "
"will have no effect.",
id='security.W001',
)
W002 = Warning(
"You do not have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE_CLASSES, so your pages will not be served with an "
"'x-frame-options' header. Unless there is a good reason for your "
"site to be served in a frame, you should consider enabling this "
"header to help prevent clickjacking attacks.",
id='security.W002',
)
W004 = Warning(
"You have not set a value for the SECURE_HSTS_SECONDS setting. "
"If your entire site is served only over SSL, you may want to consider "
"setting a value and enabling HTTP Strict Transport Security. "
"Be sure to read the documentation first; enabling HSTS carelessly "
"can cause serious, irreversible problems.",
id='security.W004',
)
W005 = Warning(
"You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
"Without this, your site is potentially vulnerable to attack "
"via an insecure connection to a subdomain. Only set this to True if "
"you are certain that all subdomains of your domain should be served "
"exclusively via SSL.",
id='security.W005',
)
W006 = Warning(
"Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
"so your pages will not be served with an "
"'x-content-type-options: nosniff' header. "
"You should consider enabling this header to prevent the "
"browser from identifying content types incorrectly.",
id='security.W006',
)
W007 = Warning(
"Your SECURE_BROWSER_XSS_FILTER setting is not set to True, "
"so your pages will not be served with an "
"'x-xss-protection: 1; mode=block' header. "
"You should consider enabling this header to activate the "
"browser's XSS filtering and help prevent XSS attacks.",
id='security.W007',
)
W008 = Warning(
"Your SECURE_SSL_REDIRECT setting is not set to True. "
"Unless your site should be available over both SSL and non-SSL "
"connections, you may want to either set this setting True "
"or configure a load balancer or reverse-proxy server "
"to redirect all connections to HTTPS.",
id='security.W008',
)
W009 = Warning(
"Your SECRET_KEY has less than %(min_length)s characters or less than "
"%(min_unique_chars)s unique characters. Please generate a long and random "
"SECRET_KEY, otherwise many of Django's security-critical features will be "
"vulnerable to attack." % {
'min_length': SECRET_KEY_MIN_LENGTH,
'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
},
id='security.W009',
)
W018 = Warning(
"You should not have DEBUG set to True in deployment.",
id='security.W018',
)
W019 = Warning(
"You have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE_CLASSES, but X_FRAME_OPTIONS is not set to 'DENY'. "
"The default is 'SAMEORIGIN', but unless there is a good reason for "
"your site to serve other parts of itself in a frame, you should "
"change it to 'DENY'.",
id='security.W019',
)
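# Illustrative sketch (not part of the original module): a settings fragment that
# would satisfy the deployment checks below (together with a long, random
# SECRET_KEY for check_secret_key). The HSTS value is just a common example; the
# checks only verify that SECURE_HSTS_SECONDS is set at all.
#
#   MIDDLEWARE_CLASSES = [
#       'django.middleware.security.SecurityMiddleware',
#       'django.middleware.clickjacking.XFrameOptionsMiddleware',
#       # ...
#   ]
#   SECURE_HSTS_SECONDS = 31536000
#   SECURE_HSTS_INCLUDE_SUBDOMAINS = True
#   SECURE_CONTENT_TYPE_NOSNIFF = True
#   SECURE_BROWSER_XSS_FILTER = True
#   SECURE_SSL_REDIRECT = True
#   X_FRAME_OPTIONS = 'DENY'
#   DEBUG = False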
def _security_middleware():
|
def _xframe_middleware():
return "django.middleware.clickjacking.XFrameOptionsMiddleware" in settings.MIDDLEWARE_CLASSES
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
passed_check = _security_middleware()
return [] if passed_check else [W001]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
passed_check = _xframe_middleware()
return [] if passed_check else [W002]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS
return [] if passed_check else [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True
)
return [] if passed_check else [W005]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_CONTENT_TYPE_NOSNIFF is True
)
return [] if passed_check else [W006]
@register(Tags.security, deploy=True)
def check_xss_filter(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_BROWSER_XSS_FILTER is True
)
return [] if passed_check else [W007]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_SSL_REDIRECT is True
)
return [] if passed_check else [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
passed_check = (
getattr(settings, 'SECRET_KEY', None) and
len(set(settings.SECRET_KEY)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
len(settings.SECRET_KEY) >= SECRET_KEY_MIN_LENGTH
)
return [] if passed_check else [W009]
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
passed_check = not settings.DEBUG
return [] if passed_check else [W018]
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
passed_check = (
not _xframe_middleware() or
settings.X_FRAME_OPTIONS == 'DENY'
)
return [] if passed_check else [W019]
| return "django.middleware.security.SecurityMiddleware" in settings.MIDDLEWARE_CLASSES |
DataModel.js | module.exports = {
Config : {
PI_url: "/pdf/pi.aspx",
PI_open_function: "",
close_function: "destroyIC",
footer_function: "leaveSite",
privacy_url: "",
legal_url: "",
share: "Already on ABILIFY? Share Your Story",
share_over: "Share your story",
share_url: "http://www.abilify.com/story/mdd/sign-up.aspx",
stories: [{
heading: "Kalene's Story",
header_image: "images/hdr-kalenes-story.jpg",
subheading: "Kalene still had depressive symptoms even though she was taking an antidepressant. Then she and her doctor decided to try adding ABILIFY® (aripiprazole). Watch Kalene's story here.",
chapters: [{
id: "video1",
title: "I'M KALENE (05:19)", | {src: "video/01_Kalene_OPEN.f4v", type:"video/mp4"} ],
reference_copy: "Treating Depression When Your Antidepressant Alone Isn't Enough: A Patient's Guide.",
reference_url: "http://www.abilify.com/pdf/treating-depression-hcp.aspx"
},
{
id: "video2",
title: "WHY AM I STILL SAD? (01:10)",
duration: 70,
video: [
{src: "video/02_Kalene_AWARENESS.f4v", type:"video/f4v"},
{src: "video/02_Kalene_AWARENESS.f4v", type:"video/mp4"} ],
reference_copy: "Talking to Your Healthcare Professional:Tips and talking points to help you have a productive conversation with your healthcare professional.",
reference_url: "http://www.abilify.com/pdf/MDD-Dr-discussionRevised.aspx"
},
{
id: "video3",
title: "HOW I STAYED MOTIVATED (01:39)",
duration: 99,
video: [
{src: "video/03_Kalene_Motivation.f4v", type:"video/f4v"},
{src: "video/03_Kalene_Motivation.f4v", type:"video/mp4"} ],
reference_copy: "Resources for Depression:These organizations provide information and support for people living with depression.",
reference_url: "http://www.abilify.com/depression/about/depression-resources-websites.aspx"
},
{
id: "video4",
title: "WORKING WITH MY DOCTOR IS KEY (02:26)",
duration: 146,
video: [
{src: "video/04_Kalene_DocPatient.f4v", type:"video/f4v"},
{src: "video/04_Kalene_DocPatient.f4v", type:"video/mp4"} ],
reference_copy: "Depressive Symptom Questionnaire:Use this tool to track your symptoms so you can review them with your healthcare professional.",
reference_url: "http://www.abilify.com/pdf/depressive_symptom_questionnaire.aspx"
},
{
id: "video5",
title: "ABILIFY + ME (01:59)",
duration: 119,
video: [
{src: "video/05_Kalene_FindTreat.f4v", type:"video/f4v"},
{src: "video/05_Kalene_FindTreat.f4v", type:"video/mp4"} ],
reference_copy: "ABILIFY FREE trial offer (Restrictions apply): if you're an adult with unresolved symptoms of depression and taking an antidepressant, click here to see if you qualify.",
reference_url: "https://www.abilify.com/depression/tools/sign-up.aspx"
},
{
id: "video6",
title: "IMPORTANT SAFETY INFORMATION (05:30)",
duration: 119,
video: [
{src: "video/06_isi_TRACK.f4v", type:"video/f4v"},
{src: "video/06_isi_TRACK.f4v", type:"video/mp4"} ],
reference_copy: "ABILIFY FREE trial offer (Restrictions apply): if you're an adult with unresolved symptoms of depression and taking an antidepressant, click here to see if you qualify.",
reference_url: "https://www.abilify.com/depression/tools/sign-up.aspx"
}]
}]
}
}; | duration: 319,
video: [
{src: "video/01_Kalene_OPEN.f4v", type:"video/f4v"}, |
context.ts | import { Action } from "./action"
import { ActionObserver, ActionObserverDelegate } from "./action_observer"
import { Application } from "./application"
import { Controller } from "./controller"
import { ErrorHandler } from "./error_handler"
import { EventListenerSet } from "./event_listener_set"
import { Module } from "./module"
import { Schema } from "./schema"
import { Scope } from "./scope"
export class | implements ErrorHandler, ActionObserverDelegate {
readonly module: Module
readonly scope: Scope
readonly controller: Controller
private actionObserver: ActionObserver
private eventListeners: EventListenerSet
constructor(module: Module, scope: Scope) {
this.module = module
this.scope = scope
this.actionObserver = new ActionObserver(this.element, this.schema, this)
this.eventListeners = new EventListenerSet(this)
try {
this.controller = new module.controllerConstructor(this)
this.controller.initialize()
} catch (error) {
this.handleError(error, "initializing controller")
}
}
connect() {
this.actionObserver.start()
this.eventListeners.start()
try {
this.controller.connect()
} catch (error) {
this.handleError(error, "connecting controller")
}
}
disconnect() {
try {
this.controller.disconnect()
} catch (error) {
this.handleError(error, "disconnecting controller")
}
this.eventListeners.stop()
this.actionObserver.stop()
}
get application(): Application {
return this.module.application
}
get identifier(): string {
return this.module.identifier
}
get schema(): Schema {
return this.application.schema
}
get element(): Element {
return this.scope.element
}
get parentElement(): Element | null {
return this.element.parentElement
}
// Inline action observer delegate
/** @private */
actionConnected(action: Action) {
this.eventListeners.addEventListenerForAction(action)
}
/** @private */
actionDisconnected(action: Action) {
this.eventListeners.deleteEventListenerForAction(action)
}
// Error handling
handleError(error: Error, message: string, detail: object = {}) {
const { identifier, controller, element } = this
detail = Object.assign({ identifier, controller, element }, detail)
this.application.handleError(error, `Error ${message}`, detail)
}
}
| Context |
metrics.py | from typing import Any, Callable, Iterable, List, Optional, Tuple, Union
import warnings
import numpy as np
import pytorch_lightning
from scipy.sparse import csr_matrix
import torch
from torchmetrics import Metric
from torchmetrics.functional import auroc
from tqdm.auto import tqdm
import collie
from collie.interactions import ExplicitInteractions, Interactions, InteractionsDataLoader
from collie.model import BasePipeline
def _get_user_item_pairs(user_ids: Union[np.array, torch.tensor],
n_items: int,
device: Union[str, torch.device]) -> Tuple[torch.tensor, torch.tensor]:
"""
Create tensors pairing each input user ID with each item ID.
Parameters
----------
user_ids: np.array or torch.tensor, 1-d
Iterable[int] of users to score
n_items: int
Number of items in the training data
device: string
Device to store tensors on
Returns
-------
users: torch.tensor, 1-d
Tensor with ``n_items`` copies of each user ID
items: torch.tensor, 1-d
Tensor with ``len(user_ids)`` copies of each item ID
Example
-------
.. code-block:: python
>>> user_ids = np.array([10, 11, 12])
>>> n_items = 4
>>> user, item = _get_user_item_pairs(user_ids=user_ids, n_items=n_items, device='cpu')
>>> user
tensor([10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12])
>>> item
tensor([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])
"""
# Added because sometimes we call this function with ``n_items`` as ``np.int64`` type which
# breaks ``repeat_interleave``.
if isinstance(n_items, np.int64):
n_items = n_items.item()
users = torch.tensor(
user_ids,
dtype=torch.int64,
requires_grad=False,
device=device,
).repeat_interleave(n_items)
items = torch.arange(
start=0,
end=n_items,
requires_grad=False,
device=device,
).repeat(len(user_ids))
return users, items
def get_preds(model: BasePipeline,
user_ids: Union[np.array, torch.tensor],
n_items: int,
device: Union[str, torch.device]) -> torch.tensor:
"""
Returns an ``n_users x n_items`` tensor of predicted scores for every item, with one row per
input user ID.
Parameters
----------
model: collie.model.BasePipeline
Model that can take a (user_id, item_id) pair as input and return a recommendation score
user_ids: np.array or torch.tensor
Iterable[int] of users to score
n_items: int
Number of items in the training data
device: string
Device torch should use
Returns
-------
predicted_scores: torch.tensor
Tensor of shape ``n_users x n_items``
"""
user, item = _get_user_item_pairs(user_ids, n_items, device)
with torch.no_grad():
predicted_scores = model(user, item)
return predicted_scores.view(-1, n_items)
def _get_labels(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
device: str) -> torch.tensor:
"""
Returns a binary array indicating which of the recommended products are in each user's target
set.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Top ``k`` item IDs to recommend to each user of shape (n_users x k)
device: string
Device torch should use
Returns
-------
labels: torch.tensor
Tensor with the same dimensions as input ``preds``
"""
return torch.tensor(
(targets[user_ids[:, None], np.array(preds.detach().cpu())] > 0)
.astype('double')
.toarray(),
requires_grad=False,
device=device,
)
def mapk(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
k: int = 10) -> float:
"""
Calculate the mean average precision at K (MAP@K) score for each user.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Tensor of shape (n_users x n_items) with each user's scores for each item
k: int
Number of recommendations to consider per user
Returns
-------
mapk_score: float
"""
device = preds.device
n_users = preds.shape[0]
try:
predicted_items = preds.topk(k, dim=1).indices
except RuntimeError as e:
raise ValueError(
f'Ensure ``k`` ({k}) is less than the number of items ({preds.shape[1]}):', str(e)
)
topk_labeled = _get_labels(targets, user_ids, predicted_items, device)
accuracy = topk_labeled.int()
weights = (
1.0 / torch.arange(
start=1,
end=k+1,
dtype=torch.float64,
requires_grad=False,
device=device
)
).repeat(n_users, 1)
denominator = torch.min(
torch.tensor(k, device=device, dtype=torch.int).repeat(len(user_ids)),
torch.tensor(targets[user_ids].getnnz(axis=1), device=device)
)
res = ((accuracy * accuracy.cumsum(axis=1) * weights).sum(axis=1)) / denominator
res[torch.isnan(res)] = 0
return res.mean().item()
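# ---------------------------------------------------------------------------
# Editor's note: a hand-worked toy example of ``mapk`` (not part of the
# original module); the expected value follows from the formula above.
#
#     import numpy as np
#     import torch
#     from scipy.sparse import csr_matrix
#
#     # two users, four items; user 0 likes items {0, 2}, user 1 likes item 3
#     targets = csr_matrix(np.array([[1, 0, 1, 0],
#                                    [0, 0, 0, 1]]))
#     preds = torch.tensor([[0.9, 0.1, 0.8, 0.2],
#                           [0.3, 0.4, 0.2, 0.1]])
#     user_ids = np.array([0, 1])
#
#     # user 0's top-2 are exactly its relevant items -> AP@2 = 1.0
#     # user 1's top-2 miss its only relevant item    -> AP@2 = 0.0
#     mapk(targets=targets, user_ids=user_ids, preds=preds, k=2)  # ~0.5
# ---------------------------------------------------------------------------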
def mrr(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
k: Optional[Any] = None) -> float:
"""
Calculate the mean reciprocal rank (MRR) of the input predictions.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Tensor of shape (n_users x n_items) with each user's scores for each item
k: Any
Ignored, included only for compatibility with ``mapk``
Returns
-------
mrr_score: float
"""
predicted_items = preds.topk(preds.shape[1], dim=1).indices
labeled = _get_labels(targets, user_ids, predicted_items, device=preds.device)
    # weighting each 0/1 by position so that topk returns the index of the *first* positive result
position_weight = 1.0/(
torch.arange(1, targets.shape[1] + 1, device=preds.device)
.repeat(len(user_ids), 1)
.float()
)
labeled_weighted = (labeled.float() * position_weight)
highest_score, rank = labeled_weighted.topk(k=1)
reciprocal_rank = 1.0/(rank.float() + 1)
reciprocal_rank[highest_score == 0] = 0
return reciprocal_rank.mean().item()
def auc(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
k: Optional[Any] = None) -> float:
"""
Calculate the area under the ROC curve (AUC) for each user and average the results.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Tensor of shape (n_users x n_items) with each user's scores for each item
k: Any
Ignored, included only for compatibility with ``mapk``
Returns
-------
auc_score: float
"""
agg = 0
for i, user_id in enumerate(user_ids):
target_tensor = torch.tensor(
targets[user_id].toarray(),
device=preds.device,
dtype=torch.long
).view(-1)
# many models' ``preds`` may be unbounded if a final activation layer is not applied
# we have to normalize ``preds`` here to avoid a ``ValueError`` stating that ``preds``
# should be probabilities, but values were detected outside of [0,1] range
auc = auroc(torch.sigmoid(preds[i, :]), target=target_tensor, pos_label=1)
agg += auc
return (agg/len(user_ids)).item()
def evaluate_in_batches(
metric_list: Iterable[Callable[
[csr_matrix, Union[np.array, torch.tensor], Union[np.array, torch.tensor], Optional[int]],
float
]],
test_interactions: collie.interactions.Interactions,
model: collie.model.BasePipeline,
k: int = 10,
batch_size: int = 20,
logger: pytorch_lightning.loggers.base.LightningLoggerBase = None,
verbose: bool = True,
) -> List[float]:
"""
Evaluate a model with potentially several different metrics.
Memory constraints require that most test sets will need to be evaluated in batches. This
function handles the looping and batching boilerplate needed to properly evaluate the model
without running out of memory.
Parameters
----------
metric_list: list of functions
List of evaluation functions to apply. Each function must accept keyword arguments:
* ``targets``
* ``user_ids``
* ``preds``
* ``k``
test_interactions: collie.interactions.Interactions
Interactions to use as labels
model: collie.model.BasePipeline
Model that can take a (user_id, item_id) pair as input and return a recommendation score
k: int
Number of recommendations to consider per user. This is ignored by some metrics
batch_size: int
Number of users to score in a single batch. For best efficiency, this number should be as
high as possible without running out of memory
logger: pytorch_lightning.loggers.base.LightningLoggerBase
If provided, will log outputted metrics dictionary using the ``log_metrics`` method with
keys being the string representation of ``metric_list`` and values being
``evaluation_results``. Additionally, if ``model.hparams.num_epochs_completed`` exists, this
will be logged as well, making it possible to track metrics progress over the course of
model training
verbose: bool
Display progress bar and print statements during function execution
Returns
-------
evaluation_results: list
List of floats, with each metric value corresponding to the respective function passed in
``metric_list``
Examples
--------
.. code-block:: python
from collie.metrics import auc, evaluate_in_batches, mapk, mrr
map_10_score, mrr_score, auc_score = evaluate_in_batches(
metric_list=[mapk, mrr, auc],
test_interactions=test,
model=model,
)
print(map_10_score, mrr_score, auc_score)
"""
if not isinstance(test_interactions, Interactions):
raise ValueError(
'``test_interactions`` must be of type ``Interactions``, not '
f'{type(test_interactions)}. Try using ``explicit_evaluate_in_batches`` instead.'
)
device = _get_evaluate_in_batches_device(model=model)
model.to(device)
model._move_any_external_data_to_device()
test_users = np.unique(test_interactions.mat.row)
targets = test_interactions.mat.tocsr()
if len(test_users) < batch_size:
batch_size = len(test_users)
accumulators = [0] * len(metric_list)
data_to_iterate_over = range(int(np.ceil(len(test_users) / batch_size)))
if verbose:
data_to_iterate_over = tqdm(data_to_iterate_over)
for i in data_to_iterate_over:
user_range = test_users[i * batch_size:(i + 1) * batch_size]
preds = get_preds(model, user_range, test_interactions.num_items, device)
for metric_ind, metric in enumerate(metric_list):
score = metric(targets=targets, user_ids=user_range, preds=preds, k=k)
accumulators[metric_ind] += (score * len(user_range))
all_scores = [acc_score / len(test_users) for acc_score in accumulators]
if logger is not None:
_log_metrics(model=model,
logger=logger,
metric_list=metric_list,
all_scores=all_scores,
verbose=verbose)
return all_scores[0] if len(all_scores) == 1 else all_scores
def explicit_evaluate_in_batches(
metric_list: Iterable[Metric],
test_interactions: collie.interactions.ExplicitInteractions,
model: collie.model.BasePipeline,
logger: pytorch_lightning.loggers.base.LightningLoggerBase = None,
verbose: bool = True,
**kwargs,
) -> List[float]:
"""
Evaluate a model with potentially several different metrics.
Memory constraints require that most test sets will need to be evaluated in batches. This
function handles the looping and batching boilerplate needed to properly evaluate the model
without running out of memory.
Parameters
----------
metric_list: list of ``torchmetrics.Metric``
List of evaluation functions to apply. Each function must accept arguments for predictions
and targets, in order
test_interactions: collie.interactions.ExplicitInteractions
model: collie.model.BasePipeline
Model that can take a (user_id, item_id) pair as input and return a recommendation score
    batch_size: int
        Number of users to score in a single batch, passed through to the
        ``InteractionsDataLoader`` via ``**kwargs``. For best efficiency, this number should be
        as high as possible without running out of memory
logger: pytorch_lightning.loggers.base.LightningLoggerBase
If provided, will log outputted metrics dictionary using the ``log_metrics`` method with
keys being the string representation of ``metric_list`` and values being
``evaluation_results``. Additionally, if ``model.hparams.num_epochs_completed`` exists, this
will be logged as well, making it possible to track metrics progress over the course of
model training
verbose: bool
Display progress bar and print statements during function execution
**kwargs: keyword arguments
Additional arguments sent to the ``InteractionsDataLoader``
Returns
    -------
evaluation_results: list
List of floats, with each metric value corresponding to the respective function passed in
``metric_list``
Examples
    --------
.. code-block:: python
import torchmetrics
from collie.metrics import explicit_evaluate_in_batches
        mse_score, mae_score = explicit_evaluate_in_batches(
metric_list=[torchmetrics.MeanSquaredError(), torchmetrics.MeanAbsoluteError()],
test_interactions=test,
model=model,
)
print(mse_score, mae_score)
"""
if not isinstance(test_interactions, ExplicitInteractions):
raise ValueError(
'``test_interactions`` must be of type ``ExplicitInteractions``, not '
f'{type(test_interactions)}. Try using ``evaluate_in_batches`` instead.'
)
try:
device = _get_evaluate_in_batches_device(model=model)
model.to(device)
model._move_any_external_data_to_device()
test_loader = InteractionsDataLoader(interactions=test_interactions,
**kwargs)
data_to_iterate_over = test_loader
if verbose:
|
for batch in data_to_iterate_over:
users, items, ratings = batch
# move data to batch before sending to model
users = users.to(device)
items = items.to(device)
ratings = ratings.cpu()
preds = model(users, items)
for metric in metric_list:
metric(preds.cpu(), ratings)
all_scores = [metric.compute() for metric in metric_list]
if logger is not None:
_log_metrics(model=model,
logger=logger,
metric_list=metric_list,
all_scores=all_scores,
verbose=verbose)
return all_scores[0] if len(all_scores) == 1 else all_scores
finally:
for metric in metric_list:
metric.reset()
def _get_evaluate_in_batches_device(model: BasePipeline):
device = getattr(model, 'device', None)
if torch.cuda.is_available() and str(device) == 'cpu':
warnings.warn('CUDA available but model device is set to CPU - is this desired?')
if device is None:
if torch.cuda.is_available():
warnings.warn(
'``model.device`` attribute is ``None``. Since GPU is available, putting model on '
'GPU.'
)
device = 'cuda:0'
else:
device = 'cpu'
return device
def _log_metrics(model: BasePipeline,
logger: pytorch_lightning.loggers.base.LightningLoggerBase,
metric_list: List[Union[Callable[..., Any], Metric]],
all_scores: List[float],
verbose: bool):
try:
step = model.hparams.get('num_epochs_completed')
except torch.nn.modules.module.ModuleAttributeError:
# if, somehow, there is no ``model.hparams`` attribute, this shouldn't fail
step = None
try:
metrics_dict = dict(zip([x.__name__ for x in metric_list], all_scores))
except AttributeError:
metrics_dict = dict(zip([type(x).__name__ for x in metric_list], all_scores))
if verbose:
print(f'Logging metrics {metrics_dict} to ``logger``...')
logger.log_metrics(metrics=metrics_dict, step=step)
| data_to_iterate_over = tqdm(test_loader) |
device_tracker.py | """Support for French FAI Bouygues Bbox routers."""
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = '192.168.1.254'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
})
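# ---------------------------------------------------------------------------
# Editor's note: example configuration.yaml entry for this platform (not part
# of the original file); ``host`` is only required when it differs from the
# default 192.168.1.254.
#
#   device_tracker:
#     - platform: bbox
#       host: 192.168.1.254
# ---------------------------------------------------------------------------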
def get_scanner(hass, config):
"""Validate the configuration and return a Bbox scanner."""
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple('Device', ['mac', 'name', 'ip', 'last_update'])
class BboxDeviceScanner(DeviceScanner):
| """This class scans for devices connected to the bbox."""
def __init__(self, config):
"""Get host from config."""
from typing import List # noqa: pylint: disable=unused-import
self.host = config[CONF_HOST]
"""Initialize the scanner."""
self.last_results = [] # type: List[Device]
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [result.name for result in self.last_results if
result.mac == device]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Check the Bbox for devices.
Returns boolean if scanning successful.
"""
_LOGGER.info("Scanning...")
import pybbox
box = pybbox.Bbox(ip=self.host)
result = box.get_all_connected_devices()
now = dt_util.now()
last_results = []
for device in result:
if device['active'] != 1:
continue
last_results.append(
Device(device['macaddress'], device['hostname'],
device['ipaddress'], now))
self.last_results = last_results
_LOGGER.info("Scan successful")
return True |
|
[slug].tsx | import { format, parseISO } from 'date-fns'
import ptBR from 'date-fns/locale/pt-BR';
import { GetStaticPaths, GetStaticProps } from 'next';
import Head from 'next/head';
import Image from 'next/image';
import Link from 'next/link';
import { usePlayer } from '../../contexts/PlayerContext';
import { api } from '../../service/api';
import { convertDurationToTimeString } from '../../utils/convertDurationToTimeString';
import styles from './episode.module.scss';
type Episode = {
id: string,
title: string,
members: string,
thumbnail: string,
description: string,
duration: number,
durationAsString: string,
url: string,
publishedAt: string,
}
type EpisodeProps = {
episode: Episode;
}
export default function Episode({ episode }: EpisodeProps) {
const { play } = usePlayer();
return (
<div className={styles.episode}>
<Head>
<title>{episode.title} | Podcastr</title>
</Head>
<div className={styles.thumbnailContainer}>
<Link href="/">
<button type="button" >
<img src="/arrow-left.svg" alt="Voltar"/>
</button>
</Link>
<Image
width={700}
height={160}
src={episode.thumbnail}
objectFit="cover"
/>
<button type="button" onClick={() => play(episode)}>
<img src="/play.svg" alt="Tocar"/>
</button>
</div>
<header>
<h1>{episode.title}</h1>
<span>{episode.members}</span>
<span>{episode.publishedAt}</span>
<span>{episode.durationAsString}</span>
</header>
<div
className={styles.description}
dangerouslySetInnerHTML={{__html: episode.description}}
/>
</div>
)
}
export const getStaticPaths: GetStaticPaths = async () => {
return {
paths: [],
fallback: 'blocking'
}
}
export const getStaticProps: GetStaticProps = async (ctx) => {
const { slug } = ctx.params;
const { data } = await api.get(`/episodes/${slug}`)
const episode = {
id: data.id,
title: data.title,
thumbnail: data.thumbnail,
members: data.members,
publishedAt: format(parseISO(data.published_at), 'd MMM yy', {locale: ptBR}),
description: data.description,
duration: Number(data.file.duration),
durationAsString: convertDurationToTimeString(Number(data.file.duration)),
url: data.file.url
};
return {
props: {
episode
},
    revalidate: 60 * 60 * 24 // 24 hours
} | } |
|
tracker.rs | use crate::rules::values::CmpOperator;
use crate::rules::{path_value::PathAwareValue, EvaluationContext, EvaluationType, Result, Status};
use nom::lib::std::fmt::Formatter;
use serde::Serialize;
use std::cell::Ref;
#[derive(Serialize, Debug)]
pub(crate) struct StatusContext {
pub(crate) eval_type: EvaluationType,
pub(crate) context: String,
pub(crate) msg: Option<String>,
pub(crate) from: Option<PathAwareValue>,
pub(crate) to: Option<PathAwareValue>,
pub(crate) status: Option<Status>,
pub(crate) comparator: Option<(CmpOperator, bool)>,
pub(crate) children: Vec<StatusContext>,
}
impl StatusContext {
fn new(eval_type: EvaluationType, context: &str) -> Self {
StatusContext {
eval_type,
context: context.to_string(),
status: None,
msg: None,
from: None,
to: None,
comparator: None, | }
}
pub(crate) struct StackTracker<'r> {
root_context: &'r dyn EvaluationContext,
stack: std::cell::RefCell<Vec<StatusContext>>,
}
impl<'r> std::fmt::Debug for StackTracker<'r> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.stack.borrow().fmt(f)
}
}
impl<'r> StackTracker<'r> {
pub(super) fn new(delegate: &'r dyn EvaluationContext) -> Self {
StackTracker {
root_context: delegate,
stack: std::cell::RefCell::new(Vec::new()),
}
}
pub(super) fn stack(self) -> Vec<StatusContext> {
self.stack.into_inner()
}
}
impl<'r> EvaluationContext for StackTracker<'r> {
fn resolve_variable(&self, variable: &str) -> Result<Vec<&PathAwareValue>> {
self.root_context.resolve_variable(variable)
}
fn rule_status(&self, rule_name: &str) -> Result<Status> {
self.root_context.rule_status(rule_name)
}
fn end_evaluation(
&self,
eval_type: EvaluationType,
context: &str,
msg: String,
from: Option<PathAwareValue>,
to: Option<PathAwareValue>,
status: Option<Status>,
cmp: Option<(CmpOperator, bool)>,
) {
if self.stack.borrow().len() == 1 {
match self.stack.borrow_mut().get_mut(0) {
Some(top) => {
top.status = status.clone();
top.from = from.clone();
top.to = to.clone();
top.msg = Some(msg.clone());
top.comparator = cmp.clone();
}
None => unreachable!(),
}
return;
}
let stack = self.stack.borrow_mut().pop();
match stack {
Some(mut stack) => {
stack.status = status.clone();
stack.from = from.clone();
stack.to = to.clone();
stack.msg = Some(msg.clone());
stack.comparator = cmp.clone();
match self.stack.borrow_mut().last_mut() {
Some(cxt) => cxt.children.push(stack),
None => unreachable!(),
}
}
None => {}
}
self.root_context
.end_evaluation(eval_type, context, msg, from, to, status, cmp);
}
fn start_evaluation(&self, eval_type: EvaluationType, context: &str) {
let _indent = self.stack.borrow().len();
self.stack
.borrow_mut()
.push(StatusContext::new(eval_type, context));
self.root_context.start_evaluation(eval_type, context);
}
} | children: vec![],
} |
test_depth.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
class TestLongestPath(unittest.TestCase):
def test_linear(self):
"""Longest depth for a simple dag.
a
|
b
|\
c d
|\
e |
| |
f g
"""
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
node_b = dag.add_child(node_a, "b", {})
node_c = dag.add_child(node_b, "c", {})
dag.add_child(node_b, "d", {})
node_e = dag.add_child(node_c, "e", {})
node_f = dag.add_child(node_e, "f", {})
dag.add_child(node_c, "g", {})
self.assertEqual(4, retworkx.dag_longest_path_length(dag))
self.assertEqual(
[node_a, node_b, node_c, node_e, node_f],
retworkx.dag_longest_path(dag),
)
def test_less_linear(self):
dag = retworkx.PyDAG()
node_a = dag.add_node("a")
node_b = dag.add_child(node_a, "b", {})
node_c = dag.add_child(node_b, "c", {})
node_d = dag.add_child(node_c, "d", {})
node_e = dag.add_child(node_d, "e", {})
dag.add_edge(node_a, node_c, {})
dag.add_edge(node_a, node_e, {})
dag.add_edge(node_c, node_e, {})
self.assertEqual(4, retworkx.dag_longest_path_length(dag))
self.assertEqual(
[node_a, node_b, node_c, node_d, node_e], |
def test_degenerate_graph(self):
dag = retworkx.PyDAG()
dag.add_node(0)
self.assertEqual(0, retworkx.dag_longest_path_length(dag))
self.assertEqual([0], retworkx.dag_longest_path(dag))
def test_empty_graph(self):
dag = retworkx.PyDAG()
self.assertEqual(0, retworkx.dag_longest_path_length(dag))
self.assertEqual([], retworkx.dag_longest_path(dag)) | retworkx.dag_longest_path(dag),
) |
articles.py | from datetime import datetime
from html import unescape
import logging
from dateutil.parser import parse as parse_date
import htmlmin
from scrapy import Request
from scrapy.spiders import Spider, CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader.processors import TakeFirst, MapCompose, Join, Compose, Identity
from telesurscraper.itemloaders import ExtructItemLoader
from telesurscraper.items import ArticleItem
class ArticlePageItemLoader(ExtructItemLoader):
default_input_processor = MapCompose(unescape)
default_output_processor = TakeFirst()
body_in = MapCompose(default_input_processor, htmlmin.minify)
tags_out = Identity()
images_out = Identity()
sections_out = Identity()
author_in = MapCompose(str.strip)
datePublished_in = MapCompose(parse_date, lambda date: date.isoformat())
dateModified_in = MapCompose(parse_date, lambda date: date.isoformat())
class BaseArticlePageSpider(Spider):
"""Load data from an Article Page
Example URL: https://www.telesurtv.net/news/muere-ali-rodriguez-araque-venezuela-cuba-20181119-0040.html"""
def parse_article_page(self, response):
l = ArticlePageItemLoader(item=ArticleItem(), response=response)
# URL
l.add_value('url', response.url)
# Headline
l.add_jsonld('headline', 'NewsArticle', '[].headline')
l.add_css('headline', '[itemprop=headline]')
# Date published/modified
l.add_jsonld('datePublished', 'NewsArticle', '[].datePublished')
l.add_jsonld('dateModified', 'NewsArticle', '[].dateModified')
# Description
l.add_jsonld('description', 'NewsArticle', '[].description')
# Author
l.add_jsonld('author', 'NewsArticle', '[].author.name')
# Images
l.add_jsonld('images', 'NewsArticle', '[].image.url')
# Body
l.add_css('body', '.txt_newworld')
# Sections
l.add_css('sections', '.nworldtop .itacaput a::attr(title)')
# Tags
l.add_css('tags', '.tagBarNews a::attr(title)')
item = l.load_item()
return item
class ArticleJspListingSpider(BaseArticlePageSpider):
"""Follows Article links from website's listing view endpoint"""
name = 'article-jsplisting'
def start_requests(self): |
service_name = getattr(self, 'service_name', 'teleSUR HD')
default_url = 'https://www.telesurtv.net/system/modules/com.tfsla.diario.telesur/elements/TS_NewsCategory_Page.jsp'
if service_name == 'teleSUR English':
default_url = 'https://www.telesurenglish.net/system/modules/com.tfsla.diario.telesur.en/elements/TS_NewsCategory_Page.jsp'
jsp_url = self.settings.get('JSPLISTING_PAGE_URL', default_url)
for i in range(max_pages):
url = '{}?pagina={}&size={}'.format(jsp_url, i+start_page, page_size)
yield Request(url, callback=self.parse_article_links)
def parse_article_links(self, response):
for href in response.css('a::attr(href)'):
yield response.follow(href, callback=self.parse_article_page)
class HomeCrawlerSpider(CrawlSpider, BaseArticlePageSpider):
"""Follows Article links from Home 's listing view endpoint"""
name='home'
allowed_domains = ['telesurtv.net', 'telesurenglish.net']
start_urls = ['https://www.telesurtv.net/']
rules = [
Rule(LinkExtractor(deny=(r'multimedia/.+\.html$',)),
callback='parse_article_page')
] | page_size = int(getattr(self, 'page_size', self.settings.get('JSPLISTING_PAGE_SIZE', 20)))
max_pages = int(getattr(self, 'max_pages', self.settings.get('JSPLISTING_MAX_PAGES', 10)))
start_page = int(getattr(self, 'start_page', self.settings.get('JSPLISTING_START_PAGE', 1))) |
input_read.rs | // Copyright 2021 Jedrzej Stuczynski
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::fs;
use std::fs::File;
use std::io::{self, BufRead};
use std::path::Path;
use std::str::FromStr;
pub fn read_input_lines<P>(path: P) -> io::Result<Vec<String>>
where
P: AsRef<Path>,
{
let file = File::open(path)?;
io::BufReader::new(file).lines().collect()
}
pub fn read_input_lines_with_parser<T, F, P>(path: P, parser: F) -> io::Result<Vec<T>>
where
P: AsRef<Path>,
F: Fn(String) -> io::Result<T>,
{
read_input_lines(path)?
.into_iter()
.map(parser)
.collect::<Result<Vec<T>, _>>()
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
}
/// Reads the file as lines, parsing each of them into desired type.
pub fn read_parsed_line_input<T, P>(path: P) -> io::Result<Vec<T>>
where
P: AsRef<Path>,
T: FromStr,
<T as FromStr>::Err: Debug,
{
read_input_lines(path)?
.into_iter()
.map(|line| line.parse::<T>())
.collect::<Result<Vec<T>, _>>()
.map_err(|err| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("input could not be parsed into desired type - {:?}", err),
)
})
}
/// Reads the file and outputs String groups that were originally separated by an empty line
pub fn read_into_string_groups<P: AsRef<Path>>(path: P) -> io::Result<Vec<String>> {
fs::read_to_string(path).map(|string| {
string
.replace("\r\n", "\n") // Windows fix
.split("\n\n")
.map(|split| split.to_owned())
.collect()
})
}
/// Reads the file as a string and parses comma-separated types
pub fn | <T, P>(path: P) -> io::Result<Vec<T>>
where
P: AsRef<Path>,
T: FromStr,
<T as FromStr>::Err: Debug,
{
fs::read_to_string(path)?
.split(',')
.map(|split| split.parse())
.collect::<Result<Vec<T>, _>>()
.map_err(|err| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("input could not be parsed into desired type - {:?}", err),
)
})
}
pub fn read_parsed<T, P>(path: P) -> io::Result<T>
where
P: AsRef<Path>,
T: FromStr,
<T as FromStr>::Err: Debug,
{
fs::read_to_string(path).map(|s| s.parse())?.map_err(|err| {
io::Error::new(
io::ErrorKind::InvalidData,
format!("input could not be parsed into desired type - {:?}", err),
)
})
}
| read_parsed_comma_separated_values |
bd-data-sync-cell.component.ts | import { Component, Input, OnInit } from '@angular/core';
import { BdDataColumn } from 'src/app/models/data';
@Component({
selector: 'app-bd-data-sync-cell',
templateUrl: './bd-data-sync-cell.component.html',
styleUrls: ['./bd-data-sync-cell.component.css'],
})
export class | <T> implements OnInit {
@Input() record: T;
@Input() column: BdDataColumn<T>;
constructor() {}
ngOnInit(): void {}
}
| BdDataSyncCellComponent |
api_op_ListDistributionsByCachePolicyId.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package cloudfront
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/cloudfront/types"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Gets a list of distribution IDs for distributions that have a cache behavior
// that’s associated with the specified cache policy. You can optionally specify
// the maximum number of items to receive in the response. If the total number of
// items in the list exceeds the maximum that you specify, or the default maximum,
// the response is paginated. To get the next page of items, send a subsequent
// request that specifies the NextMarker value from the current response as the
// Marker value in the subsequent request.
func (c *Client) ListDistributionsByCachePolicyId(ctx context.Context, params *ListDistributionsByCachePolicyIdInput, optFns ...func(*Options)) (*ListDistributionsByCachePolicyIdOutput, error) {
if params == nil {
params = &ListDistributionsByCachePolicyIdInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListDistributionsByCachePolicyId", params, optFns, addOperationListDistributionsByCachePolicyIdMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListDistributionsByCachePolicyIdOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListDistributionsByCachePolicyIdInput struct {
// The ID of the cache policy whose associated distribution IDs you want to list.
//
// This member is required.
CachePolicyId *string
// Use this field when paginating results to indicate where to begin in your list
// of distribution IDs. The response includes distribution IDs in the list that
// occur after the marker. To get the next page of the list, set this field’s value
// to the value of NextMarker from the current page’s response.
Marker *string
// The maximum number of distribution IDs that you want in the response.
MaxItems *string
}
type ListDistributionsByCachePolicyIdOutput struct {
// A list of distribution IDs.
DistributionIdList *types.DistributionIdList
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationListDistributionsByCachePolicyIdMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpListDistributionsByCachePolicyId{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDistributionsByCachePolicyId{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpListDistributionsByCachePolicyIdValidationMiddleware(stack); err != nil {
re | rr = stack.Initialize.Add(newServiceMetadataMiddleware_opListDistributionsByCachePolicyId(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opListDistributionsByCachePolicyId(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "cloudfront",
OperationName: "ListDistributionsByCachePolicyId",
}
}
| turn err
}
if e |
forwarder.go | // Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ssh
import (
"context"
"fmt"
"io"
"net"
"time"
"yunion.io/x/log"
)
type TickFunc func(context.Context)
type LocalForwardReq struct {
LocalAddr string
LocalPort int
RemoteAddr string
RemotePort int
Tick time.Duration
TickCb TickFunc
} |
type RemoteForwardReq struct {
// LocalAddr is the address the forward will forward to
LocalAddr string
// LocalPort is the port the forward will forward to
LocalPort int
// RemoteAddr is the address on the remote to listen on
RemoteAddr string
// RemotePort is the address on the remote to listen on
RemotePort int
Tick time.Duration
TickCb TickFunc
}
type dialFunc func(n, addr string) (net.Conn, error)
type doneFunc func(laddr string, lport int)
type forwarder struct {
listener net.Listener
dial dialFunc
dialAddr string
dialPort int
done doneFunc
doneAddr string
donePort int
tick time.Duration
tickCb TickFunc
}
func (fwd *forwarder) Stop(ctx context.Context) {
fwd.listener.Close()
}
func (fwd *forwarder) Start(
ctx context.Context,
) {
var (
listener = fwd.listener
dial = fwd.dial
dialAddr = fwd.dialAddr
dialPort = fwd.dialPort
done = fwd.done
doneAddr = fwd.doneAddr
donePort = fwd.donePort
tick = fwd.tick
tickCb = fwd.tickCb
)
ctx, cancelFunc := context.WithCancel(ctx)
if done != nil {
defer done(doneAddr, donePort)
}
defer listener.Close()
go func() { // accept local/remote connection
for {
conn, err := listener.Accept()
if err != nil {
log.Warningf("local forward: accept: %v", err)
cancelFunc()
break
}
go func(local net.Conn) {
defer local.Close()
// dial remote/local
addr := net.JoinHostPort(dialAddr, fmt.Sprintf("%d", dialPort))
remote, err := dial("tcp", addr)
if err != nil {
log.Warningf("local forward: dial remote: %v", err)
return
}
defer remote.Close()
// forward
go io.Copy(local, remote)
go io.Copy(remote, local)
<-ctx.Done()
}(conn)
}
}()
if tick > 0 && tickCb != nil {
go func() {
ticker := time.NewTicker(tick)
defer ticker.Stop()
for {
select {
case <-ticker.C:
tickCb(ctx)
case <-ctx.Done():
return
}
}
}()
}
for {
select {
case <-ctx.Done():
return
}
}
} | |
u32var_test.go | package pack
import (
"github.com/tilezen/tileops.git/go/pkg/coord"
"testing"
"testing/quick"
)
func TestPackU32VarSymmetric(t *testing.T) {
maxZoomToGen := uint(15)
cfg := quick.Config{
MaxCount: 1000,
Values: newValidCoordGenerator(maxZoomToGen),
}
f := func(c *coord.Coord) bool {
packed, err := ToU32Var(*c)
if err != nil { | }
if err := quick.Check(f, &cfg); err != nil {
t.Error(err)
}
} | panic(err)
}
unpackedCoord, err := FromU32Var(packed)
return err == nil && unpackedCoord == *c |
fake_factory.go | package fake
import (
"io"
"os"
"github.com/jenkins-x/jx/pkg/kustomize"
"k8s.io/client-go/dynamic"
"github.com/jenkins-x/jx/pkg/cmd/clients"
"github.com/jenkins-x/jx/pkg/util"
"github.com/jenkins-x/jx/pkg/builds"
v1fake "github.com/jenkins-x/jx/pkg/client/clientset/versioned/fake"
kservefake "github.com/knative/serving/pkg/client/clientset/versioned/fake"
apifake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
"k8s.io/client-go/kubernetes/fake"
gojenkins "github.com/jenkins-x/golang-jenkins"
"github.com/jenkins-x/jx/pkg/io/secrets"
"github.com/jenkins-x/jx/pkg/vault"
certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
fake_certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/fake"
vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned"
fake_vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned/fake"
"github.com/heptio/sonobuoy/pkg/client"
sonoboy_dynamic "github.com/heptio/sonobuoy/pkg/dynamic"
"github.com/jenkins-x/jx/pkg/auth"
"github.com/jenkins-x/jx/pkg/client/clientset/versioned"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/table"
fake_vault "github.com/jenkins-x/jx/pkg/vault/fake"
kserve "github.com/knative/serving/pkg/client/clientset/versioned"
"github.com/pkg/errors"
tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
tektonfake "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
fake_metricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
prowjobclient "k8s.io/test-infra/prow/client/clientset/versioned"
fake_prowjobclient "k8s.io/test-infra/prow/client/clientset/versioned/fake"
)
// FakeFactory points to a fake factory implementation
type FakeFactory struct {
Batch bool
delegate clients.Factory
namespace string
kubeConfig kube.Kuber
impersonateUser string
bearerToken string
secretLocation secrets.SecretLocation
offline bool
// cached fake clients
apiClient apiextensionsclientset.Interface
jxClient versioned.Interface
kubeClient kubernetes.Interface
kserveClient kserve.Interface
tektonClient tektonclient.Interface
prowJobClient prowjobclient.Interface
dyncClient dynamic.Interface
}
var _ clients.Factory = (*FakeFactory)(nil)
// NewFakeFactory creates a fake factory which uses fake k8s clients for testing
func | () clients.Factory {
f := &FakeFactory{
namespace: "jx",
}
f.kubeConfig = kube.NewKubeConfig()
return f
}
// NewFakeFactoryFromClients creates a fake factory which uses fake k8s clients for testing
func NewFakeFactoryFromClients(apiClient apiextensionsclientset.Interface,
jxClient versioned.Interface,
kubeClient kubernetes.Interface,
tektonClient tektonclient.Interface,
dyncClient dynamic.Interface) *FakeFactory {
f := &FakeFactory{
namespace: "jx",
apiClient: apiClient,
jxClient: jxClient,
kubeClient: kubeClient,
tektonClient: tektonClient,
dyncClient: dyncClient,
}
f.kubeConfig = kube.NewKubeConfig()
return f
}
// SetDelegateFactory sets the delegate factory
func (f *FakeFactory) SetDelegateFactory(factory clients.Factory) {
f.delegate = factory
}
// GetDelegateFactory returns the delegate factory
func (f *FakeFactory) GetDelegateFactory() clients.Factory {
if f.delegate == nil {
f.delegate = clients.NewFactory()
}
return f.delegate
}
// SetNamespace sets the default namespace
func (f *FakeFactory) SetNamespace(ns string) {
f.namespace = ns
}
// SetBatch sets batch
func (f *FakeFactory) SetBatch(batch bool) {
f.Batch = batch
}
// SetOffline sets offline
func (f *FakeFactory) SetOffline(offline bool) {
f.offline = offline
}
// ImpersonateUser returns a new factory impersonating the given user
func (f *FakeFactory) ImpersonateUser(user string) clients.Factory {
copy := *f
copy.impersonateUser = user
	return &copy
}
// WithBearerToken returns a new factory with bearer token
func (f *FakeFactory) WithBearerToken(token string) clients.Factory {
copy := *f
copy.bearerToken = token
	return &copy
}
// CreateJenkinsClient creates a new Jenkins client
func (f *FakeFactory) CreateJenkinsClient(kubeClient kubernetes.Interface, ns string, handles util.IOFileHandles) (gojenkins.JenkinsClient, error) {
return f.GetDelegateFactory().CreateJenkinsClient(kubeClient, ns, handles)
}
// CreateCustomJenkinsClient creates a new Jenkins client for the given custom Jenkins App
func (f *FakeFactory) CreateCustomJenkinsClient(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string, handles util.IOFileHandles) (gojenkins.JenkinsClient, error) {
return f.GetDelegateFactory().CreateCustomJenkinsClient(kubeClient, ns, jenkinsServiceName, handles)
}
// GetJenkinsURL gets the Jenkins URL for the given namespace
func (f *FakeFactory) GetJenkinsURL(kubeClient kubernetes.Interface, ns string) (string, error) {
return f.GetDelegateFactory().GetJenkinsURL(kubeClient, ns)
}
// GetCustomJenkinsURL gets a custom jenkins App service URL
func (f *FakeFactory) GetCustomJenkinsURL(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string) (string, error) {
return f.GetDelegateFactory().GetCustomJenkinsURL(kubeClient, ns, jenkinsServiceName)
}
// CreateJenkinsAuthConfigService creates a new Jenkins authentication configuration service
func (f *FakeFactory) CreateJenkinsAuthConfigService(namespace string, jenkinsServiceName string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.JenkinsAuthConfigFile, namespace, kube.ValueKindJenkins, "")
}
// CreateChartmuseumAuthConfigService creates a new Chartmuseum authentication configuration service
func (f *FakeFactory) CreateChartmuseumAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.ChartmuseumAuthConfigFile, namespace, kube.ValueKindChartmuseum, serviceKind)
}
// CreateIssueTrackerAuthConfigService creates a new issuer tracker configuration service
func (f *FakeFactory) CreateIssueTrackerAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.IssuesAuthConfigFile, namespace, kube.ValueKindIssue, serviceKind)
}
// CreateChatAuthConfigService creates a new chat configuration service
func (f *FakeFactory) CreateChatAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.ChatAuthConfigFile, namespace, kube.ValueKindChat, serviceKind)
}
// CreateAddonAuthConfigService creates a new addon auth configuration service
func (f *FakeFactory) CreateAddonAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.AddonAuthConfigFile, namespace, kube.ValueKindAddon, serviceKind)
}
// CreateGitAuthConfigService creates a new git auth configuration service
func (f *FakeFactory) CreateGitAuthConfigService(namespace string, serviceKind string) (auth.ConfigService, error) {
return f.CreateAuthConfigService(auth.GitAuthConfigFile, namespace, kube.ValueKindGit, serviceKind)
}
// CreateAuthConfigService creates a new service which loads/saves the auth config from/to different sources depending
// on the current secrets location and cluster context. The sources can be vault, kubernetes secrets or local file.
func (f *FakeFactory) CreateAuthConfigService(fileName string, namespace string,
serverKind string, serviceKind string) (auth.ConfigService, error) {
configService := auth.NewMemoryAuthConfigService()
username := "fake-username"
url := "https://fake-server.org"
kind := serviceKind
if serverKind == kube.ValueKindGit {
kind = gits.KindGitFake
}
config := &auth.AuthConfig{
Servers: []*auth.AuthServer{
{
URL: url,
Users: []*auth.UserAuth{
{
Username: username,
ApiToken: "fake-token",
},
},
Kind: kind,
Name: serviceKind,
CurrentUser: username,
},
},
CurrentServer: url,
PipeLineUsername: username,
PipeLineServer: url,
}
configService.SetConfig(config)
return configService, nil
}
// SecretsLocation indicates the location where the secrets are stored
func (f *FakeFactory) SecretsLocation() secrets.SecretsLocationKind {
return secrets.FileSystemLocationKind
}
// SetSecretsLocation configures the secrets location. It will persist the value in a config map
// if the persist flag is set.
func (f *FakeFactory) SetSecretsLocation(location secrets.SecretsLocationKind, persist bool) error {
return nil
}
// ResetSecretsLocation resets the location of the secrets stored in memory
func (f *FakeFactory) ResetSecretsLocation() {
f.secretLocation = nil
}
// CreateSystemVaultClient gets the system vault client for managing the secrets
func (f *FakeFactory) CreateSystemVaultClient(namespace string) (vault.Client, error) {
return fake_vault.NewFakeVaultClient(), nil
}
// CreateVaultClient returns the given vault client for managing secrets
// Will use default values for name and namespace if nil values are applied
func (f *FakeFactory) CreateVaultClient(name string, namespace string) (vault.Client, error) {
return fake_vault.NewFakeVaultClient(), nil
}
// CreateKubeClient creates a new Kubernetes client
func (f *FakeFactory) CreateKubeClient() (kubernetes.Interface, string, error) {
if f.kubeClient == nil {
f.kubeClient = fake.NewSimpleClientset()
}
return f.kubeClient, f.namespace, nil
}
// CreateJXClient creates a new Kubernetes client for Jenkins X CRDs
func (f *FakeFactory) CreateJXClient() (versioned.Interface, string, error) {
if f.jxClient == nil {
f.jxClient = v1fake.NewSimpleClientset()
}
return f.jxClient, f.namespace, nil
}
// CreateApiExtensionsClient creates a new Kubernetes ApiExtensions client
func (f *FakeFactory) CreateApiExtensionsClient() (apiextensionsclientset.Interface, error) {
if f.apiClient == nil {
f.apiClient = apifake.NewSimpleClientset()
}
return f.apiClient, nil
}
// CreateProwJobClient creates a new Kubernetes client for ProwJob resources
func (f *FakeFactory) CreateProwJobClient() (prowjobclient.Interface, string, error) {
if f.prowJobClient == nil {
f.prowJobClient = fake_prowjobclient.NewSimpleClientset()
}
return f.prowJobClient, f.namespace, nil
}
// CreateKnativeServeClient create a new Kubernetes client for Knative serve resources
func (f *FakeFactory) CreateKnativeServeClient() (kserve.Interface, string, error) {
if f.kserveClient == nil {
f.kserveClient = kservefake.NewSimpleClientset()
}
return f.kserveClient, f.namespace, nil
}
// CreateTektonClient create a new Kubernetes client for Tekton resources
func (f *FakeFactory) CreateTektonClient() (tektonclient.Interface, string, error) {
if f.tektonClient == nil {
f.tektonClient = tektonfake.NewSimpleClientset()
}
return f.tektonClient, f.namespace, nil
}
// CreateDynamicClient creates a new Kubernetes Dynamic client
func (f *FakeFactory) CreateDynamicClient() (dynamic.Interface, string, error) {
if f.dyncClient == nil {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
f.dyncClient, err = dynamic.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return f.dyncClient, ns, err
}
return f.dyncClient, f.namespace, nil
}
// CreateMetricsClient creates a new Kubernetes metrics client
func (f *FakeFactory) CreateMetricsClient() (metricsclient.Interface, error) {
return fake_metricsclient.NewSimpleClientset(), nil
}
// CreateGitProvider creates a new Git provider
func (f *FakeFactory) CreateGitProvider(gitURL string, message string, authConfigSvc auth.ConfigService,
gitKind string, ghOwner string, batchMode bool, gitter gits.Gitter, handles util.IOFileHandles) (gits.GitProvider, error) {
return f.GetDelegateFactory().CreateGitProvider(gitURL, message, authConfigSvc, gitKind, ghOwner, batchMode, gitter, handles)
}
// CreateKubeConfig creates the kubernetes configuration
func (f *FakeFactory) CreateKubeConfig() (*rest.Config, error) {
return f.GetDelegateFactory().CreateKubeConfig()
}
// CreateTable creates a new table
func (f *FakeFactory) CreateTable(out io.Writer) table.Table {
return table.CreateTable(out)
}
// IsInCDPipeline we should only load the git / issue tracker API tokens if the current pod
// is in a pipeline and running as the Jenkins service account
func (f *FakeFactory) IsInCDPipeline() bool {
// TODO should we let RBAC decide if we can see the Secrets in the dev namespace?
// or we should test if we are in the cluster and get the current ServiceAccount name?
buildNumber := builds.GetBuildNumber()
return buildNumber != "" || os.Getenv("PIPELINE_KIND") != ""
}
// function to tell if we are running incluster
func (f *FakeFactory) IsInCluster() bool {
_, err := rest.InClusterConfig()
return err == nil
}
// CreateComplianceClient creates a new Sonobuoy compliance client
func (f *FakeFactory) CreateComplianceClient() (*client.SonobuoyClient, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, errors.Wrap(err, "compliance client failed to load the Kubernetes configuration")
}
skc, err := sonoboy_dynamic.NewAPIHelperFromRESTConfig(config)
if err != nil {
return nil, errors.Wrap(err, "compliance dynamic client failed to be created")
}
return client.NewSonobuoyClient(config, skc)
}
// CreateVaultOperatorClient creates a new vault operator client
func (f *FakeFactory) CreateVaultOperatorClient() (vaultoperatorclient.Interface, error) {
return fake_vaultoperatorclient.NewSimpleClientset(), nil
}
// CreateHelm creates a new Helm client
func (f *FakeFactory) CreateHelm(verbose bool,
helmBinary string,
noTiller bool,
helmTemplate bool) helm.Helmer {
return f.GetDelegateFactory().CreateHelm(verbose,
helmBinary,
noTiller,
helmTemplate)
}
// CreateCertManagerClient creates a new Kuberntes client for cert-manager resources
func (f *FakeFactory) CreateCertManagerClient() (certmngclient.Interface, error) {
return fake_certmngclient.NewSimpleClientset(), nil
}
// CreateLocalGitAuthConfigService creates a new service which loads/saves the auth config from/to a local file.
func (f *FakeFactory) CreateLocalGitAuthConfigService() (auth.ConfigService, error) {
return f.GetDelegateFactory().CreateLocalGitAuthConfigService()
}
// CreateKustomizer creates a Kustomizer client
func (f *FakeFactory) CreateKustomizer() kustomize.Kustomizer {
return f.GetDelegateFactory().CreateKustomizer()
}
| NewFakeFactory |
annotate_hits.py | #!/usr/bin/env python
# Copyright 2017 Marco Galardini and John Lees
'''Script to annotate kmer hits'''
import sys
import os
import re
import tempfile
import subprocess
import pybedtools
from .bwa import bwa_index
from .bwa import bwa_iter
def get_options():
import argparse
description = 'Iteratively annotate significant kmers from SEER'
parser = argparse.ArgumentParser(description=description, prog="annotate_hits")
parser.add_argument("kmers",
help="Kmers file, filtered output from SEER")
parser.add_argument("references",
help="File of reference annotations. "
"First column fasta sequence, second column gff annotation, "
"third column 'ref' or 'draft'")
parser.add_argument("output",
help="Output file")
parser.add_argument("--bwa",
help="Location of bwa executable "
"[default=bwa]",
default="bwa")
parser.add_argument("--tmp-prefix",
help="Directory to store temporary files "
"[default=./]",
default=os.getcwd())
return parser.parse_args()
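# ---------------------------------------------------------------------------
# Editor's note: illustrative command line (not part of the original script).
# The command name follows ``prog`` above but may differ depending on how the
# package exposes its entry points; file names are placeholders.
#
#     annotate_hits significant_kmers.txt references.txt annotated_kmers.txt \
#         --bwa bwa --tmp-prefix /tmp
#
#     # references.txt lists one assembly per line, e.g.:
#     #   sample1.fa    sample1.gff    ref
#     #   sample2.fa    sample2.gff    draft
# ---------------------------------------------------------------------------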
# returns first overlapping feature with gene= annotation. Otherwise first feature ID
def extract_genes(bedtools_intervals): | for match in bedtools_intervals.features():
kmer_id, hit_id = match.fields[3].split("_")
annotations[int(kmer_id)] = {}
ID = None
gene = None
for tag in match.fields[15].split(";"):
parse_tag = re.search('^(.+)=(.+)$', tag)
if parse_tag:
if parse_tag.group(1) == "gene":
gene = parse_tag.group(2)
break
elif parse_tag.group(1) == "ID" and ID is None:
ID = parse_tag.group(2)
if gene is None:
if ID is not None:
gene = ID
else:
gene = ""
annotations[int(kmer_id)][int(hit_id)] = gene
return annotations
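# ---------------------------------------------------------------------------
# Editor's note: illustration of the tag parsing above (not part of the
# original script). For an attribute column such as
# "ID=CDS_1;gene=pbpX;product=penicillin-binding protein", each "key=value"
# tag is matched in turn: a "gene" tag wins immediately, otherwise the first
# "ID" value is kept, and an empty string is used if neither is present.
#
#     >>> re.search('^(.+)=(.+)$', 'gene=pbpX').groups()
#     ('gene', 'pbpX')
# ---------------------------------------------------------------------------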
def main():
options = get_options()
# tmp file locations
remaining_tmp = options.tmp_prefix + "/remaining_kmers.txt"
remaining_next_tmp = options.tmp_prefix + "/remaining_kmers_next.txt"
remaining_fa_tmp = options.tmp_prefix + "/remaining_kmers.fa"
remaining_fa_next_tmp = options.tmp_prefix + "/remaining_kmers_next.fa"
pybedtools.helpers.set_tempdir(options.tmp_prefix)
# read references and drafts into list
references = []
with open(options.references, 'r') as reference_files:
for reference in reference_files:
(fa, gff, ref) = reference.rstrip().split()
references.append((fa, gff, ref))
output_file = open(options.output, 'w')
# Open seer results
# seer_remaining = seer_results
seer_remaining = open(options.kmers, 'r')
header = seer_remaining.readline()
# Write out kmer fasta file, keep track of count
kmers_remaining = 0
with open(remaining_fa_tmp, 'w') as kmer_fa:
for kmer in seer_remaining:
kmers_remaining += 1
kmer_fa.write(">" + str(kmers_remaining) + "\n")
kmer_fa.write(kmer.split("\t")[0] + "\n")
seer_remaining.seek(0)
seer_remaining.readline()
# for each reference, then draft
ref_id = 0
for reference in references:
(ref_fa, ref_gff, ref_type) = reference
ref_id += 1
# print number of kmers remaining. if zero, break
if kmers_remaining == 0:
break
sys.stderr.write(str(kmers_remaining) + " kmers remain\n")
if ref_type == "ref":
sys.stderr.write("Reference " + str(ref_id) + "\n")
else:
sys.stderr.write("Draft reference " + str(ref_id) + "\n")
# index reference sequence
bwa_index(ref_fa)
if ref_type == "ref":
bwa_algorithms = ["mem", "fastmap"]
elif ref_type == "draft":
bwa_algorithms = ["fastmap"]
else:
bwa_algorithms = ["fastmap"]
sys.stderr.write("Unknown reference type " + ref_type + " for " + ref_fa + ". Assuming draft\n")
# Fix ref annotation
tmp_bed = tempfile.NamedTemporaryFile(prefix=options.tmp_prefix + "/")
try:
subprocess.run("gff2bed < " + ref_gff + " > " + tmp_bed.name, shell=True, check=True)
except AttributeError:
# python prior to 3.5
subprocess.check_call("gff2bed < " + ref_gff + " > " + tmp_bed.name, shell=True)
ref_annotation = pybedtools.BedTool(tmp_bed.name)
filtered_ref = ref_annotation.filter(lambda x: True if x[7] == "CDS" else False).saveas('tmp_bed')
ref_annotation = pybedtools.BedTool('tmp_bed')
for bwa_algorithm in bwa_algorithms:
next_seer_remaining = open(remaining_next_tmp, 'w')
next_fasta_remaining = open(remaining_fa_next_tmp, 'w')
# run bwa mem -k 8 for ref, bwa fastmap for draft of remaining.fa
new_idx = 0
kmer_lines = []
map_pos = {}
mapped_kmers = bwa_iter(ref_fa, remaining_fa_tmp, bwa_algorithm)
with tempfile.NamedTemporaryFile('w', prefix=options.tmp_prefix + "/") as query_bed:
kmer_idx = 0
for mapping, kmer_line in zip(mapped_kmers, seer_remaining):
if mapping.mapped:
kmers_remaining -= 1
kmer_lines.append(kmer_line.rstrip())
map_pos[kmer_idx] = []
for hit_idx, (contig, start, end, strand) in enumerate(mapping.positions):
map_pos[kmer_idx].append(contig + ":" + str(start) + "-" + str(end))
query_bed.write('\t'.join([contig, str(start), str(end), str(kmer_idx) + "_" + str(hit_idx), '0', strand]) + "\n")
kmer_idx += 1
else:
# if unmapped write to seer_remaining and remaining.fa
next_seer_remaining.write(kmer_line)
new_idx += 1
next_fasta_remaining.write(">" + str(new_idx) + "\n")
next_fasta_remaining.write(kmer_line.split("\t")[0] + "\n")
if kmer_idx > 0:
query_bed.flush()
query_interval = pybedtools.BedTool(query_bed.name)
sorted_query = query_interval.sort()
in_genes = extract_genes(query_interval.intersect(b=ref_annotation, s=False, stream=True, wb=True))
up_genes = extract_genes(sorted_query.closest(b=ref_annotation, s=False, D="ref", iu=True, stream=True))
down_genes = extract_genes(sorted_query.closest(b=ref_annotation, s=False, D="ref", id=True, stream=True))
for kmer_idx, kmer_line in enumerate(kmer_lines):
annotations = []
for hit_idx, hit in enumerate(map_pos[kmer_idx]):
annotation = hit + ";"
if kmer_idx in down_genes and hit_idx in down_genes[kmer_idx]:
annotation += down_genes[kmer_idx][hit_idx]
annotation += ";"
if kmer_idx in in_genes and hit_idx in in_genes[kmer_idx]:
annotation += in_genes[kmer_idx][hit_idx]
annotation += ";"
if kmer_idx in up_genes and hit_idx in up_genes[kmer_idx]:
annotation += up_genes[kmer_idx][hit_idx]
annotations.append(annotation)
output_file.write("\t".join([kmer_line, ",".join(annotations)]) + "\n")
else:
# something went wrong, write down remaining kmers
for kmer_line in seer_remaining:
# if unmapped write to seer_remaining and remaining.fa
next_seer_remaining.write(kmer_line)
new_idx += 1
next_fasta_remaining.write(">" + str(new_idx) + "\n")
next_fasta_remaining.write(kmer_line.split("\t")[0] + "\n")
pybedtools.cleanup() # delete the bed file
# Clean up
seer_remaining.close()
next_seer_remaining.close()
next_fasta_remaining.close()
os.rename(remaining_next_tmp, remaining_tmp)
os.rename(remaining_fa_next_tmp, remaining_fa_tmp)
# Open next kmer file
seer_remaining = open(remaining_tmp, 'r')
# Clean up
tmp_bed.close()
os.remove('tmp_bed')
sys.stderr.write(str(kmers_remaining) + " kmers remain unannotated\n")
if __name__ == "__main__":
main() | annotations = {} |
model.rs | use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
#[derive(Deserialize)]
pub enum HttpMethod {
DELETE,
GET,
HEAD,
OPTIONS,
PATCH,
POST,
PUT,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ApiRequest {
pub http_method: HttpMethod,
pub resource: String,
pub headers: Option<HashMap<String, String>>,
pub path_parameters: Option<HashMap<String, String>>,
pub query_string_parameters: Option<HashMap<String, String>>,
pub multi_value_query_string_parameters: Option<HashMap<String, HashMap<String, String>>>,
pub body: String,
}
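/// Outgoing response payload, serialized to camelCase JSON: status code, body,
/// base64 flag and response headers.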
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
#[derive(Default)]
pub struct | {
pub status_code: i32,
pub body: String,
pub is_base64_encoded: bool,
pub headers: HashMap<String, String>,
}
| ApiResponse |
panels.go | package jdx
import (
"github.com/drop-target-pinball/spin"
)
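// ModeIntroPanel renders the mode title and, while blinkOn is true, two lines of intro text below it.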
func ModeIntroPanel(e *spin.ScriptEnv, r spin.Renderer, blinkOn bool, text [3]string) {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.Y = 2
g.Font = spin.FontPfArmaFive8
r.Print(g, text[0])
if blinkOn {
g.Y = 12
g.Font = spin.FontPfRondaSevenBold8
r.Print(g, text[1])
g.Y = 22
r.Print(g, text[2])
}
}
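// TimerAndScorePanel renders the title at the top, an optional instruction at the bottom,
// the countdown timer on the left and the score on the right.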
func TimerAndScorePanel(e *spin.ScriptEnv, r spin.Renderer, title string, timer int, score int, instruction string) {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.Y = 2
g.Font = spin.FontPfArmaFive8
r.Print(g, title)
g.AnchorY = spin.AnchorBottom
g.Y = r.Height()
r.Print(g, instruction)
yOffset := int32(0)
if instruction == "" {
yOffset = 5
}
g.AnchorX = spin.AnchorLeft
g.X = 5
g.AnchorY = spin.AnchorMiddle
g.Y = r.Height()/2 + yOffset
g.Font = spin.Font14x10
r.Print(g, "%v", timer)
g.X = r.Width() - 2
g.AnchorX = spin.AnchorRight
g.Font = spin.Font09x7
r.Print(g, spin.FormatScore("%v", score))
}
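// ModeAndScorePanel renders the mode title with the score directly below it.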
func ModeAndScorePanel(e *spin.ScriptEnv, r spin.Renderer, title string, score int) {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.Y = 2
g.Font = spin.FontPfArmaFive8
r.Print(g, title)
g.Y = 12
g.Font = spin.Font14x10
r.Print(g, spin.FormatScore("%v", score))
}
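// ModeAndBlinkingScorePanel renders the mode title and, while blinkOn is true, the score below it.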
func ModeAndBlinkingScorePanel(e *spin.ScriptEnv, r spin.Renderer, title string, score int, blinkOn bool) |
func ScoreAndLabelPanel(e *spin.ScriptEnv, r spin.Renderer, score int, label string) {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.Y = 5
g.Font = spin.Font14x10
r.Print(g, spin.FormatScore("%v", score))
g.Y = 22
g.Font = spin.FontPfArmaFive8
r.Print(g, label)
}
func OneLinePanel(e *spin.ScriptEnv, r spin.Renderer, text string) {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.AnchorY = spin.AnchorMiddle
g.Font = spin.FontPfRondaSevenBold8
r.Print(g, text)
}
func OneLineBigPanel(e *spin.ScriptEnv, r spin.Renderer, text string) {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.AnchorY = spin.AnchorMiddle
g.Font = spin.FontPfRondaSevenBold16
r.Print(g, text)
}
func GameOverPanel(e *spin.ScriptEnv) {
}
| {
g := r.Graphics()
r.Fill(spin.ColorOff)
g.Y = 2
g.Font = spin.FontPfArmaFive8
r.Print(g, title)
g.Y = 12
if blinkOn {
g.Font = spin.Font14x10
score := spin.FormatScore("%10d", score)
r.Print(g, score)
}
} |
timelord.py | import asyncio
import dataclasses
import io
import logging
import random
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Set
from chiavdf import create_discriminant
from btcgreen.consensus.constants import ConsensusConstants
from btcgreen.consensus.pot_iterations import calculate_sp_iters, is_overflow_block
from btcgreen.protocols import timelord_protocol
from btcgreen.protocols.protocol_message_types import ProtocolMessageTypes
from btcgreen.server.outbound_message import NodeType, make_msg
from btcgreen.server.server import BTCgreenServer
from btcgreen.timelord.iters_from_block import iters_from_block
from btcgreen.timelord.timelord_state import LastState
from btcgreen.timelord.types import Chain, IterationType, StateType
from btcgreen.types.blockchain_format.classgroup import ClassgroupElement
from btcgreen.types.blockchain_format.reward_chain_block import RewardChainBlock
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from btcgreen.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from btcgreen.types.blockchain_format.vdf import VDFInfo, VDFProof
from btcgreen.types.end_of_slot_bundle import EndOfSubSlotBundle
from btcgreen.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class Timelord:
def __init__(self, root_path, config: Dict, constants: ConsensusConstants):
self.config = config
self.root_path = root_path
self.constants = constants
self._shut_down = False
self.free_clients: List[Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = []
self.potential_free_clients: List = []
self.ip_whitelist = self.config["vdf_clients"]["ip"]
self.server: Optional[BTCgreenServer] = None
self.chain_type_to_stream: Dict[Chain, Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = {}
self.chain_start_time: Dict = {}
# Chains that currently don't have a vdf_client.
self.unspawned_chains: List[Chain] = [
Chain.CHALLENGE_CHAIN,
Chain.REWARD_CHAIN,
Chain.INFUSED_CHALLENGE_CHAIN,
]
# Chains that currently accept iterations.
self.allows_iters: List[Chain] = []
# Last peak received, None if it's already processed.
self.new_peak: Optional[timelord_protocol.NewPeakTimelord] = None
# Last end of subslot bundle, None if we built a peak on top of it.
self.new_subslot_end: Optional[EndOfSubSlotBundle] = None
# Last state received. Can either be a new peak or a new EndOfSubslotBundle.
# Unfinished block info, iters adjusted to the last peak.
self.unfinished_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
# Signage points iters, adjusted to the last peak.
self.signage_point_iters: List[Tuple[uint64, uint8]] = []
# For each chain, the iterations to send once its vdf_client process spawns.
self.iters_to_submit: Dict[Chain, List[uint64]] = {}
self.iters_submitted: Dict[Chain, List[uint64]] = {}
self.iters_finished: Set = set()
# For each iteration submitted, know if it's a signage point, an infusion point or an end of slot.
self.iteration_to_proof_type: Dict[uint64, IterationType] = {}
# List of proofs finished.
self.proofs_finished: List[Tuple[Chain, VDFInfo, VDFProof, int]] = []
# Data to send at vdf_client initialization.
self.overflow_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
# Incremented each time `_reset_chains` is called.
# Used to label proofs in `proofs_finished` so that only proofs for the most recent state are used.
self.num_resets: int = 0
self.process_communication_tasks: List[asyncio.Task] = []
self.main_loop = None
self.vdf_server = None
self._shut_down = False
self.vdf_failures: List[Tuple[Chain, Optional[int]]] = []
self.vdf_failures_count: int = 0
self.vdf_failure_time: float = 0
self.total_unfinished: int = 0
self.total_infused: int = 0
self.state_changed_callback: Optional[Callable] = None
self.sanitizer_mode = self.config["sanitizer_mode"]
self.pending_bluebox_info: List[Tuple[float, timelord_protocol.RequestCompactProofOfTime]] = []
self.last_active_time = time.time()
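# Start the VDF server socket and the main loop (chain management, or the
# sanitizer queue when running in sanitizer mode).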
async def _start(self):
self.lock: asyncio.Lock = asyncio.Lock()
self.vdf_server = await asyncio.start_server(
self._handle_client,
self.config["vdf_server"]["host"],
self.config["vdf_server"]["port"],
)
self.last_state: LastState = LastState(self.constants)
if not self.sanitizer_mode:
self.main_loop = asyncio.create_task(self._manage_chains())
else:
self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
log.info("Started timelord.")
def _close(self):
self._shut_down = True
for task in self.process_communication_tasks:
task.cancel()
if self.main_loop is not None:
self.main_loop.cancel()
async def _await_closed(self):
pass
def set_server(self, server: BTCgreenServer):
self.server = server
async def _handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
async with self.lock:
client_ip = writer.get_extra_info("peername")[0]
log.debug(f"New timelord connection from client: {client_ip}.")
if client_ip in self.ip_whitelist:
self.free_clients.append((client_ip, reader, writer))
log.debug(f"Added new VDF client {client_ip}.")
for ip, end_time in list(self.potential_free_clients):
if ip == client_ip:
self.potential_free_clients.remove((ip, end_time))
break
async def _stop_chain(self, chain: Chain):
try:
while chain not in self.allows_iters:
self.lock.release()
await asyncio.sleep(0.05)
log.error(f"Trying to stop {chain} before its initialization.")
await self.lock.acquire()
if chain not in self.chain_type_to_stream:
log.warning(f"Trying to stop a crashed chain: {chain}.")
return None
stop_ip, _, stop_writer = self.chain_type_to_stream[chain]
self.potential_free_clients.append((stop_ip, time.time()))
stop_writer.write(b"010")
await stop_writer.drain()
if chain in self.allows_iters:
self.allows_iters.remove(chain)
if chain not in self.unspawned_chains:
self.unspawned_chains.append(chain)
if chain in self.chain_type_to_stream:
del self.chain_type_to_stream[chain]
except ConnectionResetError as e:
log.error(f"{e}")
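# Returns how many iterations past the current peak this unfinished block's infusion
# point lies, or None if it cannot be infused (invalid block, unknown rc_prev, too late,
# or another infusion lands before its signage point).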
def _can_infuse_unfinished_block(self, block: timelord_protocol.NewUnfinishedBlockTimelord) -> Optional[uint64]:
assert self.last_state is not None
sub_slot_iters = self.last_state.get_sub_slot_iters()
difficulty = self.last_state.get_difficulty()
ip_iters = self.last_state.get_last_ip()
rc_block = block.reward_chain_block
try:
block_sp_iters, block_ip_iters = iters_from_block(
self.constants,
rc_block,
sub_slot_iters,
difficulty,
)
except Exception as e:
log.warning(f"Received invalid unfinished block: {e}.")
return None
block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
block_sp_total_iters -= self.last_state.get_sub_slot_iters()
found_index = -1
for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
if rc == block.rc_prev:
found_index = index
break
if found_index == -1:
log.warning(f"Will not infuse {block.rc_prev} because its reward chain challenge is not in the chain")
return None
if ip_iters > block_ip_iters:
log.warning("Too late to infuse block")
return None
new_block_iters = uint64(block_ip_iters - ip_iters)
if len(self.last_state.reward_challenge_cache) > found_index + 1:
if self.last_state.reward_challenge_cache[found_index + 1][1] < block_sp_total_iters:
log.warning(
f"Will not infuse unfinished block {block.rc_prev} sp total iters {block_sp_total_iters}, "
f"because there is another infusion before its SP"
)
return None
if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
log.error(
f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
f"because its iters are too low"
)
return None
if new_block_iters > 0:
return new_block_iters
return None
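# Stop all running vdf_clients and recompute the iterations to submit (signage points,
# infusion points and end of sub-slot) relative to the latest state.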
async def _reset_chains(self, first_run=False, only_eos=False):
# First, stop all chains.
self.last_active_time = time.time()
log.debug("Resetting chains")
ip_iters = self.last_state.get_last_ip()
sub_slot_iters = self.last_state.get_sub_slot_iters()
if not first_run:
for chain in list(self.chain_type_to_stream.keys()):
await self._stop_chain(chain)
# Adjust all signage points iterations to the peak.
iters_per_signage = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
self.signage_point_iters = [
(k * iters_per_signage - ip_iters, k)
for k in range(1, self.constants.NUM_SPS_SUB_SLOT)
if k * iters_per_signage - ip_iters > 0
]
for sp, k in self.signage_point_iters:
assert k * iters_per_signage > 0
assert k * iters_per_signage < sub_slot_iters
# Adjust all unfinished blocks iterations to the peak.
new_unfinished_blocks = []
self.iters_finished = set()
self.proofs_finished = []
self.num_resets += 1
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
self.iters_to_submit[chain] = []
self.iters_submitted[chain] = []
self.iteration_to_proof_type = {}
if not only_eos:
for block in self.unfinished_blocks + self.overflow_blocks:
new_block_iters: Optional[uint64] = self._can_infuse_unfinished_block(block)
# Does not add duplicates, or blocks that we cannot infuse
if new_block_iters and new_block_iters not in self.iters_to_submit[Chain.CHALLENGE_CHAIN]:
if block not in self.unfinished_blocks:
self.total_unfinished += 1
new_unfinished_blocks.append(block)
for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
self.iters_to_submit[chain].append(new_block_iters)
if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
self.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
# Remove all unfinished blocks that have already passed.
self.unfinished_blocks = new_unfinished_blocks
# Signage points.
if not only_eos and len(self.signage_point_iters) > 0:
count_signage = 0
for signage, k in self.signage_point_iters:
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
self.iters_to_submit[chain].append(signage)
self.iteration_to_proof_type[signage] = IterationType.SIGNAGE_POINT
count_signage += 1
if count_signage == 3:
break
left_subslot_iters = sub_slot_iters - ip_iters
assert left_subslot_iters > 0
if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(left_subslot_iters)
self.iters_to_submit[Chain.CHALLENGE_CHAIN].append(left_subslot_iters)
self.iters_to_submit[Chain.REWARD_CHAIN].append(left_subslot_iters)
self.iteration_to_proof_type[left_subslot_iters] = IterationType.END_OF_SUBSLOT
for chain, iters in self.iters_to_submit.items():
for iteration in iters:
assert iteration > 0
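# Apply a newly received peak: update last_state, drop unfinished blocks that this peak
# infused, log the infusion rate and reset all chains.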
async def _handle_new_peak(self):
assert self.new_peak is not None
self.last_state.set_state(self.new_peak)
if self.total_unfinished > 0:
remove_unfinished = []
for unf_block_timelord in self.unfinished_blocks + self.overflow_blocks:
if (
unf_block_timelord.reward_chain_block.get_hash()
== self.new_peak.reward_chain_block.get_unfinished().get_hash()
):
if unf_block_timelord not in self.unfinished_blocks:
# We never got the EOS for this, but we have the block in overflow list
self.total_unfinished += 1
remove_unfinished.append(unf_block_timelord)
if len(remove_unfinished) > 0:
self.total_infused += 1
for block in remove_unfinished:
if block in self.unfinished_blocks:
self.unfinished_blocks.remove(block)
if block in self.overflow_blocks:
self.overflow_blocks.remove(block)
infusion_rate = round(self.total_infused / self.total_unfinished * 100.0, 2)
log.info(
f"Total unfinished blocks: {self.total_unfinished}. "
f"Total infused blocks: {self.total_infused}. "
f"Infusion rate: {infusion_rate}%."
)
self.new_peak = None
await self._reset_chains()
async def _handle_subslot_end(self):
self.last_state.set_state(self.new_subslot_end)
for block in self.unfinished_blocks:
if self._can_infuse_unfinished_block(block) is not None:
self.total_unfinished += 1
self.new_subslot_end = None
await self._reset_chains()
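# Hand free vdf_client connections to chains that still need one and spawn a
# communication task for each newly mapped chain.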
async def _map_chains_with_vdf_clients(self):
while not self._shut_down:
picked_chain = None
async with self.lock:
if len(self.free_clients) == 0:
break
ip, reader, writer = self.free_clients[0]
for chain_type in self.unspawned_chains:
challenge = self.last_state.get_challenge(chain_type)
initial_form = self.last_state.get_initial_form(chain_type)
if challenge is not None and initial_form is not None:
picked_chain = chain_type
break
if picked_chain is None:
break
picked_chain = self.unspawned_chains[0]
self.chain_type_to_stream[picked_chain] = (ip, reader, writer)
self.free_clients = self.free_clients[1:]
self.unspawned_chains = self.unspawned_chains[1:]
self.chain_start_time[picked_chain] = time.time()
log.debug(f"Mapping free vdf_client with chain: {picked_chain}.")
self.process_communication_tasks.append(
asyncio.create_task(
self._do_process_communication(
picked_chain, challenge, initial_form, ip, reader, writer, proof_label=self.num_resets
)
)
)
async def _submit_iterations(self):
|
def _clear_proof_list(self, iters: uint64):
return [
(chain, info, proof, label)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations != iters
]
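# If both the CC and RC proofs for the signage point at iter_to_look_for are ready,
# broadcast a NewSignagePointVDF and queue the next signage point iterations.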
async def _check_for_new_sp(self, iter_to_look_for: uint64):
signage_iters = [
iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
]
if len(signage_iters) == 0:
return None
to_remove = []
for potential_sp_iters, signage_point_index in self.signage_point_iters:
if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
continue
signage_iter = potential_sp_iters
proofs_with_iter = [
(chain, info, proof)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations == signage_iter and label == self.num_resets
]
# Wait for both cc and rc to have the signage point.
if len(proofs_with_iter) == 2:
cc_info: Optional[VDFInfo] = None
cc_proof: Optional[VDFProof] = None
rc_info: Optional[VDFInfo] = None
rc_proof: Optional[VDFProof] = None
for chain, info, proof in proofs_with_iter:
if chain == Chain.CHALLENGE_CHAIN:
cc_info = info
cc_proof = proof
if chain == Chain.REWARD_CHAIN:
rc_info = info
rc_proof = proof
if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
log.error(f"Insufficient signage point data {signage_iter}")
continue
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_info.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
# This proof is on an outdated challenge, so don't use it
continue
iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
response = timelord_protocol.NewSignagePointVDF(
signage_point_index,
dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
cc_proof,
rc_info,
rc_proof,
)
if self.server is not None:
msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
await self.server.send_to_all([msg], NodeType.FULL_NODE)
# Cleanup the signage point from memory.
to_remove.append((signage_iter, signage_point_index))
self.proofs_finished = self._clear_proof_list(signage_iter)
# Send the next 3 signage points to the chains.
next_iters_count = 0
for next_sp, k in self.signage_point_iters:
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
self.iters_to_submit[chain].append(next_sp)
self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
next_iters_count += 1
if next_iters_count == 3:
break
# Break so we alternate between checking SP and IP
break
for r in to_remove:
self.signage_point_iters.remove(r)
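# If all chains finished the infusion point at iter_to_look_for, broadcast a
# NewInfusionPointVDF, build the resulting reward chain block and handle it as the new peak.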
async def _check_for_new_ip(self, iter_to_look_for: uint64):
if len(self.unfinished_blocks) == 0:
return None
infusion_iters = [
iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
]
for iteration in infusion_iters:
if iteration != iter_to_look_for:
continue
proofs_with_iter = [
(chain, info, proof)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations == iteration and label == self.num_resets
]
if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
chain_count = 3
else:
chain_count = 2
if len(proofs_with_iter) == chain_count:
block = None
ip_iters = None
for unfinished_block in self.unfinished_blocks:
try:
_, ip_iters = iters_from_block(
self.constants,
unfinished_block.reward_chain_block,
self.last_state.get_sub_slot_iters(),
self.last_state.get_difficulty(),
)
except Exception as e:
log.error(f"Error {e}")
continue
if ip_iters - self.last_state.get_last_ip() == iteration:
block = unfinished_block
break
assert ip_iters is not None
if block is not None:
ip_total_iters = self.last_state.get_total_iters() + iteration
challenge = block.reward_chain_block.get_hash()
icc_info: Optional[VDFInfo] = None
icc_proof: Optional[VDFProof] = None
cc_info: Optional[VDFInfo] = None
cc_proof: Optional[VDFProof] = None
rc_info: Optional[VDFInfo] = None
rc_proof: Optional[VDFProof] = None
for chain, info, proof in proofs_with_iter:
if chain == Chain.CHALLENGE_CHAIN:
cc_info = info
cc_proof = proof
if chain == Chain.REWARD_CHAIN:
rc_info = info
rc_proof = proof
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
icc_info = info
icc_proof = proof
if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
return None
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_info.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(
f"Do not have correct challenge {rc_challenge.hex()} "
f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
)
# This proof is on an outdated challenge, so don't use it
continue
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
if not self.last_state.can_infuse_block(overflow):
log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
return None
cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
response = timelord_protocol.NewInfusionPointVDF(
challenge,
cc_info,
cc_proof,
rc_info,
rc_proof,
icc_info,
icc_proof,
)
msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
if self.server is not None:
await self.server.send_to_all([msg], NodeType.FULL_NODE)
self.proofs_finished = self._clear_proof_list(iteration)
if (
self.last_state.get_last_block_total_iters() is None
and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
):
# We don't know when the last block was, so we can't make peaks
return None
sp_total_iters = (
ip_total_iters
- ip_iters
+ calculate_sp_iters(
self.constants,
block.sub_slot_iters,
block.reward_chain_block.signage_point_index,
)
- (block.sub_slot_iters if overflow else 0)
)
if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
is_transaction_block = True
height: uint32 = uint32(0)
else:
last_block_ti = self.last_state.get_last_block_total_iters()
assert last_block_ti is not None
is_transaction_block = last_block_ti < sp_total_iters
height = uint32(self.last_state.get_height() + 1)
if height < 5:
# Don't directly update our state for the first few blocks, because we cannot validate
# whether the pre-farm is correct
return None
new_reward_chain_block = RewardChainBlock(
uint128(self.last_state.get_weight() + block.difficulty),
height,
uint128(ip_total_iters),
block.reward_chain_block.signage_point_index,
block.reward_chain_block.pos_ss_cc_challenge_hash,
block.reward_chain_block.proof_of_space,
block.reward_chain_block.challenge_chain_sp_vdf,
block.reward_chain_block.challenge_chain_sp_signature,
cc_info,
block.reward_chain_block.reward_chain_sp_vdf,
block.reward_chain_block.reward_chain_sp_signature,
rc_info,
icc_info,
is_transaction_block,
)
if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
# Genesis
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
if self.last_state.peak is not None:
assert self.last_state.subslot_end is None
# This means the previous block is also an overflow block, and did not manage
# to lower the deficit, therefore we cannot lower it either. (new slot)
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else:
# This means we are the first infusion in this sub-slot. This may be a new slot or not.
assert self.last_state.subslot_end is not None
if self.last_state.subslot_end.infused_challenge_chain is None:
# There is no ICC, which means we are not finishing a slot. We can reduce the deficit.
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
else:
# There is an ICC, which means we are finishing a slot. Different slot, so can't change
# the deficit
new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else:
new_deficit = max(self.last_state.deficit - 1, 0)
if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
last_csb_or_eos = ip_total_iters
else:
last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
if self.last_state.just_infused_sub_epoch_summary():
new_sub_epoch_summary = None
passed_ses_height_but_not_yet_included = False
else:
new_sub_epoch_summary = block.sub_epoch_summary
if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
passed_ses_height_but_not_yet_included = True
else:
passed_ses_height_but_not_yet_included = (
self.last_state.get_passed_ses_height_but_not_yet_included()
)
self.new_peak = timelord_protocol.NewPeakTimelord(
new_reward_chain_block,
block.difficulty,
uint8(new_deficit),
block.sub_slot_iters,
new_sub_epoch_summary,
self.last_state.reward_challenge_cache,
uint128(last_csb_or_eos),
passed_ses_height_but_not_yet_included,
)
await self._handle_new_peak()
# Break so we alternate between checking SP and IP
break
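# If all chains finished the end-of-sub-slot iterations, assemble and broadcast an
# EndOfSubSlotBundle, then advance the state into the new sub slot.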
async def _check_for_end_of_subslot(self, iter_to_look_for: uint64):
left_subslot_iters = [
iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
]
if len(left_subslot_iters) == 0:
return None
if left_subslot_iters[0] != iter_to_look_for:
return None
chains_finished = [
(chain, info, proof)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
]
if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
chain_count = 3
else:
chain_count = 2
if len(chains_finished) == chain_count:
icc_ip_vdf: Optional[VDFInfo] = None
icc_ip_proof: Optional[VDFProof] = None
cc_vdf: Optional[VDFInfo] = None
cc_proof: Optional[VDFProof] = None
rc_vdf: Optional[VDFInfo] = None
rc_proof: Optional[VDFProof] = None
for chain, info, proof in chains_finished:
if chain == Chain.CHALLENGE_CHAIN:
cc_vdf = info
cc_proof = proof
if chain == Chain.REWARD_CHAIN:
rc_vdf = info
rc_proof = proof
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
icc_ip_vdf = info
icc_ip_proof = proof
assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None
rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
if rc_vdf.challenge != rc_challenge:
assert rc_challenge is not None
log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
# This proof is on an outdated challenge, so don't use it
return None
log.debug("Collected end of subslot vdfs.")
self.iters_finished.add(iter_to_look_for)
self.last_active_time = time.time()
iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
if icc_ip_vdf is not None:
if self.last_state.peak is not None:
total_iters = (
self.last_state.get_total_iters()
- self.last_state.get_last_ip()
+ self.last_state.get_sub_slot_iters()
)
else:
total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
if iters_from_cb > self.last_state.sub_slot_iters:
log.error(f"{self.last_state.peak}")
log.error(f"{self.last_state.subslot_end}")
assert False
assert iters_from_cb <= self.last_state.sub_slot_iters
icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
)
if self.last_state.get_deficit() == 0:
assert icc_sub_slot is not None
icc_sub_slot_hash = icc_sub_slot.get_hash()
else:
icc_sub_slot_hash = None
next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
if next_ses is not None:
log.info(f"Including sub epoch summary {next_ses}")
ses_hash = next_ses.get_hash()
new_sub_slot_iters = next_ses.new_sub_slot_iters
new_difficulty = next_ses.new_difficulty
else:
ses_hash = None
new_sub_slot_iters = None
new_difficulty = None
cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
eos_deficit: uint8 = (
self.last_state.get_deficit()
if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
)
rc_sub_slot = RewardChainSubSlot(
rc_vdf,
cc_sub_slot.get_hash(),
icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
eos_deficit,
)
eos_bundle = EndOfSubSlotBundle(
cc_sub_slot,
icc_sub_slot,
rc_sub_slot,
SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
)
if self.server is not None:
msg = make_msg(
ProtocolMessageTypes.new_end_of_sub_slot_vdf,
timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
)
await self.server.send_to_all([msg], NodeType.FULL_NODE)
log.info(
f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
)
if next_ses is None or next_ses.new_difficulty is None:
self.unfinished_blocks = self.overflow_blocks.copy()
else:
# No overflow blocks in a new epoch
self.unfinished_blocks = []
self.overflow_blocks = []
self.new_subslot_end = eos_bundle
await self._handle_subslot_end()
async def _handle_failures(self):
if len(self.vdf_failures) > 0:
# This can happen if one of the VDF processes has an issue. In this case, we abort all other
# infusion points and signage points, and go straight to the end of slot, so we avoid potential
# issues with the number of iterations that failed.
failed_chain, proof_label = self.vdf_failures[0]
log.error(
f"Vdf clients failed {self.vdf_failures_count} times. Last failure: {failed_chain}, "
f"label {proof_label}, current: {self.num_resets}"
)
if proof_label == self.num_resets:
await self._reset_chains(only_eos=True)
self.vdf_failure_time = time.time()
self.vdf_failures = []
# If something goes wrong in the VDF client due to a failed thread, we might get stuck in a situation where we
# are waiting for that client to finish. Usually other peers will finish the VDFs and reset us. In the case that
# there are no other timelords, this reset should bring the timelord back to a running state.
if time.time() - self.vdf_failure_time < self.constants.SUB_SLOT_TIME_TARGET * 3:
# If we have recently had a failure, allow some more time to finish the slot (we can be up to 3x slower)
active_time_threshold = self.constants.SUB_SLOT_TIME_TARGET * 3
else:
# If there were no recent failures, trigger a reset after 60 seconds of no activity.
# Signage points should arrive roughly every 9 seconds.
active_time_threshold = 60
if time.time() - self.last_active_time > active_time_threshold:
log.error(f"Not active for {active_time_threshold} seconds, restarting all chains")
await self._reset_chains()
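# Main loop: handle failures and new peaks, map free clients to chains, submit pending
# iterations, then check the smallest unfinished iteration for an IP, SP or end of sub-slot.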
async def _manage_chains(self):
async with self.lock:
await asyncio.sleep(5)
await self._reset_chains(True)
while not self._shut_down:
try:
await asyncio.sleep(0.1)
async with self.lock:
await self._handle_failures()
# We've got a new peak, process it.
if self.new_peak is not None:
await self._handle_new_peak()
# Map free vdf_clients to unspawned chains.
await self._map_chains_with_vdf_clients()
async with self.lock:
# Submit pending iterations.
await self._submit_iterations()
not_finished_iters = [
it for it in self.iters_submitted[Chain.REWARD_CHAIN] if it not in self.iters_finished
]
if len(not_finished_iters) == 0:
await asyncio.sleep(0.1)
continue
selected_iter = min(not_finished_iters)
# Check for new infusion point and broadcast it if present.
await self._check_for_new_ip(selected_iter)
# Check for new signage point and broadcast it if present.
await self._check_for_new_sp(selected_iter)
# Check for end of subslot, respawn chains and build EndOfSubslotBundle.
await self._check_for_end_of_subslot(selected_iter)
except Exception:
tb = traceback.format_exc()
log.error(f"Error while handling message: {tb}")
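# Wire protocol with a single vdf_client: send the mode byte, the length-prefixed
# discriminant and initial form, then read length-prefixed proofs (or "STOP") until the
# client finishes, verifying each proof before storing or broadcasting it.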
async def _do_process_communication(
self,
chain: Chain,
challenge: bytes32,
initial_form: ClassgroupElement,
ip: str,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
# Data specific only when running in bluebox mode.
bluebox_iteration: Optional[uint64] = None,
header_hash: Optional[bytes32] = None,
height: Optional[uint32] = None,
field_vdf: Optional[uint8] = None,
# Labels a proof to the current state only
proof_label: Optional[int] = None,
):
disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
try:
# Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
# the timelord tells the vdf_client what to execute.
async with self.lock:
if self.sanitizer_mode:
writer.write(b"S")
else:
if self.config["fast_algorithm"]:
# Run n-wesolowski (fast) algorithm.
writer.write(b"N")
else:
# Run two-wesolowski (slow) algorithm.
writer.write(b"T")
await writer.drain()
prefix = str(len(str(disc)))
if len(prefix) == 1:
prefix = "00" + prefix
if len(prefix) == 2:
prefix = "0" + prefix
async with self.lock:
writer.write((prefix + str(disc)).encode())
await writer.drain()
# Send initial_form prefixed with its length.
async with self.lock:
writer.write(bytes([len(initial_form.data)]) + initial_form.data)
await writer.drain()
try:
ok = await reader.readexactly(2)
except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
log.warning(f"{type(e)} {e}")
async with self.lock:
self.vdf_failures.append((chain, proof_label))
self.vdf_failures_count += 1
return None
if ok.decode() != "OK":
return None
log.debug("Got handshake with VDF client.")
if not self.sanitizer_mode:
async with self.lock:
self.allows_iters.append(chain)
else:
async with self.lock:
assert chain is Chain.BLUEBOX
assert bluebox_iteration is not None
prefix = str(len(str(bluebox_iteration)))
if len(str(bluebox_iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(bluebox_iteration)
writer.write(iter_str.encode())
await writer.drain()
# Listen to the client until "STOP" is received.
while True:
try:
data = await reader.readexactly(4)
except (
asyncio.IncompleteReadError,
ConnectionResetError,
Exception,
) as e:
log.warning(f"{type(e)} {e}")
async with self.lock:
self.vdf_failures.append((chain, proof_label))
self.vdf_failures_count += 1
break
msg = ""
try:
msg = data.decode()
except Exception:
pass
if msg == "STOP":
log.debug(f"Stopped client running on ip {ip}.")
async with self.lock:
writer.write(b"ACK")
await writer.drain()
break
else:
try:
# This must be a proof, 4 bytes is length prefix
length = int.from_bytes(data, "big")
proof = await reader.readexactly(length)
stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
except (
asyncio.IncompleteReadError,
ConnectionResetError,
Exception,
) as e:
log.warning(f"{type(e)} {e}")
async with self.lock:
self.vdf_failures.append((chain, proof_label))
self.vdf_failures_count += 1
break
iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))
y_size_bytes = stdout_bytes_io.read(8)
y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))
y_bytes = stdout_bytes_io.read(y_size)
witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
proof_bytes: bytes = stdout_bytes_io.read()
# Verifies our own proof just in case
form_size = ClassgroupElement.get_size(self.constants)
output = ClassgroupElement.from_bytes(y_bytes[:form_size])
if not self.sanitizer_mode:
time_taken = time.time() - self.chain_start_time[chain]
ips = int(iterations_needed / time_taken * 10) / 10
log.info(
f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
f" iters, "
f"Estimated IPS: {ips}, Chain: {chain}"
)
vdf_info: VDFInfo = VDFInfo(
challenge,
iterations_needed,
output,
)
vdf_proof: VDFProof = VDFProof(
witness_type,
proof_bytes,
self.sanitizer_mode,
)
if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
log.error("Invalid proof of time!")
if not self.sanitizer_mode:
async with self.lock:
assert proof_label is not None
self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
else:
async with self.lock:
writer.write(b"010")
await writer.drain()
assert header_hash is not None
assert field_vdf is not None
assert height is not None
response = timelord_protocol.RespondCompactProofOfTime(
vdf_info, vdf_proof, header_hash, height, field_vdf
)
if self.server is not None:
message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
await self.server.send_to_all([message], NodeType.FULL_NODE)
except ConnectionResetError as e:
log.debug(f"Connection reset with VDF client {e}")
async def _manage_discriminant_queue_sanitizer(self):
while not self._shut_down:
async with self.lock:
try:
while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
# Randomly select the field_vdf we're creating a compact vdf for.
# This is done because CC_SP and CC_IP are more frequent than
# CC_EOS and ICC_EOS, so picking the type first keeps the selection roughly uniform.
target_field_vdf = random.randint(1, 4)
info = next(
(info for info in self.pending_bluebox_info if info[1].field_vdf == target_field_vdf),
None,
)
if info is None:
# Nothing found with target_field_vdf, just pick the first VDFInfo.
info = self.pending_bluebox_info[0]
ip, reader, writer = self.free_clients[0]
self.process_communication_tasks.append(
asyncio.create_task(
self._do_process_communication(
Chain.BLUEBOX,
info[1].new_proof_of_time.challenge,
ClassgroupElement.get_default_element(),
ip,
reader,
writer,
info[1].new_proof_of_time.number_of_iterations,
info[1].header_hash,
info[1].height,
info[1].field_vdf,
)
)
)
self.pending_bluebox_info.remove(info)
self.free_clients = self.free_clients[1:]
except Exception as e:
log.error(f"Exception manage discriminant queue: {e}")
await asyncio.sleep(0.1)
| for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
if chain in self.allows_iters:
_, _, writer = self.chain_type_to_stream[chain]
for iteration in self.iters_to_submit[chain]:
if iteration in self.iters_submitted[chain]:
continue
log.debug(f"Submitting iterations to {chain}: {iteration}")
assert iteration > 0
prefix = str(len(str(iteration)))
if len(str(iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(iteration)
writer.write(iter_str.encode())
await writer.drain()
self.iters_submitted[chain].append(iteration) |
params.go | package types
import (
"fmt"
sdk "github.com/ownesthq/cosmos-sdk/types"
"github.com/ownesthq/cosmos-sdk/x/params"
)
// Parameter store keys
var (
KeyMintDenom = []byte("MintDenom")
KeyInflationRateChange = []byte("InflationRateChange")
KeyInflationMax = []byte("InflationMax")
KeyInflationMin = []byte("InflationMin")
KeyGoalBonded = []byte("GoalBonded")
KeyBlocksPerYear = []byte("BlocksPerYear")
)
// mint parameters
type Params struct {
MintDenom string `json:"mint_denom"` // type of coin to mint
InflationRateChange sdk.Dec `json:"inflation_rate_change"` // maximum annual change in inflation rate
InflationMax sdk.Dec `json:"inflation_max"` // maximum inflation rate
InflationMin sdk.Dec `json:"inflation_min"` // minimum inflation rate
GoalBonded sdk.Dec `json:"goal_bonded"` // goal of percent bonded atoms
BlocksPerYear uint64 `json:"blocks_per_year"` // expected blocks per year
}
// ParamTable for minting module.
func ParamKeyTable() params.KeyTable {
return params.NewKeyTable().RegisterParamSet(&Params{})
}
func NewParams(mintDenom string, inflationRateChange, inflationMax,
inflationMin, goalBonded sdk.Dec, blocksPerYear uint64) Params {
return Params{
MintDenom: mintDenom,
InflationRateChange: inflationRateChange,
InflationMax: inflationMax,
InflationMin: inflationMin,
GoalBonded: goalBonded,
BlocksPerYear: blocksPerYear,
}
}
// default minting module parameters
func DefaultParams() Params {
return Params{
MintDenom: sdk.DefaultBondDenom,
InflationRateChange: sdk.NewDecWithPrec(13, 2),
InflationMax: sdk.NewDecWithPrec(20, 2),
InflationMin: sdk.NewDecWithPrec(7, 2),
GoalBonded: sdk.NewDecWithPrec(67, 2),
BlocksPerYear: uint64(60 * 60 * 8766 / 5), // assuming 5 second block times
}
}
// validate params
func ValidateParams(params Params) error {
if params.GoalBonded.LT(sdk.ZeroDec()) {
return fmt.Errorf("mint parameter GoalBonded should be positive, is %s ", params.GoalBonded.String())
}
if params.GoalBonded.GT(sdk.OneDec()) {
return fmt.Errorf("mint parameter GoalBonded must be <= 1, is %s", params.GoalBonded.String())
}
if params.InflationMax.LT(params.InflationMin) |
if params.MintDenom == "" {
return fmt.Errorf("mint parameter MintDenom can't be an empty string")
}
return nil
}
func (p Params) String() string {
return fmt.Sprintf(`Minting Params:
Mint Denom: %s
Inflation Rate Change: %s
Inflation Max: %s
Inflation Min: %s
Goal Bonded: %s
Blocks Per Year: %d
`,
p.MintDenom, p.InflationRateChange, p.InflationMax,
p.InflationMin, p.GoalBonded, p.BlocksPerYear,
)
}
// Implements params.ParamSet
func (p *Params) ParamSetPairs() params.ParamSetPairs {
return params.ParamSetPairs{
{KeyMintDenom, &p.MintDenom},
{KeyInflationRateChange, &p.InflationRateChange},
{KeyInflationMax, &p.InflationMax},
{KeyInflationMin, &p.InflationMin},
{KeyGoalBonded, &p.GoalBonded},
{KeyBlocksPerYear, &p.BlocksPerYear},
}
}
| {
return fmt.Errorf("mint parameter Max inflation must be greater than or equal to min inflation")
} |
parsers.rs | // Copyright 2020 - developers of the `grammers` project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg(any(feature = "markdown", feature = "html"))]
use grammers_tl_types as tl;
#[cfg(feature = "html")]
const CODE_LANG_PREFIX: &str = "language-";
/// The length of a string, according to Telegram.
///
/// Telegram considers the length of the string with surrogate pairs.
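/// Characters outside the Basic Multilingual Plane, such as most emoji, therefore count as two.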
fn telegram_string_len(string: &str) -> i32 {
// https://en.wikipedia.org/wiki/Plane_(Unicode)#Overview
string.encode_utf16().count() as i32
}
/// Pushes a new `MessageEntity` instance with zero-length to the specified vector.
///
/// # Examples
///
/// ```
/// let mut vec = Vec::new();
/// push_entity!(MessageEntityBold(1) => vec);
/// push_entity!(MessageEntityPre(2, language = "rust".to_string()) => vec);
/// ```
macro_rules! push_entity {
( $ty:ident($offset:expr) => $vector:expr ) => {
$vector.push(
tl::types::$ty {
offset: $offset,
length: 0,
}
.into(),
)
};
( $ty:ident($offset:expr, $field:ident = $value:expr) => $vector:expr ) => {
$vector.push(
tl::types::$ty {
offset: $offset,
length: 0,
$field: $value,
}
.into(),
)
};
}
/// Updates the length of the latest `MessageEntity` inside the specified vector.
///
/// # Examples
///
/// ```
/// let mut vec = Vec::new();
/// push_entity!(MessageEntityBold(1) => vec);
/// update_entity_len!(MessageEntityBold(2) => vec);
/// ```
macro_rules! update_entity_len {
( $ty:ident($end_offset:expr) => $vector:expr ) => {
let mut remove = false;
let end_offset = $end_offset;
let pos = $vector.iter_mut().rposition(|e| match e {
tl::enums::MessageEntity::$ty(e) => {
e.length = end_offset - e.offset;
remove = e.length == 0;
true
}
_ => false,
});
if remove {
$vector.remove(pos.unwrap());
}
};
}
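/// Parses a CommonMark message into plain text plus the list of Telegram
/// `MessageEntity` values describing its formatting.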
#[cfg(feature = "markdown")]
pub fn parse_markdown_message(message: &str) -> (String, Vec<tl::enums::MessageEntity>) {
use pulldown_cmark::{CodeBlockKind, Event, Parser, Tag};
let mut text = String::with_capacity(message.len());
let mut entities = Vec::new();
let mut offset = 0;
Parser::new(message).for_each(|event| match event {
// text
Event::Text(string) => |
// `code`
Event::Code(string) => {
text.push_str(&string);
let length = telegram_string_len(&string);
entities.push(tl::types::MessageEntityCode { offset, length }.into());
offset += length;
}
// **bold text**
Event::Start(Tag::Strong) => {
push_entity!(MessageEntityBold(offset) => entities);
}
Event::End(Tag::Strong) => {
update_entity_len!(Bold(offset) => entities);
}
// *italic text*
Event::Start(Tag::Emphasis) => {
push_entity!(MessageEntityItalic(offset) => entities);
}
Event::End(Tag::Emphasis) => {
update_entity_len!(Italic(offset) => entities);
}
// [text link](https://example.com)
Event::Start(Tag::Link(_kind, url, _title)) => {
push_entity!(MessageEntityTextUrl(offset, url = url.to_string()) => entities);
}
Event::End(Tag::Link(_kind, _url, _title)) => {
update_entity_len!(TextUrl(offset) => entities);
}
// ```lang\npre```
Event::Start(Tag::CodeBlock(kind)) => {
let lang = match kind {
CodeBlockKind::Indented => "".to_string(),
CodeBlockKind::Fenced(lang) => lang.to_string(),
};
push_entity!(MessageEntityPre(offset, language = lang) => entities);
}
Event::End(Tag::CodeBlock(_kind)) => {
update_entity_len!(Pre(offset) => entities);
}
// "\\\n"
Event::HardBreak => {
text.push('\n');
offset += 1;
}
// "\n\n"
Event::End(Tag::Paragraph) => {
text.push_str("\n\n");
offset += 2;
}
_ => {}
});
text.truncate(text.trim_end().len());
(text, entities)
}
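/// Parses an HTML message into plain text plus the list of Telegram `MessageEntity`
/// values, using a streaming html5ever tokenizer.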
#[cfg(feature = "html")]
pub fn parse_html_message(message: &str) -> (String, Vec<tl::enums::MessageEntity>) {
use html5ever::tendril::StrTendril;
use html5ever::tokenizer::{
BufferQueue, Tag, TagKind, Token, TokenSink, TokenSinkResult, Tokenizer,
};
// We could also convert the atoms we receive into lowercase strings and
// match against those, but that would defeat the purpose. We do however
// give the atoms we use better names.
use html5ever::{
ATOM_LOCALNAME__61 as TAG_A, ATOM_LOCALNAME__62 as TAG_B,
ATOM_LOCALNAME__62_6C_6F_63_6B_71_75_6F_74_65 as TAG_BLOCKQUOTE,
ATOM_LOCALNAME__63_6C_61_73_73 as ATTR_CLASS, ATOM_LOCALNAME__63_6F_64_65 as TAG_CODE,
ATOM_LOCALNAME__64_65_6C as TAG_DEL, ATOM_LOCALNAME__65_6D as TAG_EM,
ATOM_LOCALNAME__68_72_65_66 as ATTR_HREF, ATOM_LOCALNAME__69 as TAG_I,
ATOM_LOCALNAME__70_72_65 as TAG_PRE, ATOM_LOCALNAME__73 as TAG_S,
ATOM_LOCALNAME__73_74_72_6F_6E_67 as TAG_STRONG, ATOM_LOCALNAME__75 as TAG_U,
};
struct Sink {
text: String,
entities: Vec<tl::enums::MessageEntity>,
offset: i32,
}
impl TokenSink for Sink {
type Handle = ();
fn process_token(&mut self, token: Token, _line_number: u64) -> TokenSinkResult<()> {
match token {
Token::TagToken(Tag {
kind: TagKind::StartTag,
name,
self_closing: _,
attrs,
}) => match name {
n if n == TAG_B || n == TAG_STRONG => {
push_entity!(MessageEntityBold(self.offset) => self.entities);
}
n if n == TAG_I || n == TAG_EM => {
push_entity!(MessageEntityItalic(self.offset) => self.entities);
}
n if n == TAG_S || n == TAG_DEL => {
push_entity!(MessageEntityStrike(self.offset) => self.entities);
}
TAG_U => {
push_entity!(MessageEntityUnderline(self.offset) => self.entities);
}
TAG_BLOCKQUOTE => {
push_entity!(MessageEntityBlockquote(self.offset) => self.entities);
}
TAG_CODE => {
match self.entities.iter_mut().rev().next() {
// If the previous tag is an open `<pre>`, don't add `<code>`;
// we most likely want to indicate `class="language-foo"`.
Some(tl::enums::MessageEntity::Pre(e)) if e.length == 0 => {
e.language = attrs
.into_iter()
.find(|a| {
a.name.local == ATTR_CLASS
&& a.value.starts_with(CODE_LANG_PREFIX)
})
.map(|a| a.value[CODE_LANG_PREFIX.len()..].to_string())
.unwrap_or_else(|| "".to_string());
}
_ => {
push_entity!(MessageEntityCode(self.offset) => self.entities);
}
}
}
TAG_PRE => {
push_entity!(MessageEntityPre(self.offset, language = "".to_string())
=> self.entities);
}
TAG_A => {
let url = attrs
.into_iter()
.find(|a| a.name.local == ATTR_HREF)
.map(|a| a.value.to_string())
.unwrap_or_else(|| "".to_string());
push_entity!(MessageEntityTextUrl(self.offset, url = url)
=> self.entities);
}
_ => {}
},
Token::TagToken(Tag {
kind: TagKind::EndTag,
name,
self_closing: _,
attrs: _,
}) => match name {
n if n == TAG_B || n == TAG_STRONG => {
update_entity_len!(Bold(self.offset) => self.entities);
}
n if n == TAG_I || n == TAG_EM => {
update_entity_len!(Italic(self.offset) => self.entities);
}
n if n == TAG_S || n == TAG_DEL => {
update_entity_len!(Strike(self.offset) => self.entities);
}
TAG_U => {
update_entity_len!(Underline(self.offset) => self.entities);
}
TAG_BLOCKQUOTE => {
update_entity_len!(Blockquote(self.offset) => self.entities);
}
TAG_CODE => {
match self.entities.iter_mut().rev().next() {
// If the previous tag is an open `<pre>`, don't update `<code>` len;
// we most likely want to indicate `class="language-foo"`.
Some(tl::enums::MessageEntity::Pre(e)) if e.length == 0 => {}
_ => {
update_entity_len!(Code(self.offset) => self.entities);
}
}
}
TAG_PRE => {
update_entity_len!(Pre(self.offset) => self.entities);
}
TAG_A => {
update_entity_len!(TextUrl(self.offset) => self.entities);
}
_ => {}
},
Token::CharacterTokens(string) => {
self.text.push_str(&string);
self.offset += telegram_string_len(&string);
}
_ => {}
}
TokenSinkResult::Continue
}
}
let mut input = BufferQueue::new();
input.push_back(StrTendril::from_slice(message).try_reinterpret().unwrap());
let mut tok = Tokenizer::new(
Sink {
text: String::with_capacity(message.len()),
entities: Vec::new(),
offset: 0,
},
Default::default(),
);
let _ = tok.feed(&mut input);
tok.end();
let Sink { text, entities, .. } = tok.sink;
(text, entities)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[cfg(feature = "markdown")]
fn parse_leading_markdown() {
let (text, entities) = parse_markdown_message("**Hello** world!");
assert_eq!(text, "Hello world!");
assert_eq!(
entities,
vec![tl::types::MessageEntityBold {
offset: 0,
length: 5
}
.into()]
);
}
#[test]
#[cfg(feature = "markdown")]
fn parse_trailing_markdown() {
let (text, entities) = parse_markdown_message("Hello **world!**");
assert_eq!(text, "Hello world!");
assert_eq!(
entities,
vec![tl::types::MessageEntityBold {
offset: 6,
length: 6
}
.into()]
);
}
#[test]
#[cfg(feature = "markdown")]
fn parse_emoji_markdown() {
let (text, entities) = parse_markdown_message("A **little 🦀** here");
assert_eq!(text, "A little 🦀 here");
assert_eq!(
entities,
vec![tl::types::MessageEntityBold {
offset: 2,
length: 9
}
.into()]
);
}
#[test]
#[cfg(feature = "markdown")]
fn parse_all_entities_markdown() {
let (text, entities) = parse_markdown_message(
"Some **bold** (__strong__), *italics* (_cursive_), inline `code`, \
a\n```rust\npre\n```\nblock, and [links](https://example.com)",
);
assert_eq!(
text,
"Some bold (strong), italics (cursive), inline code, apre\nblock, and links"
);
assert_eq!(
entities,
vec![
tl::types::MessageEntityBold {
offset: 5,
length: 4
}
.into(),
tl::types::MessageEntityBold {
offset: 11,
length: 6
}
.into(),
tl::types::MessageEntityItalic {
offset: 20,
length: 7
}
.into(),
tl::types::MessageEntityItalic {
offset: 29,
length: 7
}
.into(),
tl::types::MessageEntityCode {
offset: 46,
length: 4
}
.into(),
tl::types::MessageEntityPre {
offset: 53,
length: 4,
language: "rust".to_string()
}
.into(),
tl::types::MessageEntityTextUrl {
offset: 68,
length: 5,
url: "https://example.com".to_string()
}
.into(),
]
);
}
#[test]
#[cfg(feature = "markdown")]
fn parse_nested_entities_markdown() {
// CommonMark won't allow the following: "Some **bold _both** italics_"
let (text, entities) = parse_markdown_message("Some **bold _both_** _italics_");
assert_eq!(text, "Some bold both italics");
assert_eq!(
entities,
vec![
tl::types::MessageEntityBold {
offset: 5,
length: 9
}
.into(),
tl::types::MessageEntityItalic {
offset: 10,
length: 4
}
.into(),
tl::types::MessageEntityItalic {
offset: 15,
length: 7
}
.into(),
]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_leading_html() {
// Intentionally use different casing to make sure that is handled well
let (text, entities) = parse_html_message("<B>Hello</b> world!");
assert_eq!(text, "Hello world!");
assert_eq!(
entities,
vec![tl::types::MessageEntityBold {
offset: 0,
length: 5
}
.into()]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_trailing_html() {
let (text, entities) = parse_html_message("Hello <strong>world!</strong>");
assert_eq!(text, "Hello world!");
assert_eq!(
entities,
vec![tl::types::MessageEntityBold {
offset: 6,
length: 6
}
.into()]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_emoji_html() {
let (text, entities) = parse_html_message("A <b>little 🦀</b> here");
assert_eq!(text, "A little 🦀 here");
assert_eq!(
entities,
vec![tl::types::MessageEntityBold {
offset: 2,
length: 9
}
.into()]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_all_entities_html() {
let (text, entities) = parse_html_message(
"Some <b>bold</b> (<strong>strong</strong>), <i>italics</i> \
(<em>cursive</em>), inline <code>code</code>, a <pre>pre</pre> \
block, and <a href=\"https://example.com\">links</a>",
);
assert_eq!(
text,
"Some bold (strong), italics (cursive), inline code, a pre block, and links"
);
assert_eq!(
entities,
vec![
tl::types::MessageEntityBold {
offset: 5,
length: 4
}
.into(),
tl::types::MessageEntityBold {
offset: 11,
length: 6
}
.into(),
tl::types::MessageEntityItalic {
offset: 20,
length: 7
}
.into(),
tl::types::MessageEntityItalic {
offset: 29,
length: 7
}
.into(),
tl::types::MessageEntityCode {
offset: 46,
length: 4
}
.into(),
tl::types::MessageEntityPre {
offset: 54,
length: 3,
language: "".to_string()
}
.into(),
tl::types::MessageEntityTextUrl {
offset: 69,
length: 5,
url: "https://example.com".to_string()
}
.into(),
]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_pre_with_lang_html() {
let (text, entities) = parse_html_message(
"Some <pre>pre</pre>, <code>normal</code> and \
<pre><code class=\"language-rust\">rusty</code></pre> code",
);
assert_eq!(text, "Some pre, normal and rusty code");
assert_eq!(
entities,
vec![
tl::types::MessageEntityPre {
offset: 5,
length: 3,
language: "".to_string()
}
.into(),
tl::types::MessageEntityCode {
offset: 10,
length: 6,
}
.into(),
tl::types::MessageEntityPre {
offset: 21,
length: 5,
language: "rust".to_string()
}
.into(),
]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_empty_pre_and_lang_html() {
let (text, entities) = parse_html_message(
"Some empty <pre></pre> and <code class=\"language-rust\">code</code>",
);
assert_eq!(text, "Some empty and code");
assert_eq!(
entities,
vec![tl::types::MessageEntityCode {
offset: 16,
length: 4,
}
.into(),]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_link_no_href_html() {
let (text, entities) = parse_html_message("Some <a>empty link</a>, it does nothing");
assert_eq!(text, "Some empty link, it does nothing");
assert_eq!(
entities,
vec![tl::types::MessageEntityTextUrl {
offset: 5,
length: 10,
url: "".to_string()
}
.into(),]
);
}
#[test]
#[cfg(feature = "html")]
fn parse_nested_entities_html() {
let (text, entities) = parse_html_message("Some <b>bold <i>both</b> italics</i>");
assert_eq!(text, "Some bold both italics");
assert_eq!(
entities,
vec![
tl::types::MessageEntityBold {
offset: 5,
length: 9
}
.into(),
tl::types::MessageEntityItalic {
offset: 10,
length: 12
}
.into(),
]
);
}
}
| {
text.push_str(&string);
offset += telegram_string_len(&string);
} |
__init__.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .auto_storage_base_properties import AutoStorageBaseProperties
from .key_vault_reference import KeyVaultReference
from .batch_account_create_parameters import BatchAccountCreateParameters
from .auto_storage_properties import AutoStorageProperties
from .batch_account import BatchAccount
from .batch_account_update_parameters import BatchAccountUpdateParameters
from .batch_account_regenerate_key_parameters import BatchAccountRegenerateKeyParameters
from .batch_account_keys import BatchAccountKeys
from .activate_application_package_parameters import ActivateApplicationPackageParameters
from .application_create_parameters import ApplicationCreateParameters
from .application_package import ApplicationPackage
from .application import Application
from .application_update_parameters import ApplicationUpdateParameters
from .batch_location_quota import BatchLocationQuota
from .resource import Resource
from .operation_display import OperationDisplay
from .operation import Operation
from .batch_account_paged import BatchAccountPaged
from .application_paged import ApplicationPaged
from .operation_paged import OperationPaged
from .batch_management_client_enums import (
PoolAllocationMode,
ProvisioningState,
AccountKeyType,
PackageState,
)
__all__ = [
'AutoStorageBaseProperties',
'KeyVaultReference',
'BatchAccountCreateParameters',
'AutoStorageProperties',
'BatchAccount',
'BatchAccountUpdateParameters',
'BatchAccountRegenerateKeyParameters',
'BatchAccountKeys',
'ActivateApplicationPackageParameters',
'ApplicationCreateParameters',
'ApplicationPackage', | 'Application',
'ApplicationUpdateParameters',
'BatchLocationQuota',
'Resource',
'OperationDisplay',
'Operation',
'BatchAccountPaged',
'ApplicationPaged',
'OperationPaged',
'PoolAllocationMode',
'ProvisioningState',
'AccountKeyType',
'PackageState',
] | |
main.go | package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"strings"
"github.com/emicklei/go-restful"
"github.com/go-openapi/spec"
_ "github.com/openshift/api/operator/v1"
"k8s.io/kube-openapi/pkg/builder"
"k8s.io/kube-openapi/pkg/common"
_ "kubevirt.io/client-go/apis/snapshot/v1alpha1"
_ "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1"
_ "github.com/harvester/harvester-network-controller/pkg/apis/network.harvesterhci.io/v1beta1"
"github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
"github.com/harvester/harvester/pkg/genswagger/rest"
)
var outputFile = flag.String("output", "api/openapi-spec/swagger.json", "Output file.")
var kindToTagMappings = map[string]string{
"VirtualMachine": "Virtual Machines",
"VirtualMachineInstance": "Virtual Machines",
"VirtualMachineTemplate": "Virtual Machine Templates",
"VirtualMachineTemplateVersion": "Virtual Machine Templates",
"PersistentVolumeClaim": "Volumes",
"VirtualMachineImage": "Images",
"VirtualMachineBackup": "Backups",
"VirtualMachineRestore": "Restores",
"VirtualMachineInstanceMigration": "Migrations",
"KeyPair": "SSH Keys",
"Setting": "Settings",
"SupportBundle": "Support Bundles",
"Upgrade": "Upgrades",
"ClusterNetwork": "Networks",
"NodeNetwork": "Networks",
"NetworkAttachmentDefinition": "Networks",
}
// Generate OpenAPI spec definitions for Harvester Resource
func | () {
flag.Parse()
config := createConfig()
webServices := rest.AggregatedWebServices()
swagger, err := builder.BuildOpenAPISpec(webServices, config)
if err != nil {
log.Fatal(err.Error())
}
jsonBytes, err := json.MarshalIndent(swagger, "", " ")
if err != nil {
log.Fatal(err.Error())
}
if err := ioutil.WriteFile(*outputFile, jsonBytes, 0644); err != nil {
log.Fatal(err.Error())
}
}
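// createConfig builds the kube-openapi common.Config consumed by
// builder.BuildOpenAPISpec above: a shared 401 "Unauthorized" response,
// the API title and version, OpenAPI definitions taken from the generated
// harvesterhci.io v1beta1 package, Kubernetes-style definition names, and
// a Swagger tag per route derived from its "kind" metadata.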
func createConfig() *common.Config {
return &common.Config{
CommonResponses: map[int]spec.Response{
401: {
ResponseProps: spec.ResponseProps{
Description: "Unauthorized",
},
},
},
Info: &spec.Info{
InfoProps: spec.InfoProps{
Title: "Harvester APIs",
Version: "v1beta1",
},
},
GetDefinitions: func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
return rest.SetDefinitions(v1beta1.GetOpenAPIDefinitions(ref))
},
GetDefinitionName: func(name string) (string, spec.Extensions) {
			// Adapt Go package paths to Kubernetes-style API group names
name = strings.ReplaceAll(name, "github.com/harvester/harvester/pkg/apis/harvesterhci.io", "harvesterhci.io")
name = strings.ReplaceAll(name, "github.com/harvester/harvester-network-controller/pkg/apis/network.harvesterhci.io", "network.harvesterhci.io")
name = strings.ReplaceAll(name, "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io", "k8s.cni.cncf.io")
name = strings.ReplaceAll(name, "k8s.io/api/core", "k8s.io")
name = strings.ReplaceAll(name, "k8s.io/apimachinery/pkg/apis/meta", "k8s.io")
name = strings.ReplaceAll(name, "kubevirt.io/client-go/api", "kubevirt.io")
name = strings.ReplaceAll(name, "kubevirt.io/containerized-data-importer/pkg/apis/core", "cdi.kubevirt.io")
name = strings.ReplaceAll(name, "/", ".")
return name, nil
},
GetOperationIDAndTags: func(r *restful.Route) (string, []string, error) {
var tag string
if _, ok := r.Metadata["kind"]; ok {
kind := fmt.Sprint(r.Metadata["kind"])
tag = kindToTagMappings[kind]
}
return r.Operation, []string{tag}, nil
},
}
}
| main |
uwb_channel.py | import numpy as np
import matplotlib.pyplot as plt
def gen_channel(parameters, fc=5E9, fs=2E9, dynamic_range=30):
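    """Generate a random UWB channel impulse response.

    Sketch of what the code below does: cluster and ray arrivals follow
    Poisson processes (ray inter-arrival times come from a two-rate
    mixture), ray amplitudes are Nakagami distributed with a log-normal
    m-factor, cluster powers include log-normal shadowing, and a
    frequency-dependent gain ~ f**(-2 * kf) is applied around the carrier.
    The parameter names mirror IEEE 802.15.4a-style channel model
    conventions (the CM1 set is used in __main__ below).

    fc is the centre frequency in Hz, fs the sampling rate in Hz, and
    dynamic_range the decay range in dB that bounds the response length.
    Returns a unit-energy complex impulse response sampled at fs.
    """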
# Calculate samples/nanosec ratio
nanosec_to_samples = int(1E-9 * fs)
#####################################
# Unpack parameters and convert units
cluster_rate = parameters['cluster_rate'] / nanosec_to_samples
inter_cluster_rate_1 = parameters['inter_cluster_rate_1'] / nanosec_to_samples
inter_cluster_rate_2 = parameters['inter_cluster_rate_2'] / nanosec_to_samples
beta = parameters['beta']
cluster_decay = parameters['cluster_decay'] * nanosec_to_samples
inter_cluster_decay = parameters['inter_cluster_decay'] * nanosec_to_samples
mean_m = parameters['mean_m']
std_m = parameters['std_m']
std_cluster_shadowing = parameters['std_cluster_shadowing']
kf = parameters['kf']
#########################
# Obtain impulse response
if inter_cluster_decay > cluster_decay:
raise ValueError("Inter cluster decay cannot be larger than cluster decay.")
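    # Truncate the response once the cluster decay exp(-t / cluster_decay)
    # has dropped by `dynamic_range` dB:
    # 10 * log10(exp(-t / cluster_decay)) = -dynamic_range
    #   => t = dynamic_range * cluster_decay * ln(10) / 10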
max_t = int(dynamic_range * cluster_decay * np.log(10) / 10)
h = np.zeros(max_t, dtype=complex)
t = 0
while t < max_t:
tau = 0
max_tau = int((max_t - t) * inter_cluster_decay / cluster_decay)
cluster_power = np.exp(-t / cluster_decay) * np.random.lognormal(mean=0, sigma=std_cluster_shadowing)
while tau < max_tau:
# Mean power for this ray
mean_power = cluster_power * np.exp(-tau / inter_cluster_decay)
# Nakagami m-factor is log normally distributed
m = np.random.lognormal(mean_m, std_m)
# Compute amplitude as Nakagami distributed
a = np.sqrt(np.random.gamma(shape=m, scale=mean_power / m))
# Compute phase as uniformly distributed
phi = np.random.uniform(0, 2 * np.pi)
            h[t + tau] = a * np.exp(-1j * phi)
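            # Ray inter-arrival times come from a mixture of two exponentials
            # (a mixed Poisson arrival process): rate 1 is chosen with
            # probability beta, rate 2 otherwise.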
if np.random.uniform(0, 1) < beta:
inter_cluster_rate = inter_cluster_rate_1
else:
inter_cluster_rate = inter_cluster_rate_2
tau += round(np.random.exponential(1 / inter_cluster_rate))
t += round(np.random.exponential(1 / cluster_rate))
##########################
# Add frequency dependency
# Zero padding before FFT to avoid artifacts
h = np.append(h, np.zeros(h.size, dtype=complex))
H = np.fft.fft(h, norm='ortho')
# Get frequency array in the same order as produced by the FFT
freq = np.linspace(fc - fs / 2, fc + fs / 2, num=h.size)
freq = np.append(freq[freq.size // 2:], freq[:freq.size // 2])
# Calculate frequency dependency and apply
Gf = np.power(freq, -2 * kf)
H = np.multiply(Gf, H)
# Inverse FFT
h = np.fft.ifft(H, norm='ortho')
# Remove padding
h = h[:h.size // 2]
###############
# Normalization
h = normalize(h)
return h
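# Helper functions: normalize() scales a signal to unit energy and energy()
# returns the sum of squared magnitudes.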
def normalize(s):
return s / np.sqrt(energy(s))
def energy(s): |
if __name__ == '__main__':
parameters_cm1 = {
'cluster_rate': 0.047,
'inter_cluster_rate_1': 1.54,
'inter_cluster_rate_2': 0.15,
'beta': 0.095,
'cluster_decay': 22.61,
'inter_cluster_decay': 12.53,
'mean_m': 0.67,
'std_m': 0.28,
'std_cluster_shadowing': 2.75,
'kf': 1.12,
'kd': 1.79,
'std_path_shadowing': 2.22
}
h = gen_channel(parameters=parameters_cm1,
fc=(10.6E9 + 3.1E9) / 2,
fs=6E9,
dynamic_range=30)
plt.plot(np.abs(h))
plt.show() | return np.sum(np.square(np.abs(s))) |