file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
process_windows.go | // +build windows
package procutil
import (
"bytes"
"fmt"
"strings"
"time"
"golang.org/x/sys/windows"
"github.com/DataDog/datadog-agent/pkg/util/log"
"github.com/DataDog/datadog-agent/pkg/util/winutil"
"github.com/DataDog/datadog-agent/pkg/util/winutil/pdhutil"
)
var (
counterPaths = []string{
pdhutil.CounterAllProcessPID,
pdhutil.CounterAllProcessParentPID,
pdhutil.CounterAllProcessPctUserTime,
pdhutil.CounterAllProcessPctPrivilegedTime,
pdhutil.CounterAllProcessWorkingSet,
pdhutil.CounterAllProcessPoolPagedBytes,
pdhutil.CounterAllProcessThreadCount,
pdhutil.CounterAllProcessHandleCount,
pdhutil.CounterAllProcessIOReadOpsPerSec,
pdhutil.CounterAllProcessIOWriteOpsPerSec,
pdhutil.CounterAllProcessIOReadBytesPerSec,
pdhutil.CounterAllProcessIOWriteBytesPerSec,
}
)
// NewProcessProbe returns a Probe object
// NOTE(review): the variadic options are accepted but never applied to the
// probe — confirm whether they should be iterated and applied before init().
func NewProcessProbe(options ...Option) Probe {
    p := &probe{}
    p.init()
    return p
}
// probe implements Probe on Windows
type probe struct {
    hQuery        pdhutil.PDH_HQUERY              // open PDH query handle; zero when closed
    counters      map[string]pdhutil.PDH_HCOUNTER // counter path -> PDH counter handle
    formatter     pdhutil.PdhFormatter            // enumerates formatted counter values per instance
    enumSpecs     map[string]counterEnumSpec      // counter path -> format + dispatch callback
    initError     error                           // recorded by init(); non-nil means probe unusable
    instanceToPID map[string]int32                // PDH instance name -> PID, rebuilt each collection
    procs         map[int32]*Process              // cache of live processes keyed by PID
}
// init opens the PDH query, registers every per-process counter path, and
// primes the query with one collection so the first real collection can
// return meaningful (rate) values. Any failure is recorded in p.initError
// and PDH resources are released via the deferred Close.
func (p *probe) init() {
    var err error
    // Record the outcome and clean up the query handle on failure.
    defer func() {
        p.initError = err
        if err != nil {
            p.Close()
        }
    }()
    status := pdhutil.PdhOpenQuery(0, 0, &p.hQuery)
    if status != 0 {
        err = fmt.Errorf("PdhOpenQuery failed with 0x%x", status)
        return
    }
    // Register one wildcard counter handle per metric path; the "English"
    // variant keys counters by their English names regardless of OS locale.
    p.counters = make(map[string]pdhutil.PDH_HCOUNTER)
    for _, path := range counterPaths {
        var hCounter pdhutil.PDH_HCOUNTER
        status = pdhutil.PdhAddEnglishCounter(p.hQuery, path, 0, &hCounter)
        if status != 0 {
            err = fmt.Errorf("PdhAddEnglishCounter for %s failed with 0x%x", path, status)
            return
        }
        p.counters[path] = hCounter
    }
    // Need to run PdhCollectQueryData once so that we have meaningful metrics on the first run
    status = pdhutil.PdhCollectQueryData(p.hQuery)
    if status != 0 {
        err = fmt.Errorf("PdhCollectQueryData failed with 0x%x", status)
        return
    }
    p.procs = make(map[int32]*Process)
    p.initEnumSpecs()
    p.instanceToPID = make(map[string]int32)
}
// counterEnumSpec describes how one PDH counter is consumed: the value
// format to request, whether it feeds process metadata (vs. stats), and
// the callback invoked once per counter instance.
type counterEnumSpec struct {
    format      uint32                // PDH_FMT_* format requested when enumerating
    processMeta bool                  // true: updates process metadata; false: updates Stats
    enumFunc    pdhutil.ValueEnumFunc // per-instance value callback
}
// initEnumSpecs wires each counter path to its PDH value format and to the
// callback that stores the value on the cached process. Only the parent-PID
// counter is marked processMeta; all others populate Stats fields.
func (p *probe) initEnumSpecs() {
    p.enumSpecs = map[string]counterEnumSpec{
        pdhutil.CounterAllProcessParentPID: {
            format:      pdhutil.PDH_FMT_LARGE,
            processMeta: true,
            enumFunc:    valueToUint64(p.mapParentPID),
        },
        pdhutil.CounterAllProcessPctUserTime: {
            format:   pdhutil.PDH_FMT_DOUBLE,
            enumFunc: valueToFloat64(p.mapPctUserTime),
        },
        pdhutil.CounterAllProcessPctPrivilegedTime: {
            format:   pdhutil.PDH_FMT_DOUBLE,
            enumFunc: valueToFloat64(p.mapPctPrivilegedTime),
        },
        pdhutil.CounterAllProcessWorkingSet: {
            format:   pdhutil.PDH_FMT_LARGE,
            enumFunc: valueToUint64(p.mapWorkingSet),
        },
        pdhutil.CounterAllProcessPoolPagedBytes: {
            format:   pdhutil.PDH_FMT_LARGE,
            enumFunc: valueToUint64(p.mapPoolPagedBytes),
        },
        pdhutil.CounterAllProcessThreadCount: {
            format:   pdhutil.PDH_FMT_LARGE,
            enumFunc: valueToUint64(p.mapThreadCount),
        },
        pdhutil.CounterAllProcessHandleCount: {
            format:   pdhutil.PDH_FMT_LARGE,
            enumFunc: valueToUint64(p.mapHandleCount),
        },
        pdhutil.CounterAllProcessIOReadOpsPerSec: {
            format:   pdhutil.PDH_FMT_DOUBLE,
            enumFunc: valueToFloat64(p.mapIOReadOpsPerSec),
        },
        pdhutil.CounterAllProcessIOWriteOpsPerSec: {
            format:   pdhutil.PDH_FMT_DOUBLE,
            enumFunc: valueToFloat64(p.mapIOWriteOpsPerSec),
        },
        pdhutil.CounterAllProcessIOReadBytesPerSec: {
            format:   pdhutil.PDH_FMT_DOUBLE,
            enumFunc: valueToFloat64(p.mapIOReadBytesPerSec),
        },
        pdhutil.CounterAllProcessIOWriteBytesPerSec: {
            format:   pdhutil.PDH_FMT_DOUBLE,
            enumFunc: valueToFloat64(p.mapIOWriteBytesPerSec),
        },
    }
}
// valueToFloat64 adapts an (instance, float64) setter to the PDH enum
// callback signature, extracting the Double field of the counter value.
func valueToFloat64(fn func(string, float64)) pdhutil.ValueEnumFunc {
    return func(instance string, value pdhutil.PdhCounterValue) {
        fn(instance, value.Double)
    }
}

// valueToUint64 adapts an (instance, uint64) setter to the PDH enum
// callback signature; the PDH "large" (signed 64-bit) value is
// reinterpreted as uint64.
func valueToUint64(fn func(string, uint64)) pdhutil.ValueEnumFunc {
    return func(instance string, value pdhutil.PdhCounterValue) {
        fn(instance, uint64(value.Large))
    }
}
// Close releases the PDH query handle. Safe to call multiple times: the
// handle is zeroed after closing so a second call is a no-op.
func (p *probe) Close() {
    if p.hQuery != pdhutil.PDH_HQUERY(0) {
        pdhutil.PdhCloseQuery(p.hQuery)
        p.hQuery = pdhutil.PDH_HQUERY(0)
    }
}
// StatsForPIDs runs one stats-only counter collection and returns deep
// copies of the stats for each requested PID. PIDs not present in the
// process cache are silently omitted. The now parameter is unused here.
func (p *probe) StatsForPIDs(pids []int32, now time.Time) (map[int32]*Stats, error) {
    // collectMeta=false, collectStats=true: refresh Stats counters only.
    err := p.enumCounters(false, true)
    if err != nil {
        return nil, err
    }
    statsToReturn := make(map[int32]*Stats, len(pids))
    for _, pid := range pids {
        if proc, ok := p.procs[pid]; ok {
            // Deep copy so callers cannot mutate the cached Stats.
            statsToReturn[pid] = proc.Stats.DeepCopy()
        }
    }
    return statsToReturn, nil
}
// ProcessesByPID enumerates all live processes, reconciles the internal
// cache (collecting metadata for new PIDs, evicting exited ones), runs one
// counter collection, and returns deep copies keyed by PID. collectStats
// controls whether stat counters (in addition to metadata) are gathered.
// The now parameter is unused here.
func (p *probe) ProcessesByPID(now time.Time, collectStats bool) (map[int32]*Process, error) {
    // TODO: reuse PIDs slice across runs
    pids, err := getPIDs()
    if err != nil {
        return nil, err
    }
    // Snapshot the cached PIDs; whatever remains after the scan has exited.
    knownPids := make(map[int32]struct{}, len(p.procs))
    for pid := range p.procs {
        knownPids[pid] = struct{}{}
    }
    for _, pid := range pids {
        if pid == 0 {
            // this is the "system idle process". We'll never be able to open it,
            // which will cause us to thrash WMI once per check, which we don't
            // want to do
            continue
        }
        delete(knownPids, pid)
        if _, ok := p.procs[pid]; ok {
            // Process already known, no need to collect metadata
            continue
        }
        proc := &Process{
            Pid: int32(pid),
            Stats: &Stats{
                CPUPercent:  &CPUPercentStat{},
                MemInfo:     &MemoryInfoStat{},
                CtxSwitches: &NumCtxSwitchesStat{},
                IORateStat:  &IOCountersRateStat{},
            },
        }
        err := fillProcessDetails(pid, proc)
        if err != nil {
            // Could not open/inspect the process (e.g. permissions, or it
            // exited between enumeration and open); skip it this cycle.
            continue
        }
        p.procs[pid] = proc
    }
    // Evict cached processes that no longer appear in the PID enumeration.
    for pid := range knownPids {
        proc := p.procs[pid]
        log.Debugf("removing process %v %v", pid, proc.Exe)
        delete(p.procs, pid)
    }
    err = p.enumCounters(true, collectStats)
    if err != nil {
        return nil, err
    }
    // Return deep copies so callers cannot mutate the cache.
    procsToReturn := make(map[int32]*Process, len(p.procs))
    for pid, proc := range p.procs {
        procsToReturn[pid] = proc.DeepCopy()
    }
    return procsToReturn, nil
}
// enumCounters performs one PDH collection and dispatches counter values to
// the registered callbacks. The PID counter is always enumerated first so
// the instance-name -> PID map is fresh before any other callback runs.
// collectMeta gates metadata counters (parent PID); collectStats gates the
// remaining Stats counters.
func (p *probe) enumCounters(collectMeta bool, collectStats bool) error {
    // Reuse map's capacity across runs
    for k := range p.instanceToPID {
        delete(p.instanceToPID, k)
    }
    status := pdhutil.PdhCollectQueryData(p.hQuery)
    if status != 0 {
        return fmt.Errorf("PdhCollectQueryData failed with 0x%x", status)
    }
    ignored := []string{
        "_Total", // Total sum
        "Idle",   // System Idle process
    }
    // Rebuild the instance -> PID mapping for this collection cycle.
    err := p.formatter.Enum(pdhutil.CounterAllProcessPID, p.counters[pdhutil.CounterAllProcessPID], pdhutil.PDH_FMT_LARGE, ignored, valueToUint64(p.mapPID))
    if err != nil {
        return err
    }
    // handle case when instanceToPID does not contain some previously collected process PIDs
    // NOTE(review): this loop gathers PIDs that ARE in instanceToPID but NOT
    // in p.procs, then deletes those same (already absent) keys from p.procs,
    // which is a no-op. The stated intent suggests the inverse check was
    // meant (drop cached procs missing from instanceToPID) — confirm against
    // upstream before changing behavior.
    missingPids := make(map[int32]struct{})
    for _, pid := range p.instanceToPID {
        if _, ok := p.procs[pid]; !ok {
            missingPids[pid] = struct{}{}
        }
    }
    for pid := range missingPids {
        delete(p.procs, pid)
    }
    for counter, spec := range p.enumSpecs {
        // Skip counters not requested for this pass (meta vs. stats).
        if spec.processMeta && !collectMeta ||
            !spec.processMeta && !collectStats {
            continue
        }
        err := p.formatter.Enum(counter, p.counters[counter], spec.format, ignored, spec.enumFunc)
        if err != nil {
            return err
        }
    }
    return nil
}
// StatsWithPermByPID is not implemented on Windows; it always returns an error.
func (p *probe) StatsWithPermByPID(pids []int32) (map[int32]*StatsWithPerm, error) {
    return nil, fmt.Errorf("probe(Windows): StatsWithPermByPID is not implemented")
}
// getProc resolves a PDH instance name to its cached Process, or nil when
// either the instance is not in this cycle's PID map or the PID is not in
// the process cache (both are logged at debug level).
func (p *probe) getProc(instance string) *Process {
    pid, ok := p.instanceToPID[instance]
    if !ok {
        log.Debugf("proc - no pid for instance %s", instance)
        return nil
    }
    proc, ok := p.procs[pid]
    if !ok {
        log.Debugf("proc - no process for pid %d (instance=%s)", pid, instance)
        return nil
    }
    return proc
}

// mapToProc invokes fn with the cached process for instance, if any.
func (p *probe) mapToProc(instance string, fn func(proc *Process)) {
    if proc := p.getProc(instance); proc != nil {
        fn(proc)
    }
}

// mapToStatFloat64 invokes fn with the pid/Stats for instance, if cached.
func (p *probe) mapToStatFloat64(instance string, v float64, fn func(pid int32, proc *Stats, instance string, v float64)) {
    if proc := p.getProc(instance); proc != nil {
        fn(proc.Pid, proc.Stats, instance, v)
    }
}

// mapToStatUint64 is the uint64 counterpart of mapToStatFloat64.
func (p *probe) mapToStatUint64(instance string, v uint64, fn func(pid int32, proc *Stats, instance string, v uint64)) {
    if proc := p.getProc(instance); proc != nil {
        fn(proc.Pid, proc.Stats, instance, v)
    }
}
// mapPID records the PDH instance-name -> PID mapping for this cycle.
func (p *probe) mapPID(instance string, pid uint64) {
    p.instanceToPID[instance] = int32(pid)
}

// setProcParentPID stores the parent PID on the process metadata.
func (p *probe) setProcParentPID(proc *Process, instance string, pid int32) {
    proc.Ppid = pid
}

// mapParentPID is the enum callback for the parent-PID counter.
func (p *probe) mapParentPID(instance string, v uint64) {
    p.mapToProc(instance, func(proc *Process) {
        p.setProcParentPID(proc, instance, int32(v))
    })
}

// traceStats reports whether per-PID stat tracing is enabled (always false for now).
func (p *probe) traceStats(pid int32) bool {
    // TODO: in a future PR introduce an Option to configure tracing of stats for individual PIDs
    return false
}

// setProcOpenFdCount stores the Windows handle count as OpenFdCount.
func (p *probe) setProcOpenFdCount(pid int32, stats *Stats, instance string, v uint64) {
    if p.traceStats(pid) {
        log.Tracef("FdCount[%s,pid=%d] %d", instance, pid, v)
    }
    stats.OpenFdCount = int32(v)
}

// mapHandleCount is the enum callback for the handle-count counter.
func (p *probe) mapHandleCount(instance string, v uint64) {
    p.mapToStatUint64(instance, v, p.setProcOpenFdCount)
}

// setProcNumThreads stores the thread count.
func (p *probe) setProcNumThreads(pid int32, stats *Stats, instance string, v uint64) {
    if p.traceStats(pid) {
        log.Tracef("NumThreads[%s,pid=%d] %d", instance, pid, v)
    }
    stats.NumThreads = int32(v)
}

// mapThreadCount is the enum callback for the thread-count counter.
func (p *probe) mapThreadCount(instance string, v uint64) {
    p.mapToStatUint64(instance, v, p.setProcNumThreads)
}

// setProcCPUTimeUser stores the user-mode CPU percentage.
func (p *probe) setProcCPUTimeUser(pid int32, stats *Stats, instance string, v float64) {
    if p.traceStats(pid) {
        log.Tracef("CPU.User[%s,pid=%d] %f", instance, pid, v)
    }
    stats.CPUPercent.UserPct = v
}

// mapPctUserTime is the enum callback for the %-user-time counter.
func (p *probe) mapPctUserTime(instance string, v float64) {
    p.mapToStatFloat64(instance, v, p.setProcCPUTimeUser)
}

// setProcCPUTimeSystem stores the privileged (kernel) CPU percentage as SystemPct.
func (p *probe) setProcCPUTimeSystem(pid int32, stats *Stats, instance string, v float64) {
    if p.traceStats(pid) {
        log.Tracef("CPU.System[%s,pid=%d] %f", instance, pid, v)
    }
    stats.CPUPercent.SystemPct = v
}

// mapPctPrivilegedTime is the enum callback for the %-privileged-time counter.
func (p *probe) mapPctPrivilegedTime(instance string, v float64) {
    p.mapToStatFloat64(instance, v, p.setProcCPUTimeSystem)
}

// setProcMemRSS stores the working set size as RSS.
func (p *probe) setProcMemRSS(pid int32, stats *Stats, instance string, v uint64) {
    if p.traceStats(pid) {
        log.Tracef("Mem.RSS[%s,pid=%d] %d", instance, pid, v)
    }
    stats.MemInfo.RSS = v
}

// mapWorkingSet is the enum callback for the working-set counter.
func (p *probe) mapWorkingSet(instance string, v uint64) {
    p.mapToStatUint64(instance, v, p.setProcMemRSS)
}

// setProcMemVMS stores the value as VMS.
// NOTE(review): VMS is populated from the Pool Paged Bytes counter (see
// initEnumSpecs wiring) — confirm this is the intended proxy for VMS.
func (p *probe) setProcMemVMS(pid int32, stats *Stats, instance string, v uint64) {
    if p.traceStats(pid) {
        log.Tracef("Mem.VMS[%s,pid=%d] %d", instance, pid, v)
    }
    stats.MemInfo.VMS = v
}

// mapPoolPagedBytes is the enum callback for the pool-paged-bytes counter.
func (p *probe) mapPoolPagedBytes(instance string, v uint64) {
    p.mapToStatUint64(instance, v, p.setProcMemVMS)
}

// setProcIOReadOpsRate stores the read-operations-per-second rate.
func (p *probe) setProcIOReadOpsRate(pid int32, stats *Stats, instance string, v float64) {
    if p.traceStats(pid) {
        log.Tracef("ReadRate[%s,pid=%d] %f", instance, pid, v)
    }
    stats.IORateStat.ReadRate = v
}

// mapIOReadOpsPerSec is the enum callback for the IO-read-ops counter.
func (p *probe) mapIOReadOpsPerSec(instance string, v float64) {
    p.mapToStatFloat64(instance, v, p.setProcIOReadOpsRate)
}

// setProcIOWriteOpsRate stores the write-operations-per-second rate.
func (p *probe) setProcIOWriteOpsRate(pid int32, stats *Stats, instance string, v float64) {
    if p.traceStats(pid) {
        log.Tracef("WriteRate[%s,pid=%d] %f", instance, pid, v)
    }
    stats.IORateStat.WriteRate = v
}

// mapIOWriteOpsPerSec is the enum callback for the IO-write-ops counter.
func (p *probe) mapIOWriteOpsPerSec(instance string, v float64) {
    p.mapToStatFloat64(instance, v, p.setProcIOWriteOpsRate)
}

// setProcIOReadBytesRate stores the read-bytes-per-second rate.
func (p *probe) setProcIOReadBytesRate(pid int32, stats *Stats, instance string, v float64) {
    if p.traceStats(pid) {
        log.Tracef("ReadBytesRate[%s,pid=%d] %f", instance, pid, v)
    }
    stats.IORateStat.ReadBytesRate = v
}

// mapIOReadBytesPerSec is the enum callback for the IO-read-bytes counter.
func (p *probe) mapIOReadBytesPerSec(instance string, v float64) {
    p.mapToStatFloat64(instance, v, p.setProcIOReadBytesRate)
}

// setProcIOWriteBytesRate stores the write-bytes-per-second rate.
func (p *probe) setProcIOWriteBytesRate(pid int32, stats *Stats, instance string, v float64) {
    if p.traceStats(pid) {
        log.Tracef("WriteBytesRate[%s,pid=%d] %f", instance, pid, v)
    }
    stats.IORateStat.WriteBytesRate = v
}

// mapIOWriteBytesPerSec is the enum callback for the IO-write-bytes counter.
func (p *probe) mapIOWriteBytesPerSec(instance string, v float64) {
    p.mapToStatFloat64(instance, v, p.setProcIOWriteBytesRate)
}
func | () ([]int32, error) {
var read uint32
var psSize uint32 = 1024
const dwordSize uint32 = 4
for {
buf := make([]uint32, psSize)
if err := windows.EnumProcesses(buf, &read); err != nil {
return nil, err
}
if uint32(len(buf)) == read {
psSize += 1024
continue
}
pids := make([]int32, read)
for i := range pids {
pids[i] = int32(buf[i])
}
return pids, nil
}
}
// fillProcessDetails populates metadata for proc (username, command line,
// exe path, creation time) by opening a handle to the process. Failure to
// resolve the username is logged and tolerated; failure to open the process
// or read its times is returned to the caller.
func fillProcessDetails(pid int32, proc *Process) error {
    procHandle, err := OpenProcessHandle(pid)
    if err != nil {
        return err
    }
    defer windows.Close(procHandle)

    userName, usererr := GetUsernameForProcess(procHandle)
    if usererr != nil {
        // Fix: log the username error (usererr), not the outer err, which is
        // always nil at this point.
        log.Debugf("Couldn't get process username %v %v", pid, usererr)
    }
    proc.Username = userName

    cmdParams := getProcessCommandParams(procHandle)
    proc.Cmdline = ParseCmdLineArgs(cmdParams.CmdLine)
    proc.Exe = cmdParams.ImagePath

    var CPU windows.Rusage
    if err := windows.GetProcessTimes(procHandle, &CPU.CreationTime, &CPU.ExitTime, &CPU.KernelTime, &CPU.UserTime); err != nil {
        log.Errorf("Could not get process times for %v %v", pid, err)
        return err
    }

    // Store creation time as milliseconds (Nanoseconds / 1e6).
    ctime := CPU.CreationTime.Nanoseconds() / 1000000
    proc.Stats.CreateTime = ctime
    return nil
}
// getProcessCommandParams returns the command line and image path for the
// process behind procHandle, falling back to the image path alone (used as
// both fields) and finally to an empty result.
//
// Fix: the original declared `var err error` and then shadowed it inside
// each `if ... := ...; err == nil` initializer, so both log.Debugf calls
// always logged the outer, nil error. Assign without shadowing so the real
// errors are logged.
func getProcessCommandParams(procHandle windows.Handle) *winutil.ProcessCommandParams {
    cmdParams, err := winutil.GetCommandParamsForProcess(procHandle, true)
    if err == nil {
        return cmdParams
    }
    log.Debugf("Error retrieving command params %v", err)

    imagePath, err := winutil.GetImagePathForProcess(procHandle)
    if err == nil {
        // No command line available: use the image path for both fields.
        return &winutil.ProcessCommandParams{
            CmdLine:   imagePath,
            ImagePath: imagePath,
        }
    }
    log.Debugf("Error retrieving exe path %v", err)

    return &winutil.ProcessCommandParams{}
}
// OpenProcessHandle attempts to open process handle for reading process memory with fallback to query basic info
//
// It first requests PROCESS_QUERY_LIMITED_INFORMATION|PROCESS_VM_READ
// (0x1010); if that is denied it retries with query-limited access only,
// and returns the zero handle plus the error when both attempts fail.
func OpenProcessHandle(pid int32) (windows.Handle, error) {
    // 0x1000 is PROCESS_QUERY_LIMITED_INFORMATION, but that constant isn't
    // defined in x/sys/windows
    // 0x10 is PROCESS_VM_READ
    procHandle, err := windows.OpenProcess(0x1010, false, uint32(pid))
    if err != nil {
        log.Debugf("Couldn't open process with PROCESS_VM_READ %v %v", pid, err)
        procHandle, err = windows.OpenProcess(0x1000, false, uint32(pid))
        if err != nil {
            log.Debugf("Couldn't open process %v %v", pid, err)
            return windows.Handle(0), err
        }
    }
    return procHandle, nil
}
// GetUsernameForProcess returns username for a process
//
// It opens the process token, reads the token user, and resolves the SID to
// a DOMAIN\user string. Any step failing returns an empty name and the error.
//
// Fix: the original ignored the error from GetTokenUser and dereferenced
// the result unconditionally; it also contained a redundant `err = nil`.
func GetUsernameForProcess(h windows.Handle) (name string, err error) {
    var t windows.Token
    err = windows.OpenProcessToken(h, windows.TOKEN_QUERY, &t)
    if err != nil {
        log.Debugf("Failed to open process token %v", err)
        return
    }
    defer t.Close()

    tokenUser, err := t.GetTokenUser()
    if err != nil {
        return "", err
    }

    user, domain, _, err := tokenUser.User.Sid.LookupAccount("")
    if err != nil {
        return "", err
    }
    return domain + "\\" + user, nil
}
// ParseCmdLineArgs parses command line arguments to a slice
//
// The input is split on single spaces, then consecutive fields are re-joined
// until a double-quoted section closes, so quoted arguments containing
// spaces survive as one element. Quote characters are kept in the output.
// More than two quotes in a single space-delimited field aborts parsing and
// returns whatever was accumulated so far.
// NOTE(review): splitting on a single space means consecutive spaces yield
// empty fields — confirm that is acceptable for Windows command lines.
func ParseCmdLineArgs(cmdline string) (res []string) {
    blocks := strings.Split(cmdline, " ")
    // findCloseQuote: currently inside an unterminated quoted section.
    findCloseQuote := false
    // donestring: the current argument is complete and should be flushed.
    donestring := false
    var stringInProgress bytes.Buffer
    for _, b := range blocks {
        numquotes := strings.Count(b, "\"")
        if numquotes == 0 {
            stringInProgress.WriteString(b)
            if !findCloseQuote {
                donestring = true
            } else {
                // Still inside quotes: restore the space eaten by Split.
                stringInProgress.WriteString(" ")
            }
        } else if numquotes == 1 {
            stringInProgress.WriteString(b)
            if findCloseQuote {
                // This field closes the quoted section.
                donestring = true
            } else {
                // This field opens a quoted section.
                findCloseQuote = true
                stringInProgress.WriteString(" ")
            }
        } else if numquotes == 2 {
            // Self-contained quoted field.
            stringInProgress.WriteString(b)
            donestring = true
        } else {
            log.Warnf("unexpected quotes in string, giving up (%v)", cmdline)
            return res
        }
        if donestring {
            // Flush the completed argument and reset parser state.
            res = append(res, stringInProgress.String())
            stringInProgress.Reset()
            findCloseQuote = false
            donestring = false
        }
    }
    return res
}
| getPIDs |
sbd.py | import argparse
import os
import pdfkit
import re
import validators
from robobrowser import RoboBrowser
def remove_non_ascii_chars(text):
    """Collapse every run of non-ASCII characters in *text* into one space."""
    ascii_text = re.sub(r'[^\x00-\x7F]+', ' ', text)
    return ascii_text
base_url = 'https://learning.oreilly.com'
parser = argparse.ArgumentParser(
description='A small program to download books from Safari Books Online for offline storage.')
parser.add_argument('safari_book_url',
help='Safari book url, ex. https://www.safaribooksonline.com/library/view/book-name/book-code/')
def main():
args = parser.parse_args()
url = args.safari_book_url
if not validators.url(url):
print("URL is invalid, please pass proper URL")
exit()
cookies = [{ "BrowserCookie": "xxx" },
{ "csrfsafari": "xxx" },
{ "logged_in": "xxx" },
{ "user_identifier": "xxx" },
{ "sessionid": "xxx" }]
br = RoboBrowser(parser='lxml')
br.open(url)
for cookie in cookies:
br.session.cookies.update(cookie)
error_list = br.parsed.find_all("ul", class_='errorlist')
if error_list.__len__() != 0:
print("Invalid cookies: " + error_list[0].contents[0].text)
exit()
else:
print("Valid cookies")
complete_book = ''
## reopen URL with new cookies
br.open(url)
## include TOC page
content = br.parsed.find("section", {"class": "detail-book"})
for img in content.findAll('img'):
img['src'] = img['src'].replace("/library/", base_url + "/library/")
for links in content.findAll('a'):
links['href'] = links['href'].replace("/library/", base_url + "/library/")
complete_book += remove_non_ascii_chars(content.__str__())
url_list = []
for chapter in br.parsed.find_all("a", class_='t-chapter'):
url_list.append(chapter['href'])
author = br.parsed.find('meta', {"property": 'og:book:author'})['content']
title = br.parsed.find('meta', {"itemprop": 'name'})['content']
author_title = author + ' - ' + title
filename = str(author_title) + '.pdf'
print('Downloading ' + author_title)
# fetch all the book pages | br.open(url_list[x])
content = br.parsed.find("div", {"id": "sbo-rt-content"})
for img in content.findAll('img'):
img['src'] = img['src'].replace("/library/", base_url + "/library/")
for links in content.findAll('a'):
links['href'] = links['href'].replace("/library/", base_url + "/library/")
complete_book += remove_non_ascii_chars(content.__str__())
print("Generating pdf...")
pdfkit.from_string(complete_book, filename, options=dict(encoding="utf-8", quiet=''))
print("Done! Saved as '" + filename + "'") | for x in range(0, url_list.__len__()):
print("Downloading chapter " + str(x + 1) + " out of " + str(url_list.__len__())) |
flag-package-test.go | // [flag package](https://golang.org/pkg/flag/) is the Go equivalent
// of Python [argparse](https://docs.python.org/2/howto/argparse.html).
// While not as powerful, it does what we expect it to do. It
// simplifies adding and parsing command line parameters, leaving us
// to concentrate on the tools. Most of our tools will need them to be
// actually useful (hardcoding URLs and IPs get old too fast).
| package main
import (
"flag"
"fmt"
)
// main demonstrates the three steps of the flag package:
// declare flags, parse them, then read their values.
func main() {
    // Pointer-returning form for ip and verbose...
    ip := flag.String("ip", "127.0.0.1", "target IP")
    verbose := flag.Bool("verbose", true, "verbosity")

    // ...and the Var form binding directly to a local for port.
    var port int
    flag.IntVar(&port, "port", 8080, "Port")

    // Parse the command line into the declared flags.
    flag.Parse()

    // Hack IP:port
    fmt.Printf("Hacking %s:%d!\n", *ip, port)

    // Display progress if verbose flag is set
    if *verbose {
        fmt.Printf("Pew pew!\n")
    }
}
factoring.rs | use rand::{thread_rng, Rng};
use crate::{math, primes};
pub fn | (n: u64) -> Vec<u64> {
let mut factors = factor_to_vec(n);
let mut i: usize = 0;
while i < factors.len() {
if !primes::is_prime_list(factors[i]) {
let mut new_factors = factor_to_vec(factors[i]);
while new_factors[0] == factors[i] {
new_factors = factor_to_vec(factors[i]);
}
factors.extend(new_factors);
}
i += 1;
}
factors.push(1);
factors.push(n);
factors.sort_unstable();
factors.dedup();
return factors;
}
/// Returns the divisors of `n` by trial division up to sqrt(n), pushing each
/// divisor together with its cofactor. Output order and duplicates match the
/// original: pairs appear in ascending-divisor order, and a perfect square
/// pushes its root twice.
pub fn get_factors_simple(n: u64) -> Vec<u64> {
    let limit: u64 = ((n as f64).sqrt() + 1.0) as u64;
    let mut divisors: Vec<u64> = Vec::new();
    for candidate in 1..limit {
        if n % candidate != 0 {
            continue;
        }
        divisors.push(candidate);
        divisors.push(n / candidate);
    }
    divisors
}
// factor_to_vec repeatedly extracts one divisor via Brent/Pollard rho and
// divides it out until n is reduced to 1. The returned factors are not
// necessarily prime — rho can yield composite divisors — and the caller
// (see get_prime_factors) re-factors any non-prime entries.
// NOTE(review): the println! below looks like leftover debug output printed
// on every call — confirm whether it should be removed.
fn factor_to_vec(mut n: u64) -> Vec<u64> {
    let orig_n = n;
    let mut out: Vec<u64> = Vec::new();
    while n != 1 {
        let factor: u64 = brent_modified_pollard_rho_factor(n);
        n /= factor;
        out.push(factor);
    }
    println!("{} -> {:?}", orig_n, out);
    return out;
}
// Brent's variant of Pollard's rho: returns a non-trivial divisor of odd n
// (2 is returned immediately for even n). The divisor is not guaranteed to
// be prime, and a failed randomized walk can end with g == n.
// NOTE(review): (y * y) % n overflows u64 whenever n >= 2^32 since y can be
// up to n-1 — for large inputs the squaring needs 128-bit intermediates
// (u128). Confirm the expected input range.
fn brent_modified_pollard_rho_factor(n: u64) -> u64{
    // ACK: https://maths-people.anu.edu.au/~brent/pd/rpb051i.pdf
    // ACK: https://comeoncodeon.wordpress.com/2010/09/18/pollard-rho-brent-integer-factorization/
    if n % 2 == 0 {
        return 2;
    }
    let mut rng = thread_rng();
    // Random starting point y, polynomial offset c, and batch size m.
    let mut y: u64 = rng.gen_range(1, n);
    let c: u64 = rng.gen_range(1, n);
    let m: u64 = rng.gen_range(1, n);
    let mut g: u64 = 1;   // candidate gcd / divisor
    let mut r: u64 = 1;   // cycle-length bound, doubles each round
    let mut q: u64 = 1;   // accumulated product of |x - y| mod n
    let mut ys: u64 = y;  // checkpoint of y for the backtracking phase
    let mut x: u64 = y;
    while g == 1 {
        x = y;
        // Advance y by r steps of the iteration y <- (y^2 + c) mod n.
        for _ in 0..r {
            y = (((y * y) % n) + c) % n;
        }
        let mut k: u64 = 0;
        while (k < r) && (g == 1) {
            ys = y;
            // Batch up to m difference-products before taking one gcd.
            let min: u64 = *vec![m, r - k].iter().min().unwrap();
            for _ in 0..min {
                y = (((y * y) % n) + c) % n;
                q = (q * math::abs(x as i64 - y as i64)) % n;
            }
            g = math::gcd(q, n);
            k += m;
        }
        r *= 2;
    }
    if g == n {
        // The batched gcd jumped past the factor: backtrack one step at a
        // time from the checkpoint ys until a proper divisor appears.
        loop {
            ys = (((ys * ys) % n) + c) % n;
            g = math::gcd(math::abs(x as i64 - ys as i64), n);
            if g > 1 {
                break;
            }
        }
    }
    return g;
}
| get_prime_factors |
hash_join.rs | use crate::prelude::*;
use crate::utils::Xob;
use ahash::RandomState;
use std::collections::{HashMap, HashSet};
use std::hash::Hash;
use unsafe_unwrap::UnsafeUnwrap;
// Dispatch helpers: downcast the right Series to the same dtype as the left
// ChunkedArray (via the series accessor named by $type_), then run the
// corresponding typed join. Propagates the accessor's dtype-mismatch error.
macro_rules! hash_join_inner {
    ($s_right:ident, $ca_left:ident, $type_:ident) => {{
        // call the type method series.i32()
        let ca_right = $s_right.$type_()?;
        $ca_left.hash_join_inner(ca_right)
    }};
}

// Same downcast-then-join pattern for left joins.
macro_rules! hash_join_left {
    ($s_right:ident, $ca_left:ident, $type_:ident) => {{
        // call the type method series.i32()
        let ca_right = $s_right.$type_()?;
        $ca_left.hash_join_left(ca_right)
    }};
}

// Same downcast-then-join pattern for outer joins.
macro_rules! hash_join_outer {
    ($s_right:ident, $ca_left:ident, $type_:ident) => {{
        // call the type method series.i32()
        let ca_right = $s_right.$type_()?;
        $ca_left.hash_join_outer(ca_right)
    }};
}
// apply_hash_join_on_series matches on the left Series' variant and expands
// $join_macro (one of the hash_join_* macros above) with the matching typed
// accessor name, so one join implementation covers every supported dtype.
// Unsupported variants (e.g. Float, List) hit unimplemented!().
macro_rules! apply_hash_join_on_series {
    ($s_left:ident, $s_right:ident, $join_macro:ident) => {{
        match $s_left {
            Series::UInt8(ca_left) => $join_macro!($s_right, ca_left, u8),
            Series::UInt16(ca_left) => $join_macro!($s_right, ca_left, u16),
            Series::UInt32(ca_left) => $join_macro!($s_right, ca_left, u32),
            Series::UInt64(ca_left) => $join_macro!($s_right, ca_left, u64),
            Series::Int8(ca_left) => $join_macro!($s_right, ca_left, i8),
            Series::Int16(ca_left) => $join_macro!($s_right, ca_left, i16),
            Series::Int32(ca_left) => $join_macro!($s_right, ca_left, i32),
            Series::Int64(ca_left) => $join_macro!($s_right, ca_left, i64),
            Series::Bool(ca_left) => $join_macro!($s_right, ca_left, bool),
            Series::Utf8(ca_left) => $join_macro!($s_right, ca_left, utf8),
            Series::Date32(ca_left) => $join_macro!($s_right, ca_left, date32),
            Series::Date64(ca_left) => $join_macro!($s_right, ca_left, date64),
            Series::Time32Millisecond(ca_left) => {
                $join_macro!($s_right, ca_left, time32_millisecond)
            }
            Series::Time32Second(ca_left) => $join_macro!($s_right, ca_left, time32_second),
            Series::Time64Nanosecond(ca_left) => $join_macro!($s_right, ca_left, time64_nanosecond),
            Series::Time64Microsecond(ca_left) => {
                $join_macro!($s_right, ca_left, time64_microsecond)
            }
            Series::DurationMillisecond(ca_left) => {
                $join_macro!($s_right, ca_left, duration_millisecond)
            }
            Series::DurationSecond(ca_left) => $join_macro!($s_right, ca_left, duration_second),
            Series::DurationNanosecond(ca_left) => {
                $join_macro!($s_right, ca_left, duration_nanosecond)
            }
            Series::DurationMicrosecond(ca_left) => {
                $join_macro!($s_right, ca_left, duration_microsecond)
            }
            Series::TimestampMillisecond(ca_left) => {
                $join_macro!($s_right, ca_left, timestamp_millisecond)
            }
            Series::TimestampSecond(ca_left) => $join_macro!($s_right, ca_left, timestamp_second),
            Series::TimestampNanosecond(ca_left) => {
                $join_macro!($s_right, ca_left, timestamp_nanosecond)
            }
            Series::TimestampMicrosecond(ca_left) => {
                $join_macro!($s_right, ca_left, timestamp_microsecond)
            }
            Series::IntervalDayTime(ca_left) => $join_macro!($s_right, ca_left, interval_daytime),
            Series::IntervalYearMonth(ca_left) => {
                $join_macro!($s_right, ca_left, interval_year_month)
            }
            _ => unimplemented!(),
        }
    }};
}
/// Build the hash table for the build side of a join: maps each key to the
/// list of row indices where it occurs. Capacity is seeded from a tenth of
/// the iterator's size hint as a rough duplicate-key heuristic.
pub(crate) fn prepare_hashed_relation<T>(
    b: impl Iterator<Item = T>,
) -> HashMap<T, Vec<usize>, RandomState>
where
    T: Hash + Eq,
{
    let mut hash_tbl: HashMap<T, Vec<usize>, ahash::RandomState> =
        HashMap::with_capacity_and_hasher(b.size_hint().0 / 10, RandomState::new());

    b.enumerate()
        .for_each(|(idx, key)| hash_tbl.entry(key).or_insert_with(Vec::new).push(idx));
    hash_tbl
}
/// Hash join a and b.
/// b should be the shorter relation.
/// NOTE that T also can be an Option<T>. Nulls are seen as equal.
fn hash_join_tuples_inner<T>(
    a: impl Iterator<Item = T>,
    b: impl Iterator<Item = T>,
    // Because b should be the shorter relation we could need to swap to keep left left and right right.
    swap: bool,
) -> Vec<(usize, usize)>
where
    T: Hash + Eq + Copy,
{
    let mut results = Vec::new();
    // First we hash one relation
    let hash_tbl = prepare_hashed_relation(b);

    // Next we probe the other relation in the hash table
    // code duplication is because we want to only do the swap check once
    if swap {
        // b was originally the left relation: emit (idx_b, idx_a).
        a.enumerate().for_each(|(idx_a, key)| {
            if let Some(indexes_b) = hash_tbl.get(&key) {
                let tuples = indexes_b.iter().map(|&idx_b| (idx_b, idx_a));
                results.extend(tuples)
            }
        });
    } else {
        // Normal orientation: emit (idx_a, idx_b).
        a.enumerate().for_each(|(idx_a, key)| {
            if let Some(indexes_b) = hash_tbl.get(&key) {
                let tuples = indexes_b.iter().map(|&idx_b| (idx_a, idx_b));
                results.extend(tuples)
            }
        });
    }
    results
}
/// Left hash join over two key iterators: `b` is hashed, `a` is probed.
/// Every row of `a` appears in the output; rows without a match carry `None`
/// on the right side. `None`/null keys hash and compare as equal values.
fn hash_join_tuples_left<T>(
    a: impl Iterator<Item = T>,
    b: impl Iterator<Item = T>,
) -> Vec<(usize, Option<usize>)>
where
    T: Hash + Eq + Copy,
{
    // Build the hash table over the right relation once.
    let hash_tbl = prepare_hashed_relation(b);

    // Probe with the left relation: one tuple per match, or a null-right
    // tuple when the key is absent.
    let mut out: Vec<(usize, Option<usize>)> = Vec::new();
    for (idx_a, key) in a.enumerate() {
        if let Some(indexes_b) = hash_tbl.get(&key) {
            for &idx_b in indexes_b.iter() {
                out.push((idx_a, Some(idx_b)));
            }
        } else {
            out.push((idx_a, None));
        }
    }
    out
}
/// Hash join outer. Both left and right can have no match so Options
/// Probes `a` against a hash table built from `b`, *removing* matched keys;
/// whatever remains in the table afterwards is emitted with a null left (or
/// right, when swapped) side. `swap` restores the original left/right
/// orientation when the shorter relation was the original left one.
fn hash_join_tuples_outer<T, I, J>(a: I, b: J, swap: bool) -> Vec<(Option<usize>, Option<usize>)>
where
    I: Iterator<Item = T>,
    J: Iterator<Item = T>,
    T: Hash + Eq + Copy + Sync,
{
    let mut results = Vec::with_capacity(a.size_hint().0 + b.size_hint().0);

    // prepare hash table
    let mut hash_tbl = prepare_hashed_relation(b);

    // probe the hash table.
    // Note: indexes from b that are not matched will be None, Some(idx_b)
    // Therefore we remove the matches and the remaining will be joined from the right
    // code duplication is because we want to only do the swap check once
    if swap {
        a.enumerate().for_each(|(idx_a, key)| {
            match hash_tbl.remove(&key) {
                // left and right matches
                Some(indexes_b) => {
                    results.extend(indexes_b.iter().map(|&idx_b| (Some(idx_b), Some(idx_a))))
                }
                // only left values, right = null
                None => {
                    results.push((None, Some(idx_a)));
                }
            }
        });
        hash_tbl.iter().for_each(|(_k, indexes_b)| {
            // remaining joined values from the right table
            results.extend(indexes_b.iter().map(|&idx_b| (Some(idx_b), None)))
        });
    } else {
        a.enumerate().for_each(|(idx_a, key)| {
            match hash_tbl.remove(&key) {
                // left and right matches
                Some(indexes_b) => {
                    results.extend(indexes_b.iter().map(|&idx_b| (Some(idx_a), Some(idx_b))))
                }
                // only left values, right = null
                None => {
                    results.push((Some(idx_a), None));
                }
            }
        });
        hash_tbl.iter().for_each(|(_k, indexes_b)| {
            // remaining joined values from the right table
            results.extend(indexes_b.iter().map(|&idx_b| (None, Some(idx_b))))
        });
    };
    results
}
/// Typed hash-join API implemented per chunked-array type. Each method
/// returns row-index tuples (left, right); outer-join tuples use None for
/// the unmatched side.
pub trait HashJoin<T> {
    fn hash_join_inner(&self, other: &ChunkedArray<T>) -> Vec<(usize, usize)>;
    fn hash_join_left(&self, other: &ChunkedArray<T>) -> Vec<(usize, Option<usize>)>;
    fn hash_join_outer(&self, other: &ChunkedArray<T>) -> Vec<(Option<usize>, Option<usize>)>;
}

// det_hash_prone_order yields (probe, build, swap): the shorter relation is
// returned as `b` (the build/hashed side) and `swap` records that the roles
// were flipped, so the join can restore left/right order in its output.
macro_rules! det_hash_prone_order {
    ($self:expr, $other:expr) => {{
        // The shortest relation will be used to create a hash table.
        let left_first = $self.len() > $other.len();
        let a;
        let b;
        if left_first {
            a = $self;
            b = $other;
        } else {
            b = $self;
            a = $other;
        }
        (a, b, !left_first)
    }};
}
// Numeric chunked arrays. Each join picks the cheapest iteration per side:
// a plain slice when cont_slice() succeeds (which appears to mean a single
// contiguous null-free chunk — confirm in ChunkedArray docs), otherwise an
// Option<T> iterator. When the sides differ, slice values are wrapped in
// Some(..) so both iterators yield the same key type.
impl<T> HashJoin<T> for ChunkedArray<T>
where
    T: PolarsNumericType + Sync,
    T::Native: Eq + Hash,
{
    fn hash_join_inner(&self, other: &ChunkedArray<T>) -> Vec<(usize, usize)> {
        let (a, b, swap) = det_hash_prone_order!(self, other);
        match (a.cont_slice(), b.cont_slice()) {
            (Ok(a_slice), Ok(b_slice)) => {
                hash_join_tuples_inner(a_slice.iter(), b_slice.iter(), swap)
            }
            (Ok(a_slice), Err(_)) => {
                hash_join_tuples_inner(
                    a_slice.iter().map(|v| Some(*v)), // take ownership
                    b.into_iter(),
                    swap,
                )
            }
            (Err(_), Ok(b_slice)) => {
                hash_join_tuples_inner(a.into_iter(), b_slice.iter().map(|v| Some(*v)), swap)
            }
            (Err(_), Err(_)) => hash_join_tuples_inner(a.into_iter(), b.into_iter(), swap),
        }
    }

    fn hash_join_left(&self, other: &ChunkedArray<T>) -> Vec<(usize, Option<usize>)> {
        // No reordering here: left join semantics fix which side is probed.
        match (self.cont_slice(), other.cont_slice()) {
            (Ok(a_slice), Ok(b_slice)) => hash_join_tuples_left(a_slice.iter(), b_slice.iter()),
            (Ok(a_slice), Err(_)) => {
                hash_join_tuples_left(
                    a_slice.iter().map(|v| Some(*v)), // take ownership
                    other.into_iter(),
                )
            }
            (Err(_), Ok(b_slice)) => {
                hash_join_tuples_left(self.into_iter(), b_slice.iter().map(|v| Some(*v)))
            }
            (Err(_), Err(_)) => hash_join_tuples_left(self.into_iter(), other.into_iter()),
        }
    }

    fn hash_join_outer(&self, other: &ChunkedArray<T>) -> Vec<(Option<usize>, Option<usize>)> {
        let (a, b, swap) = det_hash_prone_order!(self, other);
        match (a.cont_slice(), b.cont_slice()) {
            (Ok(a_slice), Ok(b_slice)) => {
                hash_join_tuples_outer(a_slice.iter(), b_slice.iter(), swap)
            }
            (Ok(a_slice), Err(_)) => {
                hash_join_tuples_outer(
                    a_slice.iter().map(|v| Some(*v)), // take ownership
                    b.into_iter(),
                    swap,
                )
            }
            (Err(_), Ok(b_slice)) => hash_join_tuples_outer(
                a.into_iter(),
                b_slice.iter().map(|v: &T::Native| Some(*v)),
                swap,
            ),
            (Err(_), Err(_)) => hash_join_tuples_outer(a.into_iter(), b.into_iter(), swap),
        }
    }
}
// Boolean and Utf8 arrays have no contiguous-slice fast path. Instead, when
// both sides are "optimally aligned" (which appears to mean the no-null
// iterator is valid — confirm is_optimal_aligned semantics), the plain-value
// iterators are used; otherwise the Option iterators handle nulls.
impl HashJoin<BooleanType> for BooleanChunked {
    fn hash_join_inner(&self, other: &BooleanChunked) -> Vec<(usize, usize)> {
        let (a, b, swap) = det_hash_prone_order!(self, other);
        // Create the join tuples
        match (a.is_optimal_aligned(), b.is_optimal_aligned()) {
            (true, true) => {
                hash_join_tuples_inner(a.into_no_null_iter(), b.into_no_null_iter(), swap)
            }
            _ => hash_join_tuples_inner(a.into_iter(), b.into_iter(), swap),
        }
    }

    fn hash_join_left(&self, other: &BooleanChunked) -> Vec<(usize, Option<usize>)> {
        match (self.is_optimal_aligned(), other.is_optimal_aligned()) {
            (true, true) => {
                hash_join_tuples_left(self.into_no_null_iter(), other.into_no_null_iter())
            }
            _ => hash_join_tuples_left(self.into_iter(), other.into_iter()),
        }
    }

    fn hash_join_outer(&self, other: &BooleanChunked) -> Vec<(Option<usize>, Option<usize>)> {
        let (a, b, swap) = det_hash_prone_order!(self, other);
        match (a.is_optimal_aligned(), b.is_optimal_aligned()) {
            (true, true) => {
                hash_join_tuples_outer(a.into_no_null_iter(), b.into_no_null_iter(), swap)
            }
            _ => hash_join_tuples_outer(a.into_iter(), b.into_iter(), swap),
        }
    }
}

// Same strategy as the Boolean impl, for string arrays.
impl HashJoin<Utf8Type> for Utf8Chunked {
    fn hash_join_inner(&self, other: &Utf8Chunked) -> Vec<(usize, usize)> {
        let (a, b, swap) = det_hash_prone_order!(self, other);
        // Create the join tuples
        match (a.is_optimal_aligned(), b.is_optimal_aligned()) {
            (true, true) => {
                hash_join_tuples_inner(a.into_no_null_iter(), b.into_no_null_iter(), swap)
            }
            _ => hash_join_tuples_inner(a.into_iter(), b.into_iter(), swap),
        }
    }

    fn hash_join_left(&self, other: &Utf8Chunked) -> Vec<(usize, Option<usize>)> {
        match (self.is_optimal_aligned(), other.is_optimal_aligned()) {
            (true, true) => {
                hash_join_tuples_left(self.into_no_null_iter(), other.into_no_null_iter())
            }
            _ => hash_join_tuples_left(self.into_iter(), other.into_iter()),
        }
    }

    fn hash_join_outer(&self, other: &Utf8Chunked) -> Vec<(Option<usize>, Option<usize>)> {
        let (a, b, swap) = det_hash_prone_order!(self, other);
        match (a.is_optimal_aligned(), b.is_optimal_aligned()) {
            (true, true) => {
                hash_join_tuples_outer(a.into_no_null_iter(), b.into_no_null_iter(), swap)
            }
            _ => hash_join_tuples_outer(a.into_iter(), b.into_iter(), swap),
        }
    }
}
/// Merge the values of an outer-join key column from both sides into a
/// single Series, driven by the (left, right) index tuples produced by
/// `hash_join_outer`.
pub trait ZipOuterJoinColumn {
    fn zip_outer_join_column(
        &self,
        _right_column: &Series,
        _opt_join_tuples: &[(Option<usize>, Option<usize>)],
    ) -> Series {
        // Default body: dtype does not support outer-join key zipping
        // (the empty impls for Float/List/Object rely on this).
        unimplemented!()
    }
}
impl<T> ZipOuterJoinColumn for ChunkedArray<T>
where
    T: PolarsIntegerType,
{
    // For each join tuple, pick the value from the left column when the left
    // index is present, otherwise from the right column.
    fn zip_outer_join_column(
        &self,
        right_column: &Series,
        opt_join_tuples: &[(Option<usize>, Option<usize>)],
    ) -> Series {
        // `right_column` must have this array's dtype or unwrap panics here.
        let right_ca = self.unpack_series_matching_type(right_column).unwrap();
        // Random-access views over both key columns.
        let left_rand_access = self.take_rand();
        let right_rand_access = right_ca.take_rand();
        opt_join_tuples
            .iter()
            .map(|(opt_left_idx, opt_right_idx)| {
                if let Some(left_idx) = opt_left_idx {
                    unsafe { left_rand_access.get_unchecked(*left_idx) }
                } else {
                    // A tuple with no left index must carry a right index;
                    // `unsafe_unwrap` skips the Option check on that premise.
                    unsafe {
                        let right_idx = opt_right_idx.unsafe_unwrap();
                        right_rand_access.get_unchecked(right_idx)
                    }
                }
            })
            // NOTE(review): collects via the Xob wrapper before conversion to
            // a Series — presumably a non-null collection helper; verify in
            // Xob's definition.
            .collect::<Xob<ChunkedArray<T>>>()
            .into_inner()
            .into_series()
    }
}
// These dtypes keep the trait's default `unimplemented!()` body: outer-join
// key zipping is not supported for them here.
impl ZipOuterJoinColumn for Float32Chunked {}
impl ZipOuterJoinColumn for Float64Chunked {}
impl ZipOuterJoinColumn for ListChunked {}
impl<T> ZipOuterJoinColumn for ObjectChunked<T> {}
/// Implement `ZipOuterJoinColumn` for chunked array types whose value
/// iterators can be collected directly into the array type (Boolean, Utf8).
///
/// The original macro body was truncated mid-expression (the map closure was
/// never closed and the trailing `.collect::<$chunkedtype>().into_series()`
/// chain plus closing braces were missing); reconstructed here to mirror the
/// generic integer implementation above.
macro_rules! impl_zip_outer_join {
    ($chunkedtype:ident) => {
        impl ZipOuterJoinColumn for $chunkedtype {
            fn zip_outer_join_column(
                &self,
                right_column: &Series,
                opt_join_tuples: &[(Option<usize>, Option<usize>)],
            ) -> Series {
                // `right_column` must have this array's dtype or unwrap panics.
                let right_ca = self.unpack_series_matching_type(right_column).unwrap();
                // Random-access views over both key columns.
                let left_rand_access = self.take_rand();
                let right_rand_access = right_ca.take_rand();
                opt_join_tuples
                    .iter()
                    .map(|(opt_left_idx, opt_right_idx)| {
                        // Prefer the left value; a tuple missing the left
                        // index must carry a right index.
                        if let Some(left_idx) = opt_left_idx {
                            unsafe { left_rand_access.get_unchecked(*left_idx) }
                        } else {
                            unsafe {
                                let right_idx = opt_right_idx.unsafe_unwrap();
                                right_rand_access.get_unchecked(right_idx)
                            }
                        }
                    })
                    .collect::<$chunkedtype>()
                    .into_series()
            }
        }
    };
}
// Boolean and Utf8 arrays collect their option-valued iterators directly.
impl_zip_outer_join!(BooleanChunked);
impl_zip_outer_join!(Utf8Chunked);
impl DataFrame {
    /// Utility method to finish a join.
    ///
    /// Renames right-hand columns that clash with a left-hand column name by
    /// appending a "_right" suffix, then horizontally stacks the right frame
    /// onto the left and returns the combined frame.
    fn finish_join(&self, mut df_left: DataFrame, mut df_right: DataFrame) -> Result<DataFrame> {
        let mut left_names = HashSet::with_capacity_and_hasher(df_left.width(), RandomState::new());
        df_left.columns.iter().for_each(|series| {
            left_names.insert(series.name());
        });
        // Collect the clashing names first; renaming while iterating would
        // need simultaneous mutable and shared borrows of df_right.
        let mut rename_strs = Vec::with_capacity(df_right.width());
        df_right.columns.iter().for_each(|series| {
            if left_names.contains(series.name()) {
                rename_strs.push(series.name().to_owned())
            }
        });
        for name in rename_strs {
            df_right.rename(&name, &format!("{}_right", name))?;
        }
        df_left.hstack_mut(&df_right.columns)?;
        Ok(df_left)
    }
    /// Materialize the left half of a join result by taking the left row
    /// indices out of `join_tuples`.
    fn create_left_df<B: Sync>(&self, join_tuples: &[(usize, B)]) -> DataFrame {
        // Safety: indices come from the hash-join over these same frames.
        unsafe {
            self.take_iter_unchecked_bounds(
                join_tuples.iter().map(|(left, _right)| *left),
                Some(join_tuples.len()),
            )
        }
    }
    /// Perform an inner join on two DataFrames.
    ///
    /// # Example
    ///
    /// ```
    /// use polars::prelude::*;
    /// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
    ///     left.inner_join(right, "join_column_left", "join_column_right")
    /// }
    /// ```
    pub fn inner_join(
        &self,
        other: &DataFrame,
        left_on: &str,
        right_on: &str,
    ) -> Result<DataFrame> {
        let s_left = self.column(left_on)?;
        let s_right = other.column(right_on)?;
        self.inner_join_from_series(other, s_left, s_right)
    }
    /// Inner join driven by pre-resolved key Series.
    pub(crate) fn inner_join_from_series(
        &self,
        other: &DataFrame,
        s_left: &Series,
        s_right: &Series,
    ) -> Result<DataFrame> {
        let join_tuples = apply_hash_join_on_series!(s_left, s_right, hash_join_inner);
        // Materialize both halves in parallel; the right half drops its key
        // column since the left frame already carries it.
        let (df_left, df_right) = rayon::join(
            || self.create_left_df(&join_tuples),
            || unsafe {
                other
                    .drop(s_right.name())
                    .unwrap()
                    .take_iter_unchecked_bounds(
                        join_tuples.iter().map(|(_left, right)| *right),
                        Some(join_tuples.len()),
                    )
            },
        );
        self.finish_join(df_left, df_right)
    }
    /// Perform a left join on two DataFrames
    /// # Example
    ///
    /// ```
    /// use polars::prelude::*;
    /// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
    ///     left.left_join(right, "join_column_left", "join_column_right")
    /// }
    /// ```
    pub fn left_join(&self, other: &DataFrame, left_on: &str, right_on: &str) -> Result<DataFrame> {
        let s_left = self.column(left_on)?;
        let s_right = other.column(right_on)?;
        self.left_join_from_series(other, s_left, s_right)
    }
    /// Left join driven by pre-resolved key Series; unmatched right rows
    /// become nulls via the optional-index take.
    pub(crate) fn left_join_from_series(
        &self,
        other: &DataFrame,
        s_left: &Series,
        s_right: &Series,
    ) -> Result<DataFrame> {
        let opt_join_tuples = apply_hash_join_on_series!(s_left, s_right, hash_join_left)
;
        let (df_left, df_right) = rayon::join(
            || self.create_left_df(&opt_join_tuples),
            || unsafe {
                other
                    .drop(s_right.name())
                    .unwrap()
                    .take_opt_iter_unchecked_bounds(
                        opt_join_tuples.iter().map(|(_left, right)| *right),
                        Some(opt_join_tuples.len()),
                    )
            },
        );
        self.finish_join(df_left, df_right)
    }
    /// Perform an outer join on two DataFrames
    /// # Example
    ///
    /// ```
    /// use polars::prelude::*;
    /// fn join_dfs(left: &DataFrame, right: &DataFrame) -> Result<DataFrame> {
    ///     left.outer_join(right, "join_column_left", "join_column_right")
    /// }
    /// ```
    pub fn outer_join(
        &self,
        other: &DataFrame,
        left_on: &str,
        right_on: &str,
    ) -> Result<DataFrame> {
        let s_left = self.column(left_on)?;
        let s_right = other.column(right_on)?;
        self.outer_join_from_series(other, s_left, s_right)
    }
    /// Outer join driven by pre-resolved key Series. Both frames drop their
    /// key column; the merged key column is rebuilt from the join tuples and
    /// stacked back onto the result.
    pub(crate) fn outer_join_from_series(
        &self,
        other: &DataFrame,
        s_left: &Series,
        s_right: &Series,
    ) -> Result<DataFrame> {
        // Get the indexes of the joined relations
        let opt_join_tuples: Vec<(Option<usize>, Option<usize>)> =
            apply_hash_join_on_series!(s_left, s_right, hash_join_outer);
        // Take the left and right dataframes by join tuples
        let (mut df_left, df_right) = rayon::join(
            || unsafe {
                self.drop(s_left.name())
                    .unwrap()
                    .take_opt_iter_unchecked_bounds(
                        opt_join_tuples.iter().map(|(left, _right)| *left),
                        Some(opt_join_tuples.len()),
                    )
            },
            || unsafe {
                other
                    .drop(s_right.name())
                    .unwrap()
                    .take_opt_iter_unchecked_bounds(
                        opt_join_tuples.iter().map(|(_left, right)| *right),
                        Some(opt_join_tuples.len()),
                    )
            },
        );
        // Rebuild the join key column from both sides and reattach it.
        let mut s =
            apply_method_all_series!(s_left, zip_outer_join_column, s_right, &opt_join_tuples);
        s.rename(s_left.name());
        df_left.hstack_mut(&[s])?;
        self.finish_join(df_left, df_right)
    }
}
#[cfg(test)]
mod test {
    use crate::prelude::*;
    /// Two small frames sharing a "days" key column; the right frame repeats
    /// key 1 so joins fan out to multiple matches.
    fn create_frames() -> (DataFrame, DataFrame) {
        let s0 = Series::new("days", &[0, 1, 2]);
        let s1 = Series::new("temp", &[22.1, 19.9, 7.]);
        let s2 = Series::new("rain", &[0.2, 0.1, 0.3]);
        let temp = DataFrame::new(vec![s0, s1, s2]).unwrap();
        let s0 = Series::new("days", &[1, 2, 3, 1]);
        let s1 = Series::new("rain", &[0.1, 0.2, 0.3, 0.4]);
        let rain = DataFrame::new(vec![s0, s1]).unwrap();
        (temp, rain)
    }
    #[test]
    fn test_inner_join() {
        let (temp, rain) = create_frames();
        let joined = temp.inner_join(&rain, "days", "days").unwrap();
        // The clashing "rain" column from the right side gets the
        // "_right" suffix from finish_join.
        let join_col_days = Series::new("days", &[1, 2, 1]);
        let join_col_temp = Series::new("temp", &[19.9, 7., 19.9]);
        let join_col_rain = Series::new("rain", &[0.1, 0.3, 0.1]);
        let join_col_rain_right = Series::new("rain_right", [0.1, 0.2, 0.4].as_ref());
        let true_df = DataFrame::new(vec![
            join_col_days,
            join_col_temp,
            join_col_rain,
            join_col_rain_right,
        ])
        .unwrap();
        println!("{}", joined);
        assert!(joined.frame_equal(&true_df));
    }
    #[test]
    fn test_left_join() {
        let s0 = Series::new("days", &[0, 1, 2, 3, 4]);
        let s1 = Series::new("temp", &[22.1, 19.9, 7., 2., 3.]);
        let temp = DataFrame::new(vec![s0, s1]).unwrap();
        let s0 = Series::new("days", &[1, 2]);
        let s1 = Series::new("rain", &[0.1, 0.2]);
        let rain = DataFrame::new(vec![s0, s1]).unwrap();
        let joined = temp.left_join(&rain, "days", "days").unwrap();
        println!("{}", &joined);
        assert_eq!(
            (joined.column("rain").unwrap().sum::<f32>().unwrap() * 10.).round(),
            3.
        );
        // Three left rows have no match and must be null.
        assert_eq!(joined.column("rain").unwrap().null_count(), 3);
        // test join on utf8
        let s0 = Series::new("days", &["mo", "tue", "wed", "thu", "fri"]);
        let s1 = Series::new("temp", &[22.1, 19.9, 7., 2., 3.]);
        let temp = DataFrame::new(vec![s0, s1]).unwrap();
        let s0 = Series::new("days", &["tue", "wed"]);
        let s1 = Series::new("rain", &[0.1, 0.2]);
        let rain = DataFrame::new(vec![s0, s1]).unwrap();
        let joined = temp.left_join(&rain, "days", "days").unwrap();
        println!("{}", &joined);
        assert_eq!(
            (joined.column("rain").unwrap().sum::<f32>().unwrap() * 10.).round(),
            3.
        );
        assert_eq!(joined.column("rain").unwrap().null_count(), 3);
    }
    #[test]
    fn test_outer_join() {
        let (temp, rain) = create_frames();
        let joined = temp.outer_join(&rain, "days", "days").unwrap();
        println!("{:?}", &joined);
        assert_eq!(joined.height(), 5);
        assert_eq!(joined.column("days").unwrap().sum::<i32>(), Some(7));
    }
    #[test]
    fn test_join_with_nulls() {
        let dts = &[20, 21, 22, 23, 24, 25, 27, 28];
        let vals = &[1.2, 2.4, 4.67, 5.8, 4.4, 3.6, 7.6, 6.5];
        let df = DataFrame::new(vec![Series::new("date", dts), Series::new("val", vals)]).unwrap();
        let vals2 = &[Some(1.1), None, Some(3.3), None, None];
        let df2 = DataFrame::new(vec![
            Series::new("date", &dts[3..]),
            Series::new("val2", vals2),
        ])
        .unwrap();
        let joined = df.left_join(&df2, "date", "date").unwrap();
        // The last joined row matched a null on the right, which must
        // survive the join as a null.
        assert_eq!(
            joined
                .column("val2")
                .unwrap()
                .f64()
                .unwrap()
                .get(joined.height() - 1),
            None
        );
    }
}
#!/usr/bin/env python3
# AV-98 Gemini client
# Dervied from VF-1 (https://github.com/solderpunk/VF-1),
# (C) 2019, 2020 Solderpunk <[email protected]>
# With contributions from:
# - danceka <[email protected]>
# - <[email protected]>
# - <[email protected]>
# - Klaus Alexander Seistrup <[email protected]>
# - govynnus <[email protected]>
import argparse
import cmd
import cgi
import codecs
import collections
import datetime
import fnmatch
import getpass
import glob
import hashlib
import io
import mimetypes
import os
import os.path
import random
import shlex
import shutil
import socket
import sqlite3
import ssl
from ssl import CertificateError
import subprocess
import sys
import tempfile
import time
import urllib.parse
import uuid
import webbrowser
try:
import ansiwrap as textwrap
except ModuleNotFoundError:
import textwrap
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
_HAS_CRYPTOGRAPHY = True
_BACKEND = default_backend()
except ModuleNotFoundError:
_HAS_CRYPTOGRAPHY = False
_VERSION = "1.0.2dev"
# Give up after this many consecutive redirects.
_MAX_REDIRECTS = 5
# In-memory cache limits: maximum entry count and maximum age in seconds.
_MAX_CACHE_SIZE = 10
_MAX_CACHE_AGE_SECS = 180
# Command abbreviations
_ABBREVS = {
    "a":    "add",
    "b":    "back",
    "bb":   "blackbox",
    "bm":   "bookmarks",
    "book": "bookmarks",
    "f":    "fold",
    "fo":   "forward",
    "g":    "go",
    "h":    "history",
    "hist": "history",
    "l":    "less",
    "n":    "next",
    "p":    "previous",
    "prev": "previous",
    "q":    "quit",
    "r":    "reload",
    "s":    "save",
    "se":   "search",
    "/":    "search",
    "t":    "tour",
    "u":    "up",
}
# Default external handler commands, keyed by MIME type (globs allowed);
# "%s" is replaced with the temporary file path.
_MIME_HANDLERS = {
    "application/pdf":      "xpdf %s",
    "audio/mpeg":           "mpg123 %s",
    "audio/ogg":            "ogg123 %s",
    "image/*":              "feh %s",
    "text/html":            "lynx -dump -force_html %s",
    "text/*":               "cat %s",
}
# monkey-patch Gemini support in urllib.parse
# see https://github.com/python/cpython/blob/master/Lib/urllib/parse.py
urllib.parse.uses_relative.append("gemini")
urllib.parse.uses_netloc.append("gemini")
def fix_ipv6_url(url):
    """Wrap a bare IPv6 host in square brackets so urllib can parse it.

    URLs with two or fewer colons cannot contain a raw IPv6 address and
    are returned untouched, as are URLs that already use [] notation.
    """
    if url.count(":") <= 2:  # Best way to detect them?
        return url
    # A pair of []s in there means it's probably already fine as is.
    if "[" in url and "]" in url:
        return url
    # Raw address with no scheme and no path: bracket it and add a slash.
    if "/" not in url:
        return "[" + url + "]/"
    # Trickier cases: peel off the scheme (if any) so only the authority
    # component gets bracketed.
    if "://" in url:
        schema, schemaless = url.split("://")
    else:
        schema, schemaless = None, url
    if "/" in schemaless:
        netloc, rest = schemaless.split("/", 1)
        schemaless = "[" + netloc + "]" + "/" + rest
    return schema + "://" + schemaless if schema else schemaless
# Well-known default ports, used when a URL does not specify one.
standard_ports = {
        "gemini": 1965,
        "gopher": 70,
}
class GeminiItem():
    """A single addressable Gemini resource: a URL plus an optional
    human-readable name (e.g. the link text it was reached through)."""
    def __init__(self, url, name=""):
        # Bare host/path shorthand: assume the gemini scheme.
        if "://" not in url:
            url = "gemini://" + url
        self.url = fix_ipv6_url(url)
        self.name = name
        parsed = urllib.parse.urlparse(self.url)
        self.scheme = parsed.scheme
        self.host = parsed.hostname
        # Fall back to the scheme's well-known port (0 for unknown schemes).
        self.port = parsed.port or standard_ports.get(self.scheme, 0)
        self.path = parsed.path
    def root(self):
        """Return a GeminiItem for the server's root path."""
        return GeminiItem(self._derive_url("/"))
    def up(self):
        """Return a GeminiItem for the parent of this resource's path."""
        pathbits = list(os.path.split(self.path.rstrip('/')))
        # Don't try to go higher than root
        # NOTE(review): os.path.split always returns a 2-tuple, so this guard
        # never fires; at the root the `path or self.path` fallback in
        # _derive_url keeps the URL unchanged instead. Confirm intent.
        if len(pathbits) == 1:
            return self
        # Get rid of bottom component
        pathbits.pop()
        new_path = os.path.join(*pathbits)
        return GeminiItem(self._derive_url(new_path))
    def query(self, query):
        """Return a GeminiItem with `query` (percent-encoded) as the query string."""
        query = urllib.parse.quote(query)
        return GeminiItem(self._derive_url(query=query))
    def _derive_url(self, path="", query=""):
        """
        A thin wrapper around urlunparse which avoids inserting standard ports
        into URLs just to keep things clean.

        NOTE(review): raises KeyError for schemes absent from standard_ports;
        callers only reach here with gemini/gopher URLs.
        """
        return urllib.parse.urlunparse((self.scheme,
            self.host if self.port == standard_ports[self.scheme] else self.host + ":" + str(self.port),
            path or self.path, "", query, ""))
    def absolutise_url(self, relative_url):
        """
        Convert a relative URL to an absolute URL by using the URL of this
        GeminiItem as a base.
        """
        return urllib.parse.urljoin(self.url, relative_url)
    def to_map_line(self, name=None):
        # Serialise back to a gemtext "=>" link line.
        if name or self.name:
            return "=> {} {}\n".format(self.url, name or self.name)
        else:
            return "=> {}\n".format(self.url)
    @classmethod
    def from_map_line(cls, line, origin_gi):
        # Parse a gemtext "=>" link line, resolving the URL against the
        # page it appeared on.
        assert line.startswith("=>")
        assert line[2:].strip()
        bits = line[2:].strip().split(maxsplit=1)
        bits[0] = origin_gi.absolutise_url(bits[0])
        return cls(*bits)
# Line terminator mandated by the Gemini protocol for requests and headers.
CRLF = '\r\n'
# Cheap and cheerful URL detector
def looks_like_url(word):
    """Heuristic: a word is treated as a URL if it carries the gemini
    scheme and contains at least one dot."""
    return word.startswith("gemini://") and "." in word
class UserAbortException(Exception):
    """Raised when the user declines a confirmation prompt mid-request."""
    pass
# GeminiClient Decorators
def needs_gi(inner):
    """Decorator: refuse to run a command until a resource has been visited."""
    def outer(self, *args, **kwargs):
        if self.gi:
            return inner(self, *args, **kwargs)
        print("You need to 'go' somewhere, first")
        return None
    outer.__doc__ = inner.__doc__
    return outer
def restricted(inner):
    """Decorator: disable a command when the client is in restricted mode."""
    def outer(self, *args, **kwargs):
        if self.restricted:
            print("Sorry, this command is not available in restricted mode!")
            return None
        return inner(self, *args, **kwargs)
    outer.__doc__ = inner.__doc__
    return outer
class GeminiClient(cmd.Cmd):
    def __init__(self, restricted=False):
        """Set up config directory, prompts, per-session state, options,
        statistics, the TOFU certificate DB and the in-memory cache.

        restricted: when True, commands with shell/filesystem side effects
        are disabled (see the `restricted` decorator).
        """
        cmd.Cmd.__init__(self)
        # Set umask so that nothing we create can be read by anybody else.
        # The certificate cache and TOFU database contain "browser history"
        # type sensitive information.
        os.umask(0o077)
        # Find config directory
        ## Look for something pre-existing
        for confdir in ("~/.av98/", "~/.config/av98/"):
            confdir = os.path.expanduser(confdir)
            if os.path.exists(confdir):
                self.config_dir = confdir
                break
        ## Otherwise, make one in .config if it exists
        else:
            if os.path.exists(os.path.expanduser("~/.config/")):
                self.config_dir = os.path.expanduser("~/.config/av98/")
            else:
                self.config_dir = os.path.expanduser("~/.av98/")
            print("Creating config directory {}".format(self.config_dir))
            os.makedirs(self.config_dir)
        # Prompts: colour switches when a client certificate is active.
        self.no_cert_prompt = "\x1b[38;5;76m" + "AV-98" + "\x1b[38;5;255m" + "> " + "\x1b[0m"
        self.cert_prompt = "\x1b[38;5;202m" + "AV-98" + "\x1b[38;5;255m" + "+cert> " + "\x1b[0m"
        self.prompt = self.no_cert_prompt
        # Browsing state
        self.gi = None                    # currently displayed GeminiItem
        self.history = []                 # back/forward history of GeminiItems
        self.hist_index = 0
        self.idx_filename = ""
        self.index = []                   # links on the current page
        self.index_index = -1
        self.lookup = self.index
        self.marks = {}
        self.page_index = 0
        self.permanent_redirects = {}     # url -> replacement url (status 31)
        self.previous_redirectors = set() # urls seen in current redirect chain
        self.restricted = restricted
        self.tmp_filename = ""
        self.visited_hosts = set()
        self.waypoints = []
        # Client certificates: hostname -> (certfile, keyfile); the "active"
        # entry is the certificate currently presented to servers, if any.
        self.client_certs = {
            "active": None
        }
        self.active_cert_domains = []
        self.active_is_transient = False
        self.transient_certs_created = []
        # User-tweakable options (see the `set` command).
        self.options = {
            "debug" : False,
            "ipv6" : True,
            "timeout" : 10,
            "width" : 80,
            "auto_follow_redirects" : True,
            "gopher_proxy" : None,
            "tls_mode" : "tofu",
            "cache" : False
        }
        # Session statistics, shown by the `blackbox` command.
        self.log = {
            "start_time": time.time(),
            "requests": 0,
            "ipv4_requests": 0,
            "ipv6_requests": 0,
            "bytes_recvd": 0,
            "ipv4_bytes_recvd": 0,
            "ipv6_bytes_recvd": 0,
            "dns_failures": 0,
            "refused_connections": 0,
            "reset_connections": 0,
            "timeouts": 0,
            "cache_hits": 0,
        }
        self._connect_to_tofu_db()
        # In-memory cache: url -> (mime, tmpfile path) plus insertion times.
        self.cache = {}
        self.cache_timestamps = {}
def _connect_to_tofu_db(self):
db_path = os.path.join(self.config_dir, "tofu.db")
self.db_conn = sqlite3.connect(db_path)
self.db_cur = self.db_conn.cursor()
self.db_cur.execute("""CREATE TABLE IF NOT EXISTS cert_cache
(hostname text, address text, fingerprint text,
first_seen date, last_seen date, count integer)""")
    def _go_to_gi(self, gi, update_hist=True, check_cache=True, handle=True):
        """This method might be considered "the heart of AV-98".
        Everything involved in fetching a gemini resource happens here:
        sending the request over the network, parsing the response if
        its a menu, storing the response in a temporary file, choosing
        and calling a handler program, and updating the history.

        update_hist: record the visit in the history list.
        check_cache: serve from the in-memory cache when enabled and fresh.
        handle:      render gemtext / dispatch other MIME types to a handler.
        """
        # Don't try to speak to servers running other protocols
        if gi.scheme in ("http", "https"):
            # Hand web links straight to the system browser.
            webbrowser.open_new_tab(gi.url)
            return
        elif gi.scheme == "gopher" and not self.options.get("gopher_proxy", None):
            print("""AV-98 does not speak Gopher natively.
However, you can use `set gopher_proxy hostname:port` to tell it about a
Gopher-to-Gemini proxy (such as a running Agena instance), in which case
you'll be able to transparently follow links to Gopherspace!""")
            return
        elif gi.scheme not in ("gemini", "gopher"):
            print("Sorry, no support for {} links.".format(gi.scheme))
            return
        # Obey permanent redirects
        if gi.url in self.permanent_redirects:
            new_gi = GeminiItem(self.permanent_redirects[gi.url], name=gi.name)
            self._go_to_gi(new_gi)
            return
        # Use cache, or hit the network if resource is not cached
        if check_cache and self.options["cache"] and self._is_cached(gi.url):
            mime, body, tmpfile = self._get_cached(gi.url)
        else:
            try:
                gi, mime, body, tmpfile = self._fetch_over_network(gi)
            except UserAbortException:
                return
            except Exception as err:
                # Print an error message
                # (each branch also bumps the matching statistics counter)
                if isinstance(err, socket.gaierror):
                    self.log["dns_failures"] += 1
                    print("ERROR: DNS error!")
                elif isinstance(err, ConnectionRefusedError):
                    self.log["refused_connections"] += 1
                    print("ERROR: Connection refused!")
                elif isinstance(err, ConnectionResetError):
                    self.log["reset_connections"] += 1
                    print("ERROR: Connection reset!")
                elif isinstance(err, (TimeoutError, socket.timeout)):
                    self.log["timeouts"] += 1
                    print("""ERROR: Connection timed out!
Slow internet connection?  Use 'set timeout' to be more patient.""")
                else:
                    print("ERROR: " + str(err))
                return
        # Pass file to handler, unless we were asked not to
        if handle:
            if mime == "text/gemini":
                self._handle_gemtext(body, gi)
            else:
                cmd_str = self._get_handler_cmd(mime)
                try:
                    subprocess.call(shlex.split(cmd_str % tmpfile))
                except FileNotFoundError:
                    print("Handler program %s not found!" % shlex.split(cmd_str)[0])
                    print("You can use the ! command to specify another handler program or pipeline.")
        # Update state
        self.gi = gi
        self.mime = mime
        if update_hist:
            self._update_history(gi)
def _fetch_over_network(self, gi):
# Be careful with client certificates!
# Are we crossing a domain boundary?
if self.active_cert_domains and gi.host not in self.active_cert_domains:
if self.active_is_transient:
print("Permanently delete currently active transient certificate?")
resp = input("Y/N? ")
if resp.strip().lower() in ("y", "yes"):
print("Destroying certificate.")
self._deactivate_client_cert()
else:
print("Staying here.")
raise UserAbortException()
else:
print("PRIVACY ALERT: Deactivate client cert before connecting to a new domain?")
resp = input("Y/N? ")
if resp.strip().lower() in ("n", "no"):
print("Keeping certificate active for {}".format(gi.host))
else:
print("Deactivating certificate.")
self._deactivate_client_cert()
# Suggest reactivating previous certs
if not self.client_certs["active"] and gi.host in self.client_certs:
print("PRIVACY ALERT: Reactivate previously used client cert for {}?".format(gi.host))
resp = input("Y/N? ")
if resp.strip().lower() in ("y", "yes"):
self._activate_client_cert(*self.client_certs[gi.host])
else:
print("Remaining unidentified.")
self.client_certs.pop(gi.host)
# Is this a local file?
if not gi.host:
address, f = None, open(gi.path, "rb")
else:
address, f = self._send_request(gi)
# Spec dictates <META> should not exceed 1024 bytes,
# so maximum valid header length is 1027 bytes.
header = f.readline(1027)
header = header.decode("UTF-8")
if not header or header[-1] != '\n':
raise RuntimeError("Received invalid header from server!")
header = header.strip()
self._debug("Response header: %s." % header)
# Validate header
status, meta = header.split(maxsplit=1)
if len(meta) > 1024 or len(status) != 2 or not status.isnumeric():
f.close()
raise RuntimeError("Received invalid header from server!")
# Update redirect loop/maze escaping state
if not status.startswith("3"):
self.previous_redirectors = set()
# Handle non-SUCCESS headers, which don't have a response body
# Inputs
if status.startswith("1"):
print(meta)
if status == "11":
user_input = getpass.getpass("> ")
else:
user_input = input("> ")
return self._fetch_over_network(gi.query(user_input))
# Redirects
elif status.startswith("3"):
new_gi = GeminiItem(gi.absolutise_url(meta))
if new_gi.url == gi.url:
raise RuntimeError("URL redirects to itself!")
elif new_gi.url in self.previous_redirectors:
raise RuntimeError("Caught in redirect loop!")
elif len(self.previous_redirectors) == _MAX_REDIRECTS:
raise RuntimeError("Refusing to follow more than %d consecutive redirects!" % _MAX_REDIRECTS)
# Never follow cross-domain redirects without asking
elif new_gi.host != gi.host:
follow = input("Follow cross-domain redirect to %s? (y/n) " % new_gi.url)
# Never follow cross-protocol redirects without asking
elif new_gi.scheme != gi.scheme:
follow = input("Follow cross-protocol redirect to %s? (y/n) " % new_gi.url)
# Don't follow *any* redirect without asking if auto-follow is off
elif not self.options["auto_follow_redirects"]:
follow = input("Follow redirect to %s? (y/n) " % new_gi.url)
# Otherwise, follow away
else:
follow = "yes"
if follow.strip().lower() not in ("y", "yes"):
raise UserAbortException()
self._debug("Following redirect to %s." % new_gi.url)
self._debug("This is consecutive redirect number %d." % len(self.previous_redirectors))
self.previous_redirectors.add(gi.url)
if status == "31":
# Permanent redirect
self.permanent_redirects[gi.url] = new_gi.url
return self._fetch_over_network(new_gi)
# Errors
elif status.startswith("4") or status.startswith("5"):
raise RuntimeError(meta)
# Client cert
elif status.startswith("6"):
self._handle_cert_request(meta)
return self._fetch_over_network(gi)
# Invalid status
elif not status.startswith("2"):
raise RuntimeError("Server returned undefined status code %s!" % status)
# If we're here, this must be a success and there's a response body
assert status.startswith("2")
mime = meta
if mime == "":
mime = "text/gemini; charset=utf-8"
mime, mime_options = cgi.parse_header(mime)
if "charset" in mime_options:
try:
codecs.lookup(mime_options["charset"])
except LookupError:
raise RuntimeError("Header declared unknown encoding %s" % value)
# Read the response body over the network
body = f.read()
# Save the result in a temporary file
## Set file mode
if mime.startswith("text/"):
mode = "w"
encoding = mime_options.get("charset", "UTF-8")
try:
body = body.decode(encoding)
except UnicodeError:
raise RuntimeError("Could not decode response body using %s encoding declared in header!" % encoding)
else:
mode = "wb"
encoding = None
## Write
tmpf = tempfile.NamedTemporaryFile(mode, encoding=encoding, delete=False)
size = tmpf.write(body)
tmpf.close()
self.tmp_filename = tmpf.name
self._debug("Wrote %d byte response to %s." % (size, self.tmp_filename))
# Maintain cache and log
if self.options["cache"]:
self._add_to_cache(gi.url, mime, self.tmp_filename)
self._log_visit(gi, address, size)
return gi, mime, body, self.tmp_filename
    def _send_request(self, gi):
        """Send a selector to a given host and port.
        Returns the resolved address and binary file with the reply.

        Sets up TLS (CA validation or TOFU depending on the tls_mode option),
        loads any active client certificate, tries each resolved address in
        turn and sends the request URL terminated by CRLF.
        """
        if gi.scheme == "gemini":
            # For Gemini requests, connect to the host and port specified in the URL
            host, port = gi.host, gi.port
        elif gi.scheme == "gopher":
            # For Gopher requests, use the configured proxy
            host, port = self.options["gopher_proxy"].rsplit(":", 1)
            self._debug("Using gopher proxy: " + self.options["gopher_proxy"])
        # NOTE(review): other schemes leave host/port unset (NameError below);
        # _go_to_gi filters those out before calling here.
        # Do DNS resolution
        addresses = self._get_addresses(host, port)
        # Prepare TLS context
        protocol = ssl.PROTOCOL_TLS if sys.version_info.minor >=6 else ssl.PROTOCOL_TLSv1_2
        context = ssl.SSLContext(protocol)
        # Use CAs or TOFU
        if self.options["tls_mode"] == "ca":
            context.verify_mode = ssl.CERT_REQUIRED
            context.check_hostname = True
            context.load_default_certs()
        else:
            # TOFU mode: accept any cert here, pin/check it ourselves below.
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        # Impose minimum TLS version
        ## In 3.7 and above, this is easy...
        if sys.version_info.minor >= 7:
            context.minimum_version = ssl.TLSVersion.TLSv1_2
        ## Otherwise, it seems very hard...
        ## The below is less strict than it ought to be, but trying to disable
        ## TLS v1.1 here using ssl.OP_NO_TLSv1_1 produces unexpected failures
        ## with recent versions of OpenSSL.  What a mess...
        else:
            context.options |= ssl.OP_NO_SSLv3
            context.options |= ssl.OP_NO_SSLv2
        # Try to enforce sensible ciphers
        try:
            context.set_ciphers("AESGCM+ECDHE:AESGCM+DHE:CHACHA20+ECDHE:CHACHA20+DHE:!DSS:!SHA1:!MD5:@STRENGTH")
        except ssl.SSLError:
            # Rely on the server to only support sensible things, I guess...
            pass
        # Load client certificate if needed
        if self.client_certs["active"]:
            certfile, keyfile = self.client_certs["active"]
            context.load_cert_chain(certfile, keyfile)
        # Connect to remote host by any address possible
        err = None
        for address in addresses:
            self._debug("Connecting to: " + str(address[4]))
            s = socket.socket(address[0], address[1])
            s.settimeout(self.options["timeout"])
            s = context.wrap_socket(s, server_hostname = gi.host)
            try:
                s.connect(address[4])
                break
            except OSError as e:
                err = e
        else:
            # If we couldn't connect to *any* of the addresses, just
            # bubble up the exception from the last attempt and deny
            # knowledge of earlier failures.
            raise err
        if sys.version_info.minor >=5:
            self._debug("Established {} connection.".format(s.version()))
        self._debug("Cipher is: {}.".format(s.cipher()))
        # Do TOFU
        if self.options["tls_mode"] != "ca":
            cert = s.getpeercert(binary_form=True)
            self._validate_cert(address[4][0], host, cert)
        # Remember that we showed the current cert to this domain...
        if self.client_certs["active"]:
            self.active_cert_domains.append(gi.host)
            self.client_certs[gi.host] = self.client_certs["active"]
        # Send request and wrap response in a file descriptor
        self._debug("Sending %s<CRLF>" % gi.url)
        s.sendall((gi.url + CRLF).encode("UTF-8"))
        return address, s.makefile(mode = "rb")
def _get_addresses(self, host, port):
# DNS lookup - will get IPv4 and IPv6 records if IPv6 is enabled
if ":" in host:
# This is likely a literal IPv6 address, so we can *only* ask for
# IPv6 addresses or getaddrinfo will complain
family_mask = socket.AF_INET6
elif socket.has_ipv6 and self.options["ipv6"]:
# Accept either IPv4 or IPv6 addresses
family_mask = 0
else:
# IPv4 only
family_mask = socket.AF_INET
addresses = socket.getaddrinfo(host, port, family=family_mask,
type=socket.SOCK_STREAM)
# Sort addresses so IPv6 ones come first
addresses.sort(key=lambda add: add[0] == socket.AF_INET6, reverse=True)
return addresses
def _is_cached(self, url):
if url not in self.cache:
return False
now = time.time()
cached = self.cache_timestamps[url]
if now - cached > _MAX_CACHE_AGE_SECS:
self._debug("Expiring old cached copy of resource.")
self._remove_from_cache(url)
return False
self._debug("Found cached copy of resource.")
return True
def _remove_from_cache(self, url):
self.cache_timestamps.pop(url)
mime, filename = self.cache.pop(url)
os.unlink(filename)
self._validate_cache()
def _add_to_cache(self, url, mime, filename):
self.cache_timestamps[url] = time.time()
self.cache[url] = (mime, filename)
if len(self.cache) > _MAX_CACHE_SIZE:
self._trim_cache()
self._validate_cache()
def _trim_cache(self):
# Order cache entries by age
lru = [(t, u) for (u, t) in self.cache_timestamps.items()]
lru.sort()
# Drop the oldest entry no matter what
_, url = lru[0]
self._debug("Dropping cached copy of {} from full cache.".format(url))
self._remove_from_cache(url)
# Drop other entries if they are older than the limit
now = time.time()
for cached, url in lru[1:]:
if now - cached > _MAX_CACHE_AGE_SECS:
self._debug("Dropping cached copy of {} from full cache.".format(url))
self._remove_from_cache(url)
else:
break
self._validate_cache()
def _get_cached(self, url):
mime, filename = self.cache[url]
self.log["cache_hits"] += 1
if mime.startswith("text/gemini"):
with open(filename, "r") as fp:
body = fp.read()
return mime, body, filename
else:
return mime, None, filename
def _empty_cache(self):
for mime, filename in self.cache.values():
if os.path.exists(filename):
os.unlink(filename)
def _validate_cache(self):
assert self.cache.keys() == self.cache_timestamps.keys()
for _, filename in self.cache.values():
assert os.path.isfile(filename)
def _handle_cert_request(self, meta):
# Don't do client cert stuff in restricted mode, as in principle
# it could be used to fill up the disk by creating a whole lot of
# certificates
if self.restricted:
print("The server is requesting a client certificate.")
print("These are not supported in restricted mode, sorry.")
raise UserAbortException()
print("SERVER SAYS: ", meta)
# Present different messages for different 6x statuses, but
# handle them the same.
if status in ("64", "65"):
print("The server rejected your certificate because it is either expired or not yet valid.")
elif status == "63":
print("The server did not accept your certificate.")
print("You may need to e.g. coordinate with the admin to get your certificate fingerprint whitelisted.")
else:
print("The site {} is requesting a client certificate.".format(gi.host))
print("This will allow the site to recognise you across requests.")
# Give the user choices
print("What do you want to do?")
print("1. Give up.")
print("2. Generate a new transient certificate.")
print("3. Generate a new persistent certificate.")
print("4. Load a previously generated persistent.")
print("5. Load certificate from an external file.")
choice = input("> ").strip()
if choice == "2":
self._generate_transient_cert_cert()
elif choice == "3":
self._generate_persistent_client_cert()
elif choice == "4":
self._choose_client_cert()
elif choice == "5":
self._load_client_cert()
else:
print("Giving up.")
raise UserAbortException()
def _validate_cert(self, address, host, cert):
    """
    Validate a TLS certificate in TOFU mode.

    If the cryptography module is installed:
    - Check the certificate Common Name or SAN matches `host`
    - Check the certificate's not valid before date is in the past
    - Check the certificate's not valid after date is in the future

    Whether the cryptography module is installed or not, check the
    certificate's fingerprint against the TOFU database to see if we've
    previously encountered a different certificate for this IP address and
    hostname.
    """
    now = datetime.datetime.utcnow()
    if _HAS_CRYPTOGRAPHY:
        # Using the cryptography module we can get detailed access
        # to the properties of even self-signed certs, unlike in
        # the standard ssl library...
        c = x509.load_der_x509_certificate(cert, _BACKEND)
        # Check certificate validity dates
        if c.not_valid_before >= now:
            raise CertificateError("Certificate not valid until: {}!".format(c.not_valid_before))
        elif c.not_valid_after <= now:
            raise CertificateError("Certificate expired as of: {})!".format(c.not_valid_after))
        # Check certificate hostnames: collect the CN plus any SANs.
        names = []
        common_name = c.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)
        if common_name:
            names.append(common_name[0].value)
        try:
            names.extend([alt.value for alt in c.extensions.get_extension_for_oid(x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME).value])
        except x509.ExtensionNotFound:
            pass
        names = set(names)
        for name in names:
            try:
                # NOTE(review): ssl._dnsname_match is a private stdlib
                # helper; it raises CertificateError on mismatch.
                ssl._dnsname_match(name, host)
                break
            except CertificateError:
                continue
        else:
            # If we didn't break out, none of the names were valid
            raise CertificateError("Hostname does not match certificate common name or any alternative names.")
    # Fingerprint the raw DER certificate for the TOFU database.
    sha = hashlib.sha256()
    sha.update(cert)
    fingerprint = sha.hexdigest()
    # Have we been here before?
    self.db_cur.execute("""SELECT fingerprint, first_seen, last_seen, count
        FROM cert_cache
        WHERE hostname=? AND address=?""", (host, address))
    cached_certs = self.db_cur.fetchall()
    # If so, check for a match
    if cached_certs:
        max_count = 0
        most_frequent_cert = None
        for cached_fingerprint, first, last, count in cached_certs:
            if count > max_count:
                max_count = count
                most_frequent_cert = cached_fingerprint
            if fingerprint == cached_fingerprint:
                # Matched!  Refresh last_seen and bump the counter.
                self._debug("TOFU: Accepting previously seen ({} times) certificate {}".format(count, fingerprint))
                self.db_cur.execute("""UPDATE cert_cache
                    SET last_seen=?, count=?
                    WHERE hostname=? AND address=? AND fingerprint=?""",
                    (now, count+1, host, address, fingerprint))
                self.db_conn.commit()
                break
        else:
            # No break: unrecognised certificate for a known host/address.
            if _HAS_CRYPTOGRAPHY:
                # Load the most frequently seen certificate to see if it has
                # expired
                certdir = os.path.join(self.config_dir, "cert_cache")
                with open(os.path.join(certdir, most_frequent_cert+".crt"), "rb") as fp:
                    previous_cert = fp.read()
                previous_cert = x509.load_der_x509_certificate(previous_cert, _BACKEND)
                previous_ttl = previous_cert.not_valid_after - now
                print(previous_ttl)
            self._debug("TOFU: Unrecognised certificate {}! Raising the alarm...".format(fingerprint))
            print("****************************************")
            print("[SECURITY WARNING] Unrecognised certificate!")
            print("The certificate presented for {} ({}) has never been seen before.".format(host, address))
            print("This MIGHT be a Man-in-the-Middle attack.")
            print("A different certificate has previously been seen {} times.".format(max_count))
            if _HAS_CRYPTOGRAPHY:
                if previous_ttl < datetime.timedelta():
                    print("That certificate has expired, which reduces suspicion somewhat.")
                else:
                    print("That certificate is still valid for: {}".format(previous_ttl))
            print("****************************************")
            print("Attempt to verify the new certificate fingerprint out-of-band:")
            print(fingerprint)
            choice = input("Accept this new certificate? Y/N ").strip().lower()
            if choice in ("y", "yes"):
                self.db_cur.execute("""INSERT INTO cert_cache
                    VALUES (?, ?, ?, ?, ?, ?)""",
                    (host, address, fingerprint, now, now, 1))
                self.db_conn.commit()
                # NOTE(review): `certdir` is only assigned above when
                # _HAS_CRYPTOGRAPHY is true — without cryptography this
                # open() raises NameError; the assignment should be
                # hoisted out of that branch.
                with open(os.path.join(certdir, fingerprint+".crt"), "wb") as fp:
                    fp.write(cert)
            else:
                raise Exception("TOFU Failure!")
    # If not, cache this cert
    else:
        self._debug("TOFU: Blindly trusting first ever certificate for this host!")
        self.db_cur.execute("""INSERT INTO cert_cache
            VALUES (?, ?, ?, ?, ?, ?)""",
            (host, address, fingerprint, now, now, 1))
        self.db_conn.commit()
        certdir = os.path.join(self.config_dir, "cert_cache")
        if not os.path.exists(certdir):
            os.makedirs(certdir)
        with open(os.path.join(certdir, fingerprint+".crt"), "wb") as fp:
            fp.write(cert)
def _get_handler_cmd(self, mimetype):
    # Resolve the external command template used to open `mimetype`.
    # Exact MIME entries take priority over wildcard ("*") entries.
    exact_matches = []
    wildcard_matches = []
    for handled_mime, cmd_str in _MIME_HANDLERS.items():
        bucket = wildcard_matches if "*" in handled_mime else exact_matches
        bucket.append((handled_mime, cmd_str))
    for handled_mime, cmd_str in exact_matches + wildcard_matches:
        if fnmatch.fnmatch(mimetype, handled_mime):
            break
    else:
        # Nothing configured: fall back to the desktop default opener.
        cmd_str = "xdg-open %s"
    self._debug("Using handler: %s" % cmd_str)
    return cmd_str
def _handle_gemtext(self, body, menu_gi, display=True):
    """Render a text/gemini document into a temporary file and
    (optionally) display it via the configured text/gemini handler.

    Fills self.index with the GeminiItems of any "=>" link lines so
    they can be selected by number, and points self.lookup at it.
    """
    self.index = []
    preformatted = False
    # Discard the previous rendering, if any.
    if self.idx_filename:
        os.unlink(self.idx_filename)
    tmpf = tempfile.NamedTemporaryFile("w", encoding="UTF-8", delete=False)
    self.idx_filename = tmpf.name
    for line in body.splitlines():
        if line.startswith("```"):
            # Toggle preformatted mode; the marker line is not rendered.
            preformatted = not preformatted
        elif preformatted:
            # Preformatted text passes through untouched.
            tmpf.write(line + "\n")
        elif line.startswith("=>"):
            # Link line: add to the numeric index, render as "[n] name".
            try:
                gi = GeminiItem.from_map_line(line, menu_gi)
                self.index.append(gi)
                tmpf.write(self._format_geminiitem(len(self.index), gi) + "\n")
            except:
                # NOTE(review): bare except silently skips malformed link
                # lines — it would also swallow unrelated errors.
                self._debug("Skipping possible link: %s" % line)
        elif line.startswith("* "):
            # List item: strip the marker, re-wrap with a bullet.
            line = line[1:].lstrip("\t ")
            tmpf.write(textwrap.fill(line, self.options["width"],
                initial_indent = "• ", subsequent_indent=" ") + "\n")
        elif line.startswith(">"):
            # Quote line: re-wrap with "> " prefixes.
            line = line[1:].lstrip("\t ")
            tmpf.write(textwrap.fill(line, self.options["width"],
                initial_indent = "> ", subsequent_indent="> ") + "\n")
        elif line.startswith("###"):
            # H3: underlined via ANSI escapes.
            line = line[3:].lstrip("\t ")
            tmpf.write("\x1b[4m" + line + "\x1b[0m""\n")
        elif line.startswith("##"):
            # H2: bold.
            line = line[2:].lstrip("\t ")
            tmpf.write("\x1b[1m" + line + "\x1b[0m""\n")
        elif line.startswith("#"):
            # H1: bold + underlined.
            line = line[1:].lstrip("\t ")
            tmpf.write("\x1b[1m\x1b[4m" + line + "\x1b[0m""\n")
        else:
            # Plain text: wrap to the configured width.
            tmpf.write(textwrap.fill(line, self.options["width"]) + "\n")
    tmpf.close()
    self.lookup = self.index
    self.page_index = 0
    self.index_index = -1
    if display:
        cmd_str = self._get_handler_cmd("text/gemini")
        subprocess.call(shlex.split(cmd_str % self.idx_filename))
def _format_geminiitem(self, index, gi, url=False):
protocol = "" if gi.scheme == "gemini" else " %s" % gi.scheme
line = "[%d%s] %s" % (index, protocol, gi.name or gi.url)
if gi.name and url:
line += " (%s)" % gi.url
return line
def _show_lookup(self, offset=0, end=None, url=False):
    # Print a 1-based, numbered view of a slice of the lookup table.
    for position, gi in enumerate(self.lookup[offset:end], start=offset + 1):
        print(self._format_geminiitem(position, gi, url))
def _update_history(self, gi):
# Don't duplicate
if self.history and self.history[self.hist_index] == gi:
return
self.history = self.history[0:self.hist_index+1]
self.history.append(gi)
self.hist_index = len(self.history) - 1
def _log_visit(self, gi, address, size):
if not address:
return
self.log["requests"] += 1
self.log["bytes_recvd"] += size
self.visited_hosts.add(address)
if address[0] == socket.AF_INET:
self.log["ipv4_requests"] += 1
self.log["ipv4_bytes_recvd"] += size
elif address[0] == socket.AF_INET6:
self.log["ipv6_requests"] += 1
self.log["ipv6_bytes_recvd"] += size
def _get_active_tmpfile(self):
if self.mime == "text/gemini":
return self.idx_filename
else:
return self.tmp_filename
def _debug(self, debug_text):
if not self.options["debug"]:
return
debug_text = "\x1b[0;32m[DEBUG] " + debug_text + "\x1b[0m"
print(debug_text)
def _load_client_cert(self):
    """
    Interactively load a TLS client certificate from the filesystem in PEM
    format.
    """
    # Ask for the certificate and then the private key with the same
    # flow: blank answer or missing file aborts.
    paths = []
    prompts = (
        ("Loading client certificate file, in PEM format (blank line to cancel)",
         "Certfile path: ", "Certificate file {} does not exist."),
        ("Loading private key file, in PEM format (blank line to cancel)",
         "Keyfile path: ", "Private key file {} does not exist."),
    )
    for intro, prompt, missing in prompts:
        print(intro)
        answer = input(prompt).strip()
        if not answer:
            print("Aborting.")
            return
        answer = os.path.expanduser(answer)
        if not os.path.isfile(answer):
            print(missing.format(answer))
            return
        paths.append(answer)
    self._activate_client_cert(paths[0], paths[1])
def _generate_transient_cert_cert(self):
    """
    Use `openssl` command to generate a new transient client certificate
    with 24 hours of validity.
    """
    # Transient certs get a random name and are tracked so they can be
    # deleted again when the session ends.
    name = str(uuid.uuid4())
    certdir = os.path.join(self.config_dir, "transient_certs")
    self._generate_client_cert(certdir, name, transient=True)
    self.active_is_transient = True
    self.transient_certs_created.append(name)
def _generate_persistent_client_cert(self):
    """
    Interactively use `openssl` command to generate a new persistent client
    certificate with one year of validity.
    """
    certdir = os.path.join(self.config_dir, "client_certs")
    print("What do you want to name this new certificate?")
    print("Answering `mycert` will create `{0}/mycert.crt` and `{0}/mycert.key`".format(certdir))
    name = input("> ")
    # An all-whitespace name cancels the operation.
    if name.strip():
        self._generate_client_cert(certdir, name)
    else:
        print("Aborting.")
def _generate_client_cert(self, certdir, basename, transient=False):
    """
    Use `openssl` binary to generate a client certificate (which may be
    transient or persistent) and save the certificate and private key to the
    specified directory with the specified basename.

    Transient certificates are valid for 1 day, persistent ones for 365
    days.  For transient certs the subject CN is supplied on the command
    line; otherwise openssl prompts the user interactively.
    """
    if not os.path.exists(certdir):
        os.makedirs(certdir)
    certfile = os.path.join(certdir, basename + ".crt")
    keyfile = os.path.join(certdir, basename + ".key")
    # Build the command as an argument list and run it without a shell:
    # `basename` is user input, so interpolating it into a shell string
    # (as the previous os.system call did) permitted shell injection.
    command = ["openssl", "req", "-x509", "-newkey", "rsa:2048",
               "-days", "1" if transient else "365",
               "-nodes", "-keyout", keyfile, "-out", certfile]
    if transient:
        command += ["-subj", "/CN={}".format(basename)]
    subprocess.call(command)
    self._activate_client_cert(certfile, keyfile)
def _choose_client_cert(self):
    """
    Interactively select a previously generated client certificate and
    activate it.
    """
    certdir = os.path.join(self.config_dir, "client_certs")
    certs = glob.glob(os.path.join(certdir, "*.crt"))
    if len(certs) == 0:
        print("There are no previously generated certificates.")
        return
    # Offer a numbered menu; map each number to its (cert, key) pair.
    choices = {}
    for n, cert in enumerate(certs):
        stem = os.path.splitext(cert)[0]
        choices[str(n + 1)] = (cert, stem + ".key")
        print("{}. {}".format(n + 1, os.path.splitext(os.path.basename(cert))[0]))
    choice = input("> ").strip()
    if choice not in choices:
        print("What?")
        return
    certfile, keyfile = choices[choice]
    self._activate_client_cert(certfile, keyfile)
def _activate_client_cert(self, certfile, keyfile):
self.client_certs["active"] = (certfile, keyfile)
self.active_cert_domains = []
self.prompt = self.cert_prompt
self._debug("Using ID {} / {}.".format(*self.client_certs["active"]))
def _deactivate_client_cert(self):
if self.active_is_transient:
for filename in self.client_certs["active"]:
os.remove(filename)
for domain in self.active_cert_domains:
self.client_certs.pop(domain)
self.client_certs["active"] = None
self.active_cert_domains = []
self.prompt = self.no_cert_prompt
self.active_is_transient = False
# Cmd implementation follows
def default(self, line):
    # Fallback for input that is not a recognised command: handles
    # EOF/"..", "/search" shortcuts, abbreviation expansion, and bare
    # numeric selection from the current lookup table.
    if line.strip() == "EOF":
        return self.onecmd("quit")
    elif line.strip() == "..":
        return self.do_up()
    elif line.startswith("/"):
        return self.do_search(line[1:])
    # Expand abbreviated commands
    first_word = line.split()[0].strip()
    if first_word in _ABBREVS:
        full_cmd = _ABBREVS[first_word]
        # Replace only the first occurrence, i.e. the command word
        # itself, leaving any arguments untouched.
        expanded = line.replace(first_word, full_cmd, 1)
        return self.onecmd(expanded)
    # Try to parse numerical index for lookup table
    try:
        n = int(line.strip())
    except ValueError:
        print("What?")
        return
    try:
        gi = self.lookup[n-1]
    except IndexError:
        print ("Index too high!")
        return
    # Remember the selection so `next`/`previous` work relative to it.
    self.index_index = n
    self._go_to_gi(gi)
### Settings
@restricted
def do_set(self, line):
    """View or set various options."""
    if not line.strip():
        # Show all current settings
        for option in sorted(self.options.keys()):
            print("%s %s" % (option, self.options[option]))
    elif len(line.split()) == 1:
        # Show current value of one specific setting
        option = line.strip()
        if option in self.options:
            print("%s %s" % (option, self.options[option]))
        else:
            print("Unrecognised option %s" % option)
    else:
        # Set value of one specific setting
        option, value = line.split(" ", 1)
        if option not in self.options:
            print("Unrecognised option %s" % option)
            return
        # Validate / convert values
        if option == "gopher_proxy":
            # Append the default port if none was given; otherwise the
            # port part after the last ":" must be numeric.
            if ":" not in value:
                value += ":1965"
            else:
                host, port = value.rsplit(":",1)
                if not port.isnumeric():
                    print("Invalid proxy port %s" % port)
                    return
        elif option == "tls_mode":
            if value.lower() not in ("ca", "tofu"):
                print("TLS mode must be `ca` or `tofu`!")
                return
        elif value.isnumeric():
            # Conversion order matters: bare digits become int first,
            # then true/false become bool, then float-ish strings
            # become float; anything else stays a string.
            value = int(value)
        elif value.lower() == "false":
            value = False
        elif value.lower() == "true":
            value = True
        else:
            try:
                value = float(value)
            except ValueError:
                pass
        self.options[option] = value
@restricted
def do_cert(self, line):
    """Manage client certificates"""
    print("Managing client certificates")
    if self.client_certs["active"]:
        print("Active certificate: {}".format(self.client_certs["active"][0]))
    print("1. Deactivate client certificate.")
    print("2. Generate new certificate.")
    print("3. Load previously generated certificate.")
    print("4. Load externally created client certificate from file.")
    print("Enter blank line to exit certificate manager.")
    choice = input("> ").strip()
    # Choices 2-4 map directly onto helper methods.
    actions = {
        "2": self._generate_persistent_client_cert,
        "3": self._choose_client_cert,
        "4": self._load_client_cert,
    }
    if choice == "1":
        print("Deactivating client certificate.")
        self._deactivate_client_cert()
    elif choice in actions:
        actions[choice]()
    else:
        print("Aborting.")
@restricted
def do_handler(self, line):
    """View or set handler commands for different MIME types."""
    stripped = line.strip()
    if not stripped:
        # No argument: list every configured handler.
        for mime in sorted(_MIME_HANDLERS.keys()):
            print("%s %s" % (mime, _MIME_HANDLERS[mime]))
        return
    if len(line.split()) == 1:
        # One argument: show the handler for that MIME type.
        if stripped in _MIME_HANDLERS:
            print("%s %s" % (stripped, _MIME_HANDLERS[stripped]))
        else:
            print("No handler set for MIME type %s" % stripped)
        return
    # Otherwise: set the handler command for a MIME type.
    mime, handler = line.split(" ", 1)
    _MIME_HANDLERS[mime] = handler
    if "%s" not in handler:
        print("Are you sure you don't want to pass the filename to the handler?")
def do_abbrevs(self, *args):
    """Print all AV-98 command abbreviations."""
    header = "Command Abbreviations:"
    self.stdout.write("\n%s\n" % header)
    if self.ruler:
        # Underline the header with the cmd module's ruler character.
        self.stdout.write("%s\n" % (self.ruler * len(header)))
    for abbrev, full_command in _ABBREVS.items():
        self.stdout.write("{:<7} {}\n".format(abbrev, full_command))
    self.stdout.write("\n")
### Stuff for getting around
def do_go(self, line):
    """Go to a gemini URL or marked item."""
    line = line.strip()
    if not line:
        print("Go where?")
        return
    # Single letters may refer to marks set with the 'mark' command.
    if line in self.marks:
        self._go_to_gi(self.marks[line])
        return
    # A path on the local filesystem is browsed directly...
    local_path = os.path.expanduser(line)
    if os.path.exists(local_path):
        gi = GeminiItem(None, None, local_path,
            "1", line, False)
        self._go_to_gi(gi)
    else:
        # ...anything else is treated as a URL.
        self._go_to_gi(GeminiItem(line))
@needs_gi
def do_reload(self, *args):
    """Reload the current URL."""
    # Bypass the cache to force a fresh network fetch.
    current = self.gi
    self._go_to_gi(current, check_cache=False)
@needs_gi
def do_up(self, *args):
    """Go up one directory in the path."""
    parent = self.gi.up()
    self._go_to_gi(parent)
def do_back(self, *args):
    """Go back to the previous gemini item."""
    # Nothing to do at the start of history (or with no history at all).
    if not self.history or self.hist_index == 0:
        return
    self.hist_index -= 1
    previous = self.history[self.hist_index]
    self._go_to_gi(previous, update_hist=False)
def do_forward(self, *args):
    """Go forward to the next gemini item."""
    # Nothing to do at the end of history (or with no history at all).
    last_position = len(self.history) - 1
    if not self.history or self.hist_index == last_position:
        return
    self.hist_index += 1
    self._go_to_gi(self.history[self.hist_index], update_hist=False)
def do_next(self, *args):
    """Go to next item after current in index."""
    target = self.index_index + 1
    return self.onecmd(str(target))
def do_previous(self, *args):
    """Go to previous item before current in index."""
    # Re-point the lookup table at the menu index before selecting.
    self.lookup = self.index
    target = self.index_index - 1
    return self.onecmd(str(target))
@needs_gi
def do_root(self, *args):
    """Go to root selector of the server hosting current item."""
    root_gi = self.gi.root()
    self._go_to_gi(root_gi)
def do_tour(self, line):
    """Add index items as waypoints on a tour, which is basically a FIFO
    queue of gemini items.
    Items can be added with `tour 1 2 3 4` or ranges like `tour 1-4`.
    All items in current menu can be added with `tour *`.
    Current tour can be listed with `tour ls` and scrubbed with `tour clear`."""
    line = line.strip()
    if not line:
        # Fly to next waypoint on tour
        if not self.waypoints:
            print("End of tour.")
        else:
            gi = self.waypoints.pop(0)
            self._go_to_gi(gi)
    elif line == "ls":
        # Temporarily display the tour queue via the lookup machinery,
        # then restore the previous lookup table.
        old_lookup = self.lookup
        self.lookup = self.waypoints
        self._show_lookup()
        self.lookup = old_lookup
    elif line == "clear":
        self.waypoints = []
    elif line == "*":
        self.waypoints.extend(self.lookup)
    elif looks_like_url(line):
        self.waypoints.append(GeminiItem(line))
    else:
        # Each whitespace-separated token is a single index or an
        # inclusive "a-b" range of indices into the current lookup.
        for index in line.split():
            try:
                pair = index.split('-')
                if len(pair) == 1:
                    # Just a single index
                    n = int(index)
                    gi = self.lookup[n-1]
                    self.waypoints.append(gi)
                elif len(pair) == 2:
                    # Two endpoints for a range of indices
                    for n in range(int(pair[0]), int(pair[1]) + 1):
                        gi = self.lookup[n-1]
                        self.waypoints.append(gi)
                else:
                    # Syntax error
                    print("Invalid use of range syntax %s, skipping" % index)
            except ValueError:
                print("Non-numeric index %s, skipping." % index)
            except IndexError:
                print("Invalid index %d, skipping." % n)
@needs_gi
def do_mark(self, line):
    """Mark the current item with a single letter. This letter can then
    be passed to the 'go' command to return to the current item later.
    Think of it like marks in vi: 'mark a'='ma' and 'go a'=''a'."""
    mark = line.strip()
    if not mark:
        # With no argument, list all existing marks.
        for letter, gi in self.marks.items():
            print("[%s] %s (%s)" % (letter, gi.name, gi.url))
    elif len(mark) == 1 and mark.isalpha():
        self.marks[mark] = self.gi
    else:
        print("Invalid mark, must be one letter")
def do_version(self, line):
    """Display version information."""
    version_line = "AV-98 " + _VERSION
    print(version_line)
### Stuff that modifies the lookup table
def do_ls(self, line):
    """List contents of current index.
    Use 'ls -l' to see URLs."""
    show_urls = "-l" in line
    self.lookup = self.index
    self._show_lookup(url=show_urls)
    self.page_index = 0
def do_gus(self, line):
    """Submit a search query to the GUS search engine."""
    search_root = GeminiItem("gemini://gus.guru/search")
    self._go_to_gi(search_root.query(line))
def do_history(self, *args):
    """Display history."""
    # Make the history addressable by number and restart paging.
    self.page_index = 0
    self.lookup = self.history
    self._show_lookup(url=True)
def do_search(self, searchterm):
    """Search index (case insensitive)."""
    needle = searchterm.lower()
    results = [gi for gi in self.lookup if needle in gi.name.lower()]
    if not results:
        print("No results found.")
        return
    # Matches become the new lookup table, numbered from 1.
    self.lookup = results
    self._show_lookup()
    self.page_index = 0
def emptyline(self):
    """Page through index ten lines at a time."""
    start = self.page_index
    # Already past the end of the lookup table: nothing more to show.
    if start > len(self.lookup):
        return
    self._show_lookup(offset=start, end=start + 10)
    self.page_index = start + 10
### Stuff that does something to most recently viewed item
@needs_gi
def do_cat(self, *args):
    """Run most recently visited item through "cat" command."""
    target = self._get_active_tmpfile()
    subprocess.call(shlex.split("cat %s" % target))
@needs_gi
def do_less(self, *args):
    """Run most recently visited item through "less" command."""
    # Render with the MIME handler, then page the output through less.
    handler = self._get_handler_cmd(self.mime) % self._get_active_tmpfile()
    subprocess.call("%s | less -R" % handler, shell=True)
@needs_gi
def do_fold(self, *args):
    """Run most recently visited item through "fold" command."""
    # Render with the MIME handler, then word-wrap at 70 columns.
    handler = self._get_handler_cmd(self.mime) % self._get_active_tmpfile()
    subprocess.call("%s | fold -w 70 -s" % handler, shell=True)
@restricted
@needs_gi
def do_shell(self, line):
    """'cat' most recently visited item through a shell pipeline."""
    pipeline = ("cat %s |" % self._get_active_tmpfile()) + line
    subprocess.call(pipeline, shell=True)
@restricted
@needs_gi
def do_save(self, line):
    """Save an item to the filesystem.
    'save n filename' saves menu item n to the specified filename.
    'save filename' saves the last viewed item to the specified filename.
    'save n' saves menu item n to an automagic filename."""
    args = line.strip().split()
    # First things first, figure out what our arguments are
    if len(args) == 0:
        # No arguments given at all
        # Save current item, if there is one, to a file whose name is
        # inferred from the gemini path
        if not self.tmp_filename:
            print("You need to visit an item first!")
            return
        else:
            index = None
            filename = None
    elif len(args) == 1:
        # One argument given
        # If it's numeric, treat it as an index, and infer the filename
        try:
            index = int(args[0])
            filename = None
        # If it's not numeric, treat it as a filename and
        # save the current item
        except ValueError:
            index = None
            filename = os.path.expanduser(args[0])
    elif len(args) == 2:
        # Two arguments given
        # Treat first as an index and second as filename
        index, filename = args
        try:
            index = int(index)
        except ValueError:
            print("First argument is not a valid item index!")
            return
        filename = os.path.expanduser(filename)
    else:
        print("You must provide an index, a filename, or both.")
        return
    # Next, fetch the item to save, if it's not the current one.
    # NOTE(review): the truthiness test means index 0 is treated like
    # "no index", yet the `index != None` check at the bottom would
    # then reference `last_gi` before assignment — confirm 0 cannot
    # reach here.
    if index:
        last_gi = self.gi
        try:
            gi = self.lookup[index-1]
            self._go_to_gi(gi, update_hist = False, handle = False)
        except IndexError:
            print ("Index too high!")
            self.gi = last_gi
            return
    else:
        gi = self.gi
    # Derive filename from current GI's path, if one hasn't been set
    if not filename:
        filename = os.path.basename(gi.path)
    # Check for filename collisions and actually do the save if safe
    if os.path.exists(filename):
        print("File %s already exists!" % filename)
    else:
        # Don't use _get_active_tmpfile() here, because we want to save the
        # "source code" of menus, not the rendered view - this way AV-98
        # can navigate to it later.
        shutil.copyfile(self.tmp_filename, filename)
        print("Saved to %s" % filename)
    # Restore gi if necessary
    if index != None:
        self._go_to_gi(last_gi, handle=False)
@needs_gi
def do_url(self, *args):
    """Print URL of most recently visited item."""
    current_url = self.gi.url
    print(current_url)
### Bookmarking stuff
@restricted
@needs_gi
def do_add(self, line):
    """Add the current URL to the bookmarks menu.
    Optionally, specify the new name for the bookmark."""
    bookmarks_path = os.path.join(self.config_dir, "bookmarks.gmi")
    entry = self.gi.to_map_line(line)
    with open(bookmarks_path, "a") as fp:
        fp.write(entry)
def do_bookmarks(self, line):
    """Show or access the bookmarks menu.
    'bookmarks' shows all bookmarks.
    'bookmarks n' navigates immediately to item n in the bookmark menu.
    Bookmarks are stored using the 'add' command."""
    bookmarks_path = os.path.join(self.config_dir, "bookmarks.gmi")
    if not os.path.exists(bookmarks_path):
        print("You need to 'add' some bookmarks, first!")
        return
    selection = line.strip()
    # At most one argument is accepted, and it must be an integer.
    if len(selection.split()) > 1 or (selection and not selection.isnumeric()):
        print("bookmarks command takes a single integer argument!")
        return
    with open(bookmarks_path, "r") as fp:
        body = fp.read()
    gi = GeminiItem("localhost/" + bookmarks_path)
    # Render silently when jumping straight to a numbered bookmark.
    self._handle_gemtext(body, gi, display=not selection)
    if selection:
        self.default(line)
### Help
def do_help(self, arg):
    """ALARM! Recursion detected! ALARM! Prepare to eject!"""
    # Explain the two punctuation aliases; defer everything else to cmd.
    alias_notes = {
        "!": "! is an alias for 'shell'",
        "?": "? is an alias for 'help'",
    }
    if arg in alias_notes:
        print(alias_notes[arg])
    else:
        cmd.Cmd.do_help(self, arg)
### Flight recorder
def do_blackbox(self, *args):
    """Display contents of flight recorder, showing statistics for the
    current gemini browsing session."""
    # Elapsed session time, rendered below as HH:MM:SS.
    elapsed = time.time() - self.log["start_time"]
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    # Per-protocol counts of distinct hosts visited.
    ipv4_hosts = sum(1 for host in self.visited_hosts if host[0] == socket.AF_INET)
    ipv6_hosts = sum(1 for host in self.visited_hosts if host[0] == socket.AF_INET6)
    # Assemble the report as (label, value) rows.
    rows = [
        ("Patrol duration", "%02d:%02d:%02d" % (hours, minutes, seconds)),
        ("Requests sent:", self.log["requests"]),
        (" IPv4 requests:", self.log["ipv4_requests"]),
        (" IPv6 requests:", self.log["ipv6_requests"]),
        ("Bytes received:", self.log["bytes_recvd"]),
        (" IPv4 bytes:", self.log["ipv4_bytes_recvd"]),
        (" IPv6 bytes:", self.log["ipv6_bytes_recvd"]),
        ("Unique hosts visited:", len(self.visited_hosts)),
        (" IPv4 hosts:", ipv4_hosts),
        (" IPv6 hosts:", ipv6_hosts),
        ("DNS failures:", self.log["dns_failures"]),
        ("Timeouts:", self.log["timeouts"]),
        ("Refused connections:", self.log["refused_connections"]),
        ("Reset connections:", self.log["reset_connections"]),
        ("Cache hits:", self.log["cache_hits"]),
    ]
    for key, value in rows:
        print(key.ljust(24) + str(value).rjust(8))
### The end!
def do_quit(self, *args):
    """Exit AV-98."""
    # Persist and close the TOFU database.
    self.db_conn.commit()
    self.db_conn.close()
    # Clean up cached payloads and temporary rendered files.
    self._empty_cache()
    for tmpfile in (self.tmp_filename, self.idx_filename):
        if tmpfile and os.path.exists(tmpfile):
            os.unlink(tmpfile)
    # Delete any transient client certificates created this session.
    for cert in self.transient_certs_created:
        for ext in (".crt", ".key"):
            certfile = os.path.join(self.config_dir, "transient_certs", cert + ext)
            if os.path.exists(certfile):
                os.remove(certfile)
    print()
    print("Thank you for flying AV-98!")
    sys.exit()

do_exit = do_quit
# Main function
def main():
    """Parse the command line, apply the user's rc file, and run the
    interactive command loop until the user quits."""
    # Parse args
    parser = argparse.ArgumentParser(description='A command line gemini client.')
    parser.add_argument('--bookmarks', action='store_true',
        help='start with your list of bookmarks')
    parser.add_argument('--tls-cert', metavar='FILE', help='TLS client certificate file')
    parser.add_argument('--tls-key', metavar='FILE', help='TLS client certificate private key file')
    parser.add_argument('--restricted', action="store_true", help='Disallow shell, add, and save commands')
    parser.add_argument('--version', action='store_true',
        help='display version information and quit')
    parser.add_argument('url', metavar='URL', nargs='*',
        help='start with this URL')
    args = parser.parse_args()
    # Handle --version
    if args.version:
        print("AV-98 " + _VERSION)
        sys.exit()
    # Instantiate client
    gc = GeminiClient(args.restricted)
    # Process config file
    rcfile = os.path.join(gc.config_dir, "av98rc")
    if os.path.exists(rcfile):
        print("Using config %s" % rcfile)
        with open(rcfile, "r") as fp:
            for line in fp:
                line = line.strip()
                # Skip navigation commands from the rc file when URLs or
                # --bookmarks were given on the command line, so they
                # don't fight over the start page.
                if ((args.bookmarks or args.url) and
                    any((line.startswith(x) for x in ("go", "g", "tour", "t")))
                ):
                    if args.bookmarks:
                        print("Skipping rc command \"%s\" due to --bookmarks option." % line)
                    else:
                        print("Skipping rc command \"%s\" due to provided URLs." % line)
                    continue
                gc.cmdqueue.append(line)
    # Say hi
    print("Welcome to AV-98!")
    if args.restricted:
        print("Restricted mode engaged!")
    print("Enjoy your patrol through Geminispace...")
    # Act on args
    if args.tls_cert:
        # If tls_key is None, python will attempt to load the key from tls_cert.
        gc._activate_client_cert(args.tls_cert, args.tls_key)
    if args.bookmarks:
        gc.cmdqueue.append("bookmarks")
    elif args.url:
        # One URL opens directly; several become a tour.
        if len(args.url) == 1:
            gc.cmdqueue.append("go %s" % args.url[0])
        else:
            for url in args.url:
                if not url.startswith("gemini://"):
                    url = "gemini://" + url
                gc.cmdqueue.append("tour %s" % url)
            gc.cmdqueue.append("tour")
    # Endless interpret loop: Ctrl-C interrupts the current command but
    # does not exit the program.
    while True:
        try:
            gc.cmdloop()
        except KeyboardInterrupt:
            print("")
# Standard script entry point.
if __name__ == '__main__':
    main()
styles.js | import styled, { css } from 'styled-components';
import { Navlink as NavlinkComponent } from '../../Navlink';
import { media } from '@styles';
import { LanguageSwitch as LanguageSwitchComponent } from '../../LanguageSwitch';
// Mobile navigation bar: a full-width flex strip pinned to the top of
// the page; hidden entirely on large (lg+) viewports, where a desktop
// navigation presumably takes over — confirm against the page layout.
export const Nav = styled.nav`
  display: flex;
  align-items: center;
  justify-content: space-between;
  height: 75px;
  width: 100%;
  position: absolute;
  z-index: 1;
  ${media.lg(`
    display: none;
  `)}
`;
// Hamburger menu toggle: a 25x15px column that stacks three
// HamburgerBar children; labelled for screen readers via aria-label.
export const HamburgerButton = styled.button.attrs({
  'aria-label': 'Menu button',
})`
  display: flex;
  flex-direction: column;
  justify-content: space-between;
  height: 15px;
  width: 25px;
  margin-left: 20px;
  z-index: 3;
  border-radius: 0;
  outline: none;
`;
// A single white 25x2px bar of the hamburger icon.
export const HamburgerBar = styled.div`
  width: 25px;
  height: 2px;
  border-top: 2px solid white;
`;
// Full-screen overlay menu list. It starts fully transparent
// (opacity: 0) and behind the page content (z-index: -1) — presumably
// animated open elsewhere when the hamburger is clicked; confirm.
export const Navigation = styled.ul`
  display: flex;
  flex-direction: column;
  align-items: flex-start;
  justify-content: space-between;
  position: absolute;
  left: 0;
  top: 0;
  width: 100%;
  height: 100vh;
  background-color: #48464d;
  opacity: 0;
  list-style-type: none;
  z-index: -1;
  padding: 120px 40px 120px;
`;
// Menu link: dimmed white by default, full white when the `isActive`
// prop is set, and full white plus a 10px rightward nudge on hover.
export const Navlink = styled(NavlinkComponent)`
  color: rgba(255, 255, 255, 0.7);
  font-family: 'Montserrat', sans-serif;
  position: relative;
  left: 0;
  transition: left 0.2s ease-in-out;
  > a {
    font-size: 20px;
  }
  ${({ isActive }) =>
    isActive &&
    css`
      color: white;
    `}
  :hover {
    color: white;
    left: 10px;
  }
`;
// Language switcher, spaced 20px away from the right edge of the bar.
export const LanguageSwitch = styled(LanguageSwitchComponent)`
  margin: 0 20px 0 0;
`;
// export const PageLink = styled(PageLinkComponent)`
// font-size: 20px;
// color: rgba(255, 255, 255, 0.7);
// position: relative;
// left: 0;
// transition: left 0.2s ease-in-out;
// :hover {
// color: white;
// left: 10px;
// }
// `; | |
common.go | package common
import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
	"time"
	"unicode"
)
// Directory names used by the job machinery, relative to the working
// directory: OutDir receives result .ini files, JobDir holds job
// bookkeeping files.
const (
	OutDir = "spotted"
	JobDir = "jobs"
)
// cfgField is a single key=value pair parsed from a config file.
type cfgField struct {
	Name string
	Value string
}

// conf is the ordered list of fields parsed from one config file.
type conf []cfgField
// Package-level job state shared between commands.
var TS string      // NOTE(review): assigned elsewhere; appears to hold a timestamp — confirm producer
var JobName string // name of the job currently being processed
var resultSection string // INI fragment accumulated by the JobSet* helpers
// Trims is a convenience alias for strings.TrimSpace.
var Trims = strings.TrimSpace
// TrimQ strips any leading and trailing backslashes and single or
// double quotes from ins.
func TrimQ(ins string) string {
	const quoteCutset = "\\'\""
	return strings.Trim(ins, quoteCutset)
}
// Shared HTTP transport: a small idle-connection pool and compression
// disabled so response bodies arrive verbatim.
var tr = &http.Transport{
	MaxIdleConns: 10,
	IdleConnTimeout: 10 * time.Second,
	DisableCompression: true,
}

// htclient is the package-wide HTTP client used by HTTPSend.
// NOTE(review): no Timeout is set, so a stalled server can hang a
// request indefinitely.
var htclient = &http.Client{Transport: tr}
// HTTPSend executes request with the shared package client and returns
// the full response body. Any transport error, read error, or empty
// body is fatal: a diagnostic is printed to stderr and the process
// exits with status 1.
func HTTPSend(request *http.Request) []byte {
	resp, err := htclient.Do(request)
	if err != nil {
		fmt.Fprintf(os.Stderr, "HTTP request failed with error: %v\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	// io.ReadAll reads to EOF, which also handles chunked responses
	// (ContentLength == -1) and short reads; the previous single
	// Body.Read into a ContentLength-sized buffer could under-read.
	reqResult, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Fprintf(os.Stderr, "HTTP failed to read response\n")
		os.Exit(1)
	}
	if len(reqResult) < 1 {
		fmt.Fprintf(os.Stderr, "HTTP bad response length\n")
		os.Exit(1)
	}
	return reqResult
}
// RemoveSpaces returns str with every Unicode whitespace rune deleted.
func RemoveSpaces(str string) string {
	dropSpace := func(r rune) rune {
		if unicode.IsSpace(r) {
			return -1 // strings.Map drops runes mapped to a negative value
		}
		return r
	}
	return strings.Map(dropSpace, str)
}
// ParseConfig reads the file at src and returns its key=value lines as
// a conf. All whitespace is stripped from each line before it is split
// on the first "=". Lines without "=" are ignored. Files that cannot
// be read, or are shorter than 10 bytes, yield an empty conf.
func ParseConfig(src string) conf {
	var fullConf conf
	// os.ReadFile reads the whole file; the previous code used one
	// Read into a fixed 16 KiB buffer, silently truncating larger
	// configs and ignoring partial reads.
	cfg, err := os.ReadFile(src)
	if err != nil || len(cfg) < 10 {
		return fullConf
	}
	for _, field := range strings.Split(string(cfg), "\n") {
		fpair := strings.SplitN(RemoveSpaces(field), "=", 2)
		if len(fpair) == 2 {
			fullConf = append(fullConf, cfgField{fpair[0], fpair[1]})
		}
	}
	return fullConf
}
// JobRemove deletes the "current job" marker file and exits with
// status 0.
func JobRemove() {
	currentMarker := fmt.Sprintf("%s/%s", JobDir, "current.txt")
	_ = os.Remove(currentMarker)
	fmt.Println("Unfinished job removed!")
	os.Exit(0)
}
// JobDone deletes the "current job" marker file and reports which job
// finished.
func JobDone() {
	_ = os.Remove(JobDir + "/" + "current.txt")
	fmt.Println("Job done:")
	fmt.Println(JobName)
}
// JobStore persists jobID under jobs/<JobName>.txt and records JobName
// in jobs/current.txt as the job in progress. Any write failure is
// fatal: a diagnostic is printed to stderr and the process exits with
// status 1.
func JobStore(jobID string) {
	// os.WriteFile replaces the previous open/write/close sequences
	// and removes the redundant fmt.Sprintf("%s", JobName).
	jobPath := fmt.Sprintf("%s/%s.txt", JobDir, JobName)
	if err := os.WriteFile(jobPath, []byte(jobID), 0666); err != nil {
		fmt.Fprintf(os.Stderr, "Cannot write to file %q, %v\n", jobPath, err)
		os.Exit(1)
	}
	currentPath := fmt.Sprintf("%s/current.txt", JobDir)
	if err := os.WriteFile(currentPath, []byte(JobName), 0666); err != nil {
		fmt.Fprintf(os.Stderr, "Cannot write to file %q, %v\n", currentPath, err)
		os.Exit(1)
	}
}
// JobFile returns the output INI path for the given result index,
// e.g. "spotted/<JobName>_<index>.ini".
func JobFile(index int64) string {
	return fmt.Sprintf("%s/%s_%d.ini", OutDir, JobName, index)
}
func JobGetResult() string |
func JobSetSection(num int, s_timestamp, s_index int64) {
resultSection = fmt.Sprintf("%s%d%s%d%s%d%s", "[f_", num, "_", s_timestamp, "_", s_index, "]\n")
}
func JobSetTimestamp(s_timestamp int64) {
resultSection = fmt.Sprintf("%s%s%d%s", resultSection, "Timestamp=", s_timestamp, "\n")
}
func JobSetFrame(s_frame int64) {
resultSection = fmt.Sprintf("%s%s%d%s", resultSection, "FrameN=", s_frame, "\n")
}
func JobSetBoxLeft(s_left float64) {
resultSection = fmt.Sprintf("%s%s%f%s", resultSection, "Left=", s_left, "\n")
}
func JobSetBoxTop(s_top float64) {
resultSection = fmt.Sprintf("%s%s%f%s", resultSection, "Top=", s_top, "\n")
}
func JobSetBoxWidth(s_width float64) {
resultSection = fmt.Sprintf("%s%s%f%s", resultSection, "Width=", s_width, "\n")
}
func JobSetBoxHeight(s_height float64) {
resultSection = fmt.Sprintf("%s%s%f%s", resultSection, "Height=", s_height, "\n")
}
| {
return resultSection
} |
add_property.go | // _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2021 SeMI Technologies B.V. All rights reserved.
//
// CONTACT: [email protected]
//
package schema
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/semi-technologies/weaviate/entities/models"
"github.com/semi-technologies/weaviate/entities/schema"
)
// AddClassProperty to an existing Class
func (m *Manager) AddClassProperty(ctx context.Context, principal *models.Principal,
class string, property *models.Property) error {
err := m.authorizer.Authorize(principal, "update", "schema/objects")
if err != nil {
return err
}
return m.addClassProperty(ctx, principal, class, property)
}
func (m *Manager) addClassProperty(ctx context.Context, principal *models.Principal, className string,
prop *models.Property) error {
m.Lock()
defer m.Unlock()
semanticSchema := m.state.SchemaFor()
class, err := schema.GetClassByName(semanticSchema, className)
if err != nil {
return err
}
prop.Name = lowerCaseFirstLetter(prop.Name)
err = m.validateCanAddProperty(ctx, principal, prop, class)
if err != nil {
return err
}
tx, err := m.cluster.BeginTransaction(ctx, AddProperty,
AddPropertyPayload{className, prop})
if err != nil {
// possible causes for errors could be nodes down (we expect every node to
// the up for a schema transaction) or concurrent transactions from other
// nodes
return errors.Wrap(err, "open cluster-wide transaction")
}
if err := m.cluster.CommitTransaction(ctx, tx); err != nil {
return errors.Wrap(err, "commit cluster-wide transaction")
}
return m.addClassPropertyApplyChanges(ctx, className, prop)
}
func (m *Manager) addClassPropertyApplyChanges(ctx context.Context,
className string, prop *models.Property) error {
semanticSchema := m.state.SchemaFor()
class, err := schema.GetClassByName(semanticSchema, className)
if err != nil {
return err
}
class.Properties = append(class.Properties, prop)
err = m.saveSchema(ctx)
if err != nil {
return nil
}
return m.migrator.AddProperty(ctx, className, prop)
}
func (m *Manager) validateCanAddProperty(ctx context.Context, principal *models.Principal,
property *models.Property, class *models.Class) error {
// Verify format of property.
_, err := schema.ValidatePropertyName(property.Name)
if err != nil {
return err
}
// Verify that property name is not a reserved name
err = schema.ValidateReservedPropertyName(property.Name)
if err != nil {
return err
}
// First check if there is a name clash.
err = validatePropertyNameUniqueness(property.Name, class)
if err != nil {
return err
}
err = m.validatePropertyName(ctx, class.Class, property.Name,
property.ModuleConfig) | return err
}
// Validate data type of property.
schema, err := m.GetSchema(principal)
if err != nil {
return err
}
_, err = (&schema).FindPropertyDataType(property.DataType)
if err != nil {
return fmt.Errorf("Data type of property '%s' is invalid; %v", property.Name, err)
}
// all is fine!
return nil
} | if err != nil { |
fail_fast.rs | use crate::common::jormungandr::JormungandrProcess;
use crate::common::{jormungandr::ConfigurationBuilder, startup};
use chain_impl_mockchain::{block::BlockDate, fragment::Fragment};
use jormungandr_testing_utils::testing::fragments::FaultyTransactionBuilder;
use jormungandr_testing_utils::testing::node::assert_bad_request;
use jormungandr_testing_utils::testing::FragmentSenderSetup;
use jormungandr_testing_utils::testing::FragmentVerifier;
use rstest::*;
use std::time::Duration;
#[fixture]
fn world() -> (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
) {
let mut alice = startup::create_new_account_address();
let mut bob = startup::create_new_account_address();
let mut clarice = startup::create_new_account_address();
let mut david = startup::create_new_account_address();
let (jormungandr, _stake_pools) = startup::start_stake_pool(
&[alice.clone()],
&[bob.clone(), clarice.clone()],
&mut ConfigurationBuilder::new(),
)
.unwrap();
let alice_fragment = alice
.transaction_to(
&jormungandr.genesis_block_hash(),
&jormungandr.fees(),
BlockDate::first().next_epoch(),
bob.address(),
100.into(),
)
.unwrap();
let bob_fragment = bob
.transaction_to(
&jormungandr.genesis_block_hash(),
&jormungandr.fees(),
BlockDate::first().next_epoch(),
alice.address(),
100.into(),
)
.unwrap();
let clarice_fragment = clarice
.transaction_to(
&jormungandr.genesis_block_hash(),
&jormungandr.fees(),
BlockDate::first().next_epoch(),
alice.address(),
100.into(),
)
.unwrap();
let late_invalid_fragment = david
.transaction_to(
&jormungandr.genesis_block_hash(),
&jormungandr.fees(),
BlockDate::first().next_epoch(),
alice.address(),
100.into(),
)
.unwrap();
let faulty_tx_builder = FaultyTransactionBuilder::new(
jormungandr.genesis_block_hash(),
jormungandr.fees(),
BlockDate::first().next_epoch(),
);
let early_invalid_fragment = faulty_tx_builder.unbalanced(&alice, &bob);
(
jormungandr,
alice_fragment,
bob_fragment,
clarice_fragment,
early_invalid_fragment,
late_invalid_fragment,
)
}
#[rstest]
pub fn fail_fast_on_all_valid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, valid_fragment_3, _, _) = world;
let transaction_sender = jormungandr.fragment_sender(FragmentSenderSetup::resend_3_times());
let tx_ids = transaction_sender
.send_batch_fragments(
vec![valid_fragment_1, valid_fragment_2, valid_fragment_3],
true,
&jormungandr,
)
.unwrap();
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_all_valid(&tx_ids);
}
#[rstest]
pub fn fail_fast_off_all_valid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, valid_fragment_3, _, _) = world;
let transaction_sender = jormungandr.fragment_sender(FragmentSenderSetup::resend_3_times());
let tx_ids = transaction_sender
.send_batch_fragments(
vec![valid_fragment_1, valid_fragment_2, valid_fragment_3],
false,
&jormungandr,
)
.unwrap();
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_all_valid(&tx_ids);
}
#[rstest]
pub fn fail_fast_on_first_invalid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment, | ) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, early_invalid_fragment, _) = world;
assert_bad_request(jormungandr.rest().send_fragment_batch(
vec![early_invalid_fragment, valid_fragment_1, valid_fragment_2],
true,
));
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_no_fragments();
}
#[rstest]
pub fn fail_fast_on_first_late_invalid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, _, late_invalid_fragment) = world;
let transaction_sender = jormungandr.fragment_sender(FragmentSenderSetup::resend_3_times());
let tx_ids = transaction_sender
.send_batch_fragments(
vec![late_invalid_fragment, valid_fragment_1, valid_fragment_2],
true,
&jormungandr,
)
.unwrap();
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_invalid(&tx_ids[0])
.assert_valid(&tx_ids[1])
.assert_valid(&tx_ids[2]);
}
#[rstest]
pub fn fail_fast_off_first_invalid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, early_invalid_fragment, _) = world;
let tx_ids = assert_bad_request(jormungandr.rest().send_fragment_batch(
vec![valid_fragment_1, valid_fragment_2, early_invalid_fragment],
true,
));
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_not_exist(&tx_ids[2])
.assert_valid(&tx_ids[0])
.assert_valid(&tx_ids[1]);
}
#[rstest]
pub fn fail_fast_off_invalid_in_middle(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, early_invalid_fragment, _) = world;
let tx_ids = assert_bad_request(jormungandr.rest().send_fragment_batch(
vec![valid_fragment_1, early_invalid_fragment, valid_fragment_2],
false,
));
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_valid(&tx_ids[0])
.assert_valid(&tx_ids[2])
.assert_not_exist(&tx_ids[1]);
}
#[rstest]
pub fn fail_fast_on_invalid_in_middle(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, early_invalid_fragment, _) = world;
let tx_ids = assert_bad_request(jormungandr.rest().send_fragment_batch(
vec![valid_fragment_1, early_invalid_fragment, valid_fragment_2],
true,
));
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_valid(&tx_ids[0])
.assert_not_exist(&tx_ids[1])
.assert_not_exist(&tx_ids[2]);
}
#[rstest]
pub fn fail_fast_on_last_invalid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, early_invalid_fragment, _) = world;
let tx_ids = assert_bad_request(jormungandr.rest().send_fragment_batch(
vec![valid_fragment_1, valid_fragment_2, early_invalid_fragment],
true,
));
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_valid(&tx_ids[0])
.assert_valid(&tx_ids[1])
.assert_not_exist(&tx_ids[2]);
}
#[rstest]
pub fn fail_fast_off_last_invalid(
world: (
JormungandrProcess,
Fragment,
Fragment,
Fragment,
Fragment,
Fragment,
),
) {
let (jormungandr, valid_fragment_1, valid_fragment_2, _, early_invalid_fragment, _) = world;
let tx_ids = assert_bad_request(jormungandr.rest().send_fragment_batch(
vec![valid_fragment_1, valid_fragment_2, early_invalid_fragment],
false,
));
FragmentVerifier::wait_for_all_fragments(Duration::from_secs(5), &jormungandr).unwrap();
jormungandr
.correct_state_verifier()
.fragment_logs()
.assert_valid(&tx_ids[0])
.assert_valid(&tx_ids[1])
.assert_not_exist(&tx_ids[2]);
} | Fragment,
), |
index.js | /**
* Copyright (c) Nicolas Gallagher.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @flow
*/
import { Component } from 'react';
export default class StatusBar extends Component<*> {
static setBackgroundColor() {}
static setBarStyle() {}
static setHidden() {}
static setNetworkActivityIndicatorVisible() {}
static setTranslucent() {}
render() {
return null; | }
} |
|
main.rs | extern crate clap;
#[macro_use]
extern crate log;
extern crate simple_logger;
use clap::{Arg, App};
use log::Level;
fn | () {
let log_level;
let matches = App::new("helm-lintr")
.version("0.1.0")
.author("Matthew Fisher <[email protected]>")
.about("Runs a series of tests to verify that a Helm chart is well-formed.")
.arg(Arg::with_name("config")
.short("c")
.long("config")
.value_name("FILE")
.help("Specify a configuration file")
.takes_value(true))
.arg(Arg::with_name("PATH")
.help("The path to the chart")
.required(true)
.index(1))
.arg(Arg::with_name("v")
.short("v")
.multiple(true)
.help("Sets the level of verbosity"))
.get_matches();
// Vary the output based on how many times the user used the "verbose" flag
// (i.e. 'helm-lintr -v -v -v' or 'helm-lintr -vvv'
match matches.occurrences_of("v") {
0 => log_level = Level::Error,
1 => log_level = Level::Warn,
2 => log_level = Level::Info,
3 => log_level = Level::Debug,
4 | _ => log_level = Level::Trace,
}
simple_logger::init_with_level(log_level).unwrap();
// Gets a value for config if supplied by user, or defaults to "lintr.conf"
let config = matches.value_of("config").unwrap_or("lintr.conf");
debug!("Value for config: {}", config);
// Calling .unwrap() is safe here because "PATH" is required
debug!("Using chart file: {}", matches.value_of("PATH").unwrap());
}
| main |
tcp_proxy_profile.go | package models
// This file is auto-generated.
// Please contact [email protected] for any change requests.
// TCPProxyProfile TCP proxy profile
// swagger:model TCPProxyProfile
type TCPProxyProfile struct {
// Controls the our congestion window to send, normally it's 1 mss, If this option is turned on, we use 10 msses.
AggressiveCongestionAvoidance *bool `json:"aggressive_congestion_avoidance,omitempty"`
// Controls whether the windows are static or supports autogrowth. Maximum that it can grow to is limited to 4MB. Field introduced in 20.1.1.
AutoWindowGrowth *bool `json:"auto_window_growth,omitempty"`
// Dynamically pick the relevant parameters for connections. Allowed in Basic(Allowed values- true) edition, Enterprise edition.
Automatic *bool `json:"automatic,omitempty"`
// Controls the congestion control algorithm we use. Enum options - CC_ALGO_NEW_RENO, CC_ALGO_CUBIC, CC_ALGO_HTCP.
CcAlgo *string `json:"cc_algo,omitempty"`
// Congestion window scaling factor after recovery. Allowed values are 0-8. Field introduced in 17.2.12, 18.1.3, 18.2.1.
CongestionRecoveryScalingFactor *int32 `json:"congestion_recovery_scaling_factor,omitempty"`
// The duration for keepalive probes or session idle timeout. Max value is 3600 seconds, min is 5. Set to 0 to allow infinite idle time. Allowed values are 5-14400. Special values are 0 - 'infinite'. Unit is SEC.
IDLEConnectionTimeout *int32 `json:"idle_connection_timeout,omitempty"`
// Controls the behavior of idle connections. Enum options - KEEP_ALIVE, CLOSE_IDLE.
IDLEConnectionType *string `json:"idle_connection_type,omitempty"`
// A new SYN is accepted from the same 4-tuple even if there is already a connection in TIME_WAIT state. This is equivalent of setting Time Wait Delay to 0.
IgnoreTimeWait *bool `json:"ignore_time_wait,omitempty"`
// Controls the value of the Differentiated Services Code Point field inserted in the IP header. This has two options Set to a specific value, or Pass Through, which uses the incoming DSCP value. Allowed values are 0-63. Special values are MAX - 'Passthrough'.
IPDscp *int32 `json:"ip_dscp,omitempty"`
// Controls whether to keep the connection alive with keepalive messages in the TCP half close state. The interval for sending keepalive messages is 30s. If a timeout is already configured in the network profile, this will not override it. Field introduced in 18.2.6.
KeepaliveInHalfcloseState *bool `json:"keepalive_in_halfclose_state,omitempty"`
// The number of attempts at retransmit before closing the connection. Allowed values are 3-8.
MaxRetransmissions *int32 `json:"max_retransmissions,omitempty"`
// Maximum TCP segment size. Allowed values are 512-9000. Special values are 0 - 'Use Interface MTU'. Unit is BYTES.
MaxSegmentSize *int32 `json:"max_segment_size,omitempty"`
// The maximum number of attempts at retransmitting a SYN packet before giving up. Allowed values are 3-8.
MaxSynRetransmissions *int32 `json:"max_syn_retransmissions,omitempty"`
// The minimum wait time (in millisec) to retransmit packet. Allowed values are 50-5000. Field introduced in 17.2.8. Unit is MILLISECONDS.
MinRexmtTimeout *int32 `json:"min_rexmt_timeout,omitempty"`
// Consolidates small data packets to send clients fewer but larger packets. Adversely affects real time protocols such as telnet or SSH.
NaglesAlgorithm *bool `json:"nagles_algorithm,omitempty"` |
// Size of the receive window. Allowed values are 2-65536. Unit is KB.
ReceiveWindow *int32 `json:"receive_window,omitempty"`
// Controls the number of duplicate acks required to trigger retransmission. Setting a higher value reduces retransmission caused by packet reordering. A larger value is recommended in public cloud environments where packet reordering is quite common. The default value is 8 in public cloud platforms (AWS, Azure, GCP), and 3 in other environments. Allowed values are 1-100. Field introduced in 17.2.7.
ReorderThreshold *int32 `json:"reorder_threshold,omitempty"`
// Congestion window scaling factor during slow start. Allowed values are 0-8. Field introduced in 17.2.12, 18.1.3, 18.2.1.
SlowStartScalingFactor *int32 `json:"slow_start_scaling_factor,omitempty"`
// The time (in millisec) to wait before closing a connection in the TIME_WAIT state. Allowed values are 500-2000. Special values are 0 - 'immediate'. Unit is MILLISECONDS.
TimeWaitDelay *int32 `json:"time_wait_delay,omitempty"`
// Use the interface MTU to calculate the TCP max segment size.
UseInterfaceMtu *bool `json:"use_interface_mtu,omitempty"`
} |
// Maximum number of TCP segments that can be queued for reassembly. Configuring this to 0 disables the feature and provides unlimited queuing. Field introduced in 17.2.13, 18.1.4, 18.2.1.
ReassemblyQueueSize *int32 `json:"reassembly_queue_size,omitempty"` |
test.py | import math
def pra(n):
|
def izpisi():
for i in range (2,200):
if (pra(i)):
print(i)
| for i in range (2,(int)(math.sqrt(n))):
if (n%i==0):
return False
return True |
background.js | 'use strict'
import { app, protocol, BrowserWindow } from 'electron'
import { createProtocol } from 'vue-cli-plugin-electron-builder/lib'
// import installExtension, { VUEJS_DEVTOOLS } from 'electron-devtools-installer'
const isDevelopment = process.env.NODE_ENV !== 'production'
// Keep a global reference of the window object, if you don't, the window will
// be closed automatically when the JavaScript object is garbage collected.
let win
// Scheme must be registered before the app is ready
protocol.registerSchemesAsPrivileged([
{ scheme: 'app', privileges: { secure: true, standard: true } }
])
function createWindow() {
// Create the browser window.
win = new BrowserWindow({
width: 800,
height: 600,
webPreferences: {
// Use pluginOptions.nodeIntegration, leave this alone
// See nklayman.github.io/vue-cli-plugin-electron-builder/guide/security.html#node-integration for more info
nodeIntegration: process.env.ELECTRON_NODE_INTEGRATION
}
})
if (process.env.WEBPACK_DEV_SERVER_URL) {
// Load the url of the dev server if in development mode
win.loadURL(process.env.WEBPACK_DEV_SERVER_URL)
if (!process.env.IS_TEST) win.webContents.openDevTools()
} else {
createProtocol('app')
// Load the index.html when not in development
win.loadURL('app://./index.html')
}
win.on('closed', () => {
win = null
})
}
// Quit when all windows are closed.
app.on('window-all-closed', () => {
// On macOS it is common for applications and their menu bar
// to stay active until the user quits explicitly with Cmd + Q
if (process.platform !== 'darwin') {
app.quit()
}
})
app.on('activate', () => {
// On macOS it's common to re-create a window in the app when the
// dock icon is clicked and there are no other windows open.
if (win === null) {
createWindow()
}
})
// This method will be called when Electron has finished
// initialization and is ready to create browser windows.
// Some APIs can only be used after this event occurs.
app.on('ready', async () => {
if (isDevelopment && !process.env.IS_TEST) {
// Install Vue Devtools
// Devtools extensions are broken in Electron 6/7/<8.25 on Windows
// See https://github.com/nklayman/vue-cli-plugin-electron-builder/issues/378 for more info
// Electron will not launch with Devtools extensions installed on Windows 10 with dark mode
// If you are not using Windows 10 dark mode, you may uncomment the following lines (and the import at the top of the file)
// In addition, if you upgrade to Electron ^8.2.5 or ^9.0.0 then devtools should work fine
// try {
// await installExtension(VUEJS_DEVTOOLS)
// } catch (e) {
// console.error('Vue Devtools failed to install:', e.toString())
// }
}
createWindow()
})
// Exit cleanly on request from parent process in development mode.
if (isDevelopment) {
if (process.platform === 'win32') {
process.on('message', (data) => { | }
})
} else {
process.on('SIGTERM', () => {
app.quit()
})
}
} | if (data === 'graceful-exit') {
app.quit() |
i-c-s-soft-s-t-o-r-m-n-e-t-security-class-e.js | import EditFormController from 'ember-flexberry/controllers/edit-form';
import EditFormControllerOperationsIndicationMixin from 'ember-flexberry/mixins/edit-form-controller-operations-indication';
export default EditFormController.extend(EditFormControllerOperationsIndicationMixin, {
// Caption of this particular edit form.
| parentRoute: 'i-c-s-soft-s-t-o-r-m-n-e-t-security-class-l',
}); |
|
api_error.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class | (Model):
"""Api error.
:param details: The Api error details
:type details: list of :class:`ApiErrorBase
<azure.mgmt.compute.v2015_06_15.models.ApiErrorBase>`
:param innererror: The Api inner error
:type innererror: :class:`InnerError
<azure.mgmt.compute.v2015_06_15.models.InnerError>`
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'details': {'key': 'details', 'type': '[ApiErrorBase]'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, details=None, innererror=None, code=None, target=None, message=None):
self.details = details
self.innererror = innererror
self.code = code
self.target = target
self.message = message
| ApiError |
client_test.py | from typing import cast, IO, Tuple, List, Any, Union
from threading import Thread
import shlex
import subprocess as sp
import reckon.reckon_types as t
import reckon.client_runner as cr
def spawn(command: List[str]) -> Tuple[t.Client, sp.Popen[bytes]]:
p = sp.Popen(command, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
return t.Client(cast(IO[bytes], p.stdin), cast(IO[bytes], p.stdout), "Test"), p
def check_alive(p: sp.Popen[Any]) -> bool:
if p.poll():
return False
else:
return True
class DummyWorkload(t.AbstractWorkload):
def __init__(self, client):
self.client = client
self._clients = [client]
@property
def prerequisites(self):
return [
t.Operation(
time=-1,
payload=t.Write(
kind=t.OperationKind.Write, key="preload", value="preload"
),
)
for _ in range(3)
]
@property
def workload(self):
i = 0
while True:
yield (
self.client,
(
t.Operation(
time=i,
payload=t.Write(kind=t.OperationKind.Write, key="k", value="v"),
)
if i % 2 == 0
else t.Operation(
time=i, payload=t.Read(kind=t.OperationKind.Read, key="k")
)
),
)
i += 1
class CannedClient(t.Client):
def __init__(self, canned_output: str):
self._sent_messages: List[t.Message] = []
cat = sp.Popen(f"cat {canned_output}", stdout=sp.PIPE, shell=True)
self.stdout = cat.stdout
self.id = "pseudoclient"
def send(self, msg: t.Message):
self._sent_messages.append(msg)
@property
def sent_messages(self) -> List[t.Message]:
|
def client_bootstrap(input_file, output_file):
with open(input_file, "wb") as f_in:
cat = sp.Popen(f"cat {output_file}", stdout=sp.PIPE, shell=True)
c = t.Client(f_in, cat.stdout, "test") # type: ignore
results = cr.test_steps([c], DummyWorkload(c), [], 10)
print(results)
class TaggedFailure(t.AbstractFailureGenerator):
def __init__(self):
self.failures = []
def get_failures(self, net, system, restarters, stoppers) -> List[t.AbstractFault]:
del net, system, restarters, stoppers # unused
class failure(t.AbstractFault):
def __init__(self, parent, id):
self._id = id
self.parent = parent
@property
def id(self) -> str:
return self._id
def apply_fault(self):
self.parent.failures.append(self.id)
return [failure(self, 1), failure(self, 2)]
def test_client_runner():
canned_output_file = "scripts/out.bin"
expected_input_file = "scripts/in.bin"
c_out = CannedClient(canned_output_file)
fault = TaggedFailure()
results = cr.test_steps(
[c_out], DummyWorkload(c_out), fault.get_failures(1, 2, 3, 4), 10
)
# Test if the sent messages are as expected
c_in = CannedClient(expected_input_file)
expected_sent_messages: List[t.Message] = []
try:
while True:
msg = c_in.recv()
expected_sent_messages.append(msg)
except EOFError:
pass
assert c_out.sent_messages == expected_sent_messages
# Test if the results are as expected
results = results.__root__
assert len(results) == 11
assert all([r.result == "Success" for r in results])
assert len([r for r in results if r.op_kind == t.OperationKind.Write]) == 6
assert len([r for r in results if r.op_kind == t.OperationKind.Read]) == 5
# Test if the faults were applied in the right order
assert fault.failures == [1, 2]
| return self._sent_messages |
securitycenter_service.pb.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// source: google/cloud/securitycenter/v1p1beta1/securitycenter_service.proto
package securitycenter
import (
context "context"
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
duration "github.com/golang/protobuf/ptypes/duration"
empty "github.com/golang/protobuf/ptypes/empty"
_struct "github.com/golang/protobuf/ptypes/struct"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
v1 "google.golang.org/genproto/googleapis/iam/v1"
longrunning "google.golang.org/genproto/googleapis/longrunning"
field_mask "google.golang.org/genproto/protobuf/field_mask"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// The change in state of the asset.
//
// When querying across two points in time this describes
// the change between the two points: ADDED, REMOVED, or ACTIVE.
// If there was no compare_duration supplied in the request the state change
// will be: UNUSED
type ListAssetsResponse_ListAssetsResult_StateChange int32
const (
// State change is unused, this is the canonical default for this enum.
ListAssetsResponse_ListAssetsResult_UNUSED ListAssetsResponse_ListAssetsResult_StateChange = 0
// Asset was added between the points in time.
ListAssetsResponse_ListAssetsResult_ADDED ListAssetsResponse_ListAssetsResult_StateChange = 1
// Asset was removed between the points in time.
ListAssetsResponse_ListAssetsResult_REMOVED ListAssetsResponse_ListAssetsResult_StateChange = 2
// Asset was present at both point(s) in time.
ListAssetsResponse_ListAssetsResult_ACTIVE ListAssetsResponse_ListAssetsResult_StateChange = 3
)
// Enum value maps for ListAssetsResponse_ListAssetsResult_StateChange.
var (
ListAssetsResponse_ListAssetsResult_StateChange_name = map[int32]string{
0: "UNUSED",
1: "ADDED",
2: "REMOVED",
3: "ACTIVE",
}
ListAssetsResponse_ListAssetsResult_StateChange_value = map[string]int32{
"UNUSED": 0,
"ADDED": 1,
"REMOVED": 2,
"ACTIVE": 3,
}
)
func (x ListAssetsResponse_ListAssetsResult_StateChange) Enum() *ListAssetsResponse_ListAssetsResult_StateChange {
p := new(ListAssetsResponse_ListAssetsResult_StateChange)
*p = x
return p
}
func (x ListAssetsResponse_ListAssetsResult_StateChange) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ListAssetsResponse_ListAssetsResult_StateChange) Descriptor() protoreflect.EnumDescriptor {
return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_enumTypes[0].Descriptor()
}
func (ListAssetsResponse_ListAssetsResult_StateChange) Type() protoreflect.EnumType {
return &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_enumTypes[0]
}
func (x ListAssetsResponse_ListAssetsResult_StateChange) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ListAssetsResponse_ListAssetsResult_StateChange.Descriptor instead.
func (ListAssetsResponse_ListAssetsResult_StateChange) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{17, 0, 0}
}
// The change in state of the finding.
//
// When querying across two points in time this describes
// the change in the finding between the two points: CHANGED, UNCHANGED,
// ADDED, or REMOVED. Findings can not be deleted, so REMOVED implies that
// the finding at timestamp does not match the filter specified, but it did
// at timestamp - compare_duration. If there was no compare_duration
// supplied in the request the state change will be: UNUSED
type ListFindingsResponse_ListFindingsResult_StateChange int32

// The numeric values below mirror the enum declared in the .proto file and
// are what appears on the wire.
const (
	// State change is unused, this is the canonical default for this enum.
	ListFindingsResponse_ListFindingsResult_UNUSED ListFindingsResponse_ListFindingsResult_StateChange = 0
	// The finding has changed state in some way between the points in time
	// and existed at both points.
	ListFindingsResponse_ListFindingsResult_CHANGED ListFindingsResponse_ListFindingsResult_StateChange = 1
	// The finding has not changed state between the points in time and
	// existed at both points.
	ListFindingsResponse_ListFindingsResult_UNCHANGED ListFindingsResponse_ListFindingsResult_StateChange = 2
	// The finding was created between the points in time.
	ListFindingsResponse_ListFindingsResult_ADDED ListFindingsResponse_ListFindingsResult_StateChange = 3
	// The finding at timestamp does not match the filter specified, but it
	// did at timestamp - compare_duration.
	ListFindingsResponse_ListFindingsResult_REMOVED ListFindingsResponse_ListFindingsResult_StateChange = 4
)
// Enum value maps for ListFindingsResponse_ListFindingsResult_StateChange.
var (
	// Maps numeric enum value -> proto name.
	ListFindingsResponse_ListFindingsResult_StateChange_name = map[int32]string{
		0: "UNUSED",
		1: "CHANGED",
		2: "UNCHANGED",
		3: "ADDED",
		4: "REMOVED",
	}
	// Maps proto name -> numeric enum value (inverse of _name).
	ListFindingsResponse_ListFindingsResult_StateChange_value = map[string]int32{
		"UNUSED":    0,
		"CHANGED":   1,
		"UNCHANGED": 2,
		"ADDED":     3,
		"REMOVED":   4,
	}
)
// Enum returns a pointer to a freshly allocated copy of x. This is the
// conventional helper for populating optional enum fields.
func (x ListFindingsResponse_ListFindingsResult_StateChange) Enum() *ListFindingsResponse_ListFindingsResult_StateChange {
	v := x
	return &v
}
// String renders x using the protobuf enum formatter (the proto name, or a
// numeric fallback for unknown values).
func (x ListFindingsResponse_ListFindingsResult_StateChange) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
// Descriptor returns the protoreflect.EnumDescriptor for this enum, taken
// from slot 1 of the file's generated enum-type table.
func (ListFindingsResponse_ListFindingsResult_StateChange) Descriptor() protoreflect.EnumDescriptor {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_enumTypes[1].Descriptor()
}
// Type returns the protoreflect.EnumType for this enum, taken from slot 1
// of the file's generated enum-type table.
func (ListFindingsResponse_ListFindingsResult_StateChange) Type() protoreflect.EnumType {
	return &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_enumTypes[1]
}
// Number returns x as a protoreflect.EnumNumber (the enum's numeric wire value).
func (x ListFindingsResponse_ListFindingsResult_StateChange) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}
// Deprecated: Use ListFindingsResponse_ListFindingsResult_StateChange.Descriptor instead.
// The returned index path {19, 0, 0} locates this enum inside the compressed
// raw file descriptor.
func (ListFindingsResponse_ListFindingsResult_StateChange) EnumDescriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{19, 0, 0}
}
// Request message for creating a finding.
type CreateFindingRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Resource name of the new finding's parent. Its format should be
	// "organizations/[organization_id]/sources/[source_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. Unique identifier provided by the client within the parent scope.
	FindingId string `protobuf:"bytes,2,opt,name=finding_id,json=findingId,proto3" json:"finding_id,omitempty"`
	// Required. The Finding being created. The name and security_marks will be ignored as
	// they are both output only fields on this resource.
	Finding *Finding `protobuf:"bytes,3,opt,name=finding,proto3" json:"finding,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *CreateFindingRequest) Reset() {
	*x = CreateFindingRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *CreateFindingRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CreateFindingRequest as a protobuf message.
func (*CreateFindingRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[0]) on first use.
func (x *CreateFindingRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateFindingRequest.ProtoReflect.Descriptor instead.
func (*CreateFindingRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{0}
}

// GetParent returns the parent field; safe on a nil receiver (returns "").
func (x *CreateFindingRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetFindingId returns the finding_id field; safe on a nil receiver (returns "").
func (x *CreateFindingRequest) GetFindingId() string {
	if x != nil {
		return x.FindingId
	}
	return ""
}

// GetFinding returns the finding field; safe on a nil receiver (returns nil).
func (x *CreateFindingRequest) GetFinding() *Finding {
	if x != nil {
		return x.Finding
	}
	return nil
}
// Request message for creating a notification config.
type CreateNotificationConfigRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Resource name of the new notification config's parent. Its format is
	// "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required.
	// Unique identifier provided by the client within the parent scope.
	// It must be between 1 and 128 characters, and contains alphanumeric
	// characters, underscores or hyphens only.
	ConfigId string `protobuf:"bytes,2,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"`
	// Required. The notification config being created. The name and the service account
	// will be ignored as they are both output only fields on this resource.
	NotificationConfig *NotificationConfig `protobuf:"bytes,3,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *CreateNotificationConfigRequest) Reset() {
	*x = CreateNotificationConfigRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *CreateNotificationConfigRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CreateNotificationConfigRequest as a protobuf message.
func (*CreateNotificationConfigRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[1]) on first use.
func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{1}
}

// GetParent returns the parent field; safe on a nil receiver (returns "").
func (x *CreateNotificationConfigRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetConfigId returns the config_id field; safe on a nil receiver (returns "").
func (x *CreateNotificationConfigRequest) GetConfigId() string {
	if x != nil {
		return x.ConfigId
	}
	return ""
}

// GetNotificationConfig returns the notification_config field; safe on a nil
// receiver (returns nil).
func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
	if x != nil {
		return x.NotificationConfig
	}
	return nil
}
// Request message for creating a source.
type CreateSourceRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Resource name of the new source's parent. Its format should be
	// "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The Source being created, only the display_name and description will be
	// used. All other fields will be ignored.
	Source *Source `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *CreateSourceRequest) Reset() {
	*x = CreateSourceRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *CreateSourceRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CreateSourceRequest as a protobuf message.
func (*CreateSourceRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[2]) on first use.
func (x *CreateSourceRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateSourceRequest.ProtoReflect.Descriptor instead.
func (*CreateSourceRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{2}
}

// GetParent returns the parent field; safe on a nil receiver (returns "").
func (x *CreateSourceRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetSource returns the source field; safe on a nil receiver (returns nil).
func (x *CreateSourceRequest) GetSource() *Source {
	if x != nil {
		return x.Source
	}
	return nil
}
// Request message for deleting a notification config.
type DeleteNotificationConfigRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the notification config to delete. Its format is
	// "organizations/[organization_id]/notificationConfigs/[config_id]".
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *DeleteNotificationConfigRequest) Reset() {
	*x = DeleteNotificationConfigRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *DeleteNotificationConfigRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *DeleteNotificationConfigRequest as a protobuf message.
func (*DeleteNotificationConfigRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[3]) on first use.
func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{3}
}

// GetName returns the name field; safe on a nil receiver (returns "").
func (x *DeleteNotificationConfigRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// Request message for getting a notification config.
type GetNotificationConfigRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the notification config to get. Its format is
	// "organizations/[organization_id]/notificationConfigs/[config_id]".
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GetNotificationConfigRequest) Reset() {
	*x = GetNotificationConfigRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GetNotificationConfigRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GetNotificationConfigRequest as a protobuf message.
func (*GetNotificationConfigRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[4]) on first use.
func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{4}
}

// GetName returns the name field; safe on a nil receiver (returns "").
func (x *GetNotificationConfigRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// Request message for getting organization settings.
type GetOrganizationSettingsRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the organization to get organization settings for. Its format is
	// "organizations/[organization_id]/organizationSettings".
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GetOrganizationSettingsRequest) Reset() {
	*x = GetOrganizationSettingsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GetOrganizationSettingsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GetOrganizationSettingsRequest as a protobuf message.
func (*GetOrganizationSettingsRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[5]) on first use.
func (x *GetOrganizationSettingsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetOrganizationSettingsRequest.ProtoReflect.Descriptor instead.
func (*GetOrganizationSettingsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{5}
}

// GetName returns the name field; safe on a nil receiver (returns "").
func (x *GetOrganizationSettingsRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// Request message for getting a source.
type GetSourceRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Relative resource name of the source. Its format is
	// "organizations/[organization_id]/source/[source_id]".
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GetSourceRequest) Reset() {
	*x = GetSourceRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GetSourceRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GetSourceRequest as a protobuf message.
func (*GetSourceRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[6]) on first use.
func (x *GetSourceRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetSourceRequest.ProtoReflect.Descriptor instead.
func (*GetSourceRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{6}
}

// GetName returns the name field; safe on a nil receiver (returns "").
func (x *GetSourceRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// Request message for grouping by assets.
type GroupAssetsRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the organization to groupBy. Its format is
	// "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Expression that defines the filter to apply across assets.
	// The expression is a list of zero or more restrictions combined via logical
	// operators `AND` and `OR`.
	// Parentheses are supported, and `OR` has higher precedence than `AND`.
	//
	// Restrictions have the form `<field> <operator> <value>` and may have a `-`
	// character in front of them to indicate negation. The fields map to those
	// defined in the Asset resource. Examples include:
	//
	// * name
	// * security_center_properties.resource_name
	// * resource_properties.a_property
	// * security_marks.marks.marka
	//
	// The supported operators are:
	//
	// * `=` for all value types.
	// * `>`, `<`, `>=`, `<=` for integer values.
	// * `:`, meaning substring matching, for strings.
	//
	// The supported value types are:
	//
	// * string literals in quotes.
	// * integer literals without quotes.
	// * boolean literals `true` and `false` without quotes.
	//
	// The following field and operator combinations are supported:
	//
	// * name: `=`
	// * update_time: `=`, `>`, `<`, `>=`, `<=`
	//
	//   Usage: This should be milliseconds since epoch or an RFC3339 string.
	//   Examples:
	//     `update_time = "2019-06-10T16:07:18-07:00"`
	//     `update_time = 1560208038000`
	//
	// * create_time: `=`, `>`, `<`, `>=`, `<=`
	//
	//   Usage: This should be milliseconds since epoch or an RFC3339 string.
	//   Examples:
	//     `create_time = "2019-06-10T16:07:18-07:00"`
	//     `create_time = 1560208038000`
	//
	// * iam_policy.policy_blob: `=`, `:`
	// * resource_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
	// * security_marks.marks: `=`, `:`
	// * security_center_properties.resource_name: `=`, `:`
	// * security_center_properties.resource_name_display_name: `=`, `:`
	// * security_center_properties.resource_type: `=`, `:`
	// * security_center_properties.resource_parent: `=`, `:`
	// * security_center_properties.resource_parent_display_name: `=`, `:`
	// * security_center_properties.resource_project: `=`, `:`
	// * security_center_properties.resource_project_display_name: `=`, `:`
	// * security_center_properties.resource_owners: `=`, `:`
	//
	// For example, `resource_properties.size = 100` is a valid filter string.
	//
	// Use a partial match on the empty string to filter based on a property
	// existing: `resource_properties.my_property : ""`
	//
	// Use a negated partial match on the empty string to filter based on a
	// property not existing: `-resource_properties.my_property : ""`
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Required. Expression that defines what assets fields to use for grouping. The string
	// value should follow SQL syntax: comma separated list of fields. For
	// example:
	// "security_center_properties.resource_project,security_center_properties.project".
	//
	// The following fields are supported when compare_duration is not set:
	//
	// * security_center_properties.resource_project
	// * security_center_properties.resource_project_display_name
	// * security_center_properties.resource_type
	// * security_center_properties.resource_parent
	// * security_center_properties.resource_parent_display_name
	//
	// The following fields are supported when compare_duration is set:
	//
	// * security_center_properties.resource_type
	// * security_center_properties.resource_project_display_name
	// * security_center_properties.resource_parent_display_name
	GroupBy string `protobuf:"bytes,3,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
	// When compare_duration is set, the GroupResult's "state_change" property is
	// updated to indicate whether the asset was added, removed, or remained
	// present during the compare_duration period of time that precedes the
	// read_time. This is the time between (read_time - compare_duration) and
	// read_time.
	//
	// The state change value is derived based on the presence of the asset at the
	// two points in time. Intermediate state changes between the two times don't
	// affect the result. For example, the results aren't affected if the asset is
	// removed and re-created again.
	//
	// Possible "state_change" values when compare_duration is specified:
	//
	// * "ADDED":   indicates that the asset was not present at the start of
	//                compare_duration, but present at reference_time.
	// * "REMOVED": indicates that the asset was present at the start of
	//                compare_duration, but not present at reference_time.
	// * "ACTIVE":  indicates that the asset was present at both the
	//                start and the end of the time period defined by
	//                compare_duration and reference_time.
	//
	// If compare_duration is not specified, then the only possible state_change
	// is "UNUSED", which will be the state_change set for all assets present at
	// read_time.
	//
	// If this field is set then `state_change` must be a specified field in
	// `group_by`.
	CompareDuration *duration.Duration `protobuf:"bytes,4,opt,name=compare_duration,json=compareDuration,proto3" json:"compare_duration,omitempty"`
	// Time used as a reference point when filtering assets. The filter is limited
	// to assets existing at the supplied time and their values are those at that
	// specific time. Absence of this field will default to the API's version of
	// NOW.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// The value returned by the last `GroupAssetsResponse`; indicates
	// that this is a continuation of a prior `GroupAssets` call, and that the
	// system should return the next page of data.
	PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// The maximum number of results to return in a single response. Default is
	// 10, minimum is 1, maximum is 1000.
	PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GroupAssetsRequest) Reset() {
	*x = GroupAssetsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GroupAssetsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GroupAssetsRequest as a protobuf message.
func (*GroupAssetsRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[7]) on first use.
func (x *GroupAssetsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GroupAssetsRequest.ProtoReflect.Descriptor instead.
func (*GroupAssetsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{7}
}

// GetParent returns the parent field; safe on a nil receiver (returns "").
func (x *GroupAssetsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetFilter returns the filter field; safe on a nil receiver (returns "").
func (x *GroupAssetsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}

// GetGroupBy returns the group_by field; safe on a nil receiver (returns "").
func (x *GroupAssetsRequest) GetGroupBy() string {
	if x != nil {
		return x.GroupBy
	}
	return ""
}

// GetCompareDuration returns the compare_duration field; safe on a nil
// receiver (returns nil).
func (x *GroupAssetsRequest) GetCompareDuration() *duration.Duration {
	if x != nil {
		return x.CompareDuration
	}
	return nil
}

// GetReadTime returns the read_time field; safe on a nil receiver (returns nil).
func (x *GroupAssetsRequest) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}

// GetPageToken returns the page_token field; safe on a nil receiver (returns "").
func (x *GroupAssetsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}

// GetPageSize returns the page_size field; safe on a nil receiver (returns 0).
func (x *GroupAssetsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}
// Response message for grouping by assets.
type GroupAssetsResponse struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Group results. There exists an element for each existing unique
	// combination of property/values. The element contains a count for the number
	// of times those specific property/values appear.
	GroupByResults []*GroupResult `protobuf:"bytes,1,rep,name=group_by_results,json=groupByResults,proto3" json:"group_by_results,omitempty"`
	// Time used for executing the groupBy request.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// Token to retrieve the next page of results, or empty if there are no more
	// results.
	NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// The total number of results matching the query.
	TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GroupAssetsResponse) Reset() {
	*x = GroupAssetsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GroupAssetsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GroupAssetsResponse as a protobuf message.
func (*GroupAssetsResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[8]) on first use.
func (x *GroupAssetsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GroupAssetsResponse.ProtoReflect.Descriptor instead.
func (*GroupAssetsResponse) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{8}
}

// GetGroupByResults returns the group_by_results field; safe on a nil
// receiver (returns nil).
func (x *GroupAssetsResponse) GetGroupByResults() []*GroupResult {
	if x != nil {
		return x.GroupByResults
	}
	return nil
}

// GetReadTime returns the read_time field; safe on a nil receiver (returns nil).
func (x *GroupAssetsResponse) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}

// GetNextPageToken returns the next_page_token field; safe on a nil receiver
// (returns "").
func (x *GroupAssetsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}

// GetTotalSize returns the total_size field; safe on a nil receiver (returns 0).
func (x *GroupAssetsResponse) GetTotalSize() int32 {
	if x != nil {
		return x.TotalSize
	}
	return 0
}
// Request message for grouping by findings.
type GroupFindingsRequest struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the source to groupBy. Its format is
	// "organizations/[organization_id]/sources/[source_id]". To groupBy across
	// all sources provide a source_id of `-`. For example:
	// organizations/{organization_id}/sources/-
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Expression that defines the filter to apply across findings.
	// The expression is a list of one or more restrictions combined via logical
	// operators `AND` and `OR`.
	// Parentheses are supported, and `OR` has higher precedence than `AND`.
	//
	// Restrictions have the form `<field> <operator> <value>` and may have a `-`
	// character in front of them to indicate negation. Examples include:
	//
	// * name
	// * source_properties.a_property
	// * security_marks.marks.marka
	//
	// The supported operators are:
	//
	// * `=` for all value types.
	// * `>`, `<`, `>=`, `<=` for integer values.
	// * `:`, meaning substring matching, for strings.
	//
	// The supported value types are:
	//
	// * string literals in quotes.
	// * integer literals without quotes.
	// * boolean literals `true` and `false` without quotes.
	//
	// The following field and operator combinations are supported:
	//
	// * name: `=`
	// * parent: `=`, `:`
	// * resource_name: `=`, `:`
	// * state: `=`, `:`
	// * category: `=`, `:`
	// * external_uri: `=`, `:`
	// * event_time: `=`, `>`, `<`, `>=`, `<=`
	//
	//   Usage: This should be milliseconds since epoch or an RFC3339 string.
	//   Examples:
	//     `event_time = "2019-06-10T16:07:18-07:00"`
	//     `event_time = 1560208038000`
	//
	// * security_marks.marks: `=`, `:`
	// * source_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
	//
	// For example, `source_properties.size = 100` is a valid filter string.
	//
	// Use a partial match on the empty string to filter based on a property
	// existing: `source_properties.my_property : ""`
	//
	// Use a negated partial match on the empty string to filter based on a
	// property not existing: `-source_properties.my_property : ""`
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Required. Expression that defines what assets fields to use for grouping (including
	// `state_change`). The string value should follow SQL syntax: comma separated
	// list of fields. For example: "parent,resource_name".
	//
	// The following fields are supported:
	//
	// * resource_name
	// * category
	// * state
	// * parent
	//
	// The following fields are supported when compare_duration is set:
	//
	// * state_change
	GroupBy string `protobuf:"bytes,3,opt,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"`
	// Time used as a reference point when filtering findings. The filter is
	// limited to findings existing at the supplied time and their values are
	// those at that specific time. Absence of this field will default to the
	// API's version of NOW.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// When compare_duration is set, the GroupResult's "state_change" attribute is
	// updated to indicate whether the finding had its state changed, the
	// finding's state remained unchanged, or if the finding was added during the
	// compare_duration period of time that precedes the read_time. This is the
	// time between (read_time - compare_duration) and read_time.
	//
	// The state_change value is derived based on the presence and state of the
	// finding at the two points in time. Intermediate state changes between the
	// two times don't affect the result. For example, the results aren't affected
	// if the finding is made inactive and then active again.
	//
	// Possible "state_change" values when compare_duration is specified:
	//
	// * "CHANGED":   indicates that the finding was present and matched the given
	//                  filter at the start of compare_duration, but changed its
	//                  state at read_time.
	// * "UNCHANGED": indicates that the finding was present and matched the given
	//                  filter at the start of compare_duration and did not change
	//                  state at read_time.
	// * "ADDED":     indicates that the finding did not match the given filter or
	//                  was not present at the start of compare_duration, but was
	//                  present at read_time.
	// * "REMOVED":   indicates that the finding was present and matched the
	//                  filter at the start of compare_duration, but did not match
	//                  the filter at read_time.
	//
	// If compare_duration is not specified, then the only possible state_change
	// is "UNUSED",  which will be the state_change set for all findings present
	// at read_time.
	//
	// If this field is set then `state_change` must be a specified field in
	// `group_by`.
	CompareDuration *duration.Duration `protobuf:"bytes,5,opt,name=compare_duration,json=compareDuration,proto3" json:"compare_duration,omitempty"`
	// The value returned by the last `GroupFindingsResponse`; indicates
	// that this is a continuation of a prior `GroupFindings` call, and
	// that the system should return the next page of data.
	PageToken string `protobuf:"bytes,7,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// The maximum number of results to return in a single response. Default is
	// 10, minimum is 1, maximum is 1000.
	PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GroupFindingsRequest) Reset() {
	*x = GroupFindingsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GroupFindingsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GroupFindingsRequest as a protobuf message.
func (*GroupFindingsRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[9]) on first use.
func (x *GroupFindingsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GroupFindingsRequest.ProtoReflect.Descriptor instead.
func (*GroupFindingsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{9}
}

// GetParent returns the parent field; safe on a nil receiver (returns "").
func (x *GroupFindingsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetFilter returns the filter field; safe on a nil receiver (returns "").
func (x *GroupFindingsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}

// GetGroupBy returns the group_by field; safe on a nil receiver (returns "").
func (x *GroupFindingsRequest) GetGroupBy() string {
	if x != nil {
		return x.GroupBy
	}
	return ""
}

// GetReadTime returns the read_time field; safe on a nil receiver (returns nil).
func (x *GroupFindingsRequest) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}

// GetCompareDuration returns the compare_duration field; safe on a nil
// receiver (returns nil).
func (x *GroupFindingsRequest) GetCompareDuration() *duration.Duration {
	if x != nil {
		return x.CompareDuration
	}
	return nil
}

// GetPageToken returns the page_token field; safe on a nil receiver (returns "").
func (x *GroupFindingsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}

// GetPageSize returns the page_size field; safe on a nil receiver (returns 0).
func (x *GroupFindingsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}
// Response message for group by findings.
type GroupFindingsResponse struct {
	// Internal protobuf runtime bookkeeping; not part of the message schema.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Group results. There exists an element for each existing unique
	// combination of property/values. The element contains a count for the number
	// of times those specific property/values appear.
	GroupByResults []*GroupResult `protobuf:"bytes,1,rep,name=group_by_results,json=groupByResults,proto3" json:"group_by_results,omitempty"`
	// Time used for executing the groupBy request.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// Token to retrieve the next page of results, or empty if there are no more
	// results.
	NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// The total number of results matching the query.
	TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
}

// Reset restores x to its zero value; with the unsafe fast path enabled it
// also re-registers the message info with the protoimpl runtime.
func (x *GroupFindingsResponse) Reset() {
	*x = GroupFindingsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[10]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders x using the protobuf text formatter.
func (x *GroupFindingsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GroupFindingsResponse as a protobuf message.
func (*GroupFindingsResponse) ProtoMessage() {}

// ProtoReflect returns the reflective view of x, lazily caching the message
// info (msgTypes[10]) on first use.
func (x *GroupFindingsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[10]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GroupFindingsResponse.ProtoReflect.Descriptor instead.
func (*GroupFindingsResponse) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{10}
}

// GetGroupByResults returns the group_by_results field; safe on a nil
// receiver (returns nil).
func (x *GroupFindingsResponse) GetGroupByResults() []*GroupResult {
	if x != nil {
		return x.GroupByResults
	}
	return nil
}

// GetReadTime returns the read_time field; safe on a nil receiver (returns nil).
func (x *GroupFindingsResponse) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}
func (x *GroupFindingsResponse) GetNextPageToken() string {
if x != nil {
return x.NextPageToken
}
return ""
}
func (x *GroupFindingsResponse) GetTotalSize() int32 {
if x != nil {
return x.TotalSize
}
return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Result containing the properties and count of a groupBy request.
type GroupResult struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Properties matching the groupBy fields in the request.
	Properties map[string]*_struct.Value `protobuf:"bytes,1,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Total count of resources for the given properties.
	Count int64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *GroupResult) Reset() {
	*x = GroupResult{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[11]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *GroupResult) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks GroupResult as a protobuf message.
func (*GroupResult) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *GroupResult) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[11]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use GroupResult.ProtoReflect.Descriptor instead.
func (*GroupResult) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{11}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *GroupResult) GetProperties() map[string]*_struct.Value {
	if x != nil {
		return x.Properties
	}
	return nil
}
func (x *GroupResult) GetCount() int64 {
	if x != nil {
		return x.Count
	}
	return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Request message for listing notification configs.
type ListNotificationConfigsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the organization to list notification configs.
	// Its format is "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// The value returned by the last `ListNotificationConfigsResponse`; indicates
	// that this is a continuation of a prior `ListNotificationConfigs` call, and
	// that the system should return the next page of data.
	PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// The maximum number of results to return in a single response. Default is
	// 10, minimum is 1, maximum is 1000.
	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListNotificationConfigsRequest) Reset() {
	*x = ListNotificationConfigsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[12]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListNotificationConfigsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListNotificationConfigsRequest as a protobuf message.
func (*ListNotificationConfigsRequest) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[12]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{12}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListNotificationConfigsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}
func (x *ListNotificationConfigsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Response message for listing notification configs.
type ListNotificationConfigsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Notification configs belonging to the requested parent.
	NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
	// Token to retrieve the next page of results, or empty if there are no more
	// results.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListNotificationConfigsResponse) Reset() {
	*x = ListNotificationConfigsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[13]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListNotificationConfigsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListNotificationConfigsResponse as a protobuf message.
func (*ListNotificationConfigsResponse) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[13]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{13}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
	if x != nil {
		return x.NotificationConfigs
	}
	return nil
}
func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Request message for listing sources.
type ListSourcesRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Resource name of the parent of sources to list. Its format should be
	// "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// The value returned by the last `ListSourcesResponse`; indicates
	// that this is a continuation of a prior `ListSources` call, and
	// that the system should return the next page of data.
	PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// The maximum number of results to return in a single response. Default is
	// 10, minimum is 1, maximum is 1000.
	PageSize int32 `protobuf:"varint,7,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListSourcesRequest) Reset() {
	*x = ListSourcesRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[14]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListSourcesRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListSourcesRequest as a protobuf message.
func (*ListSourcesRequest) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListSourcesRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[14]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListSourcesRequest.ProtoReflect.Descriptor instead.
func (*ListSourcesRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{14}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListSourcesRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}
func (x *ListSourcesRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
func (x *ListSourcesRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Response message for listing sources.
type ListSourcesResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Sources belonging to the requested parent.
	Sources []*Source `protobuf:"bytes,1,rep,name=sources,proto3" json:"sources,omitempty"`
	// Token to retrieve the next page of results, or empty if there are no more
	// results.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListSourcesResponse) Reset() {
	*x = ListSourcesResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[15]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListSourcesResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListSourcesResponse as a protobuf message.
func (*ListSourcesResponse) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListSourcesResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[15]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListSourcesResponse.ProtoReflect.Descriptor instead.
func (*ListSourcesResponse) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{15}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListSourcesResponse) GetSources() []*Source {
	if x != nil {
		return x.Sources
	}
	return nil
}
func (x *ListSourcesResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Request message for listing assets.
type ListAssetsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the organization assets should belong to. Its format is
	// "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Expression that defines the filter to apply across assets.
	// The expression is a list of zero or more restrictions combined via logical
	// operators `AND` and `OR`.
	// Parentheses are supported, and `OR` has higher precedence than `AND`.
	//
	// Restrictions have the form `<field> <operator> <value>` and may have a `-`
	// character in front of them to indicate negation. The fields map to those
	// defined in the Asset resource. Examples include:
	//
	// * name
	// * security_center_properties.resource_name
	// * resource_properties.a_property
	// * security_marks.marks.marka
	//
	// The supported operators are:
	//
	// * `=` for all value types.
	// * `>`, `<`, `>=`, `<=` for integer values.
	// * `:`, meaning substring matching, for strings.
	//
	// The supported value types are:
	//
	// * string literals in quotes.
	// * integer literals without quotes.
	// * boolean literals `true` and `false` without quotes.
	//
	// The following are the allowed field and operator combinations:
	//
	// * name: `=`
	// * update_time: `=`, `>`, `<`, `>=`, `<=`
	//
	//   Usage: This should be milliseconds since epoch or an RFC3339 string.
	//   Examples:
	//     `update_time = "2019-06-10T16:07:18-07:00"`
	//     `update_time = 1560208038000`
	//
	// * create_time: `=`, `>`, `<`, `>=`, `<=`
	//
	//   Usage: This should be milliseconds since epoch or an RFC3339 string.
	//   Examples:
	//     `create_time = "2019-06-10T16:07:18-07:00"`
	//     `create_time = 1560208038000`
	//
	// * iam_policy.policy_blob: `=`, `:`
	// * resource_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
	// * security_marks.marks: `=`, `:`
	// * security_center_properties.resource_name: `=`, `:`
	// * security_center_properties.resource_display_name: `=`, `:`
	// * security_center_properties.resource_type: `=`, `:`
	// * security_center_properties.resource_parent: `=`, `:`
	// * security_center_properties.resource_parent_display_name: `=`, `:`
	// * security_center_properties.resource_project: `=`, `:`
	// * security_center_properties.resource_project_display_name: `=`, `:`
	// * security_center_properties.resource_owners: `=`, `:`
	//
	// For example, `resource_properties.size = 100` is a valid filter string.
	//
	// Use a partial match on the empty string to filter based on a property
	// existing: `resource_properties.my_property : ""`
	//
	// Use a negated partial match on the empty string to filter based on a
	// property not existing: `-resource_properties.my_property : ""`
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Expression that defines what fields and order to use for sorting. The
	// string value should follow SQL syntax: comma separated list of fields. For
	// example: "name,resource_properties.a_property". The default sorting order
	// is ascending. To specify descending order for a field, a suffix " desc"
	// should be appended to the field name. For example: "name
	// desc,resource_properties.a_property". Redundant space characters in the
	// syntax are insignificant. "name desc,resource_properties.a_property" and "
	// name desc , resource_properties.a_property " are equivalent.
	//
	// The following fields are supported:
	// name
	// update_time
	// resource_properties
	// security_marks.marks
	// security_center_properties.resource_name
	// security_center_properties.resource_display_name
	// security_center_properties.resource_parent
	// security_center_properties.resource_parent_display_name
	// security_center_properties.resource_project
	// security_center_properties.resource_project_display_name
	// security_center_properties.resource_type
	OrderBy string `protobuf:"bytes,3,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
	// Time used as a reference point when filtering assets. The filter is limited
	// to assets existing at the supplied time and their values are those at that
	// specific time. Absence of this field will default to the API's version of
	// NOW.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// When compare_duration is set, the ListAssetsResult's "state_change"
	// attribute is updated to indicate whether the asset was added, removed, or
	// remained present during the compare_duration period of time that precedes
	// the read_time. This is the time between (read_time - compare_duration) and
	// read_time.
	//
	// The state_change value is derived based on the presence of the asset at the
	// two points in time. Intermediate state changes between the two times don't
	// affect the result. For example, the results aren't affected if the asset is
	// removed and re-created again.
	//
	// Possible "state_change" values when compare_duration is specified:
	//
	// * "ADDED":   indicates that the asset was not present at the start of
	//                compare_duration, but present at read_time.
	// * "REMOVED": indicates that the asset was present at the start of
	//                compare_duration, but not present at read_time.
	// * "ACTIVE":  indicates that the asset was present at both the
	//                start and the end of the time period defined by
	//                compare_duration and read_time.
	//
	// If compare_duration is not specified, then the only possible state_change
	// is "UNUSED",  which will be the state_change set for all assets present at
	// read_time.
	CompareDuration *duration.Duration `protobuf:"bytes,5,opt,name=compare_duration,json=compareDuration,proto3" json:"compare_duration,omitempty"`
	// A field mask to specify the ListAssetsResult fields to be listed in the
	// response.
	// An empty field mask will list all fields.
	FieldMask *field_mask.FieldMask `protobuf:"bytes,7,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	// The value returned by the last `ListAssetsResponse`; indicates
	// that this is a continuation of a prior `ListAssets` call, and
	// that the system should return the next page of data.
	PageToken string `protobuf:"bytes,8,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// The maximum number of results to return in a single response. Default is
	// 10, minimum is 1, maximum is 1000.
	PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListAssetsRequest) Reset() {
	*x = ListAssetsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[16]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListAssetsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListAssetsRequest as a protobuf message.
func (*ListAssetsRequest) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListAssetsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[16]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListAssetsRequest.ProtoReflect.Descriptor instead.
func (*ListAssetsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{16}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListAssetsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}
func (x *ListAssetsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}
func (x *ListAssetsRequest) GetOrderBy() string {
	if x != nil {
		return x.OrderBy
	}
	return ""
}
func (x *ListAssetsRequest) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}
func (x *ListAssetsRequest) GetCompareDuration() *duration.Duration {
	if x != nil {
		return x.CompareDuration
	}
	return nil
}
func (x *ListAssetsRequest) GetFieldMask() *field_mask.FieldMask {
	if x != nil {
		return x.FieldMask
	}
	return nil
}
func (x *ListAssetsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
func (x *ListAssetsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Response message for listing assets.
type ListAssetsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Assets matching the list request.
	ListAssetsResults []*ListAssetsResponse_ListAssetsResult `protobuf:"bytes,1,rep,name=list_assets_results,json=listAssetsResults,proto3" json:"list_assets_results,omitempty"`
	// Time used for executing the list request.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// Token to retrieve the next page of results, or empty if there are no more
	// results.
	NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// The total number of assets matching the query.
	TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListAssetsResponse) Reset() {
	*x = ListAssetsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[17]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListAssetsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListAssetsResponse as a protobuf message.
func (*ListAssetsResponse) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListAssetsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[17]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListAssetsResponse.ProtoReflect.Descriptor instead.
func (*ListAssetsResponse) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{17}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListAssetsResponse) GetListAssetsResults() []*ListAssetsResponse_ListAssetsResult {
	if x != nil {
		return x.ListAssetsResults
	}
	return nil
}
func (x *ListAssetsResponse) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}
func (x *ListAssetsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
func (x *ListAssetsResponse) GetTotalSize() int32 {
	if x != nil {
		return x.TotalSize
	}
	return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Request message for listing findings.
type ListFindingsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the source the findings belong to. Its format is
	// "organizations/[organization_id]/sources/[source_id]". To list across all
	// sources provide a source_id of `-`. For example:
	// organizations/{organization_id}/sources/-
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Expression that defines the filter to apply across findings.
	// The expression is a list of one or more restrictions combined via logical
	// operators `AND` and `OR`.
	// Parentheses are supported, and `OR` has higher precedence than `AND`.
	//
	// Restrictions have the form `<field> <operator> <value>` and may have a `-`
	// character in front of them to indicate negation. Examples include:
	//
	// * name
	// * source_properties.a_property
	// * security_marks.marks.marka
	//
	// The supported operators are:
	//
	// * `=` for all value types.
	// * `>`, `<`, `>=`, `<=` for integer values.
	// * `:`, meaning substring matching, for strings.
	//
	// The supported value types are:
	//
	// * string literals in quotes.
	// * integer literals without quotes.
	// * boolean literals `true` and `false` without quotes.
	//
	// The following field and operator combinations are supported:
	//
	// name: `=`
	// parent: `=`, `:`
	// resource_name: `=`, `:`
	// state: `=`, `:`
	// category: `=`, `:`
	// external_uri: `=`, `:`
	// event_time: `=`, `>`, `<`, `>=`, `<=`
	//
	//   Usage: This should be milliseconds since epoch or an RFC3339 string.
	//   Examples:
	//     `event_time = "2019-06-10T16:07:18-07:00"`
	//     `event_time = 1560208038000`
	//
	// security_marks.marks: `=`, `:`
	// source_properties: `=`, `:`, `>`, `<`, `>=`, `<=`
	//
	// For example, `source_properties.size = 100` is a valid filter string.
	//
	// Use a partial match on the empty string to filter based on a property
	// existing: `source_properties.my_property : ""`
	//
	// Use a negated partial match on the empty string to filter based on a
	// property not existing: `-source_properties.my_property : ""`
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Expression that defines what fields and order to use for sorting. The
	// string value should follow SQL syntax: comma separated list of fields. For
	// example: "name,resource_properties.a_property". The default sorting order
	// is ascending. To specify descending order for a field, a suffix " desc"
	// should be appended to the field name. For example: "name
	// desc,source_properties.a_property". Redundant space characters in the
	// syntax are insignificant. "name desc,source_properties.a_property" and "
	// name desc , source_properties.a_property " are equivalent.
	//
	// The following fields are supported:
	// name
	// parent
	// state
	// category
	// resource_name
	// event_time
	// source_properties
	// security_marks.marks
	OrderBy string `protobuf:"bytes,3,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
	// Time used as a reference point when filtering findings. The filter is
	// limited to findings existing at the supplied time and their values are
	// those at that specific time. Absence of this field will default to the
	// API's version of NOW.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// When compare_duration is set, the ListFindingsResult's "state_change"
	// attribute is updated to indicate whether the finding had its state changed,
	// the finding's state remained unchanged, or if the finding was added in any
	// state during the compare_duration period of time that precedes the
	// read_time. This is the time between (read_time - compare_duration) and
	// read_time.
	//
	// The state_change value is derived based on the presence and state of the
	// finding at the two points in time. Intermediate state changes between the
	// two times don't affect the result. For example, the results aren't affected
	// if the finding is made inactive and then active again.
	//
	// Possible "state_change" values when compare_duration is specified:
	//
	// * "CHANGED":   indicates that the finding was present and matched the given
	//                  filter at the start of compare_duration, but changed its
	//                  state at read_time.
	// * "UNCHANGED": indicates that the finding was present and matched the given
	//                  filter at the start of compare_duration and did not change
	//                  state at read_time.
	// * "ADDED":     indicates that the finding did not match the given filter or
	//                  was not present at the start of compare_duration, but was
	//                  present at read_time.
	// * "REMOVED":   indicates that the finding was present and matched the
	//                  filter at the start of compare_duration, but did not match
	//                  the filter at read_time.
	//
	// If compare_duration is not specified, then the only possible state_change
	// is "UNUSED", which will be the state_change set for all findings present at
	// read_time.
	CompareDuration *duration.Duration `protobuf:"bytes,5,opt,name=compare_duration,json=compareDuration,proto3" json:"compare_duration,omitempty"`
	// A field mask to specify the Finding fields to be listed in the response.
	// An empty field mask will list all fields.
	FieldMask *field_mask.FieldMask `protobuf:"bytes,7,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	// The value returned by the last `ListFindingsResponse`; indicates
	// that this is a continuation of a prior `ListFindings` call, and
	// that the system should return the next page of data.
	PageToken string `protobuf:"bytes,8,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// The maximum number of results to return in a single response. Default is
	// 10, minimum is 1, maximum is 1000.
	PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListFindingsRequest) Reset() {
	*x = ListFindingsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[18]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListFindingsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListFindingsRequest as a protobuf message.
func (*ListFindingsRequest) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListFindingsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[18]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListFindingsRequest.ProtoReflect.Descriptor instead.
func (*ListFindingsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{18}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListFindingsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}
func (x *ListFindingsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}
func (x *ListFindingsRequest) GetOrderBy() string {
	if x != nil {
		return x.OrderBy
	}
	return ""
}
func (x *ListFindingsRequest) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}
func (x *ListFindingsRequest) GetCompareDuration() *duration.Duration {
	if x != nil {
		return x.CompareDuration
	}
	return nil
}
func (x *ListFindingsRequest) GetFieldMask() *field_mask.FieldMask {
	if x != nil {
		return x.FieldMask
	}
	return nil
}
func (x *ListFindingsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
func (x *ListFindingsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}
// NOTE(review): protoc-gen-go generated code — do not hand-edit; regenerate from the .proto instead.
// Response message for listing findings.
type ListFindingsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Findings matching the list request.
	ListFindingsResults []*ListFindingsResponse_ListFindingsResult `protobuf:"bytes,1,rep,name=list_findings_results,json=listFindingsResults,proto3" json:"list_findings_results,omitempty"`
	// Time used for executing the list request.
	ReadTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=read_time,json=readTime,proto3" json:"read_time,omitempty"`
	// Token to retrieve the next page of results, or empty if there are no more
	// results.
	NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// The total number of findings matching the query.
	TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
}
// Reset clears the message to its zero value, re-attaching type metadata
// when the unsafe fast path is enabled.
func (x *ListFindingsResponse) Reset() {
	*x = ListFindingsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[19]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String formats the message using the protobuf text format.
func (x *ListFindingsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks ListFindingsResponse as a protobuf message.
func (*ListFindingsResponse) ProtoMessage() {}
// ProtoReflect returns a reflective view of the message, lazily caching
// the message-type info on first use.
func (x *ListFindingsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[19]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ListFindingsResponse.ProtoReflect.Descriptor instead.
func (*ListFindingsResponse) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{19}
}
// Nil-safe generated getters: each returns the field value, or the zero
// value when the receiver is nil.
func (x *ListFindingsResponse) GetListFindingsResults() []*ListFindingsResponse_ListFindingsResult {
	if x != nil {
		return x.ListFindingsResults
	}
	return nil
}
func (x *ListFindingsResponse) GetReadTime() *timestamp.Timestamp {
	if x != nil {
		return x.ReadTime
	}
	return nil
}
func (x *ListFindingsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
func (x *ListFindingsResponse) GetTotalSize() int32 {
	if x != nil {
		return x.TotalSize
	}
	return 0
}
// Request message for updating a finding's state.
type SetFindingStateRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The relative resource name of the finding. See:
	// https://cloud.google.com/apis/design/resource_names#relative_resource_name
	// Example:
	// "organizations/{organization_id}/sources/{source_id}/finding/{finding_id}".
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. The desired State of the finding.
	State Finding_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.securitycenter.v1p1beta1.Finding_State" json:"state,omitempty"`
	// Required. The time at which the updated state takes effect.
	StartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *SetFindingStateRequest) Reset() {
	*x = SetFindingStateRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[20]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *SetFindingStateRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*SetFindingStateRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *SetFindingStateRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[20]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SetFindingStateRequest.ProtoReflect.Descriptor instead.
func (*SetFindingStateRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{20}
}

// GetName returns the name field, or "" if the receiver is nil.
func (x *SetFindingStateRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetState returns the state field, or STATE_UNSPECIFIED if the receiver is
// nil.
func (x *SetFindingStateRequest) GetState() Finding_State {
	if x != nil {
		return x.State
	}
	return Finding_STATE_UNSPECIFIED
}

// GetStartTime returns the start_time field, or nil if the receiver is nil.
func (x *SetFindingStateRequest) GetStartTime() *timestamp.Timestamp {
	if x != nil {
		return x.StartTime
	}
	return nil
}
// Request message for running asset discovery for an organization.
type RunAssetDiscoveryRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. Name of the organization to run asset discovery for. Its format is
	// "organizations/[organization_id]".
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *RunAssetDiscoveryRequest) Reset() {
	*x = RunAssetDiscoveryRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[21]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *RunAssetDiscoveryRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*RunAssetDiscoveryRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *RunAssetDiscoveryRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[21]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RunAssetDiscoveryRequest.ProtoReflect.Descriptor instead.
func (*RunAssetDiscoveryRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{21}
}

// GetParent returns the parent field, or "" if the receiver is nil.
func (x *RunAssetDiscoveryRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}
// Request message for updating or creating a finding.
type UpdateFindingRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The finding resource to update or create if it does not already exist.
	// parent, security_marks, and update_time will be ignored.
	//
	// In the case of creation, the finding id portion of the name must be
	// alphanumeric and less than or equal to 32 characters and greater than 0
	// characters in length.
	Finding *Finding `protobuf:"bytes,1,opt,name=finding,proto3" json:"finding,omitempty"`
	// The FieldMask to use when updating the finding resource. This field should
	// not be specified when creating a finding.
	//
	// When updating a finding, an empty mask is treated as updating all mutable
	// fields and replacing source_properties. Individual source_properties can
	// be added/updated by using "source_properties.<property key>" in the field
	// mask.
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *UpdateFindingRequest) Reset() {
	*x = UpdateFindingRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[22]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *UpdateFindingRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*UpdateFindingRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *UpdateFindingRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[22]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateFindingRequest.ProtoReflect.Descriptor instead.
func (*UpdateFindingRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{22}
}

// GetFinding returns the finding field, or nil if the receiver is nil.
func (x *UpdateFindingRequest) GetFinding() *Finding {
	if x != nil {
		return x.Finding
	}
	return nil
}

// GetUpdateMask returns the update_mask field, or nil if the receiver is nil.
func (x *UpdateFindingRequest) GetUpdateMask() *field_mask.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}
// Request message for updating a notification config.
type UpdateNotificationConfigRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The notification config to update.
	NotificationConfig *NotificationConfig `protobuf:"bytes,1,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
	// The FieldMask to use when updating the notification config.
	//
	// If empty all mutable fields will be updated.
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *UpdateNotificationConfigRequest) Reset() {
	*x = UpdateNotificationConfigRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[23]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *UpdateNotificationConfigRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*UpdateNotificationConfigRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *UpdateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[23]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateNotificationConfigRequest.ProtoReflect.Descriptor instead.
func (*UpdateNotificationConfigRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{23}
}

// GetNotificationConfig returns the notification_config field, or nil if the
// receiver is nil.
func (x *UpdateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
	if x != nil {
		return x.NotificationConfig
	}
	return nil
}

// GetUpdateMask returns the update_mask field, or nil if the receiver is nil.
func (x *UpdateNotificationConfigRequest) GetUpdateMask() *field_mask.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}
// Request message for updating an organization's settings.
type UpdateOrganizationSettingsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The organization settings resource to update.
	OrganizationSettings *OrganizationSettings `protobuf:"bytes,1,opt,name=organization_settings,json=organizationSettings,proto3" json:"organization_settings,omitempty"`
	// The FieldMask to use when updating the settings resource.
	//
	// If empty all mutable fields will be updated.
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *UpdateOrganizationSettingsRequest) Reset() {
	*x = UpdateOrganizationSettingsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[24]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *UpdateOrganizationSettingsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*UpdateOrganizationSettingsRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *UpdateOrganizationSettingsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[24]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateOrganizationSettingsRequest.ProtoReflect.Descriptor instead.
func (*UpdateOrganizationSettingsRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{24}
}

// GetOrganizationSettings returns the organization_settings field, or nil if
// the receiver is nil.
func (x *UpdateOrganizationSettingsRequest) GetOrganizationSettings() *OrganizationSettings {
	if x != nil {
		return x.OrganizationSettings
	}
	return nil
}

// GetUpdateMask returns the update_mask field, or nil if the receiver is nil.
func (x *UpdateOrganizationSettingsRequest) GetUpdateMask() *field_mask.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}
// Request message for updating a source.
type UpdateSourceRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The source resource to update.
	Source *Source `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
	// The FieldMask to use when updating the source resource.
	//
	// If empty all mutable fields will be updated.
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *UpdateSourceRequest) Reset() {
	*x = UpdateSourceRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[25]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *UpdateSourceRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*UpdateSourceRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *UpdateSourceRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[25]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateSourceRequest.ProtoReflect.Descriptor instead.
func (*UpdateSourceRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{25}
}

// GetSource returns the source field, or nil if the receiver is nil.
func (x *UpdateSourceRequest) GetSource() *Source {
	if x != nil {
		return x.Source
	}
	return nil
}

// GetUpdateMask returns the update_mask field, or nil if the receiver is nil.
func (x *UpdateSourceRequest) GetUpdateMask() *field_mask.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}
// Request message for updating a SecurityMarks resource.
type UpdateSecurityMarksRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Required. The security marks resource to update.
	SecurityMarks *SecurityMarks `protobuf:"bytes,1,opt,name=security_marks,json=securityMarks,proto3" json:"security_marks,omitempty"`
	// The FieldMask to use when updating the security marks resource.
	//
	// The field mask must not contain duplicate fields.
	// If empty or set to "marks", all marks will be replaced. Individual
	// marks can be updated using "marks.<mark_key>".
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// The time at which the updated SecurityMarks take effect.
	// If not set uses current server time. Updates will be applied to the
	// SecurityMarks that are active immediately preceding this time.
	StartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *UpdateSecurityMarksRequest) Reset() {
	*x = UpdateSecurityMarksRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[26]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *UpdateSecurityMarksRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*UpdateSecurityMarksRequest) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *UpdateSecurityMarksRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[26]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateSecurityMarksRequest.ProtoReflect.Descriptor instead.
func (*UpdateSecurityMarksRequest) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{26}
}

// GetSecurityMarks returns the security_marks field, or nil if the receiver
// is nil.
func (x *UpdateSecurityMarksRequest) GetSecurityMarks() *SecurityMarks {
	if x != nil {
		return x.SecurityMarks
	}
	return nil
}

// GetUpdateMask returns the update_mask field, or nil if the receiver is nil.
func (x *UpdateSecurityMarksRequest) GetUpdateMask() *field_mask.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}

// GetStartTime returns the start_time field, or nil if the receiver is nil.
func (x *UpdateSecurityMarksRequest) GetStartTime() *timestamp.Timestamp {
	if x != nil {
		return x.StartTime
	}
	return nil
}
// Result containing the Asset and its State.
type ListAssetsResponse_ListAssetsResult struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Asset matching the search request.
	Asset *Asset `protobuf:"bytes,1,opt,name=asset,proto3" json:"asset,omitempty"`
	// State change of the asset between the points in time.
	StateChange ListAssetsResponse_ListAssetsResult_StateChange `protobuf:"varint,2,opt,name=state_change,json=stateChange,proto3,enum=google.cloud.securitycenter.v1p1beta1.ListAssetsResponse_ListAssetsResult_StateChange" json:"state_change,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *ListAssetsResponse_ListAssetsResult) Reset() {
	*x = ListAssetsResponse_ListAssetsResult{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[28]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *ListAssetsResponse_ListAssetsResult) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*ListAssetsResponse_ListAssetsResult) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *ListAssetsResponse_ListAssetsResult) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[28]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListAssetsResponse_ListAssetsResult.ProtoReflect.Descriptor instead.
func (*ListAssetsResponse_ListAssetsResult) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{17, 0}
}

// GetAsset returns the asset field, or nil if the receiver is nil.
func (x *ListAssetsResponse_ListAssetsResult) GetAsset() *Asset {
	if x != nil {
		return x.Asset
	}
	return nil
}

// GetStateChange returns the state_change field, or UNUSED if the receiver
// is nil.
func (x *ListAssetsResponse_ListAssetsResult) GetStateChange() ListAssetsResponse_ListAssetsResult_StateChange {
	if x != nil {
		return x.StateChange
	}
	return ListAssetsResponse_ListAssetsResult_UNUSED
}
// Result containing the Finding and its StateChange.
type ListFindingsResponse_ListFindingsResult struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Finding matching the search request.
	Finding *Finding `protobuf:"bytes,1,opt,name=finding,proto3" json:"finding,omitempty"`
	// State change of the finding between the points in time.
	StateChange ListFindingsResponse_ListFindingsResult_StateChange `protobuf:"varint,2,opt,name=state_change,json=stateChange,proto3,enum=google.cloud.securitycenter.v1p1beta1.ListFindingsResponse_ListFindingsResult_StateChange" json:"state_change,omitempty"`
	// Output only. Resource that is associated with this finding.
	Resource *ListFindingsResponse_ListFindingsResult_Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *ListFindingsResponse_ListFindingsResult) Reset() {
	*x = ListFindingsResponse_ListFindingsResult{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[29]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *ListFindingsResponse_ListFindingsResult) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*ListFindingsResponse_ListFindingsResult) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *ListFindingsResponse_ListFindingsResult) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[29]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListFindingsResponse_ListFindingsResult.ProtoReflect.Descriptor instead.
func (*ListFindingsResponse_ListFindingsResult) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{19, 0}
}

// GetFinding returns the finding field, or nil if the receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult) GetFinding() *Finding {
	if x != nil {
		return x.Finding
	}
	return nil
}

// GetStateChange returns the state_change field, or UNUSED if the receiver
// is nil.
func (x *ListFindingsResponse_ListFindingsResult) GetStateChange() ListFindingsResponse_ListFindingsResult_StateChange {
	if x != nil {
		return x.StateChange
	}
	return ListFindingsResponse_ListFindingsResult_UNUSED
}

// GetResource returns the resource field, or nil if the receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult) GetResource() *ListFindingsResponse_ListFindingsResult_Resource {
	if x != nil {
		return x.Resource
	}
	return nil
}
// Information related to the Google Cloud resource that is
// associated with this finding.
type ListFindingsResponse_ListFindingsResult_Resource struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// The full resource name of the resource. See:
	// https://cloud.google.com/apis/design/resource_names#full_resource_name
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The full resource name of project that the resource belongs to.
	ProjectName string `protobuf:"bytes,2,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"`
	// The human readable name of project that the resource belongs to.
	ProjectDisplayName string `protobuf:"bytes,3,opt,name=project_display_name,json=projectDisplayName,proto3" json:"project_display_name,omitempty"`
	// The full resource name of resource's parent.
	ParentName string `protobuf:"bytes,4,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"`
	// The human readable name of resource's parent.
	ParentDisplayName string `protobuf:"bytes,5,opt,name=parent_display_name,json=parentDisplayName,proto3" json:"parent_display_name,omitempty"`
}

// Reset restores the message to its zero value and, when unsafe operations
// are enabled, re-associates the cleared value with its message type info.
func (x *ListFindingsResponse_ListFindingsResult_Resource) Reset() {
	*x = ListFindingsResponse_ListFindingsResult_Resource{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[30]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *ListFindingsResponse_ListFindingsResult_Resource) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*ListFindingsResponse_ListFindingsResult_Resource) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily storing
// the message type info on first use when unsafe operations are enabled.
func (x *ListFindingsResponse_ListFindingsResult_Resource) ProtoReflect() protoreflect.Message {
	mi := &file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[30]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListFindingsResponse_ListFindingsResult_Resource.ProtoReflect.Descriptor instead.
func (*ListFindingsResponse_ListFindingsResult_Resource) Descriptor() ([]byte, []int) {
	return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP(), []int{19, 0, 0}
}

// GetName returns the name field, or "" if the receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult_Resource) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetProjectName returns the project_name field, or "" if the receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult_Resource) GetProjectName() string {
	if x != nil {
		return x.ProjectName
	}
	return ""
}

// GetProjectDisplayName returns the project_display_name field, or "" if the
// receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult_Resource) GetProjectDisplayName() string {
	if x != nil {
		return x.ProjectDisplayName
	}
	return ""
}

// GetParentName returns the parent_name field, or "" if the receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult_Resource) GetParentName() string {
	if x != nil {
		return x.ParentName
	}
	return ""
}

// GetParentDisplayName returns the parent_display_name field, or "" if the
// receiver is nil.
func (x *ListFindingsResponse_ListFindingsResult_Resource) GetParentDisplayName() string {
	if x != nil {
		return x.ParentDisplayName
	}
	return ""
}
// File_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto is
// the protoreflect descriptor for this generated file.
// NOTE(review): presumably assigned by the generated init code — that code is
// outside this chunk, so confirm before relying on it being non-nil here.
var File_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto protoreflect.FileDescriptor
var file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDesc = []byte{
0x0a, 0x42, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x48, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2f, 0x72, 0x75, 0x6e, 0x5f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x73,
0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62,
0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f,
0x61, 0x73, 0x73, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2f, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76,
0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x73, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06,
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41,
0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65,
0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0a, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x66, 0x69, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e,
0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e,
0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x66, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x86, 0x02, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61, 0x72,
0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41,
0x32, 0x0a, 0x30, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x63,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x6f, 0x0a,
0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb3,
0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x32, 0x0a, 0x30,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x22, 0x6f, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x32, 0x0a, 0x30, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x6c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x32, 0x0a, 0x30, 0x73, 0x65, 0x63,
0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x22, 0x70, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x42, 0x3a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x34, 0x0a, 0x32, 0x73, 0x65, 0x63,
0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a,
0x24, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xd9, 0x02, 0x0a, 0x12,
0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x32, 0x0a, 0x30, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61,
0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08,
0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
0xe0, 0x41, 0x02, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x12, 0x44, 0x0a, 0x10,
0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70,
0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xf3, 0x01, 0x0a, 0x13, 0x47, 0x72, 0x6f, 0x75,
0x70, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x5c, 0x0a, 0x10, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x75,
0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0e, 0x67,
0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a,
0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65,
0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d,
0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xcf, 0x02,
0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06,
0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69,
0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x67, 0x72, 0x6f,
0x75, 0x70, 0x42, 0x79, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d,
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a,
0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22,
0xf5, 0x01, 0x0a, 0x15, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x10, 0x67, 0x72, 0x6f,
0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75,
0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79,
0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61,
0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f,
0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xde, 0x01, 0x0a, 0x0b, 0x47, 0x72, 0x6f, 0x75,
0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x62, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65,
0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72,
0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65,
0x74, 0x61, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e,
0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e,
0x74, 0x1a, 0x55, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xae, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73,
0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70,
0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02,
0xfa, 0x41, 0x32, 0x0a, 0x30, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a,
0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09,
0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52,
0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x1f, 0x4c, 0x69,
0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a,
0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72,
0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65,
0x74, 0x61, 0x31, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f,
0x6b, 0x65, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61,
0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa,
0x41, 0x32, 0x0a, 0x30, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70,
0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x86, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73,
0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x47, 0x0a, 0x07, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x52, 0x07, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x32, 0x0a,
0x30, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61,
0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
0x72, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x37, 0x0a, 0x09,
0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65, 0x61,
0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65,
0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70,
0x61, 0x72, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x66,
0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65,
0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65,
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
0x7a, 0x65, 0x22, 0xa3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x13, 0x6c, 0x69, 0x73,
0x74, 0x5f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c,
0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x75,
0x6c, 0x74, 0x52, 0x11, 0x6c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65,
0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69,
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26,
0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67,
0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f,
0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61,
0x6c, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x90, 0x02, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x73,
0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x61, 0x73,
0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x52, 0x05, 0x61, 0x73, 0x73, 0x65, 0x74, 0x12, 0x79,
0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x56, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73,
0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e,
0x4c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74,
0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0b, 0x73, 0x74,
0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x3d, 0x0a, 0x0b, 0x53, 0x74, 0x61,
0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x55, 0x53,
0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12,
0x0b, 0x0a, 0x07, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06,
0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x22, 0x84, 0x03, 0x0a, 0x13, 0x4c, 0x69, 0x73,
0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x44, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26, 0x0a, 0x24, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06,
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19,
0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x61,
0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69,
0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x64, 0x75,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65,
0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d,
0x61, 0x73, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22,
0x8b, 0x07, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x15, 0x6c, 0x69, 0x73,
0x74, 0x5f, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c,
0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e,
0x67, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x46, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a,
0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x72, 0x65,
0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d,
0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0xed, 0x04,
0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65,
0x73, 0x75, 0x6c, 0x74, 0x12, 0x48, 0x0a, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x46, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7d,
0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x5a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73,
0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65,
0x73, 0x75, 0x6c, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65,
0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x78, 0x0a,
0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x57, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64,
0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x69, 0x73,
0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x72,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xc4, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a,
0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x70,
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x72, 0x6f, 0x6a, 0x65,
0x63, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a,
0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e,
0x0a, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x61, 0x72,
0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d,
0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x0a, 0x0a,
0x06, 0x55, 0x4e, 0x55, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x48, 0x41,
0x4e, 0x47, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x43, 0x48, 0x41, 0x4e,
0x47, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x03,
0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x04, 0x22, 0xec, 0x01,
0x0a, 0x16, 0x53, 0x65, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x05, 0x73,
0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65,
0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x0a,
0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41,
0x02, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x6c, 0x0a, 0x18,
0x52, 0x75, 0x6e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72,
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x38, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x32,
0x0a, 0x30, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa2, 0x01, 0x0a, 0x14, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x46, 0x69, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69,
0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d,
0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22,
0xcf, 0x01, 0x0a, 0x1f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x6f, 0x0a, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76,
0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02,
0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d,
0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73,
0x6b, 0x22, 0xd7, 0x01, 0x0a, 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x72, 0x67, 0x61,
0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x75, 0x0a, 0x15, 0x6f, 0x72, 0x67, 0x61, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f,
0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69,
0x6e, 0x67, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x14, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3b,
0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52,
0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x9e, 0x01, 0x0a, 0x13,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xf6, 0x01, 0x0a,
0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d,
0x61, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, 0x0e, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x73, 0x12, 0x3b, 0x0a,
0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74,
0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72,
0x74, 0x54, 0x69, 0x6d, 0x65, 0x32, 0x86, 0x2b, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0xc6, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65,
0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x22, 0x2b, 0x2f, 0x76,
0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a,
0x7d, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x3a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x12, 0xfd, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6e, 0x64,
0x69, 0x6e, 0x67, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76,
0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67,
0x22, 0x7f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x22, 0x36, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x6f, 0x72, 0x67,
0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73,
0x3a, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65,
0x6e, 0x74, 0x2c, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x66, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x66,
0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x2c, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x69,
0x64, 0x12, 0xb8, 0x02, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65,
0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70,
0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4e,
0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x22, 0x98, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x22, 0x37, 0x2f, 0x76, 0x31, 0x70,
0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x6f,
0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x73, 0x3a, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xda, 0x41, 0x24, 0x70, 0x61, 0x72, 0x65, 0x6e,
0x74, 0x2c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xda,
0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xc2, 0x01, 0x0a,
0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x46, 0x82, 0xd3, 0xe4, 0x93, 0x02,
0x39, 0x2a, 0x37, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x6e,
0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x9d, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x52, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x41, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61,
0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x12, 0xdf, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x43, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72,
0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65,
0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76,
0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x46, 0x82, 0xd3, 0xe4,
0x93, 0x02, 0x39, 0x12, 0x37, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f,
0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x12, 0xe4, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x67, 0x61, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x67, 0x61, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f,
0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69,
0x6e, 0x67, 0x73, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72,
0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6f, 0x72,
0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
0x67, 0x73, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xaf, 0x01, 0x0a, 0x09, 0x47,
0x65, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x2e, 0x47, 0x65, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x22, 0x3a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc1, 0x01, 0x0a,
0x0b, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x12, 0x39, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e,
0x47, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x22, 0x30, 0x2f, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d,
0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73, 0x3a, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x3a, 0x01, 0x2a,
0x12, 0xe5, 0x01, 0x0a, 0x0d, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e,
0x67, 0x73, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70,
0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x59, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x41, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x3a, 0x67, 0x72,
0x6f, 0x75, 0x70, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x12, 0xbe, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e,
0x4c, 0x69, 0x73, 0x74, 0x41, 0x73, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x73,
0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3,
0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x73,
0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xd0, 0x01, 0x0a, 0x0c, 0x4c, 0x69,
0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65,
0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c,
0x69, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x66, 0x69, 0x6e, 0x64, 0x69,
0x6e, 0x67, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xf2, 0x01, 0x0a,
0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x39, 0x12,
0x37, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72,
0x65, 0x6e, 0x74, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
0x74, 0x12, 0xc2, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x73, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d,
0x12, 0x2b, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x70, 0x61,
0x72, 0x65, 0x6e, 0x74, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0xda, 0x41, 0x06,
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x9c, 0x02, 0x0a, 0x11, 0x52, 0x75, 0x6e, 0x41, 0x73,
0x73, 0x65, 0x74, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x12, 0x3f, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x44, 0x69, 0x73,
0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69,
0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x74,
0x73, 0x3a, 0x72, 0x75, 0x6e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x01,
0x2a, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0xca, 0x41, 0x58, 0x0a, 0x3f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x44, 0x69, 0x73,
0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0xe4, 0x01, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x46, 0x69, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61,
0x31, 0x2e, 0x53, 0x65, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74,
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31,
0x2e, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x62, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44,
0x22, 0x3f, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x66, 0x69, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74,
0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x73, 0x74, 0x61, 0x74,
0x65, 0x2c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x12, 0xa4, 0x01, 0x0a,
0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76,
0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x59, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41,
0x22, 0x3c, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x72, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a,
0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01,
0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x12, 0xcf, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49,
0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61,
0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x22, 0x42, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65,
0x74, 0x61, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x6f, 0x72,
0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d,
0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41,
0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73,
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xed, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x46, 0x69, 0x6e,
0x64, 0x69, 0x6e, 0x67, 0x22, 0x6f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x49, 0x32, 0x3e, 0x2f, 0x76,
0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e,
0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a,
0x2f, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x07, 0x66, 0x69,
0x6e, 0x64, 0x69, 0x6e, 0x67, 0xda, 0x41, 0x07, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0xda,
0x41, 0x13, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0xc0, 0x02, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72,
0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x62, 0x32, 0x4b,
0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69,
0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x13, 0x6e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0xda, 0x41, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0xda, 0x41, 0x1f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64,
0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0xa9, 0x02, 0x0a, 0x1a, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63,
0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x83,
0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x65, 0x32, 0x4c, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65,
0x74, 0x61, 0x31, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f,
0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
0x69, 0x6e, 0x67, 0x73, 0x7d, 0x3a, 0x15, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0xda, 0x41, 0x15, 0x6f,
0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74,
0x69, 0x6e, 0x67, 0x73, 0x12, 0xdb, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x55, 0x70,
0x64, 0x61, 0x74, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x22, 0x60, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x32, 0x32, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d,
0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0xda, 0x41, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0xda, 0x41, 0x12,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61,
0x73, 0x6b, 0x12, 0x86, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63,
0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x73, 0x12, 0x41, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74,
0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
0x79, 0x4d, 0x61, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63,
0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31,
0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x61,
0x72, 0x6b, 0x73, 0x22, 0xf5, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xc0, 0x01, 0x32, 0x47, 0x2f,
0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x75, 0x72,
0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f,
0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x61,
0x73, 0x73, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x4d, 0x61, 0x72, 0x6b, 0x73, 0x7d, 0x3a, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x5a, 0x65, 0x32, 0x53, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6d,
0x61, 0x72, 0x6b, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x73, 0x2f, 0x2a, 0x2f, 0x66, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x72, 0x6b, 0x73, 0x7d, 0x3a, 0x0e, 0x73,
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0xda, 0x41, 0x0e,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0xda, 0x41,
0x1a, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x2c,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0x51, 0xca, 0x41, 0x1d,
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e,
0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0xfd,
0x01, 0x0a, 0x29, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74,
0x65, 0x72, 0x2e, 0x76, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x50, 0x01, 0x5a, 0x53,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x73, 0x65, 0x63, 0x75,
0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x70, 0x31, 0x62,
0x65, 0x74, 0x61, 0x31, 0x3b, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x63, 0x65, 0x6e,
0x74, 0x65, 0x72, 0xaa, 0x02, 0x25, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x65, 0x6e, 0x74, 0x65,
0x72, 0x2e, 0x56, 0x31, 0x50, 0x31, 0x42, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x25, 0x47, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x65, 0x63, 0x75, 0x72,
0x69, 0x74, 0x79, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x5c, 0x56, 0x31, 0x70, 0x31, 0x62, 0x65,
0x74, 0x61, 0x31, 0xea, 0x02, 0x28, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, 0x65, 0x6e,
0x74, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x70, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x50, 0x00,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
// rawDescOnce/rawDescData hold the lazily GZIP-compressed form of the raw file
// descriptor. rawDescData starts out aliasing the uncompressed rawDesc bytes and
// is swapped for the compressed bytes exactly once, on first use, by
// file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP.
var (
	file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescOnce sync.Once
	file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescData = file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDesc
)
func file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescGZIP() []byte {
file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescOnce.Do(func() {
file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescData)
})
return file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDescData
}
// Runtime type-info slots filled in by the proto_init function: 2 enums and 31
// messages are declared by this generated file.
var file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes = make([]protoimpl.MessageInfo, 31)
// goTypes lists every Go type referenced by this file's descriptors. The
// position of each entry (noted in its trailing comment) is the index used by
// the depIdxs table below; the ordering is part of the generated contract and
// must not be changed by hand.
var file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_goTypes = []interface{}{
	(ListAssetsResponse_ListAssetsResult_StateChange)(0),     // 0: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.ListAssetsResult.StateChange
	(ListFindingsResponse_ListFindingsResult_StateChange)(0), // 1: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.StateChange
	(*CreateFindingRequest)(nil),                             // 2: google.cloud.securitycenter.v1p1beta1.CreateFindingRequest
	(*CreateNotificationConfigRequest)(nil),                  // 3: google.cloud.securitycenter.v1p1beta1.CreateNotificationConfigRequest
	(*CreateSourceRequest)(nil),                              // 4: google.cloud.securitycenter.v1p1beta1.CreateSourceRequest
	(*DeleteNotificationConfigRequest)(nil),                  // 5: google.cloud.securitycenter.v1p1beta1.DeleteNotificationConfigRequest
	(*GetNotificationConfigRequest)(nil),                     // 6: google.cloud.securitycenter.v1p1beta1.GetNotificationConfigRequest
	(*GetOrganizationSettingsRequest)(nil),                   // 7: google.cloud.securitycenter.v1p1beta1.GetOrganizationSettingsRequest
	(*GetSourceRequest)(nil),                                 // 8: google.cloud.securitycenter.v1p1beta1.GetSourceRequest
	(*GroupAssetsRequest)(nil),                               // 9: google.cloud.securitycenter.v1p1beta1.GroupAssetsRequest
	(*GroupAssetsResponse)(nil),                              // 10: google.cloud.securitycenter.v1p1beta1.GroupAssetsResponse
	(*GroupFindingsRequest)(nil),                             // 11: google.cloud.securitycenter.v1p1beta1.GroupFindingsRequest
	(*GroupFindingsResponse)(nil),                            // 12: google.cloud.securitycenter.v1p1beta1.GroupFindingsResponse
	(*GroupResult)(nil),                                      // 13: google.cloud.securitycenter.v1p1beta1.GroupResult
	(*ListNotificationConfigsRequest)(nil),                   // 14: google.cloud.securitycenter.v1p1beta1.ListNotificationConfigsRequest
	(*ListNotificationConfigsResponse)(nil),                  // 15: google.cloud.securitycenter.v1p1beta1.ListNotificationConfigsResponse
	(*ListSourcesRequest)(nil),                               // 16: google.cloud.securitycenter.v1p1beta1.ListSourcesRequest
	(*ListSourcesResponse)(nil),                              // 17: google.cloud.securitycenter.v1p1beta1.ListSourcesResponse
	(*ListAssetsRequest)(nil),                                // 18: google.cloud.securitycenter.v1p1beta1.ListAssetsRequest
	(*ListAssetsResponse)(nil),                               // 19: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse
	(*ListFindingsRequest)(nil),                              // 20: google.cloud.securitycenter.v1p1beta1.ListFindingsRequest
	(*ListFindingsResponse)(nil),                             // 21: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse
	(*SetFindingStateRequest)(nil),                           // 22: google.cloud.securitycenter.v1p1beta1.SetFindingStateRequest
	(*RunAssetDiscoveryRequest)(nil),                         // 23: google.cloud.securitycenter.v1p1beta1.RunAssetDiscoveryRequest
	(*UpdateFindingRequest)(nil),                             // 24: google.cloud.securitycenter.v1p1beta1.UpdateFindingRequest
	(*UpdateNotificationConfigRequest)(nil),                  // 25: google.cloud.securitycenter.v1p1beta1.UpdateNotificationConfigRequest
	(*UpdateOrganizationSettingsRequest)(nil),                // 26: google.cloud.securitycenter.v1p1beta1.UpdateOrganizationSettingsRequest
	(*UpdateSourceRequest)(nil),                              // 27: google.cloud.securitycenter.v1p1beta1.UpdateSourceRequest
	(*UpdateSecurityMarksRequest)(nil),                       // 28: google.cloud.securitycenter.v1p1beta1.UpdateSecurityMarksRequest
	nil,                                                      // 29: google.cloud.securitycenter.v1p1beta1.GroupResult.PropertiesEntry
	(*ListAssetsResponse_ListAssetsResult)(nil),              // 30: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.ListAssetsResult
	(*ListFindingsResponse_ListFindingsResult)(nil),          // 31: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult
	(*ListFindingsResponse_ListFindingsResult_Resource)(nil), // 32: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.Resource
	(*Finding)(nil),                                          // 33: google.cloud.securitycenter.v1p1beta1.Finding
	(*NotificationConfig)(nil),                               // 34: google.cloud.securitycenter.v1p1beta1.NotificationConfig
	(*Source)(nil),                                           // 35: google.cloud.securitycenter.v1p1beta1.Source
	(*duration.Duration)(nil),                                // 36: google.protobuf.Duration
	(*timestamp.Timestamp)(nil),                              // 37: google.protobuf.Timestamp
	(*field_mask.FieldMask)(nil),                             // 38: google.protobuf.FieldMask
	(Finding_State)(0),                                       // 39: google.cloud.securitycenter.v1p1beta1.Finding.State
	(*OrganizationSettings)(nil),                             // 40: google.cloud.securitycenter.v1p1beta1.OrganizationSettings
	(*SecurityMarks)(nil),                                    // 41: google.cloud.securitycenter.v1p1beta1.SecurityMarks
	(*_struct.Value)(nil),                                    // 42: google.protobuf.Value
	(*Asset)(nil),                                            // 43: google.cloud.securitycenter.v1p1beta1.Asset
	(*v1.GetIamPolicyRequest)(nil),                           // 44: google.iam.v1.GetIamPolicyRequest
	(*v1.SetIamPolicyRequest)(nil),                           // 45: google.iam.v1.SetIamPolicyRequest
	(*v1.TestIamPermissionsRequest)(nil),                     // 46: google.iam.v1.TestIamPermissionsRequest
	(*empty.Empty)(nil),                                      // 47: google.protobuf.Empty
	(*v1.Policy)(nil),                                        // 48: google.iam.v1.Policy
	(*longrunning.Operation)(nil),                            // 49: google.longrunning.Operation
	(*v1.TestIamPermissionsResponse)(nil),                    // 50: google.iam.v1.TestIamPermissionsResponse
}
// depIdxs records, for every type reference in this file's descriptors, an
// index into the goTypes table above. Entries 0-42 are message field type
// references, 43-65 are RPC method input types, and 66-88 are RPC method output
// types; the final five entries are the sub-list boundary markers documented in
// their comments. Generated data — must stay in sync with goTypes and rawDesc.
var file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_depIdxs = []int32{
	33, // 0: google.cloud.securitycenter.v1p1beta1.CreateFindingRequest.finding:type_name -> google.cloud.securitycenter.v1p1beta1.Finding
	34, // 1: google.cloud.securitycenter.v1p1beta1.CreateNotificationConfigRequest.notification_config:type_name -> google.cloud.securitycenter.v1p1beta1.NotificationConfig
	35, // 2: google.cloud.securitycenter.v1p1beta1.CreateSourceRequest.source:type_name -> google.cloud.securitycenter.v1p1beta1.Source
	36, // 3: google.cloud.securitycenter.v1p1beta1.GroupAssetsRequest.compare_duration:type_name -> google.protobuf.Duration
	37, // 4: google.cloud.securitycenter.v1p1beta1.GroupAssetsRequest.read_time:type_name -> google.protobuf.Timestamp
	13, // 5: google.cloud.securitycenter.v1p1beta1.GroupAssetsResponse.group_by_results:type_name -> google.cloud.securitycenter.v1p1beta1.GroupResult
	37, // 6: google.cloud.securitycenter.v1p1beta1.GroupAssetsResponse.read_time:type_name -> google.protobuf.Timestamp
	37, // 7: google.cloud.securitycenter.v1p1beta1.GroupFindingsRequest.read_time:type_name -> google.protobuf.Timestamp
	36, // 8: google.cloud.securitycenter.v1p1beta1.GroupFindingsRequest.compare_duration:type_name -> google.protobuf.Duration
	13, // 9: google.cloud.securitycenter.v1p1beta1.GroupFindingsResponse.group_by_results:type_name -> google.cloud.securitycenter.v1p1beta1.GroupResult
	37, // 10: google.cloud.securitycenter.v1p1beta1.GroupFindingsResponse.read_time:type_name -> google.protobuf.Timestamp
	29, // 11: google.cloud.securitycenter.v1p1beta1.GroupResult.properties:type_name -> google.cloud.securitycenter.v1p1beta1.GroupResult.PropertiesEntry
	34, // 12: google.cloud.securitycenter.v1p1beta1.ListNotificationConfigsResponse.notification_configs:type_name -> google.cloud.securitycenter.v1p1beta1.NotificationConfig
	35, // 13: google.cloud.securitycenter.v1p1beta1.ListSourcesResponse.sources:type_name -> google.cloud.securitycenter.v1p1beta1.Source
	37, // 14: google.cloud.securitycenter.v1p1beta1.ListAssetsRequest.read_time:type_name -> google.protobuf.Timestamp
	36, // 15: google.cloud.securitycenter.v1p1beta1.ListAssetsRequest.compare_duration:type_name -> google.protobuf.Duration
	38, // 16: google.cloud.securitycenter.v1p1beta1.ListAssetsRequest.field_mask:type_name -> google.protobuf.FieldMask
	30, // 17: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.list_assets_results:type_name -> google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.ListAssetsResult
	37, // 18: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.read_time:type_name -> google.protobuf.Timestamp
	37, // 19: google.cloud.securitycenter.v1p1beta1.ListFindingsRequest.read_time:type_name -> google.protobuf.Timestamp
	36, // 20: google.cloud.securitycenter.v1p1beta1.ListFindingsRequest.compare_duration:type_name -> google.protobuf.Duration
	38, // 21: google.cloud.securitycenter.v1p1beta1.ListFindingsRequest.field_mask:type_name -> google.protobuf.FieldMask
	31, // 22: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.list_findings_results:type_name -> google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult
	37, // 23: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.read_time:type_name -> google.protobuf.Timestamp
	39, // 24: google.cloud.securitycenter.v1p1beta1.SetFindingStateRequest.state:type_name -> google.cloud.securitycenter.v1p1beta1.Finding.State
	37, // 25: google.cloud.securitycenter.v1p1beta1.SetFindingStateRequest.start_time:type_name -> google.protobuf.Timestamp
	33, // 26: google.cloud.securitycenter.v1p1beta1.UpdateFindingRequest.finding:type_name -> google.cloud.securitycenter.v1p1beta1.Finding
	38, // 27: google.cloud.securitycenter.v1p1beta1.UpdateFindingRequest.update_mask:type_name -> google.protobuf.FieldMask
	34, // 28: google.cloud.securitycenter.v1p1beta1.UpdateNotificationConfigRequest.notification_config:type_name -> google.cloud.securitycenter.v1p1beta1.NotificationConfig
	38, // 29: google.cloud.securitycenter.v1p1beta1.UpdateNotificationConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
	40, // 30: google.cloud.securitycenter.v1p1beta1.UpdateOrganizationSettingsRequest.organization_settings:type_name -> google.cloud.securitycenter.v1p1beta1.OrganizationSettings
	38, // 31: google.cloud.securitycenter.v1p1beta1.UpdateOrganizationSettingsRequest.update_mask:type_name -> google.protobuf.FieldMask
	35, // 32: google.cloud.securitycenter.v1p1beta1.UpdateSourceRequest.source:type_name -> google.cloud.securitycenter.v1p1beta1.Source
	38, // 33: google.cloud.securitycenter.v1p1beta1.UpdateSourceRequest.update_mask:type_name -> google.protobuf.FieldMask
	41, // 34: google.cloud.securitycenter.v1p1beta1.UpdateSecurityMarksRequest.security_marks:type_name -> google.cloud.securitycenter.v1p1beta1.SecurityMarks
	38, // 35: google.cloud.securitycenter.v1p1beta1.UpdateSecurityMarksRequest.update_mask:type_name -> google.protobuf.FieldMask
	37, // 36: google.cloud.securitycenter.v1p1beta1.UpdateSecurityMarksRequest.start_time:type_name -> google.protobuf.Timestamp
	42, // 37: google.cloud.securitycenter.v1p1beta1.GroupResult.PropertiesEntry.value:type_name -> google.protobuf.Value
	43, // 38: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.ListAssetsResult.asset:type_name -> google.cloud.securitycenter.v1p1beta1.Asset
	0,  // 39: google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.ListAssetsResult.state_change:type_name -> google.cloud.securitycenter.v1p1beta1.ListAssetsResponse.ListAssetsResult.StateChange
	33, // 40: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.finding:type_name -> google.cloud.securitycenter.v1p1beta1.Finding
	1,  // 41: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.state_change:type_name -> google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.StateChange
	32, // 42: google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.resource:type_name -> google.cloud.securitycenter.v1p1beta1.ListFindingsResponse.ListFindingsResult.Resource
	4,  // 43: google.cloud.securitycenter.v1p1beta1.SecurityCenter.CreateSource:input_type -> google.cloud.securitycenter.v1p1beta1.CreateSourceRequest
	2,  // 44: google.cloud.securitycenter.v1p1beta1.SecurityCenter.CreateFinding:input_type -> google.cloud.securitycenter.v1p1beta1.CreateFindingRequest
	3,  // 45: google.cloud.securitycenter.v1p1beta1.SecurityCenter.CreateNotificationConfig:input_type -> google.cloud.securitycenter.v1p1beta1.CreateNotificationConfigRequest
	5,  // 46: google.cloud.securitycenter.v1p1beta1.SecurityCenter.DeleteNotificationConfig:input_type -> google.cloud.securitycenter.v1p1beta1.DeleteNotificationConfigRequest
	44, // 47: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
	6,  // 48: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetNotificationConfig:input_type -> google.cloud.securitycenter.v1p1beta1.GetNotificationConfigRequest
	7,  // 49: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetOrganizationSettings:input_type -> google.cloud.securitycenter.v1p1beta1.GetOrganizationSettingsRequest
	8,  // 50: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetSource:input_type -> google.cloud.securitycenter.v1p1beta1.GetSourceRequest
	9,  // 51: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GroupAssets:input_type -> google.cloud.securitycenter.v1p1beta1.GroupAssetsRequest
	11, // 52: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GroupFindings:input_type -> google.cloud.securitycenter.v1p1beta1.GroupFindingsRequest
	18, // 53: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListAssets:input_type -> google.cloud.securitycenter.v1p1beta1.ListAssetsRequest
	20, // 54: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListFindings:input_type -> google.cloud.securitycenter.v1p1beta1.ListFindingsRequest
	14, // 55: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListNotificationConfigs:input_type -> google.cloud.securitycenter.v1p1beta1.ListNotificationConfigsRequest
	16, // 56: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListSources:input_type -> google.cloud.securitycenter.v1p1beta1.ListSourcesRequest
	23, // 57: google.cloud.securitycenter.v1p1beta1.SecurityCenter.RunAssetDiscovery:input_type -> google.cloud.securitycenter.v1p1beta1.RunAssetDiscoveryRequest
	22, // 58: google.cloud.securitycenter.v1p1beta1.SecurityCenter.SetFindingState:input_type -> google.cloud.securitycenter.v1p1beta1.SetFindingStateRequest
	45, // 59: google.cloud.securitycenter.v1p1beta1.SecurityCenter.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
	46, // 60: google.cloud.securitycenter.v1p1beta1.SecurityCenter.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
	24, // 61: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateFinding:input_type -> google.cloud.securitycenter.v1p1beta1.UpdateFindingRequest
	25, // 62: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateNotificationConfig:input_type -> google.cloud.securitycenter.v1p1beta1.UpdateNotificationConfigRequest
	26, // 63: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateOrganizationSettings:input_type -> google.cloud.securitycenter.v1p1beta1.UpdateOrganizationSettingsRequest
	27, // 64: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateSource:input_type -> google.cloud.securitycenter.v1p1beta1.UpdateSourceRequest
	28, // 65: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateSecurityMarks:input_type -> google.cloud.securitycenter.v1p1beta1.UpdateSecurityMarksRequest
	35, // 66: google.cloud.securitycenter.v1p1beta1.SecurityCenter.CreateSource:output_type -> google.cloud.securitycenter.v1p1beta1.Source
	33, // 67: google.cloud.securitycenter.v1p1beta1.SecurityCenter.CreateFinding:output_type -> google.cloud.securitycenter.v1p1beta1.Finding
	34, // 68: google.cloud.securitycenter.v1p1beta1.SecurityCenter.CreateNotificationConfig:output_type -> google.cloud.securitycenter.v1p1beta1.NotificationConfig
	47, // 69: google.cloud.securitycenter.v1p1beta1.SecurityCenter.DeleteNotificationConfig:output_type -> google.protobuf.Empty
	48, // 70: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetIamPolicy:output_type -> google.iam.v1.Policy
	34, // 71: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetNotificationConfig:output_type -> google.cloud.securitycenter.v1p1beta1.NotificationConfig
	40, // 72: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetOrganizationSettings:output_type -> google.cloud.securitycenter.v1p1beta1.OrganizationSettings
	35, // 73: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GetSource:output_type -> google.cloud.securitycenter.v1p1beta1.Source
	10, // 74: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GroupAssets:output_type -> google.cloud.securitycenter.v1p1beta1.GroupAssetsResponse
	12, // 75: google.cloud.securitycenter.v1p1beta1.SecurityCenter.GroupFindings:output_type -> google.cloud.securitycenter.v1p1beta1.GroupFindingsResponse
	19, // 76: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListAssets:output_type -> google.cloud.securitycenter.v1p1beta1.ListAssetsResponse
	21, // 77: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListFindings:output_type -> google.cloud.securitycenter.v1p1beta1.ListFindingsResponse
	15, // 78: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListNotificationConfigs:output_type -> google.cloud.securitycenter.v1p1beta1.ListNotificationConfigsResponse
	17, // 79: google.cloud.securitycenter.v1p1beta1.SecurityCenter.ListSources:output_type -> google.cloud.securitycenter.v1p1beta1.ListSourcesResponse
	49, // 80: google.cloud.securitycenter.v1p1beta1.SecurityCenter.RunAssetDiscovery:output_type -> google.longrunning.Operation
	33, // 81: google.cloud.securitycenter.v1p1beta1.SecurityCenter.SetFindingState:output_type -> google.cloud.securitycenter.v1p1beta1.Finding
	48, // 82: google.cloud.securitycenter.v1p1beta1.SecurityCenter.SetIamPolicy:output_type -> google.iam.v1.Policy
	50, // 83: google.cloud.securitycenter.v1p1beta1.SecurityCenter.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
	33, // 84: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateFinding:output_type -> google.cloud.securitycenter.v1p1beta1.Finding
	34, // 85: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateNotificationConfig:output_type -> google.cloud.securitycenter.v1p1beta1.NotificationConfig
	40, // 86: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateOrganizationSettings:output_type -> google.cloud.securitycenter.v1p1beta1.OrganizationSettings
	35, // 87: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateSource:output_type -> google.cloud.securitycenter.v1p1beta1.Source
	41, // 88: google.cloud.securitycenter.v1p1beta1.SecurityCenter.UpdateSecurityMarks:output_type -> google.cloud.securitycenter.v1p1beta1.SecurityMarks
	66, // [66:89] is the sub-list for method output_type
	43, // [43:66] is the sub-list for method input_type
	43, // [43:43] is the sub-list for extension type_name
	43, // [43:43] is the sub-list for extension extendee
	0,  // [0:43] is the sub-list for field type_name
}
// init builds this file's protobuf descriptors at package load time.
func init() { file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_init() }
// file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_init
// builds the protobuf type descriptors for this file. It is idempotent: once
// File_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto is
// populated, subsequent calls return immediately.
func file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_init() {
	if File_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto != nil {
		return
	}
	// Initialize the proto files this one depends on first, so their
	// descriptors are available to the TypeBuilder below.
	file_google_cloud_securitycenter_v1p1beta1_run_asset_discovery_response_proto_init()
	file_google_cloud_securitycenter_v1p1beta1_asset_proto_init()
	file_google_cloud_securitycenter_v1p1beta1_finding_proto_init()
	file_google_cloud_securitycenter_v1p1beta1_notification_config_proto_init()
	file_google_cloud_securitycenter_v1p1beta1_organization_settings_proto_init()
	file_google_cloud_securitycenter_v1p1beta1_security_marks_proto_init()
	file_google_cloud_securitycenter_v1p1beta1_source_proto_init()
	// Without package unsafe, protoimpl needs per-message exporter funcs to
	// reach each message's unexported bookkeeping fields (state, sizeCache,
	// unknownFields). NOTE(review): msgTypes index 27 has no exporter —
	// presumably a synthetic map-entry message, which needs none; confirm
	// against the descriptor.
	if !protoimpl.UnsafeEnabled {
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CreateFindingRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CreateNotificationConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CreateSourceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*DeleteNotificationConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetNotificationConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetOrganizationSettingsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetSourceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GroupAssetsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GroupAssetsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GroupFindingsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GroupFindingsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GroupResult); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListNotificationConfigsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListNotificationConfigsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListSourcesRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListSourcesResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListAssetsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListAssetsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListFindingsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListFindingsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SetFindingStateRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*RunAssetDiscoveryRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateFindingRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateNotificationConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateOrganizationSettingsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateSourceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateSecurityMarksRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListAssetsResponse_ListAssetsResult); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListFindingsResponse_ListFindingsResult); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListFindingsResponse_ListFindingsResult_Resource); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// x only anchors reflect.TypeOf so the builder can derive this package's
	// import path.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDesc,
			NumEnums:      2,
			NumMessages:   31,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_goTypes,
		DependencyIndexes: file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_depIdxs,
		EnumInfos:         file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_enumTypes,
		MessageInfos:      file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_msgTypes,
	}.Build()
	File_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto = out.File
	// Release the raw build inputs; they are not needed once the descriptors
	// are built.
	file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_rawDesc = nil
	file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_goTypes = nil
	file_google_cloud_securitycenter_v1p1beta1_securitycenter_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// SecurityCenterClient is the client API for SecurityCenter service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// Use NewSecurityCenterClient to obtain an implementation bound to a gRPC
// connection.
type SecurityCenterClient interface {
	// Creates a source.
	CreateSource(ctx context.Context, in *CreateSourceRequest, opts ...grpc.CallOption) (*Source, error)
	// Creates a finding. The corresponding source must exist for finding
	// creation to succeed.
	CreateFinding(ctx context.Context, in *CreateFindingRequest, opts ...grpc.CallOption) (*Finding, error)
	// Creates a notification config.
	CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
	// Deletes a notification config.
	DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Gets the access control policy on the specified Source.
	GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error)
	// Gets a notification config.
	GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
	// Gets the settings for an organization.
	GetOrganizationSettings(ctx context.Context, in *GetOrganizationSettingsRequest, opts ...grpc.CallOption) (*OrganizationSettings, error)
	// Gets a source.
	GetSource(ctx context.Context, in *GetSourceRequest, opts ...grpc.CallOption) (*Source, error)
	// Filters an organization's assets and groups them by their specified
	// properties.
	GroupAssets(ctx context.Context, in *GroupAssetsRequest, opts ...grpc.CallOption) (*GroupAssetsResponse, error)
	// Filters an organization or source's findings and groups them by their
	// specified properties.
	//
	// To group across all sources provide a `-` as the source id.
	// Example: /v1p1beta1/organizations/{organization_id}/sources/-/findings
	GroupFindings(ctx context.Context, in *GroupFindingsRequest, opts ...grpc.CallOption) (*GroupFindingsResponse, error)
	// Lists an organization's assets.
	ListAssets(ctx context.Context, in *ListAssetsRequest, opts ...grpc.CallOption) (*ListAssetsResponse, error)
	// Lists an organization or source's findings.
	//
	// To list across all sources provide a `-` as the source id.
	// Example: /v1p1beta1/organizations/{organization_id}/sources/-/findings
	ListFindings(ctx context.Context, in *ListFindingsRequest, opts ...grpc.CallOption) (*ListFindingsResponse, error)
	// Lists notification configs.
	ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
	// Lists all sources belonging to an organization.
	ListSources(ctx context.Context, in *ListSourcesRequest, opts ...grpc.CallOption) (*ListSourcesResponse, error)
	// Runs asset discovery. The discovery is tracked with a long-running
	// operation.
	//
	// This API can only be called with limited frequency for an organization. If
	// it is called too frequently the caller will receive a TOO_MANY_REQUESTS
	// error.
	RunAssetDiscovery(ctx context.Context, in *RunAssetDiscoveryRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates the state of a finding.
	SetFindingState(ctx context.Context, in *SetFindingStateRequest, opts ...grpc.CallOption) (*Finding, error)
	// Sets the access control policy on the specified Source.
	SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error)
	// Returns the permissions that a caller has on the specified source.
	TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error)
	// Creates or updates a finding. The corresponding source must exist for a
	// finding creation to succeed.
	UpdateFinding(ctx context.Context, in *UpdateFindingRequest, opts ...grpc.CallOption) (*Finding, error)
	// Updates a notification config. The following update
	// fields are allowed: description, pubsub_topic, streaming_config.filter
	UpdateNotificationConfig(ctx context.Context, in *UpdateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
	// Updates an organization's settings.
	UpdateOrganizationSettings(ctx context.Context, in *UpdateOrganizationSettingsRequest, opts ...grpc.CallOption) (*OrganizationSettings, error)
	// Updates a source.
	UpdateSource(ctx context.Context, in *UpdateSourceRequest, opts ...grpc.CallOption) (*Source, error)
	// Updates security marks.
	UpdateSecurityMarks(ctx context.Context, in *UpdateSecurityMarksRequest, opts ...grpc.CallOption) (*SecurityMarks, error)
}
// securityCenterClient is the concrete SecurityCenterClient that issues all
// RPCs over a single gRPC client connection.
type securityCenterClient struct {
	cc grpc.ClientConnInterface
}
// NewSecurityCenterClient returns a SecurityCenterClient that sends every RPC
// over the supplied gRPC connection.
func NewSecurityCenterClient(cc grpc.ClientConnInterface) SecurityCenterClient {
	return &securityCenterClient{cc: cc}
}
// Each method below allocates a fresh response message, performs a unary
// Invoke on the wrapped connection, and returns the decoded response only
// when the call succeeded.

// CreateSource calls SecurityCenter.CreateSource.
func (c *securityCenterClient) CreateSource(ctx context.Context, in *CreateSourceRequest, opts ...grpc.CallOption) (*Source, error) {
	resp := new(Source)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateSource", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// CreateFinding calls SecurityCenter.CreateFinding.
func (c *securityCenterClient) CreateFinding(ctx context.Context, in *CreateFindingRequest, opts ...grpc.CallOption) (*Finding, error) {
	resp := new(Finding)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateFinding", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// CreateNotificationConfig calls SecurityCenter.CreateNotificationConfig.
func (c *securityCenterClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
	resp := new(NotificationConfig)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateNotificationConfig", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteNotificationConfig calls SecurityCenter.DeleteNotificationConfig.
func (c *securityCenterClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
	resp := new(empty.Empty)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/DeleteNotificationConfig", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// GetIamPolicy calls SecurityCenter.GetIamPolicy.
func (c *securityCenterClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) {
	resp := new(v1.Policy)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetIamPolicy", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// GetNotificationConfig calls SecurityCenter.GetNotificationConfig.
func (c *securityCenterClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
	resp := new(NotificationConfig)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetNotificationConfig", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// GetOrganizationSettings calls SecurityCenter.GetOrganizationSettings.
func (c *securityCenterClient) GetOrganizationSettings(ctx context.Context, in *GetOrganizationSettingsRequest, opts ...grpc.CallOption) (*OrganizationSettings, error) {
	resp := new(OrganizationSettings)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetOrganizationSettings", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// GetSource calls SecurityCenter.GetSource.
func (c *securityCenterClient) GetSource(ctx context.Context, in *GetSourceRequest, opts ...grpc.CallOption) (*Source, error) {
	resp := new(Source)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetSource", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// GroupAssets calls SecurityCenter.GroupAssets.
func (c *securityCenterClient) GroupAssets(ctx context.Context, in *GroupAssetsRequest, opts ...grpc.CallOption) (*GroupAssetsResponse, error) {
	resp := new(GroupAssetsResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GroupAssets", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// GroupFindings calls SecurityCenter.GroupFindings.
func (c *securityCenterClient) GroupFindings(ctx context.Context, in *GroupFindingsRequest, opts ...grpc.CallOption) (*GroupFindingsResponse, error) {
	resp := new(GroupFindingsResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GroupFindings", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// ListAssets calls SecurityCenter.ListAssets.
func (c *securityCenterClient) ListAssets(ctx context.Context, in *ListAssetsRequest, opts ...grpc.CallOption) (*ListAssetsResponse, error) {
	resp := new(ListAssetsResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListAssets", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// ListFindings calls SecurityCenter.ListFindings.
func (c *securityCenterClient) ListFindings(ctx context.Context, in *ListFindingsRequest, opts ...grpc.CallOption) (*ListFindingsResponse, error) {
	resp := new(ListFindingsResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListFindings", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// ListNotificationConfigs calls SecurityCenter.ListNotificationConfigs.
func (c *securityCenterClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
	resp := new(ListNotificationConfigsResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListNotificationConfigs", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// ListSources calls SecurityCenter.ListSources.
func (c *securityCenterClient) ListSources(ctx context.Context, in *ListSourcesRequest, opts ...grpc.CallOption) (*ListSourcesResponse, error) {
	resp := new(ListSourcesResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListSources", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// RunAssetDiscovery calls SecurityCenter.RunAssetDiscovery.
func (c *securityCenterClient) RunAssetDiscovery(ctx context.Context, in *RunAssetDiscoveryRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
	resp := new(longrunning.Operation)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/RunAssetDiscovery", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// SetFindingState calls SecurityCenter.SetFindingState.
func (c *securityCenterClient) SetFindingState(ctx context.Context, in *SetFindingStateRequest, opts ...grpc.CallOption) (*Finding, error) {
	resp := new(Finding)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/SetFindingState", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// SetIamPolicy calls SecurityCenter.SetIamPolicy.
func (c *securityCenterClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) {
	resp := new(v1.Policy)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/SetIamPolicy", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// TestIamPermissions calls SecurityCenter.TestIamPermissions.
func (c *securityCenterClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) {
	resp := new(v1.TestIamPermissionsResponse)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/TestIamPermissions", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateFinding calls SecurityCenter.UpdateFinding.
func (c *securityCenterClient) UpdateFinding(ctx context.Context, in *UpdateFindingRequest, opts ...grpc.CallOption) (*Finding, error) {
	resp := new(Finding)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateFinding", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateNotificationConfig calls SecurityCenter.UpdateNotificationConfig.
func (c *securityCenterClient) UpdateNotificationConfig(ctx context.Context, in *UpdateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
	resp := new(NotificationConfig)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateNotificationConfig", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateOrganizationSettings calls SecurityCenter.UpdateOrganizationSettings.
func (c *securityCenterClient) UpdateOrganizationSettings(ctx context.Context, in *UpdateOrganizationSettingsRequest, opts ...grpc.CallOption) (*OrganizationSettings, error) {
	resp := new(OrganizationSettings)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateOrganizationSettings", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateSource calls SecurityCenter.UpdateSource.
func (c *securityCenterClient) UpdateSource(ctx context.Context, in *UpdateSourceRequest, opts ...grpc.CallOption) (*Source, error) {
	resp := new(Source)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateSource", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateSecurityMarks calls SecurityCenter.UpdateSecurityMarks.
func (c *securityCenterClient) UpdateSecurityMarks(ctx context.Context, in *UpdateSecurityMarksRequest, opts ...grpc.CallOption) (*SecurityMarks, error) {
	resp := new(SecurityMarks)
	if err := c.cc.Invoke(ctx, "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateSecurityMarks", in, resp, opts...); err != nil {
		return nil, err
	}
	return resp, nil
}
// SecurityCenterServer is the server API for SecurityCenter service.
//
// Implementations may embed UnimplementedSecurityCenterServer to remain
// forward compatible when new methods are added.
type SecurityCenterServer interface {
	// Creates a source.
	CreateSource(context.Context, *CreateSourceRequest) (*Source, error)
	// Creates a finding. The corresponding source must exist for finding
	// creation to succeed.
	CreateFinding(context.Context, *CreateFindingRequest) (*Finding, error)
	// Creates a notification config.
	CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
	// Deletes a notification config.
	DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*empty.Empty, error)
	// Gets the access control policy on the specified Source.
	GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error)
	// Gets a notification config.
	GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
	// Gets the settings for an organization.
	GetOrganizationSettings(context.Context, *GetOrganizationSettingsRequest) (*OrganizationSettings, error)
	// Gets a source.
	GetSource(context.Context, *GetSourceRequest) (*Source, error)
	// Filters an organization's assets and groups them by their specified
	// properties.
	GroupAssets(context.Context, *GroupAssetsRequest) (*GroupAssetsResponse, error)
	// Filters an organization or source's findings and groups them by their
	// specified properties.
	//
	// To group across all sources provide a `-` as the source id.
	// Example: /v1p1beta1/organizations/{organization_id}/sources/-/findings
	GroupFindings(context.Context, *GroupFindingsRequest) (*GroupFindingsResponse, error)
	// Lists an organization's assets.
	ListAssets(context.Context, *ListAssetsRequest) (*ListAssetsResponse, error)
	// Lists an organization or source's findings.
	//
	// To list across all sources provide a `-` as the source id.
	// Example: /v1p1beta1/organizations/{organization_id}/sources/-/findings
	ListFindings(context.Context, *ListFindingsRequest) (*ListFindingsResponse, error)
	// Lists notification configs.
	ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
	// Lists all sources belonging to an organization.
	ListSources(context.Context, *ListSourcesRequest) (*ListSourcesResponse, error)
	// Runs asset discovery. The discovery is tracked with a long-running
	// operation.
	//
	// This API can only be called with limited frequency for an organization. If
	// it is called too frequently the caller will receive a TOO_MANY_REQUESTS
	// error.
	RunAssetDiscovery(context.Context, *RunAssetDiscoveryRequest) (*longrunning.Operation, error)
	// Updates the state of a finding.
	SetFindingState(context.Context, *SetFindingStateRequest) (*Finding, error)
	// Sets the access control policy on the specified Source.
	SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error)
	// Returns the permissions that a caller has on the specified source.
	TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error)
	// Creates or updates a finding. The corresponding source must exist for a
	// finding creation to succeed.
	UpdateFinding(context.Context, *UpdateFindingRequest) (*Finding, error)
	// Updates a notification config. The following update
	// fields are allowed: description, pubsub_topic, streaming_config.filter
	UpdateNotificationConfig(context.Context, *UpdateNotificationConfigRequest) (*NotificationConfig, error)
	// Updates an organization's settings.
	UpdateOrganizationSettings(context.Context, *UpdateOrganizationSettingsRequest) (*OrganizationSettings, error)
	// Updates a source.
	UpdateSource(context.Context, *UpdateSourceRequest) (*Source, error)
	// Updates security marks.
	UpdateSecurityMarks(context.Context, *UpdateSecurityMarksRequest) (*SecurityMarks, error)
}
// UnimplementedSecurityCenterServer can be embedded to have forward compatible implementations.
//
// Every method below returns a codes.Unimplemented gRPC status; embedders
// override only the methods they actually serve.
type UnimplementedSecurityCenterServer struct {
}

func (*UnimplementedSecurityCenterServer) CreateSource(context.Context, *CreateSourceRequest) (*Source, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreateSource not implemented")
}
func (*UnimplementedSecurityCenterServer) CreateFinding(context.Context, *CreateFindingRequest) (*Finding, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreateFinding not implemented")
}
func (*UnimplementedSecurityCenterServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
}
func (*UnimplementedSecurityCenterServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
}
func (*UnimplementedSecurityCenterServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
}
func (*UnimplementedSecurityCenterServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
}
func (*UnimplementedSecurityCenterServer) GetOrganizationSettings(context.Context, *GetOrganizationSettingsRequest) (*OrganizationSettings, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetOrganizationSettings not implemented")
}
func (*UnimplementedSecurityCenterServer) GetSource(context.Context, *GetSourceRequest) (*Source, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetSource not implemented")
}
func (*UnimplementedSecurityCenterServer) GroupAssets(context.Context, *GroupAssetsRequest) (*GroupAssetsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GroupAssets not implemented")
}
func (*UnimplementedSecurityCenterServer) GroupFindings(context.Context, *GroupFindingsRequest) (*GroupFindingsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GroupFindings not implemented")
}
func (*UnimplementedSecurityCenterServer) ListAssets(context.Context, *ListAssetsRequest) (*ListAssetsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListAssets not implemented")
}
func (*UnimplementedSecurityCenterServer) ListFindings(context.Context, *ListFindingsRequest) (*ListFindingsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListFindings not implemented")
}
func (*UnimplementedSecurityCenterServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
}
func (*UnimplementedSecurityCenterServer) ListSources(context.Context, *ListSourcesRequest) (*ListSourcesResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListSources not implemented")
}
func (*UnimplementedSecurityCenterServer) RunAssetDiscovery(context.Context, *RunAssetDiscoveryRequest) (*longrunning.Operation, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RunAssetDiscovery not implemented")
}
func (*UnimplementedSecurityCenterServer) SetFindingState(context.Context, *SetFindingStateRequest) (*Finding, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SetFindingState not implemented")
}
func (*UnimplementedSecurityCenterServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
}
func (*UnimplementedSecurityCenterServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
}
func (*UnimplementedSecurityCenterServer) UpdateFinding(context.Context, *UpdateFindingRequest) (*Finding, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateFinding not implemented")
}
func (*UnimplementedSecurityCenterServer) UpdateNotificationConfig(context.Context, *UpdateNotificationConfigRequest) (*NotificationConfig, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationConfig not implemented")
}
func (*UnimplementedSecurityCenterServer) UpdateOrganizationSettings(context.Context, *UpdateOrganizationSettingsRequest) (*OrganizationSettings, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateOrganizationSettings not implemented")
}
func (*UnimplementedSecurityCenterServer) UpdateSource(context.Context, *UpdateSourceRequest) (*Source, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateSource not implemented")
}
func (*UnimplementedSecurityCenterServer) UpdateSecurityMarks(context.Context, *UpdateSecurityMarksRequest) (*SecurityMarks, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateSecurityMarks not implemented")
}
func RegisterSecurityCenterServer(s *grpc.Server, srv SecurityCenterServer) {
s.RegisterService(&_SecurityCenter_serviceDesc, srv)
}
func _SecurityCenter_CreateSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateSourceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).CreateSource(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateSource",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).CreateSource(ctx, req.(*CreateSourceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_CreateFinding_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateFindingRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).CreateFinding(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateFinding",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).CreateFinding(ctx, req.(*CreateFindingRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateNotificationConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).CreateNotificationConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateNotificationConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteNotificationConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).DeleteNotificationConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/DeleteNotificationConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v1.GetIamPolicyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).GetIamPolicy(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetIamPolicy",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNotificationConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).GetNotificationConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetNotificationConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_GetOrganizationSettings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetOrganizationSettingsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).GetOrganizationSettings(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetOrganizationSettings",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).GetOrganizationSettings(ctx, req.(*GetOrganizationSettingsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_GetSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetSourceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).GetSource(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetSource",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).GetSource(ctx, req.(*GetSourceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_GroupAssets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GroupAssetsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).GroupAssets(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GroupAssets",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).GroupAssets(ctx, req.(*GroupAssetsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_GroupFindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GroupFindingsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).GroupFindings(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GroupFindings",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).GroupFindings(ctx, req.(*GroupFindingsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_ListAssets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListAssetsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).ListAssets(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListAssets",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).ListAssets(ctx, req.(*ListAssetsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_ListFindings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListFindingsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).ListFindings(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListFindings",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).ListFindings(ctx, req.(*ListFindingsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListNotificationConfigsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).ListNotificationConfigs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListNotificationConfigs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_ListSources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListSourcesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).ListSources(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListSources",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).ListSources(ctx, req.(*ListSourcesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_RunAssetDiscovery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RunAssetDiscoveryRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).RunAssetDiscovery(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/RunAssetDiscovery",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).RunAssetDiscovery(ctx, req.(*RunAssetDiscoveryRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_SetFindingState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetFindingStateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).SetFindingState(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/SetFindingState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).SetFindingState(ctx, req.(*SetFindingStateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v1.SetIamPolicyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).SetIamPolicy(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/SetIamPolicy",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v1.TestIamPermissionsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).TestIamPermissions(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/TestIamPermissions",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_UpdateFinding_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateFindingRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).UpdateFinding(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateFinding",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).UpdateFinding(ctx, req.(*UpdateFindingRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_UpdateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) |
func _SecurityCenter_UpdateOrganizationSettings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateOrganizationSettingsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).UpdateOrganizationSettings(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateOrganizationSettings",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).UpdateOrganizationSettings(ctx, req.(*UpdateOrganizationSettingsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_UpdateSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateSourceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).UpdateSource(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateSource",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).UpdateSource(ctx, req.(*UpdateSourceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SecurityCenter_UpdateSecurityMarks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateSecurityMarksRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).UpdateSecurityMarks(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateSecurityMarks",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).UpdateSecurityMarks(ctx, req.(*UpdateSecurityMarksRequest))
}
return interceptor(ctx, in, info, handler)
}
var _SecurityCenter_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.cloud.securitycenter.v1p1beta1.SecurityCenter",
HandlerType: (*SecurityCenterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateSource",
Handler: _SecurityCenter_CreateSource_Handler,
},
{
MethodName: "CreateFinding",
Handler: _SecurityCenter_CreateFinding_Handler,
},
{
MethodName: "CreateNotificationConfig",
Handler: _SecurityCenter_CreateNotificationConfig_Handler,
},
{
MethodName: "DeleteNotificationConfig",
Handler: _SecurityCenter_DeleteNotificationConfig_Handler,
},
{
MethodName: "GetIamPolicy",
Handler: _SecurityCenter_GetIamPolicy_Handler,
},
{
MethodName: "GetNotificationConfig",
Handler: _SecurityCenter_GetNotificationConfig_Handler,
},
{
MethodName: "GetOrganizationSettings",
Handler: _SecurityCenter_GetOrganizationSettings_Handler,
},
{
MethodName: "GetSource",
Handler: _SecurityCenter_GetSource_Handler,
},
{
MethodName: "GroupAssets",
Handler: _SecurityCenter_GroupAssets_Handler,
},
{
MethodName: "GroupFindings",
Handler: _SecurityCenter_GroupFindings_Handler,
},
{
MethodName: "ListAssets",
Handler: _SecurityCenter_ListAssets_Handler,
},
{
MethodName: "ListFindings",
Handler: _SecurityCenter_ListFindings_Handler,
},
{
MethodName: "ListNotificationConfigs",
Handler: _SecurityCenter_ListNotificationConfigs_Handler,
},
{
MethodName: "ListSources",
Handler: _SecurityCenter_ListSources_Handler,
},
{
MethodName: "RunAssetDiscovery",
Handler: _SecurityCenter_RunAssetDiscovery_Handler,
},
{
MethodName: "SetFindingState",
Handler: _SecurityCenter_SetFindingState_Handler,
},
{
MethodName: "SetIamPolicy",
Handler: _SecurityCenter_SetIamPolicy_Handler,
},
{
MethodName: "TestIamPermissions",
Handler: _SecurityCenter_TestIamPermissions_Handler,
},
{
MethodName: "UpdateFinding",
Handler: _SecurityCenter_UpdateFinding_Handler,
},
{
MethodName: "UpdateNotificationConfig",
Handler: _SecurityCenter_UpdateNotificationConfig_Handler,
},
{
MethodName: "UpdateOrganizationSettings",
Handler: _SecurityCenter_UpdateOrganizationSettings_Handler,
},
{
MethodName: "UpdateSource",
Handler: _SecurityCenter_UpdateSource_Handler,
},
{
MethodName: "UpdateSecurityMarks",
Handler: _SecurityCenter_UpdateSecurityMarks_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/cloud/securitycenter/v1p1beta1/securitycenter_service.proto",
}
| {
in := new(UpdateNotificationConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SecurityCenterServer).UpdateNotificationConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateNotificationConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SecurityCenterServer).UpdateNotificationConfig(ctx, req.(*UpdateNotificationConfigRequest))
}
return interceptor(ctx, in, info, handler)
} |
yscript.py | #! /usr/bin/python
from molmod.units import *
from yaff import *
import h5py, numpy as np
#Setting up system and force field
system = System.from_file('system.chk')
ff = ForceField.generate(system, 'pars.txt', rcut=15.0*angstrom, alpha_scale=3.2, gcut_scale=1.5, smooth_ei=True)
#Setting up output
f = h5py.File('output.h5', mode='w')
hdf5 = HDF5Writer(f, step=1)
r = h5py.File('restart.h5', mode='w') |
#Setting up simulation
energy = ff.compute()
system.to_hdf5(f)
f['system/energy'] = energy | restart = RestartWriter(r, step=10000)
hooks = [hdf5, restart] |
networksystem_protomessages.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0-devel
// protoc v3.6.1
// source: networksystem_protomessages.proto
package dota
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type NetMessageSplitscreenUserChanged struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Slot *uint32 `protobuf:"varint,1,opt,name=slot" json:"slot,omitempty"`
}
func (x *NetMessageSplitscreenUserChanged) Reset() {
*x = NetMessageSplitscreenUserChanged{}
if protoimpl.UnsafeEnabled {
mi := &file_networksystem_protomessages_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NetMessageSplitscreenUserChanged) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NetMessageSplitscreenUserChanged) ProtoMessage() {}
func (x *NetMessageSplitscreenUserChanged) ProtoReflect() protoreflect.Message {
mi := &file_networksystem_protomessages_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NetMessageSplitscreenUserChanged.ProtoReflect.Descriptor instead.
func (*NetMessageSplitscreenUserChanged) Descriptor() ([]byte, []int) {
return file_networksystem_protomessages_proto_rawDescGZIP(), []int{0}
}
func (x *NetMessageSplitscreenUserChanged) GetSlot() uint32 {
if x != nil && x.Slot != nil {
return *x.Slot
}
return 0
}
type NetMessageConnectionClosed struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Reason *uint32 `protobuf:"varint,1,opt,name=reason" json:"reason,omitempty"`
}
func (x *NetMessageConnectionClosed) Reset() {
*x = NetMessageConnectionClosed{}
if protoimpl.UnsafeEnabled {
mi := &file_networksystem_protomessages_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NetMessageConnectionClosed) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NetMessageConnectionClosed) ProtoMessage() {}
func (x *NetMessageConnectionClosed) ProtoReflect() protoreflect.Message {
mi := &file_networksystem_protomessages_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NetMessageConnectionClosed.ProtoReflect.Descriptor instead.
func (*NetMessageConnectionClosed) Descriptor() ([]byte, []int) {
return file_networksystem_protomessages_proto_rawDescGZIP(), []int{1}
}
func (x *NetMessageConnectionClosed) GetReason() uint32 {
if x != nil && x.Reason != nil {
return *x.Reason
}
return 0
}
type NetMessageConnectionCrashed struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Reason *uint32 `protobuf:"varint,1,opt,name=reason" json:"reason,omitempty"`
}
func (x *NetMessageConnectionCrashed) Reset() {
*x = NetMessageConnectionCrashed{}
if protoimpl.UnsafeEnabled {
mi := &file_networksystem_protomessages_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NetMessageConnectionCrashed) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NetMessageConnectionCrashed) ProtoMessage() {}
func (x *NetMessageConnectionCrashed) ProtoReflect() protoreflect.Message {
mi := &file_networksystem_protomessages_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NetMessageConnectionCrashed.ProtoReflect.Descriptor instead.
func (*NetMessageConnectionCrashed) Descriptor() ([]byte, []int) {
return file_networksystem_protomessages_proto_rawDescGZIP(), []int{2}
}
func (x *NetMessageConnectionCrashed) GetReason() uint32 {
if x != nil && x.Reason != nil {
return *x.Reason
}
return 0
}
type NetMessagePacketStart struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *NetMessagePacketStart) Reset() {
*x = NetMessagePacketStart{}
if protoimpl.UnsafeEnabled {
mi := &file_networksystem_protomessages_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NetMessagePacketStart) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NetMessagePacketStart) ProtoMessage() {}
func (x *NetMessagePacketStart) ProtoReflect() protoreflect.Message {
mi := &file_networksystem_protomessages_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NetMessagePacketStart.ProtoReflect.Descriptor instead.
func (*NetMessagePacketStart) Descriptor() ([]byte, []int) {
return file_networksystem_protomessages_proto_rawDescGZIP(), []int{3}
}
type NetMessagePacketEnd struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *NetMessagePacketEnd) Reset() {
*x = NetMessagePacketEnd{}
if protoimpl.UnsafeEnabled {
mi := &file_networksystem_protomessages_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NetMessagePacketEnd) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NetMessagePacketEnd) ProtoMessage() {}
func (x *NetMessagePacketEnd) ProtoReflect() protoreflect.Message {
mi := &file_networksystem_protomessages_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NetMessagePacketEnd.ProtoReflect.Descriptor instead.
func (*NetMessagePacketEnd) Descriptor() ([]byte, []int) {
return file_networksystem_protomessages_proto_rawDescGZIP(), []int{4}
}
var File_networksystem_protomessages_proto protoreflect.FileDescriptor
var file_networksystem_protomessages_proto_rawDesc = []byte{
0x0a, 0x21, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x04, 0x64, 0x6f, 0x74, 0x61, 0x22, 0x36, 0x0a, 0x20, 0x4e, 0x65, 0x74,
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x73, 0x63, 0x72, 0x65,
0x65, 0x6e, 0x55, 0x73, 0x65, 0x72, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x12, 0x0a,
0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x6c, 0x6f,
0x74, 0x22, 0x34, 0x0a, 0x1a, 0x4e, 0x65, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43,
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12,
0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x1b, 0x4e, 0x65, 0x74, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43,
0x72, 0x61, 0x73, 0x68, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x17,
0x0a, 0x15, 0x4e, 0x65, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x61, 0x63, 0x6b,
0x65, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x22, 0x15, 0x0a, 0x13, 0x4e, 0x65, 0x74, 0x4d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x45, 0x6e, 0x64, 0x42, 0x03,
0x80, 0x01, 0x00,
}
var (
file_networksystem_protomessages_proto_rawDescOnce sync.Once
file_networksystem_protomessages_proto_rawDescData = file_networksystem_protomessages_proto_rawDesc
)
func file_networksystem_protomessages_proto_rawDescGZIP() []byte |
var file_networksystem_protomessages_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_networksystem_protomessages_proto_goTypes = []interface{}{
(*NetMessageSplitscreenUserChanged)(nil), // 0: dota.NetMessageSplitscreenUserChanged
(*NetMessageConnectionClosed)(nil), // 1: dota.NetMessageConnectionClosed
(*NetMessageConnectionCrashed)(nil), // 2: dota.NetMessageConnectionCrashed
(*NetMessagePacketStart)(nil), // 3: dota.NetMessagePacketStart
(*NetMessagePacketEnd)(nil), // 4: dota.NetMessagePacketEnd
}
var file_networksystem_protomessages_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_networksystem_protomessages_proto_init() }
func file_networksystem_protomessages_proto_init() {
if File_networksystem_protomessages_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_networksystem_protomessages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NetMessageSplitscreenUserChanged); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_networksystem_protomessages_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NetMessageConnectionClosed); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_networksystem_protomessages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NetMessageConnectionCrashed); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_networksystem_protomessages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NetMessagePacketStart); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_networksystem_protomessages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NetMessagePacketEnd); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_networksystem_protomessages_proto_rawDesc,
NumEnums: 0,
NumMessages: 5,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_networksystem_protomessages_proto_goTypes,
DependencyIndexes: file_networksystem_protomessages_proto_depIdxs,
MessageInfos: file_networksystem_protomessages_proto_msgTypes,
}.Build()
File_networksystem_protomessages_proto = out.File
file_networksystem_protomessages_proto_rawDesc = nil
file_networksystem_protomessages_proto_goTypes = nil
file_networksystem_protomessages_proto_depIdxs = nil
}
| {
file_networksystem_protomessages_proto_rawDescOnce.Do(func() {
file_networksystem_protomessages_proto_rawDescData = protoimpl.X.CompressGZIP(file_networksystem_protomessages_proto_rawDescData)
})
return file_networksystem_protomessages_proto_rawDescData
} |
main.go | /*
https://www.acwing.com/problem/content/1962/
*/
package main
import "fmt"
const N = 1 << 16
var (
n int
m uint64
state uint64
p [N]uint64 // p[state] = i 表示state状态是第i个得到的
)
func init() {
for i := 0; i < N; i++ {
p[i] = 0x3f3f3f3f
}
}
func pr(x uint64) {
for i := 0; i < n; i++ {
fmt.Println(x >> i & 1)
}
}
func main() {
fmt.Scan(&n, &m)
var x uint64
for i := 0; i < n; i++ {
fmt.Scan(&x)
state |= x << i
}
p[state] = 0
var i uint64
for i = 1; ; i++ {
state = update(state)
switch {
case i == m:
pr(state)
return
case p[state] == 0x3f3f3f3f:
p[state] = i
default:
circleLen := i - p[state]
// 注意这里要减去已经走了的步数
r := (m - i) % circleLen
for r > 0 {
state = update(state)
r--
} | return
}
}
}
func update(state uint64) uint64 {
var res uint64 = 0
for i := 0; i < n; i++ {
currentBit := (state >> i) & 1
preBit := (state >> ((i - 1 + n) % n)) & 1
x := currentBit ^ preBit
res |= x << i
}
return res
} | pr(state) |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
from eventlet import greenthread
import fixtures
import mock
from mox3 import mox
from oslo.config import cfg
from oslo.config import fixture as config_fixture
from oslo.utils import timeutils
from oslo.utils import units
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
import six
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import vm_mode
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova import test
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.virt.xenapi import stubs
from nova.tests.unit.virt.xenapi import test_xenapi
from nova import utils
from nova.virt import hardware
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
CONF = cfg.CONF
XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server',
'serverpath',
'sr_type'],
'vdi_uuid': 'falseVDI'},
ISCSI_TYPE: {'volume_id': 'fake_volume_id',
'target_lun': 1,
'target_iqn': 'fake_iqn:volume-fake_volume_id',
'target_portal': u'localhost:3260',
'target_discovered': False}, }
return fakes[sr_type]
def _get_fake_session(error=None):
session = mock.Mock()
xenapi_session.apply_session_helpers(session)
if error is not None:
class FakeException(Exception):
details = [error, "a", "b", "c"]
session.XenAPI.Failure = FakeException
session.call_xenapi.side_effect = FakeException
return session
@contextlib.contextmanager
def contextified(result):
yield result
def _fake_noop(*args, **kwargs):
return
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
pass
class LookupTestCase(VMUtilsTestBase):
def setUp(self):
super(LookupTestCase, self).setUp()
self.session = self.mox.CreateMockAnything('Fake Session')
self.name_label = 'my_vm'
def _do_mock(self, result):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label).AndReturn(result)
self.mox.ReplayAll()
def test_normal(self):
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label)
self.assertEqual('x', result)
def test_no_result(self):
self._do_mock([])
result = vm_utils.lookup(self.session, self.name_label)
self.assertIsNone(result)
def test_too_many(self):
self._do_mock(['a', 'b'])
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label)
def test_rescue_none(self):
self.session.call_xenapi(
"VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
self._do_mock(['x'])
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('x', result)
def test_rescue_found(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['y'])
self.mox.ReplayAll()
result = vm_utils.lookup(self.session, self.name_label,
check_rescue=True)
self.assertEqual('y', result)
def test_rescue_too_many(self):
self.session.call_xenapi(
"VM.get_by_name_label",
self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
self.mox.ReplayAll()
self.assertRaises(exception.InstanceExists,
vm_utils.lookup,
self.session, self.name_label,
check_rescue=True)
class GenerateConfigDriveTestCase(VMUtilsTestBase):
def test_no_admin_pass(self):
instance = {}
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr('session').AndReturn('sr_ref')
self.mox.StubOutWithMock(vm_utils, 'create_vdi')
vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
'configdrive',
64 * units.Mi).AndReturn('vdi_ref')
self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
vm_utils.vdi_attached_here(
'session', 'vdi_ref', read_only=False).AndReturn(
contextified('mounted_dev'))
class FakeInstanceMetadata(object):
def __init__(_self, instance, content=None, extra_md=None,
network_info=None):
self.assertEqual(network_info, "nw_info")
def metadata_for_config_drive(_self):
return []
self.useFixture(fixtures.MonkeyPatch(
'nova.api.metadata.base.InstanceMetadata',
FakeInstanceMetadata))
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
'-allow-lowercase', '-allow-multidot', '-l',
'-publisher', mox.IgnoreArg(), '-quiet',
'-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
attempts=1, run_as_root=False).AndReturn(None)
utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), run_as_root=True).AndReturn(None)
self.mox.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
bootable=False, read_only=True).AndReturn(None)
self.mox.ReplayAll()
# And the actual call we're testing
vm_utils.generate_configdrive('session', instance, 'vm_ref',
'userdevice', "nw_info")
@mock.patch.object(vm_utils, "destroy_vdi")
@mock.patch.object(vm_utils, "vdi_attached_here")
@mock.patch.object(vm_utils, "create_vdi")
@mock.patch.object(vm_utils, "safe_find_sr")
def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
mock_destroy):
mock_create_vdi.return_value = 'vdi_ref'
mock_attached.side_effect = test.TestingException
mock_destroy.side_effect = exception.StorageError(reason="")
instance = {"uuid": "asdf"}
self.assertRaises(test.TestingException,
vm_utils.generate_configdrive,
'session', instance, 'vm_ref', 'userdevice',
'nw_info')
mock_destroy.assert_called_once_with('session', 'vdi_ref')
class XenAPIGetUUID(VMUtilsTestBase):
def test_get_this_vm_uuid_new_kernel(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
vm_utils._get_sys_hypervisor_uuid().AndReturn(
'2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
def test_get_this_vm_uuid_old_kernel_reboot(self):
self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
self.mox.StubOutWithMock(utils, 'execute')
vm_utils._get_sys_hypervisor_uuid().AndRaise(
IOError(13, 'Permission denied'))
utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
('27', ''))
utils.execute('xenstore-read', '/local/domain/27/vm',
run_as_root=True).AndReturn(
('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
self.mox.ReplayAll()
self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
vm_utils.get_this_vm_uuid(None))
self.mox.VerifyAll()
class FakeSession(object):
def call_xenapi(self, *args):
pass
def call_plugin(self, *args):
pass
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
pass
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, *args, **kwargs):
pass
class FetchVhdImageTestCase(VMUtilsTestBase):
def setUp(self):
super(FetchVhdImageTestCase, self).setUp()
self.context = context.get_admin_context()
self.context.auth_token = 'auth_token'
self.session = FakeSession()
self.instance = {"uuid": "uuid"}
self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
def _stub_glance_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized_with_retry')
func = self.session.call_plugin_serialized_with_retry(
'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
extra_headers={'X-Service-Catalog': '[]',
'X-Auth-Token': 'auth_token',
'X-Roles': '',
'X-Tenant-Id': None,
'X-User-Id': None,
'X-Identity-Status': 'Confirmed'},
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path')
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def _stub_bittorrent_download_vhd(self, raise_exc=None):
self.mox.StubOutWithMock(
self.session, 'call_plugin_serialized')
func = self.session.call_plugin_serialized(
'bittorrent', 'download_vhd',
image_id='image_id',
uuid_stack=["uuid_stack"],
sr_path='sr_path',
torrent_download_stall_cutoff=600,
torrent_listen_port_start=6881,
torrent_listen_port_end=6891,
torrent_max_last_accessed=86400,
torrent_max_seeder_processes_per_host=1,
torrent_seed_chance=1.0,
torrent_seed_duration=3600,
torrent_url='http://foo/image_id.torrent'
)
if raise_exc:
func.AndRaise(raise_exc)
else:
func.AndReturn({'root': {'uuid': 'vdi'}})
def test_fetch_vhd_image_works_with_glance(self):
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(
self.context, self.session, self.instance, "vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_works_with_bittorrent(self):
cfg.CONF.import_opt('torrent_base_url',
'nova.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(True)
self._stub_bittorrent_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi").AndRaise(exception.FlavorDiskTooSmall)
self.mox.StubOutWithMock(self.session, 'call_xenapi')
self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
vm_utils.destroy_vdi(self.session,
"ref").AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.FlavorDiskTooSmall,
vm_utils._fetch_vhd_image, self.context, self.session,
self.instance, 'image_id')
self.mox.VerifyAll()
def test_fallback_to_default_handler(self):
cfg.CONF.import_opt('torrent_base_url',
'nova.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(True)
self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
vm_utils.get_sr_path(self.session).AndReturn('sr_path')
self._stub_glance_download_vhd()
self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
vm_utils.safe_find_sr(self.session).AndReturn("sr")
self.mox.StubOutWithMock(vm_utils, '_scan_sr')
vm_utils._scan_sr(self.session, "sr")
self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
vm_utils._check_vdi_size(self.context, self.session, self.instance,
"vdi")
self.mox.ReplayAll()
self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
self.session, self.instance, 'image_id')['root']['uuid'])
self.mox.VerifyAll()
def test_default_handler_does_not_fallback_to_itself(self):
cfg.CONF.import_opt('torrent_base_url',
'nova.virt.xenapi.image.bittorrent',
group='xenserver')
self.flags(torrent_base_url='http://foo', group='xenserver')
self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
vm_utils._image_uses_bittorrent(
self.context, self.instance).AndReturn(False)
self._stub_glance_download_vhd(raise_exc=RuntimeError)
self.mox.ReplayAll()
self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
self.context, self.session, self.instance, 'image_id')
self.mox.VerifyAll()
class TestImageCompression(VMUtilsTestBase):
def test_image_compression(self):
# Testing for nova.conf, too low, negative, and a correct value.
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=0, group='xenserver')
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=-6, group='xenserver')
self.assertIsNone(vm_utils.get_compression_level())
self.flags(image_compression_level=6, group='xenserver')
self.assertEqual(vm_utils.get_compression_level(), 6)
class ResizeHelpersTestCase(VMUtilsTestBase):
def test_repair_filesystem(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('e2fsck', '-f', "-y", "fakepath",
run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
("size is: 42", ""))
self.mox.ReplayAll()
vm_utils._repair_filesystem("fakepath")
def _call_tune2fs_remove_journal(self, path):
utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
def _call_tune2fs_add_journal(self, path):
utils.execute("tune2fs", "-j", path, run_as_root=True)
def _call_parted_mkpart(self, path, start, end):
utils.execute('parted', '--script', path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', path, 'mkpart',
'primary', '%ds' % start, '%ds' % end, run_as_root=True)
def _call_parted_boot_flag(sef, path):
utils.execute('parted', '--script', path, 'set', '1',
'boot', 'on', run_as_root=True)
def test_resize_part_and_fs_down_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
self._call_parted_mkpart(dev_path, 0, 9)
self._call_parted_boot_flag(dev_path)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
def test_log_progress_if_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
vm_utils.LOG.debug(_("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy"),
{"complete_pct": 50.0, "left": 1})
current = timeutils.utcnow()
timeutils.set_time_override(current)
timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_log_progress_if_not_required(self):
self.mox.StubOutWithMock(vm_utils.LOG, "debug")
current = timeutils.utcnow()
timeutils.set_time_override(current)
timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
self.mox.ReplayAll()
vm_utils._log_progress_if_required(1, current, 2)
def test_resize_part_and_fs_down_fails_disk_too_big(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
new_sectors = 10
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
mobj = utils.execute("resize2fs",
partition_path,
"%ss" % new_sectors,
run_as_root=True)
mobj.AndRaise(processutils.ProcessExecutionError)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vm_utils._resize_part_and_fs,
"fake", 0, 20, 10, "boot")
def test_resize_part_and_fs_up_succeeds(self):
self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
self.mox.StubOutWithMock(utils, 'execute')
dev_path = "/dev/fake"
partition_path = "%s1" % dev_path
vm_utils._repair_filesystem(partition_path)
self._call_tune2fs_remove_journal(partition_path)
self._call_parted_mkpart(dev_path, 0, 29)
utils.execute("resize2fs", partition_path, run_as_root=True)
self._call_tune2fs_add_journal(partition_path)
self.mox.ReplayAll()
vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
def test_resize_disk_throws_on_zero_size(self):
self.assertRaises(exception.ResizeError,
vm_utils.resize_disk, "session", "instance", "vdi_ref",
{"root_gb": 0})
def test_auto_config_disk_returns_early_on_zero_size(self):
vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
@mock.patch.object(utils, "execute")
def test_get_partitions(self, mock_execute):
parted_return = "BYT;\n...\n"
parted_return += "1:2s:11s:10s:ext3::boot;\n"
parted_return += "2:20s:11s:10s::bob:;\n"
mock_execute.return_value = (parted_return, None)
partitions = vm_utils._get_partitions("abc")
self.assertEqual(2, len(partitions))
self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
class CheckVDISizeTestCase(VMUtilsTestBase):
def setUp(self):
super(CheckVDISizeTestCase, self).setUp()
self.context = 'fakecontext'
self.session = 'fakesession'
self.instance = objects.Instance(uuid=str(uuid.uuid4()))
self.flavor = objects.Flavor()
self.vdi_uuid = 'fakeuuid'
def test_not_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(1073741824)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
def test_too_large(self):
self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
vm_utils._get_vdi_chain_size(self.session,
self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 1
get.return_value = self.flavor
self.assertRaises(exception.FlavorDiskTooSmall,
vm_utils._check_vdi_size, self.context,
self.session, self.instance, self.vdi_uuid)
def test_zero_root_gb_disables_check(self):
with mock.patch.object(self.instance, 'get_flavor') as get:
self.flavor.root_gb = 0
get.return_value = self.flavor
vm_utils._check_vdi_size(self.context, self.session, self.instance,
self.vdi_uuid)
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEqual([], result)
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
def test_lookup_call(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn('ignored')
mock.ReplayAll()
vm_utils.vm_ref_or_raise('session', 'somename')
mock.VerifyAll()
def test_return_value(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
mock.ReplayAll()
self.assertEqual(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock.VerifyAll()
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
def test_exception_raised(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock.VerifyAll()
def test_exception_msg_contains_vm_name(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertIn('somename', six.text_type(e))
mock.VerifyAll()
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateCachedImageTestCase, self).setUp()
self.session = _get_fake_session()
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
None, None, None, 'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
def test_no_cow_no_ext(self, mock_safe_find_sr):
self.flags(use_cow_images=False)
self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
'vdi_ref', None, None, None,
'vdi_uuid']
self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
@mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
@mock.patch.object(vm_utils, '_fetch_image',
return_value={'root': {'uuid': 'vdi_uuid',
'file': None}})
def test_noncached(self, mock_fetch_image, mock_clone_vdi,
mock_safe_find_sr):
self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
None, None, None, None, None,
None, 'vdi_uuid']
self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
vm_utils._create_cached_image('context', self.session,
'instance', 'name', 'uuid',
vm_utils.ImageType.DISK_VHD))
class BittorrentTestCase(VMUtilsTestBase):
def setUp(self):
super(BittorrentTestCase, self).setUp()
self.context = context.get_admin_context()
def test_image_uses_bittorrent(self):
instance = {'system_metadata': {'image_bittorrent': True}}
self.flags(torrent_images='some', group='xenserver')
self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
instance))
def _test_create_image(self, cache_type):
instance = {'system_metadata': {'image_cache_in_nova': True}}
self.flags(cache_images=cache_type, group='xenserver')
was = {'called': None}
def fake_create_cached_image(*args):
was['called'] = 'some'
return (False, {})
self.stubs.Set(vm_utils, '_create_cached_image',
fake_create_cached_image)
def fake_fetch_image(*args):
was['called'] = 'none'
return {}
self.stubs.Set(vm_utils, '_fetch_image',
fake_fetch_image)
vm_utils.create_image(self.context, None, instance,
'foo', 'bar', 'baz')
self.assertEqual(was['called'], cache_type)
def test_create_image_cached(self):
self._test_create_image('some')
def test_create_image_uncached(self):
self._test_create_image('none')
class ShutdownTestCase(VMUtilsTestBase):
def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.hard_shutdown_vm(
session, instance, vm_ref))
def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
self.mock = mox.Mox()
session = FakeSession()
instance = "instance"
vm_ref = "vm-ref"
self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
self.mock.StubOutWithMock(vm_utils, 'LOG')
self.assertTrue(vm_utils.clean_shutdown_vm(
session, instance, vm_ref))
class CreateVBDTestCase(VMUtilsTestBase):
def setUp(self):
super(CreateVBDTestCase, self).setUp()
self.session = FakeSession()
self.mock = mox.Mox()
self.mock.StubOutWithMock(self.session, 'call_xenapi')
self.vbd_rec = self._generate_vbd_rec()
def _generate_vbd_rec(self):
vbd_rec = {}
vbd_rec['VM'] = 'vm_ref'
vbd_rec['VDI'] = 'vdi_ref'
vbd_rec['userdevice'] = '0'
vbd_rec['bootable'] = False
vbd_rec['mode'] = 'RW'
vbd_rec['type'] = 'disk'
vbd_rec['unpluggable'] = True
vbd_rec['empty'] = False
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
return vbd_rec
def test_create_vbd_default_args(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_osvol(self):
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
"osvol", "True")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
osvol=True)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_create_vbd_extra_args(self):
self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
self.vbd_rec['type'] = 'a'
self.vbd_rec['mode'] = 'RO'
self.vbd_rec['bootable'] = True
self.vbd_rec['empty'] = True
self.vbd_rec['unpluggable'] = False
self.session.call_xenapi('VBD.create',
self.vbd_rec).AndReturn("vbd_ref")
self.mock.ReplayAll()
result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
vbd_type="a", read_only=True, bootable=True,
empty=True, unpluggable=False)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
def test_attach_cd(self):
self.mock.StubOutWithMock(vm_utils, 'create_vbd')
vm_utils.create_vbd(self.session, "vm_ref", None, 1,
vbd_type='cd', read_only=True, bootable=True,
empty=True, unpluggable=False).AndReturn("vbd_ref")
self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
self.mock.ReplayAll()
result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
self.assertEqual(result, "vbd_ref")
self.mock.VerifyAll()
class UnplugVbdTestCase(VMUtilsTestBase):
@mock.patch.object(greenthread, 'sleep')
def test_unplug_vbd_works(self, mock_sleep):
session = _get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
self.assertEqual(0, mock_sleep.call_count)
def test_unplug_vbd_raises_unexpected_error(self):
session = _get_fake_session()
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
session.call_xenapi.side_effect = test.TestingException()
self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_detached_works(self):
error = "DEVICE_ALREADY_DETACHED"
session = _get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
session = _get_fake_session("")
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vbd_ref, vm_ref)
self.assertEqual(1, session.call_xenapi.call_count)
def _test_uplug_vbd_retries(self, mock_sleep, error):
session = _get_fake_session(error)
vbd_ref = "vbd_ref"
vm_ref = 'vm_ref'
self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
session, vm_ref, vbd_ref)
self.assertEqual(11, session.call_xenapi.call_count)
self.assertEqual(10, mock_sleep.call_count)
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_rejected(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"DEVICE_DETACH_REJECTED")
@mock.patch.object(greenthread, 'sleep')
def test_uplug_vbd_retries_on_internal_error(self, mock_sleep):
self._test_uplug_vbd_retries(mock_sleep,
"INTERNAL_ERROR")
class VDIOtherConfigTestCase(VMUtilsTestBase):
    """Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
    """

    def setUp(self):
        super(VDIOtherConfigTestCase, self).setUp()

        class _FakeSession():
            # Fake XenAPI session: "VDI.foo" calls dispatch to a method named
            # "VDI_foo" when one has been stubbed onto the instance;
            # otherwise the call is recorded for later inspection.
            def call_xenapi(self, operation, *args, **kwargs):
                # VDI.add_to_other_config -> VDI_add_to_other_config
                method = getattr(self, operation.replace('.', '_'), None)
                if method:
                    return method(*args, **kwargs)

                self.operation = operation
                self.args = args
                self.kwargs = kwargs

        self.session = _FakeSession()
        self.context = context.get_admin_context()
        self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
                              'name': 'myinstance'}

    def test_create_vdi(self):
        # Some images are registered with XenServer explicitly by calling
        # `create_vdi`
        vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
                            'myvdi', 'root', 1024, read_only=True)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        # The recorded call's first positional argument is the VDI record.
        self.assertEqual(expected, self.session.args[0]['other_config'])

    def test_create_image(self):
        # Other images are registered implicitly when they are dropped into
        # the SR by a dom0 plugin or some other process
        self.flags(cache_images='none', group='xenserver')

        def fake_fetch_image(*args):
            return {'root': {'uuid': 'fake-uuid'}}

        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}

        vm_utils.create_image(self.context, self.session, self.fake_instance,
                              'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)

    def test_import_migrated_vhds(self):
        # Migrated images should preserve the `other_config`
        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        def call_plugin_serialized(*args, **kwargs):
            return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}
        self.session.call_plugin_serialized = call_plugin_serialized

        self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
        self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)

        vm_utils._import_migrated_vhds(self.session, self.fake_instance,
                                       "disk_label", "root", "vdi_label")

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)
class GenerateDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._generate_disk using a fake XenAPI session and
    mox-recorded expectations for the parted/mkfs commands it shells out to.
    """

    def setUp(self):
        super(GenerateDiskTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False
        self.vm_ref = fake.create_vm("foo", "Running")

    def tearDown(self):
        super(GenerateDiskTestCase, self).tearDown()
        # Remove the VM created in setUp from the fake XenAPI state.
        fake.destroy_vm(self.vm_ref)

    def _expect_parted_calls(self):
        """Record the mox expectations for partitioning /dev/fakedev.

        The local-connection path tolerates parted failures
        (check_exit_code=False) and additionally maps the partition with
        kpartx; the remote path requires parted to succeed.
        """
        self.mox.StubOutWithMock(utils, "execute")
        self.mox.StubOutWithMock(utils, "trycmd")
        self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
        self.mox.StubOutWithMock(vm_utils.os.path, "exists")
        if self.session.is_local_connection:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=False, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=False, run_as_root=True)
            vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
            utils.trycmd('kpartx', '-a', '/dev/fakedev',
                         discard_warnings=True, run_as_root=True)
        else:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=True, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=True, run_as_root=True)

    def _check_vdi(self, vdi_ref, check_attached=True):
        # Verify the generated VDI is 10 MiB and (optionally) attached to
        # the test VM via a VBD.
        vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
        self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
        if check_attached:
            vbd_ref = vdi_rec["VBDs"][0]
            vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
            self.assertEqual(self.vm_ref, vbd_rec['VM'])
        else:
            self.assertEqual(0, len(vdi_rec["VBDs"]))

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_with_no_fs_given(self):
        # No fs type -> partition only, no mkfs/mkswap expectation.
        self._expect_parted_calls()

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name", "user",
                                          10, None)
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_swap(self):
        self._expect_parted_calls()
        utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name", "swap",
                                          10, "linux-swap")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral(self):
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name",
                                          "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ensure_cleanup_called(self):
        # If mkfs blows up, the half-built VDI must still be destroyed, and
        # a failure of that cleanup must not mask the original exception.
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True).AndRaise(test.TestingException)
        vm_utils.destroy_vdi(
            self.session,
            mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(test.TestingException, vm_utils._generate_disk,
                          self.session, {"uuid": "fake_uuid"},
                          self.vm_ref, "2", "name", "ephemeral", 10, "ext4")

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral_local_not_attached(self):
        # Local connection + no vm_ref: format via /dev/mapper and leave the
        # VDI unattached.
        self.session.is_local_connection = True
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          None, "2", "name", "ephemeral",
                                          10, "ext4")
        self._check_vdi(vdi_ref, check_attached=False)
class GenerateEphemeralTestCase(VMUtilsTestBase):
    """Tests for splitting an ephemeral-disk request into per-disk sizes and
    for generate_ephemeral's disk creation/cleanup behaviour (mox-based).
    """

    def setUp(self):
        super(GenerateEphemeralTestCase, self).setUp()
        self.session = "session"
        self.instance = "instance"
        self.vm_ref = "vm_ref"
        self.name_label = "name"
        self.ephemeral_name_label = "name ephemeral"
        self.userdevice = 4
        self.mox.StubOutWithMock(vm_utils, "_generate_disk")
        self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")

    def test_get_ephemeral_disk_sizes_simple(self):
        result = vm_utils.get_ephemeral_disk_sizes(20)
        expected = [20]
        self.assertEqual(expected, list(result))

    def test_get_ephemeral_disk_sizes_three_disks_2000(self):
        # Requests above the per-disk cap are split into 2000 GB chunks
        # plus a remainder.
        result = vm_utils.get_ephemeral_disk_sizes(4030)
        expected = [2000, 2000, 30]
        self.assertEqual(expected, list(result))

    def test_get_ephemeral_disk_sizes_two_disks_1024(self):
        result = vm_utils.get_ephemeral_disk_sizes(2048)
        expected = [1024, 1024]
        self.assertEqual(expected, list(result))

    def _expect_generate_disk(self, size, device, name_label):
        # Record one expected _generate_disk call; size is GB, the stubbed
        # call receives MB (size * 1024) and returns the device number.
        vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
                                str(device), name_label, 'ephemeral',
                                size * 1024, None).AndReturn(device)

    def test_generate_ephemeral_adds_one_disk(self):
        self._expect_generate_disk(20, self.userdevice,
                                   self.ephemeral_name_label)
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
                                    str(self.userdevice), self.name_label, 20)

    def test_generate_ephemeral_adds_multiple_disks(self):
        # Labels for extra disks get a " (N)" suffix.
        self._expect_generate_disk(2000, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(2000, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")
        self._expect_generate_disk(30, self.userdevice + 2,
                                   self.ephemeral_name_label + " (2)")
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
                                    str(self.userdevice), self.name_label,
                                    4030)

    def test_generate_ephemeral_cleans_up_on_error(self):
        # When the third disk fails, the two already-created VDIs
        # (devices 4 and 5) must be destroyed and the error re-raised.
        self._expect_generate_disk(1024, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(1024, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")

        vm_utils._generate_disk(
            self.session, self.instance, self.vm_ref,
            str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
            units.Mi, None).AndRaise(exception.NovaException)

        vm_utils.safe_destroy_vdis(self.session, [4, 5])

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
                          self.session, self.instance, self.vm_ref,
                          str(self.userdevice), self.name_label, 4096)
class FakeFile(object):
    """Minimal file stand-in that records operations performed on it.

    Tests inspect `_file_operations`, a list of (bound_method, argument)
    pairs in call order, to verify how the code under test drove the file.
    """

    def __init__(self):
        self._file_operations = []

    def _record(self, operation, argument):
        # Append one (method, arg) pair to the operation log.
        self._file_operations.append((operation, argument))

    def seek(self, offset):
        """Log a seek to `offset`; performs no real I/O."""
        self._record(self.seek, offset)
class StreamDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._stream_disk with a mocked-out builtin `open`."""

    def setUp(self):
        import __builtin__
        super(StreamDiskTestCase, self).setUp()
        self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
        self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
        self.mox.StubOutWithMock(vm_utils, '_write_partition')

        # NOTE(matelakat): This might hide the fail reason, as test runners
        # are unhappy with a mocked out open.
        self.mox.StubOutWithMock(__builtin__, 'open')
        self.image_service_func = self.mox.CreateMockAnything()

    def test_non_ami(self):
        # KERNEL images: no partition is written, and streaming starts at
        # offset 0.
        fake_file = FakeFile()

        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.KERNEL, None, 'dev')

        self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)

    def test_ami_disk(self):
        # DISK images: an MBR partition is written first and streaming
        # starts just past it (MBR_SIZE_BYTES).
        fake_file = FakeFile()

        vm_utils._write_partition("session", 100, 'dev')
        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.DISK, 100, 'dev')

        self.assertEqual(
            [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
            fake_file._file_operations)
class VMUtilsSRPath(VMUtilsTestBase):
    """Tests for vm_utils.get_sr_path with and without an explicit
    device_config path on the PBD record.
    """

    def setUp(self):
        super(VMUtilsSRPath, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False

    def test_defined(self):
        # PBD carries an explicit 'path' -> returned verbatim.
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi(
            'PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
                {'pbd_ref': {'device_config': {'path': 'sr_path'}}})

        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")

    def test_default(self):
        # No 'path' in device_config -> path derived from the SR uuid under
        # /var/run/sr-mount.
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi(
            'PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
                {'pbd_ref': {'device_config': {}}})
        self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
            {'uuid': 'sr_uuid', 'type': 'ext'})

        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session),
                         "/var/run/sr-mount/sr_uuid")
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_kernel_and_ramdisk covering the no-op,
    cache-hit and cache-miss paths.
    """

    def setUp(self):
        super(CreateKernelRamdiskTestCase, self).setUp()
        self.context = "context"
        self.session = FakeSession()
        self.instance = {"kernel_id": None, "ramdisk_id": None}
        self.name_label = "name"
        self.mox.StubOutWithMock(self.session, "call_plugin")
        # uuid4 is stubbed so the plugin args are deterministic.
        self.mox.StubOutWithMock(uuid, "uuid4")
        self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")

    def test_create_kernel_and_ramdisk_no_create(self):
        # Neither kernel nor ramdisk configured -> nothing is created.
        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                                                    self.session,
                                                    self.instance,
                                                    self.name_label)
        self.assertEqual((None, None), result)

    def test_create_kernel_and_ramdisk_create_both_cached(self):
        # Both images come straight from the dom0 cache plugin.
        kernel_id = "kernel"
        ramdisk_id = "ramdisk"
        self.instance["kernel_id"] = kernel_id
        self.instance["ramdisk_id"] = ramdisk_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("k")

        args_ramdisk = {}
        args_ramdisk['cached-image'] = ramdisk_id
        args_ramdisk['new-image-uuid'] = "fake_uuid2"
        uuid.uuid4().AndReturn("fake_uuid2")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_ramdisk).AndReturn("r")

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                                                    self.session,
                                                    self.instance,
                                                    self.name_label)
        self.assertEqual(("k", "r"), result)

    def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
        # Cache miss (plugin returns "") -> fall back to _fetch_disk_image.
        kernel_id = "kernel"
        self.instance["kernel_id"] = kernel_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("")

        kernel = {"kernel": {"file": "k"}}
        vm_utils._fetch_disk_image(self.context, self.session, self.instance,
                                   self.name_label, kernel_id,
                                   0).AndReturn(kernel)

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
                                                    self.session,
                                                    self.instance,
                                                    self.name_label)
        self.assertEqual(("k", None), result)
class ScanSrTestCase(VMUtilsTestBase):
    """Tests for SR scanning: the default-SR wrapper and _scan_sr's retry
    behaviour on SR_BACKEND_FAILURE_40.
    """

    @mock.patch.object(vm_utils, "_scan_sr")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
        mock_safe_find_sr.return_value = "sr_ref"

        self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))

        mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")

    def test_scan_sr_works(self):
        session = mock.Mock()
        vm_utils._scan_sr(session, "sr_ref")
        session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    def test_scan_sr_unknown_error_fails_once(self):
        # Errors other than SR_BACKEND_FAILURE_40 are not retried.
        session = mock.Mock()
        session.call_xenapi.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          vm_utils._scan_sr, session, "sr_ref")
        session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
        session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session.XenAPI.Failure = FakeException
        session.call_xenapi.side_effect = FakeException

        self.assertRaises(FakeException,
                          vm_utils._scan_sr, session, "sr_ref")

        # Four attempts with exponential backoff between them.
        session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(4, session.call_xenapi.call_count)
        mock_sleep.assert_has_calls([mock.call(2), mock.call(4),
                                     mock.call(8)])

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
        session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session.XenAPI.Failure = FakeException

        def fake_call_xenapi(*args):
            # First call raises, second succeeds.
            fake_call_xenapi.count += 1
            if fake_call_xenapi.count != 2:
                raise FakeException()
        fake_call_xenapi.count = 0

        session.call_xenapi.side_effect = fake_call_xenapi

        vm_utils._scan_sr(session, "sr_ref")

        session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(2, session.call_xenapi.call_count)
        mock_sleep.assert_called_once_with(2)
@mock.patch.object(flavors, 'extract_flavor',
                   return_value={
                       'memory_mb': 1024,
                       'vcpus': 1,
                       'vcpu_weight': 1.0,
                   })
class CreateVmTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_vm/destroy_vm; the class-level patch feeds
    every test a fixed flavor, so each test method takes mock_extract.
    """

    def test_vss_provider(self, mock_extract):
        # Windows guests get vm-data/allowvssprovider=false in xenstore_data;
        # the full expected VM.create record is pinned below.
        self.flags(vcpu_pin_set="2,3")
        session = _get_fake_session()
        instance = objects.Instance(uuid="uuid",
                                    os_type="windows",
                                    system_metadata={})

        with mock.patch.object(instance, 'get_flavor') as get:
            get.return_value = objects.Flavor._from_db_object(
                None, objects.Flavor(), test_flavor.fake_flavor)
            vm_utils.create_vm(session, instance, "label",
                               "kernel", "ramdisk")

        vm_rec = {
            'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1'},
            'PV_args': '',
            'memory_static_min': '0',
            'ha_restart_priority': '',
            'HVM_boot_policy': 'BIOS order',
            'PV_bootloader': '', 'tags': [],
            'VCPUs_max': '4',
            'memory_static_max': '1073741824',
            'actions_after_shutdown': 'destroy',
            'memory_dynamic_max': '1073741824',
            'user_version': '0',
            'xenstore_data': {'vm-data/allowvssprovider': 'false'},
            'blocked_operations': {},
            'is_a_template': False,
            'name_description': '',
            'memory_dynamic_min': '1073741824',
            'actions_after_crash': 'destroy',
            'memory_target': '1073741824',
            'PV_ramdisk': '',
            'PV_bootloader_args': '',
            'PCI_bus': '',
            'other_config': {'nova_uuid': 'uuid'},
            'name_label': 'label',
            'actions_after_reboot': 'restart',
            'VCPUs_at_startup': '4',
            'HVM_boot_params': {'order': 'dc'},
            'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
                         'timeoffset': '0', 'viridian': 'true',
                         'acpi': 'true'},
            'PV_legacy_args': '',
            'PV_kernel': '',
            'affinity': '',
            'recommendations': '',
            'ha_always_run': False
        }
        session.call_xenapi.assert_called_once_with("VM.create", vm_rec)

    def test_invalid_cpu_mask_raises(self, mock_extract):
        # A non-numeric vcpu_pin_set must be rejected before VM.create.
        self.flags(vcpu_pin_set="asdf")
        session = mock.Mock()
        instance = objects.Instance(uuid=str(uuid.uuid4()),
                                    system_metadata={})
        with mock.patch.object(instance, 'get_flavor') as get:
            get.return_value = objects.Flavor._from_db_object(
                None, objects.Flavor(), test_flavor.fake_flavor)
            self.assertRaises(exception.Invalid,
                              vm_utils.create_vm,
                              session, instance, "label",
                              "kernel", "ramdisk")

    def test_destroy_vm(self, mock_extract):
        session = mock.Mock()
        instance = objects.Instance(uuid=str(uuid.uuid4()))

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")

    def test_destroy_vm_silently_fails(self, mock_extract):
        # XenAPI failures during destroy are swallowed, not propagated.
        session = mock.Mock()
        exc = test.TestingException()
        session.XenAPI.Failure = test.TestingException
        session.VM.destroy.side_effect = exc
        instance = objects.Instance(uuid=str(uuid.uuid4()))

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")
class DetermineVmModeTestCase(VMUtilsTestBase):
    """Checks vm_utils.determine_vm_mode: explicit vm_mode wins, then the
    OS type, then the disk image type, defaulting to HVM.
    """

    def _assert_mode(self, expected, instance, disk_image_type=None):
        # One-line helper so each test states only its inputs and outcome.
        self.assertEqual(expected,
                         vm_utils.determine_vm_mode(instance,
                                                    disk_image_type))

    def test_determine_vm_mode_returns_xen_mode(self):
        self._assert_mode(vm_mode.XEN, {"vm_mode": "xen"})

    def test_determine_vm_mode_returns_hvm_mode(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": "hvm"})

    def test_determine_vm_mode_returns_xen_for_linux(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": "linux"})

    def test_determine_vm_mode_returns_hvm_for_windows(self):
        self._assert_mode(vm_mode.HVM,
                          {"vm_mode": None, "os_type": "windows"})

    def test_determine_vm_mode_returns_hvm_by_default(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": None, "os_type": None})

    def test_determine_vm_mode_returns_xen_for_VHD(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK_VHD)

    def test_determine_vm_mode_returns_xen_for_DISK(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK)
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
    """Tests for the thin vm_utils wrappers around individual XenAPI calls
    and for update_vdi_virtual_size's grow-only resize policy.
    """

    def test_vm_get_vbd_refs(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
        session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")

    def test_vbd_get_rec(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
        session.call_xenapi.assert_called_once_with("VBD.get_record",
                                                    "vbd_ref")

    def test_vdi_get_rec(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_record",
                                                    "vdi_ref")

    def test_vdi_snapshot(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.snapshot",
                                                    "vdi_ref", {})

    def test_vdi_get_virtual_size(self):
        # XenAPI returns the size as a string; the wrapper converts to int.
        session = mock.Mock()
        session.call_xenapi.return_value = "123"
        self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
                                                    "ref")

    @mock.patch.object(vm_utils, '_get_resize_func_name')
    def test_vdi_resize(self, mock_get_resize_func_name):
        session = mock.Mock()
        mock_get_resize_func_name.return_value = "VDI.fake"
        vm_utils._vdi_resize(session, "ref", 123)
        # Note the new size is passed as a string.
        session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
        # Current size just below 1 GB, requested 1 GB -> resize happens.
        mock_get_size.return_value = (1024 ** 3) - 1
        instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
                                                       mock_resize):
        # Already at the requested size -> no resize call.
        mock_get_size.return_value = 1024 ** 3
        instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
                                                       mock_resize):
        # Shrinking is not supported -> ResizeError.
        mock_get_size.return_value = 1024 ** 3 + 1
        instance = {"uuid": "a"}

        self.assertRaises(exception.ResizeError,
                          vm_utils.update_vdi_virtual_size,
                          "s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
    """Tests for get_vdi_for_vm_safely; the class-level patches are passed
    to every test bottom-up (vbd refs, vbd rec, vdi rec).
    """

    def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
                                   vbd_get_rec, vdi_get_rec):
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
        vdi_get_rec.return_value = {}

        result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
        self.assertEqual(('vdi_ref', {}), result)

        vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
        # The first VBD already matches userdevice '0', so only "a" is read.
        vbd_get_rec.assert_called_once_with(session, "a")
        vdi_get_rec.assert_called_once_with(session, "vdi_ref")

    def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
                                         vbd_get_rec, vdi_get_rec):
        # No VBD with userdevice '1': all VBDs are examined, no VDI is read,
        # and a NovaException is raised.
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}

        self.assertRaises(exception.NovaException,
                          vm_utils.get_vdi_for_vm_safely,
                          session, "vm_ref", userdevice='1')

        self.assertEqual([], vdi_get_rec.call_args_list)
        self.assertEqual(2, len(vbd_get_rec.call_args_list))
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
    """Tests for get_all_vdi_uuids_for_vm, including userdevice filtering."""

    def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
                                        vbd_get_rec, vdi_get_uuid):
        # Fake two VBDs at userdevices "0" and "2"; each VBD ref maps to a
        # VDI ref "vdi_ref_<vbd>", whose uuid is the ref itself.
        def fake_vbd_get_rec(session, vbd_ref):
            return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}

        def fake_vdi_get_uuid(session, vdi_ref):
            return vdi_ref

        vm_get_vbd_refs.return_value = ["0", "2"]
        vbd_get_rec.side_effect = fake_vbd_get_rec
        vdi_get_uuid.side_effect = fake_vdi_get_uuid

    def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
                                            vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
        expected = ['vdi_ref_0', 'vdi_ref_2']
        self.assertEqual(expected, list(result))

    def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
                                                 vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        # min_userdevice=1 excludes the VBD at userdevice "0".
        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
                                                   min_userdevice=1)
        expected = ["vdi_ref_2"]
        self.assertEqual(expected, list(result))
class GetAllVdisTestCase(VMUtilsTestBase):
    """Tests for _get_all_vdis_in_sr."""

    def test_get_all_vdis_in_sr(self):
        # get_rec returns None for ref "1"; only refs with a record are
        # yielded, so just ("2", "vdi_rec_2") survives.
        def fake_get_rec(record_type, ref):
            if ref == "2":
                return "vdi_rec_2"

        session = mock.Mock()
        session.call_xenapi.return_value = ["1", "2"]
        session.get_rec.side_effect = fake_get_rec

        sr_ref = "sr_ref"
        actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
        self.assertEqual(actual, [('2', 'vdi_rec_2')])

        session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
class VDIAttachedHere(VMUtilsTestBase):
    """Tests for the vdi_attached_here context manager."""

    @mock.patch.object(vm_utils, 'destroy_vbd')
    @mock.patch.object(vm_utils, '_get_this_vm_ref')
    @mock.patch.object(vm_utils, 'create_vbd')
    @mock.patch.object(vm_utils, '_remap_vbd_dev')
    @mock.patch.object(vm_utils, '_wait_for_device')
    @mock.patch.object(utils, 'execute')
    def test_sync_called(self, mock_execute, mock_wait_for_device,
                         mock_remap_vbd_dev, mock_create_vbd,
                         mock_get_this_vm_ref, mock_destroy_vbd):
        # Leaving the context manager must flush the device with `sync`.
        session = _get_fake_session()
        with vm_utils.vdi_attached_here(session, 'vdi_ref'):
            pass
        mock_execute.assert_called_with('sync', run_as_root=True)
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
    """Tests for the snapshot_attached_here context manager, its
    implementation, and the VHD-coalesce wait helpers.
    """

    @mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
    def test_snapshot_attached_here(self, mock_impl):
        # The public context manager forwards its arguments (with defaults
        # userdevice='0', callback=None) to the _impl generator.
        def fake_impl(session, instance, vm_ref, label, userdevice,
                      post_snapshot_callback):
            self.assertEqual("session", session)
            self.assertEqual("instance", instance)
            self.assertEqual("vm_ref", vm_ref)
            self.assertEqual("label", label)
            self.assertEqual('0', userdevice)
            self.assertIsNone(post_snapshot_callback)
            yield "fake"

        mock_impl.side_effect = fake_impl

        with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
                                             "label") as result:
            self.assertEqual("fake", result)

        mock_impl.assert_called_once_with("session", "instance", "vm_ref",
                                          "label", '0', None)

    @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
    @mock.patch.object(vm_utils, 'safe_destroy_vdis')
    @mock.patch.object(vm_utils, '_walk_vdi_chain')
    @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
    @mock.patch.object(vm_utils, '_vdi_get_uuid')
    @mock.patch.object(vm_utils, '_vdi_snapshot')
    @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
    def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
            mock_vdi_snapshot, mock_vdi_get_uuid,
            mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
            mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
        session = "session"
        instance = {"uuid": "uuid"}
        mock_callback = mock.Mock()

        mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
                                                   {"SR": "sr_ref",
                                                    "uuid": "vdi_uuid"})
        mock_vdi_snapshot.return_value = "snap_ref"
        mock_vdi_get_uuid.return_value = "snap_uuid"
        mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]

        # Raise inside the context to prove cleanup (snapshot destruction,
        # stale-snapshot deletion) still runs on the error path.
        # NOTE(review): an unreachable `self.assertTrue(False)` that sat
        # after the raise (dead code) has been removed.
        try:
            with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
                    "label", '2', mock_callback) as result:
                self.assertEqual(["a", "b"], result)
                raise test.TestingException()
        except test.TestingException:
            pass

        mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
                                                           '2')
        mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
        mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
                "sr_ref", "vdi_ref", ['a', 'b'])
        mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
        mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
                                              mock.call(session,
                                                        "snap_uuid")])
        mock_callback.assert_called_once_with(
            task_state="image_pending_upload")
        mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
        mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
                instance, ['a', 'b'], "sr_ref")

    @mock.patch.object(greenthread, 'sleep')
    def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
        # Single-element chain: nothing to coalesce, no waiting.
        instance = {"uuid": "fake"}
        vm_utils._wait_for_vhd_coalesce("session", instance,
                "sr_ref", "vdi_ref", ["uuid"])
        self.assertFalse(mock_sleep.called)

    @mock.patch.object(vm_utils, '_count_children')
    @mock.patch.object(greenthread, 'sleep')
    def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
                                                   mock_count):
        # Parent has multiple children (a snapshot exists): no coalesce is
        # expected, so no sleeping.
        mock_count.return_value = 2
        instance = {"uuid": "fake"}

        vm_utils._wait_for_vhd_coalesce("session", instance,
                "sr_ref", "vdi_ref", ["uuid1", "uuid2"])

        self.assertFalse(mock_sleep.called)
        self.assertTrue(mock_count.called)

    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
    @mock.patch.object(vm_utils, '_count_children')
    @mock.patch.object(vm_utils, '_scan_sr')
    def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
            mock_count, mock_get_vhd_parent_uuid, mock_sleep):
        # Coalesce never completes: 20 scan/sleep attempts, then raise.
        mock_count.return_value = 1
        instance = {"uuid": "fake"}

        self.assertRaises(exception.NovaException,
                vm_utils._wait_for_vhd_coalesce, "session", instance,
                "sr_ref", "vdi_ref", ["uuid1", "uuid2"])

        self.assertTrue(mock_count.called)
        self.assertEqual(20, mock_sleep.call_count)
        self.assertEqual(20, mock_scan_sr.call_count)

    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
    @mock.patch.object(vm_utils, '_count_children')
    @mock.patch.object(vm_utils, '_scan_sr')
    def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
            mock_count, mock_get_vhd_parent_uuid, mock_sleep):
        # Parent uuid is wrong on the first poll, correct on the second:
        # one sleep, two SR scans, then success.
        mock_count.return_value = 1
        instance = {"uuid": "fake"}
        mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]

        vm_utils._wait_for_vhd_coalesce("session", instance,
                "sr_ref", "vdi_ref", ["uuid1", "uuid2"])

        self.assertEqual(1, mock_sleep.call_count)
        self.assertEqual(2, mock_scan_sr.call_count)

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_count_children(self, mock_get_all_vdis_in_sr):
        # Two of the three VDIs point at 'parent1' via sm_config/vhd-parent.
        vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
                ('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
                ('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
        mock_get_all_vdis_in_sr.return_value = vdis
        self.assertEqual(2, vm_utils._count_children('session',
                                                     'parent1', 'sr'))
class ImportMigratedDisksTestCase(VMUtilsTestBase):
    """Tests for the vm_utils helpers that re-import VHDs after a
    migration.
    """

    @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
    @mock.patch.object(vm_utils, '_import_migrated_root_disk')
    def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
        # NOTE(review): the body of this test was corrupted in the source
        # file (replaced by a stray "|"). It has been reconstructed to
        # verify that import_all_migrated_disks aggregates the root and
        # ephemeral import results — confirm against upstream history.
        session = "session"
        instance = "instance"
        mock_root.return_value = "root_vdi"
        mock_ephemeral.return_value = ["a", "b"]

        result = vm_utils.import_all_migrated_disks(session, instance)

        expected = {'root': "root_vdi", 'ephemerals': ["a", "b"]}
        self.assertEqual(expected, result)
        mock_root.assert_called_once_with(session, instance)
        mock_ephemeral.assert_called_once_with(session, instance)

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrated_root_disk(self, mock_migrate):
        # Root disk: chain label is the bare instance uuid.
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name"}

        result = vm_utils._import_migrated_root_disk("s", instance)

        self.assertEqual("foo", result)
        mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
                                             "name")

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrate_ephemeral_disks(self, mock_migrate):
        # 4000 GB of ephemeral -> two disks, imported as userdevices 4 and 5
        # with "_ephemeral_N" chain labels.
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}

        result = vm_utils._import_migrate_ephemeral_disks("s", instance)

        self.assertEqual({'4': 'foo', '5': 'foo'}, result)
        expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
                                    "ephemeral", "name ephemeral (1)"),
                          mock.call("s", instance, "uuid_ephemeral_2",
                                    "ephemeral", "name ephemeral (2)")]
        self.assertEqual(expected_calls, mock_migrate.call_args_list)

    @mock.patch.object(vm_utils, '_set_vdi_info')
    @mock.patch.object(vm_utils, 'scan_default_sr')
    @mock.patch.object(vm_utils, 'get_sr_path')
    def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
                                  mock_set_info):
        # Happy path: the dom0 migration plugin moves the VHD chain into the
        # SR, the SR is rescanned, and the resulting VDI is labelled.
        session = mock.Mock()
        instance = {"uuid": "uuid"}
        session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
        session.call_xenapi.return_value = "vdi_ref"
        mock_get_sr_path.return_value = "sr_path"

        result = vm_utils._import_migrated_vhds(session, instance,
                'chain_label', 'disk_type', 'vdi_label')

        expected = {'uuid': "a", 'ref': "vdi_ref"}
        self.assertEqual(expected, result)
        mock_get_sr_path.assert_called_once_with(session)
        session.call_plugin_serialized.assert_called_once_with('migration',
                'move_vhds_into_sr', instance_uuid='chain_label',
                sr_path='sr_path', uuid_stack=mock.ANY)
        mock_scan_sr.assert_called_once_with(session)
        session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
        mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
                'vdi_label', 'disk_type', instance)

    def test_get_vhd_parent_uuid_rec_provided(self):
        # When a VDI record is supplied, no extra XenAPI call is made.
        session = mock.Mock()
        vdi_ref = 'vdi_ref'
        vdi_rec = {'sm_config': {}}
        self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
                                                        vdi_ref,
                                                        vdi_rec))
        self.assertFalse(session.call_xenapi.called)
class MigrateVHDTestCase(VMUtilsTestBase):
def _assert_transfer_called(self, session, label):
session.call_plugin_serialized.assert_called_once_with(
'migration', 'transfer_vhd', instance_uuid=label, host="dest",
vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
def test_migrate_vhd_root(self):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2)
self._assert_transfer_called(session, "a")
def test_migrate_vhd_ephemeral(self):
session = mock.Mock()
instance = {"uuid": "a"}
vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
"sr_path", 2, 2)
self._assert_transfer_called(session, "a_ephemeral_2")
def test_migrate_vhd_converts_exceptions(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_plugin_serialized.side_effect = test.TestingException()
instance = {"uuid": "a"}
self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
session, instance, "vdi_uuid", "dest", "sr_path", 2)
self._assert_transfer_called(session, "a")
class StripBaseMirrorTestCase(VMUtilsTestBase):
def test_strip_base_mirror_from_vdi_works(self):
session = mock.Mock()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
def test_strip_base_mirror_from_vdi_hides_error(self):
session = mock.Mock()
session.XenAPI.Failure = test.TestingException
session.call_xenapi.side_effect = test.TestingException()
vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
session.call_xenapi.assert_called_once_with(
"VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
@mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
def test_strip_base_mirror_from_vdis(self, mock_strip):
def call_xenapi(method, arg):
if method == "VM.get_VBDs":
return ['VBD_ref_1', 'VBD_ref_2']
if method == "VBD.get_VDI":
return 'VDI' + arg[3:]
return "Unexpected call_xenapi: %s.%s" % (method, arg)
session = mock.Mock()
session.call_xenapi.side_effect = call_xenapi
vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
expected = [mock.call('VM.get_VBDs', "vm_ref"),
mock.call('VBD.get_VDI', "VBD_ref_1"),
mock.call('VBD.get_VDI', "VBD_ref_2")]
self.assertEqual(expected, session.call_xenapi.call_args_list)
expected = [mock.call(session, "VDI_ref_1"),
mock.call(session, "VDI_ref_2")]
self.assertEqual(expected, mock_strip.call_args_list)
class DeviceIdTestCase(VMUtilsTestBase):
def test_device_id_is_none_if_not_specified_in_meta_data(self):
image_meta = {}
session = mock.Mock()
session.product_version = (6, 1, 0)
self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
image_meta = {'xenapi_device_id': '0002'}
session = mock.Mock()
session.product_version = (6, 2, 0)
self.assertEqual('0002',
vm_utils.get_vm_device_id(session, image_meta))
session.product_version = (6, 3, 1)
self.assertEqual('0002',
vm_utils.get_vm_device_id(session, image_meta))
def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
image_meta = {'xenapi_device_id': '0002'}
session = mock.Mock()
session.product_version = (6, 0)
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 0002 specified is not supported by "
"hypervisor version (6, 0)", exc.message)
session.product_version = ('6a')
exc = self.assertRaises(exception.NovaException,
vm_utils.get_vm_device_id, session, image_meta)
self.assertEqual("Device id 0002 specified is not supported by "
"hypervisor version 6a", exc.message)
class CreateVmRecordTestCase(VMUtilsTestBase):
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_linux(self, mock_extract_flavor):
instance = objects.Instance(uuid="uuid123",
os_type="linux")
self._test_create_vm_record(mock_extract_flavor, instance, False)
@mock.patch.object(flavors, 'extract_flavor')
def test_create_vm_record_windows(self, mock_extract_flavor):
instance = objects.Instance(uuid="uuid123",
os_type="windows")
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor._from_db_object(
None, objects.Flavor(), test_flavor.fake_flavor)
self._test_create_vm_record(mock_extract_flavor, instance, True)
def _test_create_vm_record(self, mock_extract_flavor, instance,
is_viridian):
session = _get_fake_session()
flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
mock_extract_flavor.return_value = flavor
with mock.patch.object(instance, 'get_flavor') as get:
get.return_value = objects.Flavor(memory_mb=1024,
vcpus=1,
vcpu_weight=2)
vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
device_id="0002")
is_viridian_str = str(is_viridian).lower()
expected_vm_rec = {
'VCPUs_params': {'cap': '0', 'weight': '2'},
'PV_args': '',
'memory_static_min': '0',
'ha_restart_priority': '',
'HVM_boot_policy': 'BIOS order',
'PV_bootloader': '',
'tags': [],
'VCPUs_max': '1',
'memory_static_max': '1073741824',
'actions_after_shutdown': 'destroy',
'memory_dynamic_max': '1073741824',
'user_version': '0',
'xenstore_data': {'vm-data/allowvssprovider': 'false'},
'blocked_operations': {},
'is_a_template': False,
'name_description': '',
'memory_dynamic_min': '1073741824',
'actions_after_crash': 'destroy',
'memory_target': '1073741824',
'PV_ramdisk': '',
'PV_bootloader_args': '',
'PCI_bus': '',
'other_config': {'nova_uuid': 'uuid123'},
'name_label': 'name',
'actions_after_reboot': 'restart',
'VCPUs_at_startup': '1',
'HVM_boot_params': {'order': 'dc'},
'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
'timeoffset': '0', 'viridian': is_viridian_str,
'acpi': 'true', 'device_id': '0002'},
'PV_legacy_args': '',
'PV_kernel': '',
'affinity': '',
'recommendations': '',
'ha_always_run': False}
session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
def test_list_vms(self):
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
fake.create_vm("foo1", "Halted")
vm_ref = fake.create_vm("foo2", "Running")
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.list_vms(driver._session))
# Will have 3 VMs - but one is Dom0 and one is not running on the host
self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
self.assertEqual(len(result), 1)
result_keys = [key for (key, value) in result]
self.assertIn(vm_ref, result_keys)
class ChildVHDsTestCase(test.NoDBTestCase):
all_vdis = [
("my-vdi-ref",
{"uuid": "my-uuid", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("non-parent",
{"uuid": "uuid-1", "sm_config": {},
"is_a_snapshot": False, "other_config": {}}),
("diff-parent",
{"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child",
{"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": False, "other_config": {}}),
("child-snap",
{"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
"is_a_snapshot": True, "other_config": {}}),
]
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_defaults(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
self.assertEqual(['uuid-child', 'uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_only_snapshots(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
@mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
def test_child_vhds_chain(self, mock_get_all):
mock_get_all.return_value = self.all_vdis
result = vm_utils._child_vhds("session", "sr_ref",
["my-uuid", "other-uuid"], old_snapshots_only=True)
self.assertEqual(['uuid-child-snap'], result)
def test_is_vdi_a_snapshot_works(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {}}
self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_base_images_false(self):
vdi_rec = {"is_a_snapshot": True,
"other_config": {"image-id": "fake"}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
vdi_rec = {"is_a_snapshot": False,
"other_config": {}}
self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, '_walk_vdi_chain')
@mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
instance = {"uuid": "fake"}
mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
vm_utils.remove_old_snapshots("session", instance, "vm_ref")
mock_delete.assert_called_once_with("session", instance,
["uuid1", "uuid2"], "sr_ref")
mock_get.assert_called_once_with("session", "vm_ref")
mock_walk.assert_called_once_with("session", "vdi")
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
instance = {"uuid": "fake"}
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid"], "sr")
self.assertFalse(mock_child.called)
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
instance = {"uuid": "fake"}
mock_child.return_value = []
vm_utils._delete_snapshots_in_vdi_chain("session", instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with("session", "sr", ["uuid2"],
old_snapshots_only=True)
@mock.patch.object(vm_utils, '_scan_sr')
@mock.patch.object(vm_utils, 'safe_destroy_vdis')
@mock.patch.object(vm_utils, '_child_vhds')
def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
mock_destroy, mock_scan):
instance = {"uuid": "fake"}
mock_child.return_value = ["suuid1", "suuid2"]
session = mock.Mock()
session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
vm_utils._delete_snapshots_in_vdi_chain(session, instance,
["uuid1", "uuid2"], "sr")
mock_child.assert_called_once_with(session, "sr", ["uuid2"],
old_snapshots_only=True)
session.VDI.get_by_uuid.assert_has_calls([
mock.call("suuid1"), mock.call("suuid2")])
mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
mock_scan.assert_called_once_with(session, "sr")
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
session.product_brand = brand
session.product_version = version
return vm_utils._get_resize_func_name(session)
def _test_is_resize(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize", result)
def _test_is_resize_online(self, brand, version):
result = self._call_get_resize_func_name(brand, version)
self.assertEqual("VDI.resize_online", result)
def test_xenserver_5_5(self):
self._test_is_resize_online("XenServer", (5, 5, 0))
def test_xenserver_6_0(self):
self._test_is_resize("XenServer", (6, 0, 0))
def test_xcp_1_1(self):
self._test_is_resize_online("XCP", (1, 1, 0))
def test_xcp_1_2(self):
self._test_is_resize("XCP", (1, 2, 0))
def test_xcp_2_0(self):
self._test_is_resize("XCP", (2, 0, 0))
def test_random_brand(self):
self._test_is_resize("asfd", (1, 1, 0))
def test_default(self):
self._test_is_resize(None, None)
def test_empty(self):
self._test_is_resize("", "")
def test_bad_version(self):
self._test_is_resize("XenServer", "asdf")
class VMInfoTests(VMUtilsTestBase):
def setUp(self):
super(VMInfoTests, self).setUp()
self.session = mock.Mock()
def test_get_power_state_valid(self):
# Save on test setup calls by having these simple tests in one method
self.session.call_xenapi.return_value = "Running"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.RUNNING)
self.session.call_xenapi.return_value = "Halted"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SHUTDOWN)
self.session.call_xenapi.return_value = "Paused"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.PAUSED)
self.session.call_xenapi.return_value = "Suspended"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.SUSPENDED)
self.session.call_xenapi.return_value = "Crashed"
self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
power_state.CRASHED)
def test_get_power_state_invalid(self):
self.session.call_xenapi.return_value = "Invalid"
self.assertRaises(KeyError,
vm_utils.get_power_state, self.session, "ref")
_XAPI_record = {'power_state': 'Running',
'memory_static_max': str(10 << 10),
'memory_dynamic_max': str(9 << 10),
'VCPUs_max': '5'}
def test_compile_info(self):
def call_xenapi(method, *args):
if method.startswith('VM.get_') and args[0] == 'dummy':
return self._XAPI_record[method[7:]]
self.session.call_xenapi.side_effect = call_xenapi
info = vm_utils.compile_info(self.session, "dummy")
self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING,
max_mem_kb=10L, mem_kb=9L,
num_cpu='5', cpu_time_ns=0),
info)
| session = "session"
instance = "instance"
mock_root.return_value = "root_vdi"
mock_ephemeral.return_value = ["a", "b"]
result = vm_utils.import_all_migrated_disks(session, instance)
expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
self.assertEqual(expected, result)
mock_root.assert_called_once_with(session, instance)
mock_ephemeral.assert_called_once_with(session, instance) |
alert.py | #!/usr/bin/python3
# alert.py
import math
from data import mongo
from data import gdacs
from data import wildfires
import geopy.distance
import pymongo
from time import sleep
from datetime import datetime
from config import MONGODB_USER, MONGODB_PASS
def monitor_danger(time_threshold=5 * 60, distance_thresholds={"hurricanes": 200, "floods": 50, "wildfires": 50}):
|
while True:
gdacs.download_geojson()
documents = gdacs.get_disasters() + wildfires.get_wildfires()
mongo.add_disaster_documents(documents)
client = pymongo.MongoClient("mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASS + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
#for user in client["users"].list_collection_names():
# mongo.cleanup_user(user)
for disaster in client["disasters"].list_collection_names():
mongo.cleanup_disaster(disaster)
db = client["alerts"]
user_collection = db["users"]
user_collection.delete_many({})
danger = monitor_danger()
if len(danger) > 0:
user_collection.insert_many(danger)
client.close()
sleep(300) | client = pymongo.MongoClient("mongodb+srv://" + MONGODB_USER + ":" + MONGODB_PASS + "@alrt-ypzt7.mongodb.net/test?retryWrites=true&w=majority")
users = client["users"]
threshold_difference = datetime.now().timestamp() - time_threshold
output = []
for user in users.list_collection_names():
results = list(users[user].find({"time": {"$gte": threshold_difference}}))
if len(results) == 0:
# Location off
last_location = users[user].find().sort("time", pymongo.DESCENDING).limit(1)[0]
disasters = client["disasters"]
for disaster in disasters.list_collection_names():
for x in disasters[disaster].find():
if (disaster == "earthquakes" and geopy.distance.distance((x["lat"], x["lon"]), (last_location["lat"], last_location["lon"])).mi < math.exp(x["magnitude"] / 1.01 - 0.13) * 1000 * 0.00062137) or (disaster != "earthquakes" and geopy.distance.distance((x["lat"], x["lon"]), (last_location["lat"], last_location["lon"])).mi < distance_thresholds[disaster]):
if x["time"] >= last_location["time"] - 60 * 60 * 24:
output.append({"user": user, "last_location": last_location, "disaster": x})
client.close()
return output |
web3.js | export default (state = [], action) => {
switch (action.type) { | }
} | case 'INIT_WEB3':
return action.payload
default:
return state |
SQFormCheckboxGroup.js | import React from 'react';
import PropTypes from 'prop-types';
import InputLabel from '@material-ui/core/InputLabel';
import Grid from '@material-ui/core/Grid';
import FormHelperText from '@material-ui/core/FormHelperText';
import FormGroup from '@material-ui/core/FormGroup';
import {SQFormCheckboxGroupItem, useSQFormContext} from '../../../src';
import {useForm} from './useForm';
function | ({
name,
groupLabel,
isRequired = false,
onChange,
shouldDisplayInRow = false,
shouldUseSelectAll = false,
size = 'auto',
children
}) {
const {
fieldHelpers: {handleChange, HelperTextComponent}
} = useForm({
name,
isRequired,
onChange
});
const {setFieldValue} = useSQFormContext();
const handleSelectAllChange = event => {
if (!event.target.checked) {
setFieldValue(name, []);
return;
}
const enabledGroupValues = children.reduce((acc, checkboxOption) => {
const {value, isDisabled} = checkboxOption;
if (!isDisabled) {
return [...acc, value];
}
return acc;
}, []);
setFieldValue(name, enabledGroupValues);
};
const childrenToCheckboxGroupItems = () => {
const providedCheckboxItems = children.map(checkboxOption => {
const {label, value, isDisabled, inputProps} = checkboxOption;
return (
<SQFormCheckboxGroupItem
groupName={name}
label={label}
value={value}
isRowDisplay={shouldDisplayInRow}
onChange={handleChange}
isDisabled={isDisabled}
inputProps={inputProps}
key={`SQFormCheckboxGroupItem_${value}`}
/>
);
});
if (shouldUseSelectAll) {
return [
<SQFormCheckboxGroupItem
groupName="selectAll"
label="All"
value="selectAll"
isRowDisplay={shouldDisplayInRow}
onChange={handleSelectAllChange}
/>,
...providedCheckboxItems
];
}
return providedCheckboxItems;
};
return (
<Grid item sm={size}>
<InputLabel id={groupLabel.toLowerCase()}>{groupLabel}</InputLabel>
<FormGroup row={shouldDisplayInRow}>
{childrenToCheckboxGroupItems()}
</FormGroup>
<FormHelperText required={isRequired}>
{HelperTextComponent}
</FormHelperText>
</Grid>
);
}
SQFormCheckboxGroup.propTypes = {
/** Name of the checkbox group */
name: PropTypes.string.isRequired,
/** Label to display above the group */
groupLabel: PropTypes.string.isRequired,
/** Whether this a selection in this group is required */
isRequired: PropTypes.bool,
/** Function to call on value change */
onChange: PropTypes.func,
/** Whether to display the group in a row */
shouldDisplayInRow: PropTypes.bool,
/** Whether to display the select all checkbox */
shouldUseSelectAll: PropTypes.bool,
/** Size of the input given full-width is 12. */
size: PropTypes.oneOf(['auto', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
/** Children must be an array of object with checkbox label and value information */
children: PropTypes.arrayOf(
PropTypes.shape({
label: PropTypes.string.isRequired,
value: PropTypes.any.isRequired,
isDisabled: PropTypes.bool,
inputProps: PropTypes.object
}).isRequired
).isRequired
};
export default SQFormCheckboxGroup;
| SQFormCheckboxGroup |
loss.py | #!/usr/bin/env python
"""
#
#
# File Name: loss_function.py
# Description:
"""
import torch
import torch.nn.functional as F
import math
def kl_divergence(mu, logvar):
"""
Computes the KL-divergence of
some element z.
KL(q||p) = -∫ q(z) log [ p(z) / q(z) ]
= -E[log p(z) - log q(z)]
"""
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
def binary_cross_entropy(recon_x, x):
return -torch.sum(x * torch.log(recon_x + 1e-8) + (1 - x) * torch.log(1 - recon_x + 1e-8), dim=-1)
def elbo(recon_x, x, z_params, binary=True):
"" |
def elbo_scIVA(recon_x, x, gamma, c_params, z_params, binary=True):
"""
L elbo(x) = Eq(z,c|x)[ log p(x|z) ] - KL(q(z,c|x)||p(z,c))
= Eq(z,c|x)[ log p(x|z) + log p(z|c) + log p(c) - log q(z|x) - log q(c|x) ]
"""
mu_c, var_c, pi = c_params; #print(mu_c.size(), var_c.size(), pi.size())
n_centroids = pi.size(1)
mu, logvar = z_params
mu_expand = mu.unsqueeze(2).expand(mu.size(0), mu.size(1), n_centroids)
logvar_expand = logvar.unsqueeze(2).expand(logvar.size(0), logvar.size(1), n_centroids)
# log p(x|z)
if binary:
likelihood = -binary_cross_entropy(recon_x, x) #;print(logvar_expand.size()) #, torch.exp(logvar_expand)/var_c)
else:
likelihood = -F.mse_loss(recon_x, x)
# log p(z|c)
logpzc = -0.5*torch.sum(gamma*torch.sum(math.log(2*math.pi) + \
torch.log(var_c) + \
torch.exp(logvar_expand)/var_c + \
(mu_expand-mu_c)**2/var_c, dim=1), dim=1)
# log p(c)
logpc = torch.sum(gamma*torch.log(pi), 1)
# log q(z|x) or q entropy
qentropy = -0.5*torch.sum(1+logvar+math.log(2*math.pi), 1)
# log q(c|x)
logqcx = torch.sum(gamma*torch.log(gamma), 1)
kld = -logpzc - logpc + qentropy + logqcx
return torch.sum(likelihood), torch.sum(kld)
| "
elbo = likelihood - kl_divergence
L = -elbo
Params:
recon_x:
x:
"""
mu, logvar = z_params
kld = kl_divergence(mu, logvar)
if binary:
likelihood = -binary_cross_entropy(recon_x, x)
else:
likelihood = -F.mse_loss(recon_x, x)
return torch.sum(likelihood), torch.sum(kld)
# return likelihood, kld
|
main.go | package main
import (
"context"
"errors"
"fmt"
"log"
"math/rand"
"net/http"
"os"
"os/signal"
"time"
nats "github.com/nats-io/go-nats"
// This is the package containing the generated *.pb.go and *.nrpc.go
// files.
"github.com/rapidloop/nrpc/examples/metrics_helloworld/helloworld"
// If you've used the prometheus plugin when generating the code, you
// can import the HTTP handler of Prometheus to serve up the metrics.
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// server implements the helloworld.GreeterServer interface.
type server struct{}
// SayHello is an implementation of the SayHello method from the definition of
// the Greeter service.
func (s *server) SayHello(ctx context.Context, req helloworld.HelloRequest) (resp helloworld.HelloReply, err error) {
resp.Message = "Hello " + req.Name
if rand.Intn(10) < 7 { // will fail 70% of the time
err = errors.New("random failure simulated")
}
time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) // random delay
return
}
func main() | {
// Connect to the NATS server.
nc, err := nats.Connect(nats.DefaultURL, nats.Timeout(5*time.Second))
if err != nil {
log.Fatal(err)
}
defer nc.Close()
// Our server implementation.
s := &server{}
rand.Seed(time.Now().UnixNano())
// The NATS handler from the helloworld.nrpc.proto file.
h := helloworld.NewGreeterHandler(context.TODO(), nc, s)
// Start a NATS subscription using the handler. You can also use the
// QueueSubscribe() method for a load-balanced set of servers.
sub, err := nc.Subscribe(h.Subject(), h.Handler)
if err != nil {
log.Fatal(err)
}
defer sub.Unsubscribe()
// Do this block only if you generated the code with the prometheus plugin.
http.Handle("/metrics", promhttp.Handler())
go http.ListenAndServe(":6060", nil)
// Keep running until ^C.
fmt.Println("server is running, ^C quits.")
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
<-c
close(c)
} |
|
dseg.rs | ///! TODO: Don't use Data segment at all
use crate::align;
use core::mem::size_of;
#[derive(Debug, Clone)]
#[repr(C)]
pub struct DSeg {
entries: Vec<Entry>,
size: i32,
}
#[derive(Debug, Clone)]
#[repr(C)]
pub struct Entry {
disp: i32,
value: Value,
}
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(C)]
pub struct f32x4(pub f32, pub f32, pub f32, pub f32);
#[derive(Debug, PartialEq, Clone)]
#[repr(C)]
pub enum Value {
Ptr(*const u8),
Float(f32),
Double(f64),
Int(i32),
F4(f32x4),
}
impl Value {
pub extern "C" fn size(&self) -> i32 {
match self {
&Value::Ptr(_) => size_of::<*const u8>() as i32,
&Value::Int(_) => size_of::<i32>() as i32,
&Value::Float(_) => size_of::<f32>() as i32,
&Value::Double(_) => size_of::<f64>() as i32,
&Value::F4(_) => size_of::<f32x4>() as i32,
}
}
}
impl DSeg {
pub extern "C" fn new() -> DSeg {
DSeg { entries: Vec::new(),
size: 0 }
}
pub extern "C" fn size(&self) -> i32 { self.size }
fn add_value(&mut self, v: Value) -> i32 {
let size = v.size();
self.size = align(self.size() + size, size);
let entry = Entry { disp: self.size(),
value: v };
self.entries.push(entry);
self.size
}
pub extern "C" fn finish(&self, ptr: *const u8) {
for entry in &self.entries {
let offset = self.size - entry.disp;
unsafe {
let entry_ptr = ptr.offset(offset as isize);
match entry.value {
Value::Ptr(v) => *(entry_ptr as *mut (*const u8)) = v,
Value::Float(v) => {
*(entry_ptr as *mut f32) = v;
}
Value::Double(v) => {
*(entry_ptr as *mut f64) = v;
}
Value::Int(v) => {
*(entry_ptr as *mut i32) = v;
}
Value::F4(v) => {
*(entry_ptr as *mut f32x4) = v; | }
pub extern "C" fn add_addr_reuse(&mut self, ptr: *const u8) -> i32 {
for entry in &self.entries {
if entry.value == Value::Ptr(ptr) {
return entry.disp;
}
}
self.add_addr(ptr)
}
pub extern "C" fn add_f32x4(&mut self, value: f32x4) -> i32 { self.add_value(Value::F4(value)) }
pub extern "C" fn add_int(&mut self, value: i32) -> i32 { self.add_value(Value::Int(value)) }
pub extern "C" fn add_addr(&mut self, value: *const u8) -> i32 {
self.add_value(Value::Ptr(value))
}
pub extern "C" fn add_double(&mut self, value: f64) -> i32 {
self.add_value(Value::Double(value))
}
pub extern "C" fn add_float(&mut self, value: f32) -> i32 {
self.add_value(Value::Float(value))
}
pub extern "C" fn align(&mut self, size: i32) -> i32 {
assert!(size > 0);
self.size = align(self.size, size);
self.size
}
} | }
}
}
} |
peer_test.rs | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use super::*;
use address::Address;
use async_std::sync::channel;
use async_std::task;
use beacon::MockBeacon;
use blocks::BlockHeader;
use db::MemoryDB;
use fil_types::verifier::MockVerifier;
use forest_libp2p::{hello::HelloRequest, rpc::ResponseChannel};
use libp2p::core::PeerId;
use state_manager::StateManager;
use std::time::Duration;
#[test]
fn peer_manager_update() | {
let db = Arc::new(MemoryDB::default());
let chain_store = Arc::new(ChainStore::new(db.clone()));
let (local_sender, _test_receiver) = channel(20);
let (event_sender, event_receiver) = channel(20);
let msg_root = compute_msg_meta(chain_store.blockstore(), &[], &[]).unwrap();
let dummy_header = BlockHeader::builder()
.miner_address(Address::new_id(1000))
.messages(msg_root)
.message_receipts(Cid::new_from_cbor(&[1, 2, 3], Blake2b256))
.state_root(Cid::new_from_cbor(&[1, 2, 3], Blake2b256))
.build_and_validate()
.unwrap();
let gen_hash = chain_store.set_genesis(&dummy_header).unwrap();
let genesis_ts = Arc::new(Tipset::new(vec![dummy_header]).unwrap());
let beacon = Arc::new(MockBeacon::new(Duration::from_secs(1)));
let state_manager = Arc::new(StateManager::new(db));
let cs = ChainSyncer::<_, _, MockVerifier>::new(
chain_store,
state_manager,
beacon,
local_sender,
event_receiver,
genesis_ts.clone(),
)
.unwrap();
let peer_manager = Arc::clone(&cs.network.peer_manager_cloned());
task::spawn(async {
cs.start(0).await;
});
let source = PeerId::random();
let source_clone = source.clone();
let (sender, _) = channel(1);
let gen_cloned = genesis_ts.clone();
task::block_on(async {
event_sender
.send(NetworkEvent::HelloRequest {
request: HelloRequest {
heaviest_tip_set: gen_cloned.key().cids().to_vec(),
heaviest_tipset_height: gen_cloned.epoch(),
heaviest_tipset_weight: gen_cloned.weight().clone(),
genesis_hash: gen_hash,
},
channel: ResponseChannel {
peer: source,
sender,
},
})
.await;
// Would be ideal to not have to sleep here and have it deterministic
task::sleep(Duration::from_millis(1000)).await;
assert_eq!(peer_manager.len().await, 1);
assert_eq!(peer_manager.sorted_peers().await, &[source_clone]);
});
} |
|
notebookDisposeService.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { inject, injectable } from 'inversify';
import { NotebookDocument } from 'vscode';
import { IExtensionSingleActivationService } from '../../activation/types';
import { IVSCodeNotebook } from '../../common/application/types';
import { UseVSCodeNotebookEditorApi } from '../../common/constants';
import { IDisposableRegistry } from '../../common/types';
import { noop } from '../../common/utils/misc';
import { IKernelProvider } from '../jupyter/kernels/types';
import { INotebookProvider } from '../types';
@injectable()
export class | implements IExtensionSingleActivationService {
constructor(
@inject(IVSCodeNotebook) private readonly vscNotebook: IVSCodeNotebook,
@inject(IDisposableRegistry) private readonly disposables: IDisposableRegistry,
@inject(INotebookProvider) private readonly notebookProvider: INotebookProvider,
@inject(IKernelProvider) private readonly kernelProvider: IKernelProvider,
@inject(UseVSCodeNotebookEditorApi) private readonly useNativeNb: boolean
) {}
public async activate(): Promise<void> {
if (!this.useNativeNb) {
return;
}
this.vscNotebook.onDidCloseNotebookDocument(this.onDidCloseNotebookDocument, this, this.disposables);
}
private onDidCloseNotebookDocument(document: NotebookDocument) {
const kernel = this.kernelProvider.get(document.uri);
if (kernel) {
kernel.dispose().catch(noop);
}
this.notebookProvider.disposeAssociatedNotebook({ identity: document.uri });
}
}
| NotebookDisposeService |
utils.py | import functools
def dict_cmp(x, y, key): | else:
return 0
def sort_dict(dictionary, cmp_func):
arr = []
for key in dictionary:
arr.append((key, dictionary[key]))
arr.sort(key=functools.cmp_to_key(lambda x, y : cmp_func(x[1], y[1])))
return arr | if str(x[key]) > str(y[key]):
return 1
elif str(x[key]) < str(y[key]):
return -1 |
0002_auto_20210201_1602.py | # Generated by Django 3.1.6 on 2021-02-01 16:02
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'), | operations = [
migrations.AlterField(
model_name='dataset',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 2, 1, 16, 2, 48, 685488, tzinfo=utc)),
),
] | ]
|
Intro.tsx | import Button from '../shared/Button';
import { IC_MASK } from '../../utils/Icons';
import React from 'react';
import { RootStackNavigationProps } from '../navigation/RootStackNavigator';
import { User } from '../../types';
import { View } from 'react-native';
import { getString } from '../../../STRINGS';
import styled from 'styled-components/native';
import { useAppContext } from '../../providers/AppProvider';
import { useThemeContext } from '../../providers/ThemeProvider';
const Container = styled.View`
flex: 1;
align-self: stretch;
overflow: scroll;
background-color: ${({ theme }): string => theme.background};
flex-direction: column;
justify-content: flex-start;
align-items: center;
overflow: hidden;
`;
const ContentWrapper = styled.View`
flex-direction: column;
height: 100%;
width: 100%;
justify-content: flex-start;
align-items: center;
`;
const ButtonWrapper = styled.View`
position: absolute;
flex-direction: column;
bottom: 40px;
width: 85%;
align-self: center;
`;
const StyledText = styled.Text`
font-size: 18px;
line-height: 27px;
color: ${({ theme }): string => theme.fontColor};
`;
interface Props {
navigation: RootStackNavigationProps<'Intro'>;
}
function Intro(props: Props): React.ReactElement {
let timer: number;
const { state: { user }, setUser } = useAppContext();
const { changeThemeType } = useThemeContext();
const [isLoggingIn, setIsLoggingIn] = React.useState<boolean>(false);
const onLogin = (): void => {
setIsLoggingIn(true);
timer = setTimeout(() => {
const user: User = {
displayName: 'dooboolab',
age: 30,
job: 'developer',
};
setUser(user);
setIsLoggingIn(false);
clearTimeout(timer);
}, 1000);
};
return (
<Container>
<ContentWrapper>
<StyledText
style={{
marginTop: 100,
}}
>
{user ? user.displayName : ''}
</StyledText>
<StyledText>{user ? user.age : ''}</StyledText>
<StyledText>{user ? user.job : ''}</StyledText>
</ContentWrapper>
<ButtonWrapper>
<Button
testID="btn-login"
imgLeftSrc={IC_MASK}
isLoading={isLoggingIn}
onClick={(): void => onLogin()}
text={getString('LOGIN')}
/>
<View style={{ marginTop: 8 }} />
<Button
testID="btn-navigate"
onClick={(): void => props.navigation.navigate('Temp', {
param: 'GO BACK',
})}
text={getString('NAVIGATE', { name: 'Temp' })}
/>
<View style={{ marginTop: 8 }} /> | testID="btn-theme"
onClick={(): void => changeThemeType()}
text={getString('CHANGE_THEME')}
/>
</ButtonWrapper>
</Container>
);
}
export default Intro; | <Button |
eval_order_dependence.rs | use clippy_utils::diagnostics::{span_lint, span_lint_and_note};
use clippy_utils::{get_parent_expr, path_to_local, path_to_local_id};
use if_chain::if_chain;
use rustc_hir::intravisit::{walk_expr, NestedVisitorMap, Visitor};
use rustc_hir::{BinOpKind, Block, Expr, ExprKind, Guard, HirId, Local, Node, Stmt, StmtKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
// Lint declarations: the doc comments below are rendered verbatim as the
// published clippy lint documentation.
declare_clippy_lint! {
    /// ### What it does
    /// Checks for a read and a write to the same variable where
    /// whether the read occurs before or after the write depends on the evaluation
    /// order of sub-expressions.
    ///
    /// ### Why is this bad?
    /// It is often confusing to read. In addition, the
    /// sub-expression evaluation order for Rust is not well documented.
    ///
    /// ### Known problems
    /// Code which intentionally depends on the evaluation
    /// order, or which is correct for any evaluation order.
    ///
    /// ### Example
    /// ```rust
    /// let mut x = 0;
    ///
    /// // Bad
    /// let a = {
    ///     x = 1;
    ///     1
    /// } + x;
    /// // Unclear whether a is 1 or 2.
    ///
    /// // Good
    /// let tmp = {
    ///     x = 1;
    ///     1
    /// };
    /// let a = tmp + x;
    /// ```
    pub EVAL_ORDER_DEPENDENCE,
    suspicious,
    "whether a variable read occurs before a write depends on sub-expression evaluation order"
}
declare_clippy_lint! {
    /// ### What it does
    /// Checks for diverging calls that are not match arms or
    /// statements.
    ///
    /// ### Why is this bad?
    /// It is often confusing to read. In addition, the
    /// sub-expression evaluation order for Rust is not well documented.
    ///
    /// ### Known problems
    /// Someone might want to use `some_bool || panic!()` as a
    /// shorthand.
    ///
    /// ### Example
    /// ```rust,no_run
    /// # fn b() -> bool { true }
    /// # fn c() -> bool { true }
    /// let a = b() || panic!() || c();
    /// // `c()` is dead, `panic!()` is only called if `b()` returns `false`
    /// let x = (a, b, c, panic!());
    /// // can simply be replaced by `panic!()`
    /// ```
    pub DIVERGING_SUB_EXPRESSION,
    complexity,
    "whether an expression contains a diverging sub expression"
}
// Both lints are implemented by this single lint pass.
declare_lint_pass!(EvalOrderDependence => [EVAL_ORDER_DEPENDENCE, DIVERGING_SUB_EXPRESSION]);
impl<'tcx> LateLintPass<'tcx> for EvalOrderDependence {
    /// Entry point for EVAL_ORDER_DEPENDENCE: triggered on every expression,
    /// but only acts on assignments to local variables.
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
        // Find a write to a local variable.
        let var = if_chain! {
            if let ExprKind::Assign(lhs, ..) | ExprKind::AssignOp(_, lhs, _) = expr.kind;
            if let Some(var) = path_to_local(lhs);
            // Skip compiler-generated (desugared) assignments.
            if expr.span.desugaring_kind().is_none();
            then { var } else { return; }
        };
        // Walk up from the write looking for unsequenced reads of `var`.
        let mut visitor = ReadVisitor {
            cx,
            var,
            write_expr: expr,
            last_expr: expr,
        };
        check_for_unsequenced_reads(&mut visitor);
    }
    /// Entry point for DIVERGING_SUB_EXPRESSION: inspects statements for
    /// diverging sub-expressions.
    fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx Stmt<'_>) {
        match stmt.kind {
            StmtKind::Local(local) => {
                if let Local { init: Some(e), .. } = local {
                    DivergenceVisitor { cx }.visit_expr(e);
                }
            },
            // `maybe_walk_expr` keeps top-level match arms / blocks unlinted.
            StmtKind::Expr(e) | StmtKind::Semi(e) => DivergenceVisitor { cx }.maybe_walk_expr(e),
            StmtKind::Item(..) => {},
        }
    }
}
/// Visitor that reports sub-expressions which always diverge
/// (`break`, `continue`, `return`, and calls whose type is `!`).
struct DivergenceVisitor<'a, 'tcx> {
    /// Lint context used for type lookups and emitting the lint.
    cx: &'a LateContext<'tcx>,
}
impl<'a, 'tcx> DivergenceVisitor<'a, 'tcx> {
fn maybe_walk_expr(&mut self, e: &'tcx Expr<'_>) |
    /// Emits the `DIVERGING_SUB_EXPRESSION` lint at the span of `e`.
    fn report_diverging_sub_expr(&mut self, e: &Expr<'_>) {
        span_lint(self.cx, DIVERGING_SUB_EXPRESSION, e.span, "sub-expression diverges");
    }
}
impl<'a, 'tcx> Visitor<'tcx> for DivergenceVisitor<'a, 'tcx> {
    type Map = Map<'tcx>;
    fn visit_expr(&mut self, e: &'tcx Expr<'_>) {
        match e.kind {
            // Control-flow transfers always diverge at this point.
            ExprKind::Continue(_) | ExprKind::Break(_, _) | ExprKind::Ret(_) => self.report_diverging_sub_expr(e),
            ExprKind::Call(func, _) => {
                // A plain call diverges when the callee's return type is `!`.
                let typ = self.cx.typeck_results().expr_ty(func);
                match typ.kind() {
                    ty::FnDef(..) | ty::FnPtr(_) => {
                        let sig = typ.fn_sig(self.cx.tcx);
                        if let ty::Never = self.cx.tcx.erase_late_bound_regions(sig).output().kind() {
                            self.report_diverging_sub_expr(e);
                        }
                    },
                    _ => {},
                }
            },
            ExprKind::MethodCall(..) => {
                // A method call diverges when its result type is `!`.
                let borrowed_table = self.cx.typeck_results();
                if borrowed_table.expr_ty(e).is_never() {
                    self.report_diverging_sub_expr(e);
                }
            },
            _ => {
                // do not lint expressions referencing objects of type `!`, as that required a
                // diverging expression
                // to begin with
            },
        }
        // Recurse, skipping the sub-expression shapes that sequence their
        // operands (see `maybe_walk_expr`).
        self.maybe_walk_expr(e);
    }
    fn visit_block(&mut self, _: &'tcx Block<'_>) {
        // don't continue over blocks, LateLintPass already does that
    }
    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::None
    }
}
/// Walks up the AST from the given write expression (`vis.write_expr`) looking
/// for reads to the same variable that are unsequenced relative to the write.
///
/// This means reads for which there is a common ancestor between the read and
/// the write such that
///
/// * evaluating the ancestor necessarily evaluates both the read and the write (for example, `&x`
/// and `|| x = 1` don't necessarily evaluate `x`), and
///
/// * which one is evaluated first depends on the order of sub-expression evaluation. Blocks, `if`s,
/// loops, `match`es, and the short-circuiting logical operators are considered to have a defined
/// evaluation order.
///
/// When such a read is found, the lint is triggered.
fn check_for_unsequenced_reads(vis: &mut ReadVisitor<'_, '_>) {
    let map = &vis.cx.tcx.hir();
    // Climb the HIR ancestor chain starting from the write expression.
    let mut cur_id = vis.write_expr.hir_id;
    loop {
        let parent_id = map.get_parent_node(cur_id);
        // The crate root is its own parent; nothing further to inspect.
        if parent_id == cur_id {
            break;
        }
        let parent_node = match map.find(parent_id) {
            Some(parent) => parent,
            None => break,
        };
        let stop_early = match parent_node {
            Node::Expr(expr) => check_expr(vis, expr),
            Node::Stmt(stmt) => check_stmt(vis, stmt),
            Node::Item(_) => {
                // We reached the top of the function, stop.
                break;
            },
            _ => StopEarly::KeepGoing,
        };
        match stop_early {
            StopEarly::Stop => break,
            StopEarly::KeepGoing => {},
        }
        cur_id = parent_id;
    }
}
/// Whether to stop early for the loop in `check_for_unsequenced_reads`. (If
/// `check_expr` weren't an independent function, this would be unnecessary and
/// we could just use `break`).
enum StopEarly {
    /// Keep climbing the ancestor chain.
    KeepGoing,
    /// Abort the climb (used when a closure boundary is reached).
    Stop,
}
/// Inspects one ancestor expression of the write, visiting any operands whose
/// evaluation order relative to the write is unspecified.
fn check_expr<'a, 'tcx>(vis: &mut ReadVisitor<'a, 'tcx>, expr: &'tcx Expr<'_>) -> StopEarly {
    // The subtree rooted at `last_expr` was already visited from below.
    if expr.hir_id == vis.last_expr.hir_id {
        return StopEarly::KeepGoing;
    }
    match expr.kind {
        // These forms evaluate several operands in an unspecified relative
        // order, so scan all of them for reads of the variable.
        ExprKind::Array(_)
        | ExprKind::Tup(_)
        | ExprKind::MethodCall(..)
        | ExprKind::Call(_, _)
        | ExprKind::Assign(..)
        | ExprKind::Index(_, _)
        | ExprKind::Repeat(_, _)
        | ExprKind::Struct(_, _, _) => walk_expr(vis, expr),
        ExprKind::Binary(op, _, _) | ExprKind::AssignOp(op, _, _) => {
            // `&&` and `||` always evaluate their left operand first, so
            // their operands are strictly sequenced; any other operator
            // leaves the order unspecified.
            if !matches!(op.node, BinOpKind::And | BinOpKind::Or) {
                walk_expr(vis, expr);
            }
        },
        ExprKind::Closure(_, _, _, _, _) => {
            // Either
            //
            // * `var` is defined in the closure body, in which case we've reached the top of the enclosing
            //   function and can stop, or
            //
            // * `var` is captured by the closure, in which case, because evaluating a closure does not evaluate
            //   its body, we don't necessarily have a write, so we need to stop to avoid generating false
            //   positives.
            //
            // This is also the only place we need to stop early (grrr).
            return StopEarly::Stop;
        },
        // All other expressions either have only one child or strictly
        // sequence the evaluation order of their sub-expressions.
        _ => {},
    }
    vis.last_expr = expr;
    StopEarly::KeepGoing
}
/// Inspects one ancestor statement of the write; see `check_expr`.
fn check_stmt<'a, 'tcx>(vis: &mut ReadVisitor<'a, 'tcx>, stmt: &'tcx Stmt<'_>) -> StopEarly {
    match stmt.kind {
        StmtKind::Expr(expr) | StmtKind::Semi(expr) => check_expr(vis, expr),
        // A local declaration only matters if it has an initializer
        // expression; otherwise there is nothing to evaluate.
        StmtKind::Local(local) => {
            if let Some(init) = local.init {
                check_expr(vis, init)
            } else {
                StopEarly::KeepGoing
            }
        },
        StmtKind::Item(..) => StopEarly::KeepGoing,
    }
}
/// A visitor that looks for reads from a variable.
struct ReadVisitor<'a, 'tcx> {
    /// Lint context used for name lookup and emitting the lint.
    cx: &'a LateContext<'tcx>,
    /// The ID of the variable we're looking for.
    var: HirId,
    /// The expressions where the write to the variable occurred (for reporting
    /// in the lint).
    write_expr: &'tcx Expr<'tcx>,
    /// The last (highest in the AST) expression we've checked, so we know not
    /// to recheck it.
    last_expr: &'tcx Expr<'tcx>,
}
impl<'a, 'tcx> Visitor<'tcx> for ReadVisitor<'a, 'tcx> {
    type Map = Map<'tcx>;
    fn visit_expr(&mut self, expr: &'tcx Expr<'_>) {
        // This subtree was already checked on a previous, lower iteration.
        if expr.hir_id == self.last_expr.hir_id {
            return;
        }
        // A bare path resolving to the tracked variable is a candidate read.
        if path_to_local_id(expr, self.var) {
            // Check that this is a read, not a write.
            if !is_in_assignment_position(self.cx, expr) {
                span_lint_and_note(
                    self.cx,
                    EVAL_ORDER_DEPENDENCE,
                    expr.span,
                    &format!("unsequenced read of `{}`", self.cx.tcx.hir().name(self.var)),
                    Some(self.write_expr.span),
                    "whether read occurs before this write depends on evaluation order",
                );
            }
        }
        match expr.kind {
            // We're about to descend a closure. Since we don't know when (or
            // if) the closure will be evaluated, any reads in it might not
            // occur here (or ever). Like above, bail to avoid false positives.
            ExprKind::Closure(_, _, _, _, _) |
            // We want to avoid a false positive when a variable name occurs
            // only to have its address taken, so we stop here. Technically,
            // this misses some weird cases, eg.
            //
            // ```rust
            // let mut x = 0;
            // let a = foo(&{x = 1; x}, x);
            // ```
            //
            // TODO: fix this
            ExprKind::AddrOf(_, _, _) => {
                return;
            }
            _ => {}
        }
        walk_expr(self, expr);
    }
    fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
        NestedVisitorMap::None
    }
}
/// Returns `true` if `expr` is the LHS of an assignment, like `expr = ...`.
fn is_in_assignment_position(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
    match get_parent_expr(cx, expr) {
        // A write is exactly: parent is an `Assign` and we are its LHS.
        Some(parent) => match parent.kind {
            ExprKind::Assign(lhs, ..) => lhs.hir_id == expr.hir_id,
            _ => false,
        },
        None => false,
    }
}
| {
match e.kind {
ExprKind::Closure(..) => {},
ExprKind::Match(e, arms, _) => {
self.visit_expr(e);
for arm in arms {
if let Some(Guard::If(if_expr)) = arm.guard {
self.visit_expr(if_expr);
}
// make sure top level arm expressions aren't linted
self.maybe_walk_expr(&*arm.body);
}
},
_ => walk_expr(self, e),
}
} |
main.rs | fn | () {
let mut count = 0;
loop {
println!("again!");
count += 1;
if count == 2 {
break;
}
}
count = 0;
let result = loop {
count += 1;
if count == 10 {
break count * 2;
}
};
println!("The result is {}", result);
let mut number = 3;
while number != 0 {
println!("{}!", number);
number -= 1;
}
println!("LIFTOFF!!!");
let a = [10, 20, 30, 40, 50];
let mut index = 0;
// error prone, what if index >= 5? rust will panic.
// slow, compiler adds runtime code for the conditional check on every iteration
while index < 5 {
println!("the value is: {}", a[index]);
index += 1;
}
// instead
for element in a.iter() {
println!("the value is : {}", element);
}
// let's replace our countdown with a for loop
for number in (1..4).rev() { // rev => reverse
println!("{}!", number);
}
println!("LIFTOFF!!");
// TODO: convert temperatures between Fahrenheit and Celsius
// TODO: generate the nth Fibonacci number
// TODO: Print the lyrics to the Christmas carol "The Twelve Days of Christmas"
// taking advantage of the repetition in the song.
// TODO: compile to webassembly and host these small programs on my website
}
| main |
service_alicloud_mns.go | package alicloud
import (
"strings"
"github.com/dxh031/ali_mns"
)
// MnsQueueManager builds an MNS queue manager on top of the client's
// MNS connection.
func (client *AliyunClient) MnsQueueManager() (ali_mns.AliQueueManager, error) {
	conn, err := client.Mnsconn()
	if err != nil {
		return nil, err
	}
	return ali_mns.NewMNSQueueManager(*conn), nil
}
func (client *AliyunClient) MnsSubscriptionManager(topicName string) (ali_mns.AliMNSTopic, error) {
| if err != nil {
return nil, err
}
subscriptionManager := ali_mns.NewMNSTopic(topicName, *mnsClient)
return subscriptionManager, nil
}
// MnsTopicManager builds an MNS topic manager on top of the client's
// MNS connection.
func (client *AliyunClient) MnsTopicManager() (ali_mns.AliTopicManager, error) {
	conn, err := client.Mnsconn()
	if err != nil {
		return nil, err
	}
	return ali_mns.NewMNSTopicManager(*conn), nil
}
// GetTopicNameAndSubscriptionName splits a subscription ID of the form
// "<topic><separator><subscription>" into its topic and subscription parts.
// Fix: a malformed ID without the separator previously panicked with an
// index-out-of-range on arr[1]; it now returns the whole ID as the topic
// name and an empty subscription name.
func GetTopicNameAndSubscriptionName(subscriptionId string) (string, string) {
	arr := strings.Split(subscriptionId, COLON_SEPARATED)
	if len(arr) < 2 {
		// strings.Split always returns at least one element.
		return arr[0], ""
	}
	return arr[0], arr[1]
}
// SubscriptionNotExistFunc reports whether err indicates that the MNS
// subscription does not exist.
func SubscriptionNotExistFunc(err error) bool {
	msg := err.Error()
	return strings.Contains(msg, SubscriptionNotExist)
}
// TopicNotExistFunc reports whether err indicates that the MNS topic
// does not exist.
func TopicNotExistFunc(err error) bool {
	msg := err.Error()
	return strings.Contains(msg, TopicNotExist)
}
func QueueNotExistFunc(err error) bool {
return strings.Contains(err.Error(), QueueNotExist)
} | mnsClient, err := client.Mnsconn() |
runner_test.go | package boomer
import (
"sync/atomic"
"testing"
"time"
)
// HitOutput is a test double implementing the output interface; it
// records which lifecycle hooks the runner invoked.
type HitOutput struct {
	onStart bool
	onEvent bool
	onStop bool
}

// OnStart records that the runner invoked the start hook.
func (o *HitOutput) OnStart() {
	o.onStart = true
}

// OnEvent records that the runner delivered an event.
func (o *HitOutput) OnEvent(data map[string]interface{}) {
	o.onEvent = true
}

// OnStop records that the runner invoked the stop hook.
func (o *HitOutput) OnStop() {
	o.onStop = true
}
// TestSafeRun verifies that safeRun recovers from a panic raised by the
// wrapped function instead of crashing the process.
func TestSafeRun(t *testing.T) {
	runner := &runner{}
	runner.safeRun(func() {
		panic("Runner will catch this panic")
	})
}
// TestOutputOnStart checks that every registered output receives the
// OnStart callback.
func TestOutputOnStart(t *testing.T) {
	hitOutput := &HitOutput{}
	hitOutput2 := &HitOutput{}
	runner := &runner{}
	runner.addOutput(hitOutput)
	runner.addOutput(hitOutput2)
	runner.outputOnStart()
	if !hitOutput.onStart {
		t.Error("hitOutput's OnStart has not been called")
	}
	if !hitOutput2.onStart {
		t.Error("hitOutput2's OnStart has not been called")
	}
}
func TestOutputOnEevent(t *testing.T) {
hitOutput := &HitOutput{}
hitOutput2 := &HitOutput{}
runner := &runner{}
runner.addOutput(hitOutput)
runner.addOutput(hitOutput2)
runner.outputOnEevent(nil)
if !hitOutput.onEvent |
if !hitOutput2.onEvent {
t.Error("hitOutput2's OnEvent has not been called")
}
}
// TestOutputOnStop checks that every registered output receives the
// OnStop callback.
func TestOutputOnStop(t *testing.T) {
	hitOutput := &HitOutput{}
	hitOutput2 := &HitOutput{}
	runner := &runner{}
	runner.addOutput(hitOutput)
	runner.addOutput(hitOutput2)
	runner.outputOnStop()
	if !hitOutput.onStop {
		t.Error("hitOutput's OnStop has not been called")
	}
	if !hitOutput2.onStop {
		t.Error("hitOutput2's OnStop has not been called")
	}
}
// TestLocalRunner smoke-tests the standalone (master-less) runner: it
// starts workers for a single task, lets them run, then shuts down.
func TestLocalRunner(t *testing.T) {
	taskA := &Task{
		Weight: 10,
		Fn: func() {
			time.Sleep(time.Second)
		},
		Name: "TaskA",
	}
	tasks := []*Task{taskA}
	runner := newLocalRunner(tasks, nil, 2, 2)
	go runner.run()
	time.Sleep(4 * time.Second)
	runner.close()
}
// TestSpawnWorkers checks that worker goroutines are spawned gradually
// according to the hatch rate rather than all at once.
func TestSpawnWorkers(t *testing.T) {
	taskA := &Task{
		Weight: 10,
		Fn: func() {
			time.Sleep(time.Second)
		},
		Name: "TaskA",
	}
	tasks := []*Task{taskA}
	runner := newSlaveRunner("localhost", 5557, tasks, nil)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.hatchRate = 10
	go runner.spawnWorkers(10, runner.stopChan, runner.hatchComplete)
	// At a hatch rate of 10/sec, only a couple of goroutines should
	// exist 2ms after spawning begins.
	time.Sleep(2 * time.Millisecond)
	currentClients := atomic.LoadInt32(&runner.numClients)
	if currentClients > 3 {
		t.Error("Spawning goroutines too fast, current count", currentClients)
	}
}
// TestHatchAndStop drives a full hatch cycle: workers are spawned, a
// hatch_complete message is sent, and quitting emits a quit message.
func TestHatchAndStop(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	taskB := &Task{
		Fn: func() {
			time.Sleep(2 * time.Second)
		},
	}
	tasks := []*Task{taskA, taskB}
	runner := newSlaveRunner("localhost", 5557, tasks, nil)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	// Fail the test if startHatching() never signals via clearStatsChan.
	go func() {
		var ticker = time.NewTicker(time.Second)
		for {
			select {
			case <-ticker.C:
				t.Error("Timeout waiting for message sent by startHatching()")
				return
			case <-runner.stats.clearStatsChan:
				// just quit
				return
			}
		}
	}()
	runner.startHatching(10, float64(10), runner.hatchComplete)
	// wait for spawning goroutines
	time.Sleep(1100 * time.Millisecond)
	if runner.numClients != 10 {
		t.Error("Number of goroutines mismatches, expected: 10, current count", runner.numClients)
	}
	msg := <-runner.client.sendChannel()
	if msg.Type != "hatch_complete" {
		t.Error("Runner should send hatch_complete message when hatching completed, got", msg.Type)
	}
	runner.stop()
	runner.onQuiting()
	msg = <-runner.client.sendChannel()
	if msg.Type != "quit" {
		t.Error("Runner should send quit message on quitting, got", msg.Type)
	}
}
// TestStop verifies that stopping the runner fires the boomer:stop event.
func TestStop(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	tasks := []*Task{taskA}
	runner := newSlaveRunner("localhost", 5557, tasks, nil)
	runner.stopChan = make(chan bool)
	stopped := false
	handler := func() {
		stopped = true
	}
	Events.Subscribe("boomer:stop", handler)
	defer Events.Unsubscribe("boomer:stop", handler)
	runner.stop()
	if stopped != true {
		t.Error("Expected stopped to be true, was", stopped)
	}
}
// TestOnHatchMessage checks that a hatch message from the master fires
// the boomer:hatch event with the requested worker count and hatch rate.
func TestOnHatchMessage(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	runner := newSlaveRunner("localhost", 5557, []*Task{taskA}, nil)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.state = stateInit
	workers, hatchRate := 0, float64(0)
	// The callback captures the parameters boomer:hatch was fired with.
	callback := func(param1 int, param2 float64) {
		workers = param1
		hatchRate = param2
	}
	Events.Subscribe("boomer:hatch", callback)
	defer Events.Unsubscribe("boomer:hatch", callback)
	go func() {
		// consumes clearStatsChannel
		for {
			select {
			case <-runner.stats.clearStatsChan:
				return
			}
		}
	}()
	runner.onHatchMessage(newMessage("hatch", map[string]interface{}{
		"hatch_rate": float64(20),
		"num_clients": int64(20),
	}, runner.nodeID))
	if workers != 20 {
		t.Error("workers should be overwrote by callback function, expected: 20, was:", workers)
	}
	if hatchRate != 20 {
		t.Error("hatchRate should be overwrote by callback function, expected: 20, was:", hatchRate)
	}
	runner.onMessage(newMessage("stop", nil, runner.nodeID))
}
// TestOnQuitMessage checks that a quit message from the master fires the
// boomer:quit event from every runner state (init, running, stopped) and
// resets the state to stateInit.
func TestOnQuitMessage(t *testing.T) {
	runner := newSlaveRunner("localhost", 5557, nil, nil)
	defer runner.close()
	runner.client = newClient("localhost", 5557, "test")
	runner.state = stateInit
	quitMessages := make(chan bool, 10)
	receiver := func() {
		quitMessages <- true
	}
	Events.Subscribe("boomer:quit", receiver)
	defer Events.Unsubscribe("boomer:quit", receiver)
	var ticker = time.NewTicker(20 * time.Millisecond)
	// Quit while in stateInit.
	runner.onMessage(newMessage("quit", nil, runner.nodeID))
	select {
	case <-quitMessages:
		break
	case <-ticker.C:
		t.Error("Runner should fire boomer:quit message when it receives a quit message from the master.")
		break
	}
	// Quit while in stateRunning.
	runner.state = stateRunning
	runner.stopChan = make(chan bool)
	runner.onMessage(newMessage("quit", nil, runner.nodeID))
	select {
	case <-quitMessages:
		break
	case <-ticker.C:
		t.Error("Runner should fire boomer:quit message when it receives a quit message from the master.")
		break
	}
	if runner.state != stateInit {
		t.Error("Runner's state should be stateInit")
	}
	// Quit while in stateStopped.
	runner.state = stateStopped
	runner.onMessage(newMessage("quit", nil, runner.nodeID))
	select {
	case <-quitMessages:
		break
	case <-ticker.C:
		t.Error("Runner should fire boomer:quit message when it receives a quit message from the master.")
		break
	}
	if runner.state != stateInit {
		t.Error("Runner's state should be stateInit")
	}
}
// TestOnMessage exercises the full master-message protocol: hatch,
// re-hatch with more clients, stop, and hatch again, asserting the
// message types the slave sends back at each step.
func TestOnMessage(t *testing.T) {
	taskA := &Task{
		Fn: func() {
			time.Sleep(time.Second)
		},
	}
	taskB := &Task{
		Fn: func() {
			time.Sleep(2 * time.Second)
		},
	}
	tasks := []*Task{taskA, taskB}
	runner := newSlaveRunner("localhost", 5557, tasks, nil)
	defer runner.close()
	runner.client = newClient("localhost", 5557, runner.nodeID)
	runner.state = stateInit
	go func() {
		// consumes clearStatsChannel
		count := 0
		for {
			select {
			case <-runner.stats.clearStatsChan:
				// receive two hatch message from master
				if count >= 2 {
					return
				}
				count++
			}
		}
	}()
	// start hatching
	runner.onMessage(newMessage("hatch", map[string]interface{}{
		"hatch_rate": float64(10),
		"num_clients": int64(10),
	}, runner.nodeID))
	msg := <-runner.client.sendChannel()
	if msg.Type != "hatching" {
		t.Error("Runner should send hatching message when starting hatch, got", msg.Type)
	}
	// hatch complete and running
	time.Sleep(1100 * time.Millisecond)
	if runner.state != stateRunning {
		t.Error("State of runner is not running after hatch, got", runner.state)
	}
	if runner.numClients != 10 {
		t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.numClients)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "hatch_complete" {
		t.Error("Runner should send hatch_complete message when hatch completed, got", msg.Type)
	}
	// increase num_clients while running
	runner.onMessage(newMessage("hatch", map[string]interface{}{
		"hatch_rate": float64(20),
		"num_clients": int64(20),
	}, runner.nodeID))
	msg = <-runner.client.sendChannel()
	if msg.Type != "hatching" {
		t.Error("Runner should send hatching message when starting hatch, got", msg.Type)
	}
	time.Sleep(1100 * time.Millisecond)
	if runner.state != stateRunning {
		t.Error("State of runner is not running after hatch, got", runner.state)
	}
	if runner.numClients != 20 {
		t.Error("Number of goroutines mismatches, expected: 20, current count:", runner.numClients)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "hatch_complete" {
		t.Error("Runner should send hatch_complete message when hatch completed, got", msg.Type)
	}
	// stop all the workers
	runner.onMessage(newMessage("stop", nil, runner.nodeID))
	if runner.state != stateInit {
		t.Error("State of runner is not init, got", runner.state)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "client_stopped" {
		t.Error("Runner should send client_stopped message, got", msg.Type)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "client_ready" {
		t.Error("Runner should send client_ready message, got", msg.Type)
	}
	// hatch again
	// note: num_clients is deliberately a uint64 here (unlike the int64
	// above), exercising both integer widths a master may send.
	runner.onMessage(newMessage("hatch", map[string]interface{}{
		"hatch_rate": float64(10),
		"num_clients": uint64(10),
	}, runner.nodeID))
	msg = <-runner.client.sendChannel()
	if msg.Type != "hatching" {
		t.Error("Runner should send hatching message when starting hatch, got", msg.Type)
	}
	// hatch complete and running
	time.Sleep(1100 * time.Millisecond)
	if runner.state != stateRunning {
		t.Error("State of runner is not running after hatch, got", runner.state)
	}
	if runner.numClients != 10 {
		t.Error("Number of goroutines mismatches, expected: 10, current count:", runner.numClients)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "hatch_complete" {
		t.Error("Runner should send hatch_complete message when hatch completed, got", msg.Type)
	}
	// stop all the workers
	runner.onMessage(newMessage("stop", nil, runner.nodeID))
	if runner.state != stateInit {
		t.Error("State of runner is not init, got", runner.state)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "client_stopped" {
		t.Error("Runner should send client_stopped message, got", msg.Type)
	}
	msg = <-runner.client.sendChannel()
	if msg.Type != "client_ready" {
		t.Error("Runner should send client_ready message, got", msg.Type)
	}
}
// TestGetReady runs a slave against an in-process test master and checks
// the client_ready handshake plus the stats message relay (including the
// reported user_count).
func TestGetReady(t *testing.T) {
	masterHost := "127.0.0.1"
	masterPort := 6557
	server := newTestServer(masterHost, masterPort)
	defer server.close()
	server.start()
	rateLimiter := NewStableRateLimiter(100, time.Second)
	r := newSlaveRunner(masterHost, masterPort, nil, rateLimiter)
	defer r.close()
	defer Events.Unsubscribe("boomer:quit", r.onQuiting)
	r.run()
	msg := <-server.fromClient
	if msg.Type != "client_ready" {
		t.Error("Runner should send client_ready message to server.")
	}
	r.numClients = 10
	// it's not really running
	r.state = stateRunning
	data := make(map[string]interface{})
	r.stats.messageToRunnerChan <- data
	msg = <-server.fromClient
	if msg.Type != "stats" {
		t.Error("Runner should send stats message to server.")
	}
	userCount := msg.Data["user_count"].(int64)
	if userCount != int64(10) {
		t.Error("User count mismatch, expect: 10, got:", userCount)
	}
}
| {
t.Error("hitOutput's OnEvent has not been called")
} |
bitcoin_es_MX.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="es_MX" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source><b>Ozoncoin</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The Ozoncoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Haga doble clic para editar el domicilio o la etiqueta</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Crear una dirección nueva</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copiar el domicilio seleccionado al portapapeles del sistema</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-46"/>
<source>These are your Ozoncoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Ozoncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified Ozoncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Borrar</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivo separado por comas (*.CSV)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Domicilio</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Ingrese la contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nueva contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Repita la nueva contraseña</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Ingrese la nueva contraseña a la cartera<br/>Por favor use una contraseña de<b>10 o más caracteres aleatorios</b> o <b>ocho o más palabras</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Cartera encriptada.</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Esta operación necesita la contraseña de su cartera para desbloquear su cartera.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Desbloquear cartera.</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Esta operación necesita la contraseña de su cartera para desencriptar su cartera.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Desencriptar la cartera</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Cambiar contraseña</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
        <translation>Ingrese la antigua y nueva contraseña de la cartera</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirmar la encriptación de cartera</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Cartera encriptada</translation>
</message>
<message>
<location line="-58"/>
<source>Ozoncoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>La encriptación de la cartera falló</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>La encriptación de la cartera falló debido a un error interno. Su cartera no fue encriptada.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Las contraseñas dadas no coinciden</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>El desbloqueo de la cartera falló</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La contraseña ingresada para la desencriptación de la cartera es incorrecta</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>La desencriptación de la cartera falló</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+282"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+251"/>
<source>Synchronizing with network...</source>
<translation>Sincronizando con la red...</translation>
</message>
<message>
<location line="-319"/>
<source>&Overview</source>
<translation>&Vista general</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Mostrar la vista previa general de la cartera</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transacciones</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Explorar el historial de transacciones</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>S&alir</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Salir de la aplicación</translation>
</message>
<message>
<location line="+6"/>
<source>Show information about Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opciones</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+259"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-256"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Send coins to a Ozoncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Modify configuration options for Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Cambiar la contraseña usada para la encriptación de la cartera</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-202"/>
<source>Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+180"/>
<source>&About Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>&File</source>
<translation>&Archivo</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Configuraciones</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&Ayuda</translation>
</message>
<message>
<location line="+12"/>
<source>Tabs toolbar</source>
<translation>Pestañas</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>Ozoncoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+75"/>
<source>%n active connection(s) to Ozoncoin network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="-312"/>
<source>About Ozoncoin card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Ozoncoin card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+297"/>
<source>%n minute(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Actualizado al día</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Recibiendo...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Transacción enviada</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Transacción entrante</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Ozoncoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>La cartera esta <b>encriptada</b> y <b>desbloqueada</b> actualmente </translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>La cartera esta <b>encriptada</b> y <b>bloqueada</b> actualmente </translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. Ozoncoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Monto:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>Cuota:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Monto</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Domicilio</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Confirmado </translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Copiar dirección </translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>copiar monto</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>copiar cantidad</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>copiar cuota</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>copiar despues de cuota</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>copiar bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>copiar prioridad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>copiar cambio</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Editar dirección</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etiqueta</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Dirección</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Nueva dirección de entregas</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nueva dirección de envíos</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Editar dirección de entregas</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Editar dirección de envíos</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>El domicilio ingresado "%1" ya existe en la libreta de direcciones</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Ozoncoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>No se puede desbloquear la cartera</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>La generación de la nueva clave falló</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>Ozoncoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opciones</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Ozoncoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start Ozoncoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Ozoncoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Ozoncoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Ozoncoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Ozoncoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Ozoncoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Ozoncoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Transacciones recientes</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Ozoncoin-Qt help message to get a list with possible Ozoncoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Ozoncoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Ozoncoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Ozoncoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the Ozoncoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Mandar monedas</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Monto:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 KEY</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>Cuota:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Enviar a múltiples receptores a la vez</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 KEY</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Confirme la acción de enviar</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a Ozoncoin address (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>copiar cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>copiar monto</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>copiar cuota</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>copiar despues de cuota</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>copiar bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>copiar prioridad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>copiar cambio</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Confirme para mandar monedas</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>El monto a pagar debe ser mayor que 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid Ozoncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>M&onto:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Pagar &a:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Ingrese una etiqueta para esta dirección para agregarlo en su libreta de direcciones.</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>&Etiqueta:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección del portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Ozoncoin address (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección del portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Ozoncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Ozoncoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Ozoncoin address (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Ozoncoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Abrir hasta %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/No confirmado</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 confirmaciones</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Monto</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, aún no ha sido transmitido con éxito</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>desconocido</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Detalles de la transacción</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Este panel muestras una descripción detallada de la transacción</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Domicilio</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Monto</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Abrir hasta %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmado (%1 confirmaciones)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>¡Este bloque no fue recibido por ningún otro nodo y probablemente no será aceptado!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generado pero no aprobado</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pago a sí mismo</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Fecha y hora en que la transacción fue recibida.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tipo de transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Dirección de destino de la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Cantidad removida del saldo o agregada a él.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Todo</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Hoy</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Esta semana </translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Este mes </translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>El mes pasado </translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Este año</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Para ti mismo</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minado </translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Otro</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Ingrese la dirección o etiqueta a buscar</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Monto minimo </translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copiar dirección </translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>copiar monto</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Editar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivo separado por comas (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confirmado </translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Domicilio</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Monto</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>hasta</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>Ozoncoin version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or Ozoncoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Lista de comandos</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: Ozoncoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: Ozoncoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Ozoncoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=Ozoncoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Ozoncoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. Ozoncoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-98"/>
<source>Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message> | <source>Loading addresses...</source>
<translation>Cargando direcciones...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Ozoncoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Ozoncoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>Cargando indice de bloques... </translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. Ozoncoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Cargando billetera...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Carga completa</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS> | <location line="+122"/> |
0007_auto__add_pyconlightningtalkproposal__add_field_pyconposterproposal_ad.py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class | (SchemaMigration):
def forwards(self, orm):
# Adding model 'PyConLightningTalkProposal'
db.create_table(u'pycon_pyconlightningtalkproposal', (
(u'proposalbase_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['proposals.ProposalBase'], unique=True, primary_key=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['pycon.PyConProposalCategory'])),
('audience_level', self.gf('django.db.models.fields.IntegerField')()),
('overall_status', self.gf('django.db.models.fields.IntegerField')(default=1)),
('damaged_score', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('rejection_status', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('recording_release', self.gf('django.db.models.fields.BooleanField')(default=True)),
('additional_requirements', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'pycon', ['PyConLightningTalkProposal'])
def backwards(self, orm):
# Deleting model 'PyConLightningTalkProposal'
db.delete_table(u'pycon_pyconlightningtalkproposal')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'conference.section': {
'Meta': {'object_name': 'Section'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.additionalspeaker': {
'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'proposals.proposalbase': {
'Meta': {'object_name': 'ProposalBase'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.proposalkind': {
'Meta': {'object_name': 'ProposalKind'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposal_kinds'", 'to': u"orm['conference.Section']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconlightningtalkproposal': {
'Meta': {'object_name': 'PyConLightningTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pyconposterproposal': {
'Meta': {'object_name': 'PyConPosterProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pyconproposalcategory': {
'Meta': {'object_name': 'PyConProposalCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon.pyconsponsortutorialproposal': {
'Meta': {'object_name': 'PyConSponsorTutorialProposal', '_ormbases': [u'proposals.ProposalBase']},
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'})
},
u'pycon.pycontalkproposal': {
'Meta': {'object_name': 'PyConTalkProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'pycon.pycontutorialproposal': {
'Meta': {'object_name': 'PyConTutorialProposal'},
'additional_requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'audience': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'audience_level': ('django.db.models.fields.IntegerField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pycon.PyConProposalCategory']"}),
'damaged_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'domain_level': ('django.db.models.fields.IntegerField', [], {}),
'more_info': ('django.db.models.fields.TextField', [], {}),
'outline': ('django.db.models.fields.TextField', [], {}),
'overall_status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'perceived_value': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
u'proposalbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['proposals.ProposalBase']", 'unique': 'True', 'primary_key': 'True'}),
'recording_release': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rejection_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'annotation': ('django.db.models.fields.TextField', [], {}),
'biography': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'invite_token': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sessions_preference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['pycon']
| Migration |
yield_from_generator.py | from typing import Union, cast
import libcst as cst
import libcst.matchers as m | """
libcst based transformer to convert 'for x in generator: yield x' to 'yield from generator'.
"""
__author__ = "Gina Häußge <[email protected]>"
__license__ = "MIT"
class YieldFromGenerator(CodeMod):
DESCRIPTION: str = "Converts 'for x in generator: yield x' to 'yield from generator'."
def leave_For(
self, original_node: cst.For, updated_node: cst.For
) -> Union[cst.For, cst.SimpleStatementLine]:
if m.matches(
updated_node,
m.For(
target=m.Name(),
body=m.IndentedBlock(
body=[m.SimpleStatementLine(body=[m.Expr(value=m.Yield(m.Name()))])]
),
),
):
target = updated_node.target.value
block = cast(cst.IndentedBlock, updated_node.body)
simple_stmt = cast(cst.SimpleStatementLine, block.body[0])
expr_stmt = cast(cst.Expr, simple_stmt.body[0])
yield_stmt = cast(cst.Yield, expr_stmt.value)
yielded = cast(cst.Name, yield_stmt.value).value
if target == yielded:
self._report_node(original_node)
self.count += 1
updated_node = cst.SimpleStatementLine(
body=[
cst.Expr(value=cst.Yield(value=cst.From(item=updated_node.iter)))
]
)
return updated_node
def main():
runner(YieldFromGenerator)
if __name__ == "__main__":
main() |
from .util import CodeMod, runner
|
akun-alamat.module.ts | import { NgModule } from '@angular/core'; | import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { Routes, RouterModule } from '@angular/router';
import { IonicModule } from '@ionic/angular';
import { AkunAlamatPage } from './akun-alamat.page';
const routes: Routes = [
{
path: '',
component: AkunAlamatPage
}
];
@NgModule({
imports: [
CommonModule,
FormsModule,
IonicModule,
RouterModule.forChild(routes)
],
declarations: [AkunAlamatPage]
})
export class AkunAlamatPageModule {} | |
main.go | package main
import (
"fmt"
"net"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
"github.com/hatena/Hatena-Intern-2020/services/renderer-go/config"
server "github.com/hatena/Hatena-Intern-2020/services/renderer-go/grpc"
"github.com/hatena/Hatena-Intern-2020/services/renderer-go/log"
pb "github.com/hatena/Hatena-Intern-2020/services/renderer-go/pb/renderer"
"go.uber.org/zap"
"google.golang.org/grpc"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
)
func main() {
if err := run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}
}
func run(args []string) error {
// 設定をロード
conf, err := config.Load()
if err != nil {
return fmt.Errorf("failed to load config: %+v", err)
}
// ロガーを初期化
logger, err := log.NewLogger(log.Config{Mode: conf.Mode})
if err != nil {
return fmt.Errorf("failed to create logger: %+v", err)
}
defer logger.Sync()
// サーバーを起動
logger.Info(fmt.Sprintf("starting gRPC server (port = %v)", conf.GRPCPort))
lis, err := net.Listen("tcp", ":"+strconv.Itoa(conf.GRPCPort))
if err != nil {
return fmt.Errorf("failed to listen: %+v", err)
}
s := grpc.NewServer(
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_ctxtags.UnaryServerInterceptor(),
grpc_zap.UnaryServerInterceptor(
logger,
grpc_zap.WithDecider(func(fullMethodName string, err error) bool {
// ヘルスチェックのログを無効化
return !strings.HasPrefix(fullMethodName, "/grpc.health.v1.Health/")
}),
),
grpc_recovery.UnaryServerInterceptor(),
)),
)
svr := server.NewServer()
pb.RegisterRendererServer(s, svr)
healthpb.RegisterHealthServer(s, svr)
go stop(s, conf.GracefulStopTimeout, logger)
if err := s.Serve(lis); err != nil {
return fmt.Errorf("failed to serve: %+v", err)
}
return nil
}
func stop(s *grpc.Server, timeout time.Duration, logger *zap.Logger) {
s | an := make(chan os.Signal)
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
sig := <-sigChan
logger.Info(fmt.Sprintf("gracefully stopping server (sig = %v)", sig))
t := time.NewTimer(timeout)
defer t.Stop()
stopped := make(chan struct{})
go func() {
s.GracefulStop()
close(stopped)
}()
select {
case <-t.C:
logger.Warn(fmt.Sprintf("stopping server (not stopped in %s)", timeout.String()))
s.Stop()
case <-stopped:
}
}
| igCh |
utils.rs | use proc_macro2::{Span, TokenStream};
use proc_macro_crate::{crate_name, FoundCrate};
use quote::quote;
use syn::Ident;
pub(crate) fn get_crate_name() -> TokenStream | {
let name = match crate_name("poem") {
Ok(FoundCrate::Name(name)) => name,
Ok(FoundCrate::Itself) | Err(_) => "poem".to_string(),
};
let name = Ident::new(&name, Span::call_site());
quote!(#name)
} |
|
conn.go | /*
Conn is a wrapper of the origin network connection. It resolves the handshake
information from the first version message including magic number, PID, network
address etc.
*/
package hub
import (
"bytes"
"fmt"
"io"
"net"
"github.com/elastos/Elastos.ELA/dpos/p2p/msg"
"github.com/elastos/Elastos.ELA/p2p"
)
// Conn is a wrapper of the origin network connection.
type Conn struct {
net.Conn // The origin network connection.
buf *bytes.Buffer
magic uint32
pid [33]byte
target [16]byte
addr net.Addr
}
// Magic returns the magic number resolved from message header.
func (c *Conn) Magic() uint32 {
return c.magic
}
// PID returns the PID resolved from the version message. It represents who
// is connecting.
func (c *Conn) PID() [33]byte {
return c.pid
}
// Target returns the Target PID resolved from the version message. It used
// when a service behind the hub want to connect to another service,
// representing who the service is going to connect.
func (c *Conn) Target() [16]byte {
return c.target
}
// NetAddr returns the network address resolve from the origin connection and
// the version message.
func (c *Conn) NetAddr() net.Addr {
return c.addr
}
// Read warps the origin Read method without knowing we have intercepted the
// version message.
func (c *Conn) Read(b []byte) (n int, err error) {
n, err = c.buf.Read(b)
if n > 0 {
return n, err
}
return c.Conn.Read(b)
}
// WrapConn warps the origin network connection and returns a hub connection
// with the handshake information resolved from version message.
func WrapConn(c net.Conn) (conn *Conn, err error) |
// newNetAddr creates a net.Addr with the origin net.Addr and port.
func newNetAddr(addr net.Addr, port uint16) net.Addr {
// addr will be a net.TCPAddr when not using a proxy.
if tcpAddr, ok := addr.(*net.TCPAddr); ok {
return &net.TCPAddr{IP: tcpAddr.IP, Port: int(port)}
}
// For the most part, addr should be one of the two above cases, but
// to be safe, fall back to trying to parse the information from the
// address string as a last resort.
host, _, err := net.SplitHostPort(addr.String())
if err != nil {
return nil
}
return &net.TCPAddr{IP: net.ParseIP(host), Port: int(port)}
}
| {
// Read message header
var headerBytes [p2p.HeaderSize]byte
if _, err = io.ReadFull(c, headerBytes[:]); err != nil {
return
}
// Deserialize message header
var hdr p2p.Header
if err = hdr.Deserialize(headerBytes[:]); err != nil {
return
}
if hdr.GetCMD() != p2p.CmdVersion {
err = fmt.Errorf("invalid message %s, expecting version",
hdr.GetCMD())
return
}
// Read payload
payload := make([]byte, hdr.Length)
if _, err = io.ReadFull(c, payload[:]); err != nil {
return
}
// Verify checksum
if err = hdr.Verify(payload); err != nil {
return
}
v := &msg.Version{}
err = v.Deserialize(bytes.NewReader(payload))
if err != nil {
return
}
buf := bytes.NewBuffer(headerBytes[:])
buf.Write(payload)
conn = &Conn{
Conn: c,
buf: buf,
magic: hdr.Magic,
pid: v.PID,
target: v.Target,
addr: newNetAddr(c.RemoteAddr(), v.Port),
}
return
} |
achievement.js | import mongoose from 'mongoose'
const rangeSchema = new mongoose.Schema({
name: {
type: String,
required: true
},
value: {
type: Number,
required: true
},
earnedDate: {
type: Date,
required: false
}
})
const ratingSchema = new mongoose.Schema({
name: {
type: String,
required: true
},
xp: {
type: Number,
required: true
},
ranges: [
{
type: rangeSchema,
require: true
}
]
})
const achievementSchema = new mongoose.Schema({
name: {
type: String,
required: true
},
kind: {
type: String,
required: true
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true
},
total: { | ratings: [
{
type: ratingSchema,
require: true
}
]
})
export default mongoose.model('Achievement', achievementSchema) | type: Number,
require: true
}, |
StringIO.py | r"""File-like objects that read from or write to a string buffer.
This implements (nearly) all stdio methods.
f = StringIO() # ready for writing
f = StringIO(buf) # ready for reading
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
buf = f.readline() # read until end of line ('\n') or EOF
list = f.readlines()# list of f.readline() results until EOF
f.truncate([size]) # truncate file at to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
f.getvalue() # return whole file's contents as a string
Notes:
- Using a real file is often faster (but less convenient).
- There's also a much faster implementation in C, called cStringIO, but
it's not subclassable.
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- Seeking far beyond EOF and then writing will insert real null
bytes that occupy space in the buffer.
- There's a simple test set (see end of this file).
"""
try:
from errno import EINVAL
except ImportError:
EINVAL = 22
__all__ = ["StringIO"]
def _complain_ifclosed(closed):
if closed:
raise ValueError, "I/O operation on closed file"
class StringIO:
"""class StringIO([buffer])
When a StringIO object is created, it can be initialized to an existing
string by passing the string to the constructor. If no string is given,
the StringIO will start empty.
The StringIO object can accept either Unicode or 8-bit strings, but
mixing the two may take some care. If both are used, 8-bit strings that
cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
a UnicodeError to be raised when getvalue() is called.
"""
def __init__(self, buf = ''):
# Force self.buf to be a string or unicode
if not isinstance(buf, basestring):
buf = str(buf)
self.buf = buf
self.len = len(buf)
self.buflist = []
self.pos = 0
self.closed = False
self.softspace = 0
def __iter__(self):
return self
def next(self):
"""A file object is its own iterator, for example iter(f) returns f
(unless f is closed). When a file is used as an iterator, typically
in a for loop (for example, for line in f: print line), the next()
method is called repeatedly. This method returns the next input line,
or raises StopIteration when EOF is hit.
"""
_complain_ifclosed(self.closed)
r = self.readline()
if not r:
raise StopIteration
return r
def close(self):
"""Free the memory buffer.
"""
if not self.closed:
self.closed = True
del self.buf, self.pos
def isatty(self):
|
def seek(self, pos, mode = 0):
"""Set the file's current position.
The mode argument is optional and defaults to 0 (absolute file
positioning); other values are 1 (seek relative to the current
position) and 2 (seek relative to the file's end).
There is no return value.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if mode == 1:
pos += self.pos
elif mode == 2:
pos += self.len
self.pos = max(0, pos)
def tell(self):
"""Return the file's current position."""
_complain_ifclosed(self.closed)
return self.pos
def read(self, n = -1):
"""Read at most size bytes from the file
(less if the read hits EOF before obtaining size bytes).
If the size argument is negative or omitted, read all data until EOF
is reached. The bytes are returned as a string object. An empty
string is returned when EOF is encountered immediately.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if n is None or n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readline(self, length=None):
r"""Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent
when a file ends with an incomplete line). If the size argument is
present and non-negative, it is a maximum byte count (including the
trailing newline) and an incomplete line may be returned.
An empty string is returned only when EOF is encountered immediately.
Note: Unlike stdio's fgets(), the returned string contains null
characters ('\0') if they occurred in the input.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
i = self.buf.find('\n', self.pos)
if i < 0:
newpos = self.len
else:
newpos = i+1
if length is not None and length > 0:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readlines(self, sizehint = 0):
"""Read until EOF using readline() and return a list containing the
lines thus read.
If the optional sizehint argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes (or more
to accommodate a final whole line).
"""
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def truncate(self, size=None):
"""Truncate the file's size.
If the optional size argument is present, the file is truncated to
(at most) that size. The size defaults to the current position.
The current file position is not changed unless the position
is beyond the new file size.
If the specified size exceeds the file's current size, the
file remains unchanged.
"""
_complain_ifclosed(self.closed)
if size is None:
size = self.pos
elif size < 0:
raise IOError(EINVAL, "Negative size not allowed")
elif size < self.pos:
self.pos = size
self.buf = self.getvalue()[:size]
self.len = size
def write(self, s):
"""Write a string to the file.
There is no return value.
"""
_complain_ifclosed(self.closed)
if not s: return
# Force s to be a string or unicode
if not isinstance(s, basestring):
s = str(s)
spos = self.pos
slen = self.len
if spos == slen:
self.buflist.append(s)
self.len = self.pos = spos + len(s)
return
if spos > slen:
self.buflist.append('\0'*(spos - slen))
slen = spos
newpos = spos + len(s)
if spos < slen:
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
self.buf = ''
if newpos > slen:
slen = newpos
else:
self.buflist.append(s)
slen = newpos
self.len = slen
self.pos = newpos
def writelines(self, iterable):
"""Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. There
is no return value.
(The name is intended to match readlines(); writelines() does not add
line separators.)
"""
write = self.write
for line in iterable:
write(line)
def flush(self):
"""Flush the internal buffer
"""
_complain_ifclosed(self.closed)
def getvalue(self):
"""
Retrieve the entire contents of the "file" at any time before
the StringIO object's close() method is called.
The StringIO object can accept either Unicode or 8-bit strings,
but mixing the two may take some care. If both are used, 8-bit
strings that cannot be interpreted as 7-bit ASCII (that use the
8th bit) will cause a UnicodeError to be raised when getvalue()
is called.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
return self.buf
# A little test suite
def test():
import sys
if sys.argv[1:]:
file = sys.argv[1]
else:
file = '/etc/passwd'
lines = open(file, 'r').readlines()
text = open(file, 'r').read()
f = StringIO()
for line in lines[:-2]:
f.write(line)
f.writelines(lines[-2:])
if f.getvalue() != text:
raise RuntimeError, 'write failed'
length = f.tell()
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line =', repr(f.readline())
print 'Position =', f.tell()
line = f.readline()
print 'Second line =', repr(line)
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
raise RuntimeError, 'bad result after seek back'
f.seek(len(line2), 1)
list = f.readlines()
line = list[-1]
f.seek(f.tell() - len(line))
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.truncate(length/2)
f.seek(0, 2)
print 'Truncated length =', f.tell()
if f.tell() != length/2:
raise RuntimeError, 'truncate did not adjust length'
f.close()
if __name__ == '__main__':
test()
| """Returns False because StringIO objects are not connected to a
tty-like device.
"""
_complain_ifclosed(self.closed)
return False |
3.5 sortStack.js | // Chapter 3: Stacks & Queues
// Solution originally on repl.it, please see: https://repl.it/@khd25/
//==============================================================================
/*
Question 3.5 - Sort Stack:
Write a program to sort a stack such that the smallest items are on the top.
You can use an additional temporary stack,
but you may not copy the elements into any other data structure (such as an array).
The stack supports the following operations: push, pop, peek, and isEmpty.
R(ephrase): take a stack that contains unsorted values and return them in sorted order, inside a stack structure
- don't use any other DS to solve this, expect 1 additional temp stack
E(xample):
Stack { stack: [3,1,2] }
-> should change to Stack { stack: [3,2,1] } // so smallest can .pop() off from the back first
A(pproach):
- make very normal Stack class, with push, pop, peek, and empty
- sortStack function takes a disorderly stack and first makes a tempStack to hold values while sorting
- 1 while loop that runs until input stack is emptied (ie all things have been ordered and place in tempStack)
- 2 inner while loop that runs until things in tempStack are sorted, popping them back out and into original input stack when they are not in order
- need a temp variable to hold the current stack item while sorting in progress, push into tempStack if larger (we want smaller ones to wait until later)
-both loops finish, return the completed temp stack with sorted values in it
C(ode):
*/
class Stack {
constructor() {
this.stack = [];
}
push(val) {
this.stack.push(val);
}
pop() {
if (this.stack.length === 0) return -1;
return this.stack.pop();
}
peek() {
if (this.stack.length === 0) return -1;
return this.stack[this.stack.length - 1];
}
empty() {
return this.stack.length === 0;
}
}
//sort to put smallest on top so smallest comes off first
function | (stk) {
// make another stack to bounce values between with input stack while sorting
let tempStack = new Stack();
// while the input stack isn't empty yet...
while (!stk.empty()) {
// hold on to the last item you pop off input stack
let temp = stk.pop();
// while temp stack isn't empty yet && the last item on it is LESS than temp
while (!tempStack.empty() && tempStack.peek() < temp) {
// you want that smaller item to wait till later, so put it back on the input stack
stk.push(tempStack.pop());
// eventually this loop will break when either the tempStack is empty or when temp value is smaller value
}
// since we've held on to temp and not put it anywhere yet, now it gets to go onto tempStack
tempStack.push(temp);
// if it indeed did get pushed onto temp stack, it must be smaller than all items currently in tempStack
// this will leave temp stack able to pop items off the end starting from smallest values
// because the smallest values were pushed on at the end
// the outer while loop will break when we have dealt with every item in input stack, and it becomes empty
}
// the values are in the order we want in tempStack now, so we return tempStack
return tempStack;
}
// T(est):
let test = new Stack();
test.push(2);
test.push(6);
test.push(5);
test.push(1);
test.push(4);
test.push(3);
test.push(7);
test.push(3);
console.log('TEST: ', test); // expect TEST: Stack { stack: [2,6,5,1,4,3,7,3] }
sortStack(test); // expect Stack { stack: [7,6,5,4,3,3,2,1] }
// O(ptimize): in progess...
// current solution version is O(n^2) time and takes O(n) additional space
| sortStack |
color_diff_matrix.py | """
This module contains the formulas for comparing Lab values with matrices
and vectors. The benefit of using NumPy's matrix capabilities is speed. These
calls can be used to efficiently compare large volumes of Lab colors.
"""
import numpy
def delta_e_cie1976(lab_color_vector, lab_color_matrix):
"""
Calculates the Delta E (CIE1976) between `lab_color_vector` and all
colors in `lab_color_matrix`.
"""
return numpy.sqrt(
numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))
# noinspection PyPep8Naming
def delta_e_cie1994(lab_color_vector, lab_color_matrix,
K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
|
# noinspection PyPep8Naming
def delta_e_cmc(lab_color_vector, lab_color_matrix, pl=2, pc=1):
"""
Calculates the Delta E (CIE1994) of two colors.
CMC values
Acceptability: pl=2, pc=1
Perceptability: pl=1, pc=1
"""
L, a, b = lab_color_vector
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
H_1 = numpy.degrees(numpy.arctan2(b, a))
if H_1 < 0:
H_1 += 360
F = numpy.sqrt(numpy.power(C_1, 4) / (numpy.power(C_1, 4) + 1900.0))
# noinspection PyChainedComparisons
if 164 <= H_1 and H_1 <= 345:
T = 0.56 + abs(0.2 * numpy.cos(numpy.radians(H_1 + 168)))
else:
T = 0.36 + abs(0.4 * numpy.cos(numpy.radians(H_1 + 35)))
if L < 16:
S_L = 0.511
else:
S_L = (0.040975 * L) / (1 + 0.01765 * L)
S_C = ((0.0638 * C_1) / (1 + 0.0131 * C_1)) + 0.638
S_H = S_C * (F * T + 1 - F)
delta_C = C_1 - C_2
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
# noinspection PyArgumentList
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[pl * S_L], [pc * S_C], [S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0))
# noinspection PyPep8Naming
def delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1):
"""
Calculates the Delta E (CIE2000) of two colors.
"""
L, a, b = lab_color_vector
avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0
C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
avg_C1_C2 = (C1 + C2) / 2.0
G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))))
a1p = (1.0 + G) * a
a2p = (1.0 + G) * lab_color_matrix[:, 1]
C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))
C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))
avg_C1p_C2p = (C1p + C2p) / 2.0
h1p = numpy.degrees(numpy.arctan2(b, a1p))
h1p += (h1p < 0) * 360
h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))
h2p += (h2p < 0) * 360
avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0
T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \
0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \
0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \
0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))
diff_h2p_h1p = h2p - h1p
delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360
delta_hp -= (h2p > h1p) * 720
delta_Lp = lab_color_matrix[:, 0] - L
delta_Cp = C2p - C1p
delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)
S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0)))
S_C = 1 + 0.045 * avg_C1p_C2p
S_H = 1 + 0.015 * avg_C1p_C2p * T
delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))
R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0)))
R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))
return numpy.sqrt(
numpy.power(delta_Lp / (S_L * Kl), 2) +
numpy.power(delta_Cp / (S_C * Kc), 2) +
numpy.power(delta_Hp / (S_H * Kh), 2) +
R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh)))
| """
Calculates the Delta E (CIE1994) of two colors.
K_l:
0.045 graphic arts
0.048 textiles
K_2:
0.015 graphic arts
0.014 textiles
K_L:
1 default
2 textiles
"""
C_1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
C_2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
delta_lab = lab_color_vector - lab_color_matrix
delta_L = delta_lab[:, 0].copy()
delta_C = C_1 - C_2
delta_lab[:, 0] = delta_C
delta_H_sq = numpy.sum(numpy.power(delta_lab, 2) * numpy.array([-1, 1, 1]), axis=1)
# noinspection PyArgumentList
delta_H = numpy.sqrt(delta_H_sq.clip(min=0))
S_L = 1
S_C = 1 + K_1 * C_1
S_H = 1 + K_2 * C_1
LCH = numpy.vstack([delta_L, delta_C, delta_H])
params = numpy.array([[K_L * S_L], [K_C * S_C], [K_H * S_H]])
return numpy.sqrt(numpy.sum(numpy.power(LCH / params, 2), axis=0)) |
constructors.go | package protocol
import (
"bytes"
"encoding/binary"
"encoding/json"
"io"
"log"
"net"
)
func NewDNSHeader() *DNSHeader {
return &DNSHeader{
ID: generateRequestID(),
Flags: REC_DISERED | REC_AVAILABLE,
QDCount: 1,
}
}
func NewDNSQuestion(domain string, class uint16) *DNSQuestion {
return &DNSQuestion{
QuestionName: encodeDomain(domain),
QuestionType: class,
QuestionClass: QCLASS,
}
}
func NewDNSQuery(domain string, class uint16) *DNSQuery {
return &DNSQuery{
Header: NewDNSHeader(),
Question: NewDNSQuestion(domain, class),
}
}
func NewDNSResourceHeader(name string, resourceType, class uint16,
ttl uint32, length uint16) *DNSResourceHeader |
func NewDNSResource(header *DNSResourceHeader, data []byte) *DNSResource {
return &DNSResource{
Header: header,
Data: data,
}
}
func NewDNSStringResponse(header *DNSHeader, answersLength int) *DNSStringResponse {
return &DNSStringResponse{
Header: header,
Answers: make([]*DNSStringAnswer, answersLength),
}
}
func NewDNSStringAnswer(header *DNSResourceHeader, data string) *DNSStringAnswer {
return &DNSStringAnswer{
Header: header,
Data: data,
}
}
func (query *DNSQuery) SendRequest(writter io.Writer) error {
buffer := &bytes.Buffer{}
binary.Write(buffer, binary.BigEndian, query.Header)
binary.Write(buffer, binary.BigEndian, query.Question.QuestionName)
binary.Write(buffer, binary.BigEndian, query.Question.QuestionType)
binary.Write(buffer, binary.BigEndian, query.Question.QuestionClass)
_, err := writter.Write(buffer.Bytes())
return err
}
func GetResponse(reader io.Reader) []byte {
response := make([]byte, MAX_RESPONSE_LENGTH)
if _, err := reader.Read(response); err != nil {
log.Println("Response Error:", err)
return []byte{}
}
return response
}
func (response *DNSResponse) ToJSON(writter io.Writer) {
if response.Header.QDCount != 1 {
return
}
encoder := json.NewEncoder(writter)
encodedResponse := NewDNSStringResponse(response.Header, len(response.Answers))
for i, resource := range response.Answers {
data := ""
if resource.Header.Type == PTR {
data = decodeDomain(resource.Data)
} else if resource.Header.Type == AAAA || resource.Header.Type == A {
ip := make(net.IP, len(resource.Data))
copy(ip, resource.Data)
data = ip.String()
}
stringRecord := NewDNSStringAnswer(resource.Header, data)
encodedResponse.Answers[i] = stringRecord
}
if err := encoder.Encode(encodedResponse); err != nil {
log.Fatal("Error at encoding the response:", err)
}
}
| {
return &DNSResourceHeader{
Name: name,
Type: resourceType,
Class: class,
TTL: ttl,
Length: length,
}
} |
version.py | from __future__ import unicode_literals
import datetime
import os
import subprocess
def get_version(version=None):
"Returns a PEP 440-compliant version number from VERSION."
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
else:
sub = '.dev'
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
def get_main_version(version=None):
"Returns main version (X.Y[.Z]) from VERSION." | version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""Returns a tuple of the graphql version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from graphql import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
timestamp = git_log.communicate()[0]
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except:
return None
return timestamp.strftime('%Y%m%d%H%M%S') | |
nestedPie.stories.ts | import { NestedPieChartOptions, NestedPieSeriesData } from '@t/options';
import { deepMergedCopy } from '@src/helpers/utils';
import {
browserUsageData2,
groupedBrowserUsageData,
browserUsageData2WithNull,
groupedBrowserUsageDataWithNull,
} from './data';
import NestedPieChart from '@src/charts/nestedPieChart';
import { NestedPieChartThemeOptions } from '@t/theme';
import '@src/css/chart.css';
export default {
title: 'chart|Nested Pie',
};
function createChart(data: NestedPieSeriesData, customOptions: NestedPieChartOptions = {}) {
const el = document.createElement('div');
const options = deepMergedCopy(
{
chart: {
width: 660,
height: 560,
title: 'Usage share of web browsers',
},
} as NestedPieChartOptions,
customOptions || {}
);
el.style.width = options.chart?.width === 'auto' ? '90vw' : `${options.chart?.width}px`;
el.style.height = options.chart?.height === 'auto' ? '90vh' : `${options.chart?.height}px`;
const chart = new NestedPieChart({ el, data, options });
return { el, chart };
}
export const basic = () => {
const { el } = createChart(browserUsageData2, {
series: {
browsers: {
radiusRange: {
inner: '20%',
outer: '50%',
},
},
versions: {
radiusRange: {
inner: '55%',
outer: '85%',
},
},
},
});
return el;
};
export const basicWithNullData = () => {
const { el } = createChart(browserUsageData2WithNull, {
series: {
browsers: {
radiusRange: {
inner: '20%',
outer: '50%',
},
},
versions: {
radiusRange: {
inner: '55%',
outer: '85%',
},
},
dataLabels: {
visible: true,
pieSeriesName: { visible: false },
},
clockwise: false,
},
} as NestedPieChartOptions);
return el;
};
export const grouped = () => {
const { el } = createChart(groupedBrowserUsageData, {
series: {
browsers: {
radiusRange: {
inner: '20%',
outer: '50%',
},
},
versions: {
radiusRange: {
inner: '55%',
outer: '85%',
},
},
},
});
return el;
};
export const groupedWithNullData = () => {
const { el } = createChart(groupedBrowserUsageDataWithNull, {
series: {
browsers: {
radiusRange: {
inner: '20%',
outer: '50%',
},
},
versions: {
radiusRange: {
inner: '55%',
outer: '85%',
},
},
},
});
return el;
};
export const dataLabels = () => {
const { el } = createChart(groupedBrowserUsageData, {
series: {
browsers: {
radiusRange: {
inner: '20%',
outer: '50%',
},
dataLabels: {
visible: true,
pieSeriesName: {
visible: false,
},
},
},
versions: {
radiusRange: {
inner: '55%',
outer: '85%',
},
dataLabels: {
visible: true,
anchor: 'outer',
pieSeriesName: {
visible: false,
anchor: 'outer',
},
},
},
},
});
return el;
};
export const selectable = () => {
const { el } = createChart(groupedBrowserUsageData, {
series: {
browsers: {
radiusRange: {
inner: '30%',
outer: '60%',
},
},
versions: {
radiusRange: {
inner: '70%',
outer: '100%',
},
},
selectable: true,
},
} as NestedPieChartOptions);
return el;
};
export const responsive = () => {
const { el } = createChart(groupedBrowserUsageData, {
chart: {
title: 'Usage share of web browsers',
width: 'auto',
height: 'auto',
},
});
return el;
};
export const theme = () => {
const themeOptions: NestedPieChartThemeOptions = {
series: {
browsers: {
colors: ['#eef4c4', '#77543f', '#b7c72e', '#5b9aa0', '#30076f', '#622569'],
lineWidth: 5,
strokeStyle: '#0000ff',
hover: {
color: '#0000ff',
lineWidth: 5,
strokeStyle: '#000000',
shadowColor: 'rgba(0, 0, 0, 0.5)',
shadowBlur: 10,
},
},
versions: {
colors: [
'#cddbda',
'#efd1d1',
'#ea005e',
'#fece2f',
'#fc6104',
'#dd2429',
'#ebc7ff',
'#fece2f',
'#dd2429',
'#ff8d3a',
'#fc6104',
'#5ac18e',
'#8570ff',
],
lineWidth: 2,
strokeStyle: '#ff0000',
hover: {
color: '#ff0000',
lineWidth: 2,
strokeStyle: '#000000',
shadowColor: 'rgba(0, 0, 0, 0.5)',
shadowBlur: 10,
},
},
},
};
const { el } = createChart(browserUsageData2, {
series: {
browsers: {
radiusRange: {
inner: '30%',
outer: '60%',
},
},
versions: {
radiusRange: {
inner: '70%',
outer: '100%',
},
},
},
theme: themeOptions,
});
return el;
};
// Story: a single palette/stroke shared by both rings (grouped theme), with
// only the hover colour customised per ring.
export const groupedTheme = () => {
  const themeOptions: NestedPieChartThemeOptions = {
    series: {
      colors: ['#eef4c4', '#77543f', '#b7c72e', '#5b9aa0', '#30076f', '#622569', '#f75294'],
      lineWidth: 5,
      strokeStyle: '#cccccc',
      browsers: {
        hover: { color: '#6D9B46' },
      },
      versions: {
        hover: { color: '#3A9278' },
      },
    },
  };

  const { el } = createChart(groupedBrowserUsageData, {
    series: {
      browsers: {
        radiusRange: {
          inner: '30%',
          outer: '60%',
        },
      },
      versions: {
        radiusRange: {
          inner: '70%',
          outer: '100%',
        },
      },
    },
    theme: themeOptions,
  });

  return el;
};
export const dataLabelsWithTheme = () => {
const themeOptions: NestedPieChartThemeOptions = {
series: {
browsers: {
dataLabels: {
fontFamily: 'fantasy',
fontSize: 13,
useSeriesColor: true,
textBubble: {
visible: true,
backgroundColor: '#333333',
borderRadius: 5,
borderColor: '#ff0000',
borderWidth: 3,
shadowOffsetX: 0,
shadowOffsetY: 0,
shadowBlur: 0,
shadowColor: 'rgba(0, 0, 0, 0)',
},
},
},
versions: {
dataLabels: {
fontFamily: 'monaco',
useSeriesColor: true,
lineWidth: 2,
textStrokeColor: '#ffffff',
shadowColor: '#ffffff',
shadowBlur: 4,
callout: {
lineWidth: 3,
lineColor: '#f44336',
useSeriesColor: false,
},
pieSeriesName: {
useSeriesColor: false, | visible: true,
paddingX: 1,
paddingY: 1,
backgroundColor: 'rgba(158, 158, 158, 0.3)',
shadowOffsetX: 0,
shadowOffsetY: 0,
shadowBlur: 0,
shadowColor: 'rgba(0, 0, 0, 0)',
},
},
},
},
},
};
const { el } = createChart(groupedBrowserUsageData, {
series: {
browsers: {
dataLabels: {
visible: true,
},
},
versions: {
dataLabels: {
visible: true,
pieSeriesName: { visible: true, anchor: 'outer' },
},
},
},
theme: themeOptions,
});
return el;
}; | color: '#f44336',
fontFamily: 'fantasy',
fontSize: 13,
textBubble: { |
associate.py | """
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
ui.phoenix.page_files.associate.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Actions to associate files and references of the catalogue.
"""
import wx
import logging
from sppas import sppasTypeError
from sppas import sg
from sppas.src.config import ui_translation
from sppas.src.files import FileData
from sppas.src.files import States
from sppas.src.files import sppasFileDataFilters
from ..dialogs import Information, Error
from ..windows import sppasStaticText, sppasTextCtrl
from ..windows import sppasPanel
from ..windows import sppasDialog
from ..windows import sppasToolbar
from ..windows import BitmapTextButton, CheckButton
from ..windows import sppasRadioBoxPanel
from ..main_events import DataChangedEvent
from .filesutils import IdentifierTextValidator
# ---------------------------------------------------------------------------
# User-facing messages of this module.

MSG_HEADER_FILTER = ui_translation.gettext("Checking files")
MSG_NB_CHECKED = "{:d} files were matching the given filters and were checked."
MSG_NO_CHECKED = "None of the files is matching the given filters."
ASS_ACT_CHECK_ERROR = "Files can't be filtered due to the following" \
                      " error:\n{!s:s}"

# ---------------------------------------------------------------------------
class AssociatePanel(sppasPanel):
    """Panel with tools to associate files and references of the catalogue.

    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      [email protected]
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2019  Brigitte Bigi

    """

    def __init__(self, parent, name=wx.PanelNameStr):
        super(AssociatePanel, self).__init__(
            parent,
            id=wx.ID_ANY,
            pos=wx.DefaultPosition,
            size=wx.DefaultSize,
            style=wx.BORDER_NONE | wx.TAB_TRAVERSAL | wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE | wx.CLIP_CHILDREN,
            name=name)

        # The data this page is working on
        self.__data = FileData()

        # State of the button to check all or none of the filenames
        self._checkall = False

        # Construct the panel
        self._create_content()
        self._setup_events()
        self.Layout()

    # ------------------------------------------------------------------------

    def set_data(self, data):
        """Assign new data to this panel.

        :param data: (FileData)

        """
        if isinstance(data, FileData) is False:
            raise sppasTypeError("FileData", type(data))
        logging.debug('New data to set in the associate panel. '
                      'Id={:s}'.format(data.id))
        self.__data = data

    # ------------------------------------------------------------------------
    # Private methods to construct the panel.
    # ------------------------------------------------------------------------

    def _create_content(self):
        """Create the main content."""
        filtr = self.__create_button("check_filter")
        check = self.__create_button("checklist")
        link = self.__create_button("link_add")
        unlink = self.__create_button("link_del")

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddStretchSpacer(4)
        sizer.Add(filtr, 1, wx.TOP | wx.ALIGN_CENTRE, 0)
        sizer.Add(check, 1, wx.TOP | wx.ALIGN_CENTRE, 0)
        sizer.AddStretchSpacer(2)
        sizer.Add(link, 1, wx.BOTTOM | wx.ALIGN_CENTRE, 0)
        sizer.Add(unlink, 1, wx.BOTTOM | wx.ALIGN_CENTRE, 0)
        sizer.AddStretchSpacer(4)

        self.SetMinSize(wx.Size(sppasPanel.fix_size(32), -1))
        self.SetSizer(sizer)

    # ------------------------------------------------------------------------

    def __create_button(self, icon, label=None):
        """Return a configured BitmapTextButton named after its icon."""
        btn = BitmapTextButton(self, name=icon, label=label)
        btn.FocusStyle = wx.PENSTYLE_SOLID
        btn.FocusWidth = 3
        btn.FocusColour = wx.Colour(128, 128, 196, 128)  # violet
        btn.LabelPosition = wx.BOTTOM
        btn.Spacing = 4
        btn.BorderWidth = 0
        btn.BitmapColour = self.GetForegroundColour()
        btn.SetMinSize(wx.Size(sppasPanel.fix_size(24),
                               sppasPanel.fix_size(24)))
        return btn

    # -----------------------------------------------------------------------
    # Events management
    # -----------------------------------------------------------------------

    def _setup_events(self):
        """Associate a handler function with the events.

        It means that when an event occurs then the process handler function
        will be called.

        """
        # The user pressed a key of its keyboard
        self.Bind(wx.EVT_KEY_DOWN, self._process_key_event)

        # The user clicked (LeftDown - LeftUp) an action button
        self.Bind(wx.EVT_BUTTON, self._process_action)

    # ------------------------------------------------------------------------

    def notify(self):
        """Send the EVT_DATA_CHANGED to the parent."""
        if self.GetParent() is not None:
            self.__data.set_state(States().CHECKED)
            evt = DataChangedEvent(data=self.__data)
            evt.SetEventObject(self)
            wx.PostEvent(self.GetParent(), evt)

    # ------------------------------------------------------------------------
    # Callbacks to events
    # ------------------------------------------------------------------------

    def _process_key_event(self, event):
        """Respond to a keypress event."""
        key_code = event.GetKeyCode()
        logging.debug('Associate panel received a key event. key_code={:d}'.format(key_code))
        logging.debug('Key event skipped by the associate panel.')
        event.Skip()

    # ------------------------------------------------------------------------

    def _process_action(self, event):
        """Respond to an association event."""
        name = event.GetButtonObj().GetName()

        if name == "check_filter":
            self.check_filter()
        elif name == "checklist":
            self.check_all()
        elif name == "link_add":
            self.add_links()
        elif name == "link_del":
            self.delete_links()

        event.Skip()

    # ------------------------------------------------------------------------
    # GUI methods to perform actions on the data
    # ------------------------------------------------------------------------

    def check_filter(self):
        """Check filenames matching the user-defined filters."""
        dlg = sppasFilesFilterDialog(self)
        response = dlg.ShowModal()
        if response != wx.ID_CANCEL:
            data_filters = dlg.get_filters()
            if len(data_filters) > 0:
                wx.BeginBusyCursor()
                try:
                    data_set = self.__process_filter(data_filters, dlg.match_all)
                    if len(data_set) == 0:
                        Information(MSG_NO_CHECKED)
                    else:
                        # Uncheck all files (except the locked ones!)
                        # and all references
                        self.__data.set_object_state(States().UNUSED)
                        roots = list()
                        # Check files of the filtered data_set
                        for fn in data_set:
                            self.__data.set_object_state(States().CHECKED, fn)
                            root = self.__data.get_parent(fn)
                            if root not in roots:
                                roots.append(root)
                        Information(MSG_NB_CHECKED.format(len(data_set)))
                        # Check references matching the checked files
                        for fr in roots:
                            for ref in fr.get_references():
                                ref.set_state(States().CHECKED)
                        self.notify()
                    wx.EndBusyCursor()
                except Exception as e:
                    wx.EndBusyCursor()
                    Error(ASS_ACT_CHECK_ERROR.format(str(e)), "Check error")
        dlg.Destroy()

    # ------------------------------------------------------------------------

    def __process_filter(self, data_filters, match_all=True):
        """Perform the filter process.

        :param data_filters: list of tuples with (filter name, function name, values)
        :param match_all: (bool)

        """
        logging.info("Check files matching the following: ")
        logging.info(" >>> filter = sppasFileDataFilters()")
        f = sppasFileDataFilters(self.__data)
        data_sets = list()

        for d in data_filters:
            if len(d) != 3:
                logging.error("Bad data format: {:s}".format(str(d)))
                continue
            # the method to be uses by Compare
            method = d[0]
            # the function to be applied
            fct = d[1]

            if method == "att":
                # identifier:value are separated by a ":" but a tuple is needed
                values = tuple(d[2].split(":"))
                logging.info(" >>> filter.{:s}({:s}={!s:s})".format(method, fct, str(values)))
                data_set = getattr(f, method)(**{fct: values})
                # a little bit of doc:
                # - getattr() returns the value of the named attributed of object:
                #   it returns f.tag if called like getattr(f, "tag")
                # - func(**{'x': '3'}) is equivalent to func(x='3')
            else:
                # all the possible values are separated by commas
                values = d[2].split(",")
                logging.info(" >>> filter.{:s}({:s}={!s:s})".format(method, fct, values[0]))
                data_set = getattr(f, method)(**{fct: values[0]})

                # Apply "or" between each data_set matching a value
                for i in range(1, len(values)):
                    v = values[i].strip()
                    data_set = data_set | getattr(f, method)(**{fct: v})
                    logging.info(" >>> | filter.{:s}({:s}={!s:s})".format(method, fct, v))

            data_sets.append(data_set)

        # no filename is matching
        if len(data_sets) == 0:
            return list()

        # Merge results (apply '&' or '|' on the resulting data sets)
        data_set = data_sets[0]
        if match_all is True:
            for i in range(1, len(data_sets)):
                data_set = data_set & data_sets[i]
                if len(data_set) == 0:
                    # no need to go further...
                    return list()
        else:
            for i in range(1, len(data_sets)):
                data_set = data_set | data_sets[i]

        return data_set

    # ------------------------------------------------------------------------

    def check_all(self):
        """Check all or any of the filenames and references."""
        # reverse the current state
        self._checkall = not self._checkall

        # ask the data to change their state
        if self._checkall is True:
            state = States().CHECKED
        else:
            state = States().UNUSED
        self.__data.set_object_state(state)

        # update the view of checked references & checked files
        self.notify()

    # ------------------------------------------------------------------------

    def add_links(self):
        """Associate checked filenames with checked references."""
        associed = self.__data.associate()
        if associed > 0:
            self.notify()

    # ------------------------------------------------------------------------

    def delete_links(self):
        """Dissociate checked filenames with checked references."""
        dissocied = self.__data.dissociate()
        if dissocied > 0:
            self.notify()
# ---------------------------------------------------------------------------
class sppasFilesFilterDialog(sppasDialog):
    """Dialog to get filters to check files and references.

    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      [email protected]
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2019  Brigitte Bigi

    """

    def __init__(self, parent):
        """Create a files filter dialog.

        :param parent: (wx.Window)

        """
        super(sppasFilesFilterDialog, self).__init__(
            parent=parent,
            title='{:s} Files selection'.format(sg.__name__),
            style=wx.DEFAULT_FRAME_STYLE)

        # True when the filters must all match (AND), False for OR.
        self.match_all = True
        self.CreateHeader(title="Define filters to check files",
                          icon_name="check_filter")
        self._create_content()
        self._create_buttons()
        self.Bind(wx.EVT_BUTTON, self._process_event)

        self.SetSize(wx.Size(480, 320))
        self.LayoutComponents()
        self.CenterOnParent()
        self.FadeIn(deltaN=-8)

    # -----------------------------------------------------------------------
    # Public methods
    # -----------------------------------------------------------------------

    def get_filters(self):
        """Return a list of (filter, function, values)."""
        filters = list()
        for i in range(self.listctrl.GetItemCount()):
            filter_name = self.listctrl.GetValue(i, 0)
            fct_name = self.listctrl.GetValue(i, 1)
            values = self.listctrl.GetValue(i, 2)
            filters.append((filter_name, fct_name, values))
        return filters

    # -----------------------------------------------------------------------
    # Methods to construct the GUI
    # -----------------------------------------------------------------------

    def _create_content(self):
        """Create the content of the message dialog."""
        panel = sppasPanel(self, name="content")
        tb = self.__create_toolbar(panel)
        self.listctrl = wx.dataview.DataViewListCtrl(panel, wx.ID_ANY)
        self.listctrl.AppendTextColumn("filter", width=80)
        self.listctrl.AppendTextColumn("function", width=90)
        self.listctrl.AppendTextColumn("value", width=120)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(tb, proportion=0, flag=wx.EXPAND, border=0)
        sizer.Add(self.listctrl, proportion=1, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=5)
        panel.SetSizer(sizer)

        self.SetMinSize(wx.Size(320, 200))
        panel.SetAutoLayout(True)
        self.SetContent(panel)

    # -----------------------------------------------------------------------

    def __create_toolbar(self, parent):
        """Create the toolbar."""
        tb = sppasToolbar(parent)
        tb.set_focus_color(wx.Colour(196, 196, 96, 128))
        tb.AddTextButton("filter_path", "+ Path")
        tb.AddTextButton("filter_name", "+ Name")
        tb.AddTextButton("filter_ext", "+ Type")
        tb.AddTextButton("filter_ref", "+ Ref.")
        tb.AddTextButton("filter_att", "+ Value")
        tb.AddSpacer()
        # tb.AddTextButton(None, "- Remove")
        return tb

    # -----------------------------------------------------------------------

    def _create_buttons(self):
        """Create the buttons and bind events."""
        panel = sppasPanel(self, name="actions")
        # panel.SetMinSize(wx.Size(-1, wx.GetApp().settings.action_height))
        sizer = wx.BoxSizer(wx.HORIZONTAL)

        # Create the buttons
        cancel_btn = self.__create_action_button(panel, "Cancel", "cancel")
        apply_or_btn = self.__create_action_button(panel, "Apply - OR", "apply")
        apply_and_btn = self.__create_action_button(panel, "Apply - AND", "ok")
        apply_and_btn.SetFocus()

        sizer.Add(cancel_btn, 1, wx.ALL | wx.EXPAND, 0)
        sizer.Add(self.VertLine(parent=panel), 0, wx.ALL | wx.EXPAND, 0)
        sizer.Add(apply_or_btn, 1, wx.ALL | wx.EXPAND, 0)
        sizer.Add(self.VertLine(parent=panel), 0, wx.ALL | wx.EXPAND, 0)
        sizer.Add(apply_and_btn, 1, wx.ALL | wx.EXPAND, 0)

        panel.SetSizer(sizer)
        self.SetActions(panel)

    # -----------------------------------------------------------------------

    def __create_action_button(self, parent, text, icon):
        """Return an action button with the given label and icon name."""
        btn = BitmapTextButton(parent, label=text, name=icon)
        btn.LabelPosition = wx.RIGHT
        btn.Spacing = sppasDialog.fix_size(12)
        btn.BorderWidth = 0
        btn.BitmapColour = self.GetForegroundColour()
        btn.SetMinSize(wx.Size(sppasDialog.fix_size(32),
                               sppasDialog.fix_size(32)))
        return btn

    # ------------------------------------------------------------------------
    # Callback to events
    # ------------------------------------------------------------------------

    def _process_event(self, event):
        """Process any kind of events.

        :param event: (wx.Event)

        """
        event_obj = event.GetEventObject()
        event_name = event_obj.GetName()

        if event_name == "filter_path":
            self.__append_filter("path")
        elif event_name == "filter_name":
            self.__append_filter("name")
        elif event_name == "filter_ext":
            self.__append_filter("extension")
        elif event_name == "filter_ref":
            self.__append_filter("ref")
        elif event_name == "filter_att":
            dlg = sppasAttributeFilterDialog(self)
            response = dlg.ShowModal()
            if response == wx.ID_OK:
                # Name of the method in sppasFileDataFilters,
                # Name of the function and its value
                f = dlg.get_data()
                v = f[1].split(':')
                if len(v[0].strip()) > 1 and len(v[1].strip()) > 0:
                    self.listctrl.AppendItem(["att", f[0], f[1].strip()])
                else:
                    logging.error("Invalid input string for identifier or value.")
            dlg.Destroy()
        elif event_name == "cancel":
            self.SetReturnCode(wx.ID_CANCEL)
            self.Close()
        elif event_name == "apply":
            self.match_all = False
            self.EndModal(wx.ID_APPLY)
        elif event_name == "ok":
            self.match_all = True
            self.EndModal(wx.ID_OK)
        else:
            event.Skip()

    # ------------------------------------------------------------------------

    def __append_filter(self, fct):
        """Open a string-filter dialog and append its result for 'fct'."""
        dlg = sppasStringFilterDialog(self)
        response = dlg.ShowModal()
        if response == wx.ID_OK:
            # Name of the method in sppasFileDataFilters,
            # Name of the function and its value
            f = dlg.get_data()
            if len(f[1].strip()) > 0:
                self.listctrl.AppendItem([fct, f[0], f[1].strip()])
            else:
                logging.error("Empty input pattern.")
        dlg.Destroy()
# ---------------------------------------------------------------------------
class sppasStringFilterDialog(sppasDialog):
    """Dialog to get a filter on a string.

    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      [email protected]
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2019  Brigitte Bigi

    """

    # (displayed label, Compare function name) pairs; entries 5-9 are the
    # negated forms of entries 0-4.
    choices = (
        ("exact", "exact"),
        ("contains", "contains"),
        ("starts with", "startswith"),
        ("ends with", "endswith"),
        ("match (regexp)", "regexp"),
        ("not exact", "exact"),
        ("not contains", "contains"),
        ("not starts with", "startswith"),
        ("not ends with", "endswith"),
        ("not match", "regexp")
    )

    def __init__(self, parent):
        """Create a string filter dialog.

        :param parent: (wx.Window)

        """
        super(sppasStringFilterDialog, self).__init__(
            parent=parent,
            title='{:s} filter'.format(sg.__name__),
            style=wx.DEFAULT_FRAME_STYLE)

        self._create_content()
        self.CreateActions([wx.ID_CANCEL, wx.ID_OK])

        self.SetSize(wx.Size(380, 320))
        self.LayoutComponents()
        self.CenterOnParent()

    # -----------------------------------------------------------------------

    def get_data(self):
        """Return the data defined by the user.

        Returns: (tuple) with:

            - function (str): one of the methods in Compare
            - values (list): patterns to find separated by commas

        """
        idx = self.radiobox.GetSelection()
        label = self.radiobox.GetStringSelection()
        given_fct = self.choices[idx][1]

        # Fill the resulting dict
        prepend_fct = ""

        if given_fct != "regexp":
            # prepend "not_" if reverse
            if "not" in label:
                prepend_fct += "not_"
            # prepend "i" if case-insensitive
            if self.checkbox.GetValue() is False:
                prepend_fct += "i"

        return prepend_fct + given_fct, self.text.GetValue()

    # -----------------------------------------------------------------------
    # Methods to construct the GUI
    # -----------------------------------------------------------------------

    def _create_content(self):
        """Create the content of the message dialog."""
        panel = sppasPanel(self, name="content")
        label = sppasStaticText(panel, label="Search for pattern(s): ")
        self.text = sppasTextCtrl(panel, value="")

        choices = [row[0] for row in self.choices]
        self.radiobox = sppasRadioBoxPanel(
            panel,
            choices=choices,
            majorDimension=2,
            style=wx.RA_SPECIFY_COLS)
        self.radiobox.SetSelection(1)

        self.checkbox = CheckButton(panel, label="Case sensitive")
        self.checkbox.SetValue(False)

        # Layout
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(label, 0, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.text, 0, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.radiobox, 1, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.checkbox, 0, flag=wx.EXPAND | wx.ALL, border=4)
        panel.SetSizer(sizer)

        panel.SetMinSize((240, 160))
        panel.SetAutoLayout(True)
        self.SetContent(panel)
# ---------------------------------------------------------------------------
class sppasAttributeFilterDialog(sppasDialog):
    """Dialog to get a filter on an attribute.

    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      [email protected]
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2019  Brigitte Bigi

    """

    # (displayed label, Compare function name) pairs. Entries 0-9 are the
    # string comparisons (5-9 negated); entries 10-14 are numeric ones.
    choices = (
        ("exact", "exact"),
        ("contains", "contains"),
        ("starts with", "startswith"),
        ("ends with", "endswith"),
        ("match (regexp)", "regexp"),
        ("not exact", "exact"),
        ("not contains", "contains"),
        ("not starts with", "startswith"),
        ("not ends with", "endswith"),
        ("not match", "regexp"),
        ("equal", "equal"),
        ("greater than", "gt"),
        ("greater or equal", "ge"),
        ("lower than", "lt"),
        ("lower or equal", "le")
    )

    def __init__(self, parent):
        """Create an attribute filter dialog.

        :param parent: (wx.Window)

        """
        super(sppasAttributeFilterDialog, self).__init__(
            parent=parent,
            title='{:s} filter'.format(sg.__name__),
            style=wx.DEFAULT_FRAME_STYLE)

        self._create_content()
        self.CreateActions([wx.ID_CANCEL, wx.ID_OK])

        self.SetMinSize(wx.Size(sppasDialog.fix_size(420),
                                sppasDialog.fix_size(320)))
        self.LayoutComponents()
        self.CenterOnParent()

    # ------------------------------------------------------------------------

    def get_data(self):
        """Return the data defined by the user.

        Returns: (tuple) with:

            - function (str): one of the methods in Compare
            - values (list): attribute to find as identifier, value

        """
        idx = self.radiobox.GetSelection()
        label = self.radiobox.GetStringSelection()
        given_fct = self.choices[idx][1]

        # Fill the resulting dict
        prepend_fct = ""

        # idx >= 10 are numeric comparisons: no "not_"/"i" prefixes apply.
        if idx < 10 and given_fct != "regexp":
            # prepend "not_" if reverse
            if "not" in label:
                prepend_fct += "not_"
            # prepend "i" if case-insensitive
            if self.checkbox.GetValue() is False:
                prepend_fct += "i"

        return prepend_fct + given_fct, \
               self.text_ident.GetValue() + ":" + self.text_value.GetValue()

    # -----------------------------------------------------------------------
    # Methods to construct the GUI
    # -----------------------------------------------------------------------

    def _create_content(self):
        """Create the content of the message dialog."""
        panel = sppasPanel(self, name="content")
        label = sppasStaticText(panel, label="Identifier: ")
        self.text_ident = sppasTextCtrl(
            panel,
            value="",
            validator=IdentifierTextValidator())

        choices = [row[0] for row in sppasAttributeFilterDialog.choices]
        self.radiobox = sppasRadioBoxPanel(
            panel,
            choices=choices,
            majorDimension=3,
            style=wx.RA_SPECIFY_COLS)
        self.radiobox.SetSelection(1)
        self.radiobox.Bind(wx.EVT_RADIOBOX, self._on_radiobox_checked)

        self.checkbox = CheckButton(panel, label="Case sensitive")
        self.checkbox.SetValue(False)

        self.text_value = sppasTextCtrl(panel, value="")

        # Layout
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(label, 0, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.text_ident, 0, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.radiobox, 1, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.text_value, 0, flag=wx.EXPAND | wx.ALL, border=4)
        sizer.Add(self.checkbox, 0, flag=wx.EXPAND | wx.ALL, border=4)
        panel.SetSizer(sizer)

        panel.SetMinSize((240, 160))
        panel.SetAutoLayout(True)
        self.SetContent(panel)

    # -----------------------------------------------------------------------

    def _on_radiobox_checked(self, event):
        """Disable the case-sensitivity option for numeric comparisons."""
        value = self.radiobox.GetStringSelection()
        # BUG FIX: 'choices[10:]' holds (label, function) tuples so the
        # selected label string could never be a member; compare against
        # the labels only.
        if value in [row[0] for row in sppasAttributeFilterDialog.choices[10:]]:
            self.checkbox.SetValue(False)
            self.checkbox.Enable(False)
        else:
            self.checkbox.Enable(True)
package.py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Nsimd(CMakePackage):
    """NSIMD is a vectorization library that abstracts SIMD programming.
    It was designed to exploit the maximum power of processors
    at a low development cost."""

    homepage = "https://agenium-scale.github.io/nsimd/"
    url = "https://github.com/agenium-scale/nsimd/archive/v1.0.tar.gz"

    maintainers = ['eschnett']

    version('3.0.1', sha256='6a90d7ce5f9da5cfac872463951f3374bb0e0824d92f714db0fd4901b32497fd')
    version('3.0', sha256='5cab09020ce3a6819ddb3b3b8cafa6bc1377821b596c0f2954f52c852d092d5c')
    version('2.2', sha256='7916bec6c8ea9ddc690a5bfc80fb1b9402f9e1b2a4b4bb6b6bb8eb5a07eb018e')
    version('2.1', sha256='3274f1061d1fac170130b8c75378a6b94580629b3dc1d53db244b51500ee4695')
    # Version 2.0 is disabled since it does not support cmake
    # version('2.0', sha256='b239e98316f93257161b25c8232634884edcee358982a74742981cc9b68da642')
    version('1.0', sha256='523dae83f1d93eab30114321f1c9a67e2006a52595da4c51f121ca139abe0857')

    variant('simd',
            default='auto',
            description='SIMD instruction set',
            values=(
                'auto',
                'CPU',
                'SSE2', 'SSE42', 'AVX', 'AVX2', 'AVX512_KNL', 'AVX512_SKYLAKE',
                'NEON128', 'AARCH64',
                'SVE', 'SVE128', 'SVE256', 'SVE512', 'SVE1024', 'SVE2048',
                'VMX', 'VSX',
                'CUDA', 'ROCM',
            ),
            multi=False)
    variant('optionals', values=any_combination_of('FMA', 'FP16'),
            description='Optional SIMD features',)

    conflicts('simd=SVE128', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('simd=SVE256', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('simd=SVE512', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('simd=SVE1024', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('simd=SVE2048', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('simd=VMX', when=('@:2'),
              msg="SIMD extension not available in version @:2")
    conflicts('simd=VSX', when=('@:2'),
              msg="SIMD extension not available in version @:2")
    conflicts('simd=CUDA', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('simd=ROCM', when=('@:1'),
              msg="SIMD extension not available in version @:1")
    conflicts('optionals=FMA', when=('@2:'),
              msg="SIMD optionals not available in version @2:")
    conflicts('optionals=FP16', when=('@2:'),
              msg="SIMD optionals not available in version @2:")
    conflicts('optionals=FMA,FP16', when=('@2:'),
              msg="SIMD optionals not available in version @2:")

    # Requires a C++14 compiler for building.
    # The C++ interface requires a C++11 compiler to use.
    depends_on('[email protected]:', type='build')
    depends_on('[email protected]:', type='build', when='@2:')
    depends_on('python@3:', type='build')
    depends_on('py-chardet', type='build', when='@3:')
    depends_on('py-requests', type='build', when='@3:')

    # Add a 'generate_code' phase in the beginning
    phases = ['generate_code'] + CMakePackage.phases

    def generate_code(self, spec, prefix):
        """Auto-generates code in the build directory"""
        if self.spec.satisfies("@:1"):
            options = [
                'egg/hatch.py',
                '--all',
                '--force',
            ]
            python = spec['python'].command
            python(*options)

    def cmake_args(self):
        """Return the CMake arguments, auto-detecting the SIMD target."""
        # Required SIMD argument
        simd = self.spec.variants['simd'].value
        if simd == 'auto':
            # x86
            if 'avx512' in self.spec.target:
                simd = 'AVX512_SKYLAKE'
            elif self.spec.satisfies('target=mic_knl'):
                simd = 'AVX512_KNL'
            elif 'avx2' in self.spec.target:
                simd = 'AVX2'
            elif 'avx' in self.spec.target:
                simd = 'AVX'
            elif 'sse4_2' in self.spec.target:
                simd = 'SSE42'
            elif 'sse2' in self.spec.target:
                simd = 'SSE2'
            # ARM
            elif 'sve' in self.spec.target:
                # We require an explicit choice for particular bit widths
                simd = 'SVE'
            elif self.spec.satisfies('target=aarch64:'):
                simd = 'AARCH64'
            elif 'neon' in self.spec.target:
                simd = 'NEON128'
            # POWER
            elif 'vsx' in self.spec.target:
                simd = 'VSX'
            elif (self.spec.satisfies('target=ppc64:') or
                  self.spec.satisfies('target=ppc64le:')):
                simd = 'VMX'
            # Unknown CPU architecture
            else:
                simd = 'CPU'

        if self.spec.satisfies("@:1"):
            cmake_args = ["-DSIMD={0}".format(simd)]
        else:
            cmake_args = ["-Dsimd={0}".format(simd)]

        if self.spec.satisfies("@:1"):
            # Optional SIMD instructions to be turned on explicitly
            optionals_value = self.spec.variants['optionals'].value
            if optionals_value != 'none':
                optionals_arg = ';'.join(optionals_value)
                cmake_args.append("-DSIMD_OPTIONALS={0}".format(optionals_arg))

        return cmake_args
dataset_reader.py | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import tensorflow as tf
import numpy as np
from dace.frontend.tensorflow import TFSession
import matplotlib.pyplot as plt
import sys
def data_input_fn(filenames, batch_size=2, shuffle=False):
    """Return a TF1 input function yielding (image, label) batches.

    :param filenames: list of TFRecord file paths
    :param batch_size: number of examples per batch
    :param shuffle: whether to shuffle the dataset before batching

    """

    def _parser(record):
        # Parse one serialized Example into a (28, 28) float image and a
        # one-hot encoded label of depth 10.
        features = {
            "label": tf.FixedLenFeature([], tf.int64),
            "image_raw": tf.FixedLenFeature([], tf.string),
        }
        parsed_record = tf.parse_single_example(record, features)
        image = tf.decode_raw(parsed_record["image_raw"], tf.float32)
        image = tf.reshape(image, [28, 28])
        label = tf.cast(parsed_record["label"], tf.int32)
        label = tf.one_hot(indices=label, depth=10, on_value=1, off_value=0)
        # BUG FIX: the label was previously one-hot encoded a second time
        # (tf.one_hot of an already one-hot vector yields a [10, 10] tensor);
        # return the encoded label directly.
        return image, label

    def _input_fn():
        dataset = tf.data.TFRecordDataset(filenames).map(_parser)
        if shuffle:
            dataset = dataset.shuffle(buffer_size=10000)
        dataset = dataset.batch(batch_size, drop_remainder=True)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return features, labels

    return _input_fn
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('USAGE: dataset_reader.py <FILENAME> [FILENAMES...]')
        exit(1)

    filenames = list(sys.argv[1:])

    # Reference run: read one batch of images through stock TensorFlow.
    with tf.Session() as sess:
        output_tf = sess.run(data_input_fn(filenames)())[0]
        for _out in output_tf:
            _out = np.multiply(255.0, _out)
            _out = _out.astype(np.uint8)
            plt.imshow(_out)
            plt.show()

    # Same pipeline executed through the DaCe TensorFlow frontend.
    with TFSession() as sess:
        output_dace = sess.run(data_input_fn(filenames)())[0]
        for _out in output_dace:
            _out = np.multiply(255.0, _out)
            _out = _out.astype(np.uint8)
            plt.imshow(_out)
            plt.show()
s3uploader_test.go | package imguploader
import (
"testing"
"github.com/grafana/grafana/pkg/setting"
. "github.com/smartystreets/goconvey/convey"
)
func | (t *testing.T) {
SkipConvey("[Integration test] for external_image_store.s3", t, func() {
setting.NewConfigContext(&setting.CommandLineArgs{
HomePath: "../../../",
})
s3Uploader, _ := NewImageUploader()
path, err := s3Uploader.Upload("../../../public/img/logo_transparent_400x.png")
So(err, ShouldBeNil)
So(path, ShouldNotEqual, "")
})
}
| TestUploadToS3 |
registration.ts | import { MockedResponse } from '@apollo/client/testing';
import addDays from 'date-fns/addDays';
import subDays from 'date-fns/subDays';
import {
RegistrationDocument,
RegistrationFieldsFragment,
} from '../../../generated/graphql';
import {
fakeRegistration,
fakeRegistrations,
} from '../../../utils/mockDataUtils';
import {
attendees,
waitingAttendees,
} from '../../enrolments/__mocks__/enrolmentsPage';
import { TEST_EVENT_ID } from '../../event/constants';
import { REGISTRATION_INCLUDES } from '../constants';
const registrationId = 'registration:1';
const now = new Date();
// Enrolment window is open: started yesterday, ends tomorrow.
const enrolmentStartTime = subDays(now, 1).toISOString();
const enrolmentEndTime = addDays(now, 1).toISOString();

const registrationOverrides = {
  id: registrationId,
  audienceMaxAge: 18,
  audienceMinAge: 12,
  confirmationMessage: 'Confirmation message',
  enrolmentEndTime,
  enrolmentStartTime,
  event: TEST_EVENT_ID,
  instructions: 'Instructions',
  maximumAttendeeCapacity: 100,
  minimumAttendeeCapacity: 10,
  signups: [...attendees.data, ...waitingAttendees.data],
  waitingListCapacity: 5,
};

const registration = fakeRegistration(registrationOverrides);
const registrationVariables = {
  createPath: undefined,
  id: registrationId,
  include: REGISTRATION_INCLUDES,
};
const registrationResponse = { data: { registration } };
// Apollo mock answering the single-registration query.
const mockedRegistrationResponse: MockedResponse = {
  request: { query: RegistrationDocument, variables: registrationVariables },
  result: registrationResponse,
};

const singleRegistrationOverrides = {
  enrolmentEndTime,
  enrolmentStartTime,
  maximumAttendeeCapacity: 10,
  waitingListCapacity: 10,
};
const registrationsOverrides: Partial<RegistrationFieldsFragment>[] = [
{ | ...singleRegistrationOverrides,
},
{
id: '2',
...singleRegistrationOverrides,
currentAttendeeCount: singleRegistrationOverrides.maximumAttendeeCapacity,
currentWaitingListCount: 0,
},
{
id: '3',
...singleRegistrationOverrides,
currentAttendeeCount: singleRegistrationOverrides.maximumAttendeeCapacity,
currentWaitingListCount: 0,
waitingListCapacity: null,
},
{
id: '4',
...singleRegistrationOverrides,
currentAttendeeCount: singleRegistrationOverrides.maximumAttendeeCapacity,
currentWaitingListCount: singleRegistrationOverrides.waitingListCapacity,
},
{
id: '5',
...singleRegistrationOverrides,
currentAttendeeCount: 1000,
maximumAttendeeCapacity: 0,
},
];
const registrationsResponse = fakeRegistrations(
registrationsOverrides.length,
registrationsOverrides
);
export {
mockedRegistrationResponse,
registration,
registrationId,
registrationsResponse,
}; | id: '1',
currentAttendeeCount: 0, |
paths.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Package paths provides a common way to handle paths
// configuration for all Beats.
//
// Currently the following paths are defined:
//
// path.home - It’s the default folder for everything that doesn't fit in
// the categories below
//
// path.data - Contains things that are expected to change often during normal
// operations (“registry” files, UUID file, etc.)
//
// path.config - Configuration files and Elasticsearch template default location
//
// These settings can be set via the configuration file or via command line flags.
// The CLI flags overwrite the configuration file options.
//
// Use the Resolve function to resolve files to their absolute paths. For example,
// to look for a file in the config path:
//
// cfgfilePath := paths.Resolve(paths.Config, "beat.yml"
package paths
import (
"fmt"
"os"
"path/filepath"
)
// Path tracks user-configurable path locations and directories
type Path struct {
Home string
Config string
Data string
Logs string
}
// FileType is an enumeration type representing the file types.
// Currently existing file types are: Home, Config, Data
type FileType string
const (
// Home is the "root" directory for the running beats instance
Home FileType = "home"
// Config is the path to the beat config
Config FileType = "config"
// Data is the path to the beat data directory
Data FileType = "data"
// Logs is the path to the beats logs directory
Logs FileType = "logs"
)
// Paths is the Path singleton on which the top level functions from this
// package operate.
var Paths = New()
// New creates a new Paths object with all values set to empty values.
func New() *Path {
return &Path{}
}
// InitPaths sets the default paths in the configuration based on CLI flags,
// configuration file and default values. It also tries to create the data
// path with mode 0750 and returns an error on failure.
func (paths *Path) InitPaths(cfg *Path) error {
err := paths.initPaths(cfg)
if err != nil {
return err
}
// make sure the data path exists
err = os.MkdirAll(paths.Data, 0750)
if err != nil {
return fmt.Errorf("Failed to create data path %s: %v", paths.Data, err)
}
return nil
}
// InitPaths sets the default paths in the configuration based on CLI flags,
// configuration file and default values. It also tries to create the data
// path with mode 0750 and returns an error on failure.
func InitPaths(cfg *Path) error {
return Paths.InitPaths(cfg)
}
// initPaths sets the default paths in the configuration based on CLI flags,
// configuration file and default values.
func (paths *Path) initPaths(cfg *Path) error {
*paths = *cfg
// default for config path
if paths.Config == "" {
paths.Config = paths.Home
}
// default for data path
if paths.Data == "" {
paths.Data = filepath.Join(paths.Home, "data")
}
// default for logs path
if paths.Logs == "" {
paths.Logs = filepath.Join(paths.Home, "logs")
}
return nil
}
// Resolve resolves a path to a location in one of the default
// folders. For example, Resolve(Home, "test") returns an absolute
// path for "test" in the home path.
func (paths *Path) Resolve(fileType FileType, path string) string {
// absolute paths are not changed for non-hostfs file types, since hostfs is a little odd
if filepath.IsAbs(path) {
return path
}
switch fileType {
case Home:
return filepath.Join(paths.Home, path)
case Config:
return filepath.Join(paths.Config, path)
case Data:
return filepath.Join(paths.Data, path)
case Logs:
return filepath.Join(paths.Logs, path)
default:
panic(fmt.Sprintf("Unknown file type: %s", fileType))
}
}
// Resolve resolves a path to a location in one of the default
// folders. For example, Resolve(Home, "test") returns an absolute
// path for "test" in the home path.
// In case path is already an absolute path, the path itself is returned.
func Resolv | ype FileType, path string) string {
return Paths.Resolve(fileType, path)
}
// String returns a textual representation
func (paths *Path) String() string {
return fmt.Sprintf("Home path: [%s] Config path: [%s] Data path: [%s] Logs path: [%s]",
paths.Home, paths.Config, paths.Data, paths.Logs)
}
| e(fileT |
en_IE.js | var faker = new Faker({ locale: 'en_IE', localeFallback: 'en' });
faker.locales['en_IE'] = require('../lib/locales/en_IE');
faker.locales['en'] = require('../lib/locales/en');
module['exports'] = faker; | var Faker = require('../lib'); |
|
example_test.go | // Portions of this file are derived from Paessler AG's JSONPath implementation.
//
// Copyright (c) 2017 Paessler AG <[email protected]>. All rights reserved.
//
// https://github.com/PaesslerAG/jsonpath/blob/c18d0f043db32b5d4295e14c6518fa9160e45d15/example_test.go
//
// Licensed under a three-clause BSD-style license. A copy of the full license
// document is included in this distribution in the file
// `example_test.go.LICENSE`.
package jsonpath_test
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/PaesslerAG/gval"
"github.com/puppetlabs/leg/jsonutil/pkg/jsonpath"
)
func ExampleGet() {
v := interface{}(nil)
_ = json.Unmarshal([]byte(`{
"welcome":{
"message":["Good Morning", "Hello World!"]
}
}`), &v)
welcome, err := jsonpath.Get("$.welcome.message[1]", v)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(welcome)
// Output:
// Hello World!
}
func ExampleGet_wildcard() {
v := interface{}(nil)
_ = json.Unmarshal([]byte(`{
"welcome":{
"message":["Good Morning", "Hello World!"]
}
}`), &v)
welcome, err := jsonpath.Get("$.welcome.message[*]", v)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, value := range welcome.([]interface{}) {
fmt.Printf("%v\n", value)
}
// Output:
// Good Morning
// Hello World!
}
func ExampleGet_filter() {
v := interface{}(nil)
_ = json.Unmarshal([]byte(`[
{"key":"a","value" : "I"},
{"key":"b","value" : "II"},
{"key":"c","value" : "III"}
]`), &v)
values, err := jsonpath.Get(`$[?(@.key=="b")].value`, v)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, value := range values.([]interface{}) {
fmt.Println(value)
}
// Output:
// II
}
func Example_gval() {
builder := gval.Full(jsonpath.Language(jsonpath.WithPlaceholders{}))
path, err := builder.NewEvaluable("{#1: $..[?(@.ping && @.speed > 100)].name}")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
v := interface{}(nil)
err = json.Unmarshal([]byte(`{
"device 1":{
"name": "fancy device",
"ping": true,
"speed": 200,
"subdevice 1":{
"ping" : true,
"speed" : 99,
"name" : "boring subdevice"
},
"subdevice 2":{
"ping" : true,
"speed" : 150,
"name" : "fancy subdevice"
},
"not an device":{
"name" : "ping me but I have no speed property",
"ping" : true
}
},
"fictive device":{
"ping" : false,
"speed" : 1000,
"name" : "dream device"
}
}`), &v)
if err != nil |
devices, err := path(context.Background(), v)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for device, name := range devices.(map[string]interface{}) {
fmt.Printf("%s -> %v\n", device, name)
}
// Unordered output:
// device 1 -> fancy device
// subdevice 2 -> fancy subdevice
}
func Example_variableSelector() {
builder := gval.NewLanguage(
jsonpath.Language(),
gval.VariableSelector(jsonpath.ChildVariableSelector(func(ctx context.Context, v interface{}, key interface{}, next func(context.Context, jsonpath.PathValue) error) error {
return jsonpath.DefaultVariableVisitor().VisitChild(ctx, v, key, func(ctx context.Context, pv jsonpath.PathValue) error {
if s, ok := pv.Value.(string); ok && strings.HasPrefix(s, "base64:") {
b, err := base64.StdEncoding.DecodeString(s[len("base64:"):])
if err != nil {
return fmt.Errorf("could not decode base64 value: %v", err)
}
pv.Value = string(b)
}
return next(ctx, pv)
})
})),
)
path, err := builder.NewEvaluable(`$.encoded`)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
var v interface{}
err = json.Unmarshal([]byte(`{
"encoded": "base64:SGVsbG8sIHdvcmxkIQ=="
}`), &v)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
decoded, err := path(context.Background(), v)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(decoded)
// Output:
// Hello, world!
}
| {
fmt.Println(err)
os.Exit(1)
} |
lib.rs | #[macro_use]
extern crate diesel;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use diesel::prelude::*;
use diesel::insert_into;
#[cfg(test)]
use diesel::debug_query;
#[cfg(test)]
use diesel::pg::Pg;
use std::error::Error;
use std::time::SystemTime;
mod schema {
table! {
users {
id -> Integer,
name -> Text,
hair_color -> Nullable<Text>,
created_at -> Timestamp,
updated_at -> Timestamp,
}
}
}
use schema::users;
#[derive(Deserialize, Insertable)]
#[table_name = "users"]
pub struct UserForm<'a> {
name: &'a str,
hair_color: Option<&'a str>,
}
#[derive(Queryable, PartialEq, Debug)]
struct User {
id: i32,
name: String,
hair_color: Option<String>,
created_at: SystemTime,
updated_at: SystemTime,
}
pub fn insert_default_values(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users).default_values().execute(conn)
}
#[test]
fn examine_sql_from_insert_default_values() {
use schema::users::dsl::*;
let query = insert_into(users).default_values();
let sql = "INSERT INTO \"users\" DEFAULT VALUES -- binds: []";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_single_column(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users).values(name.eq("Sean")).execute(conn)
}
#[test]
fn examine_sql_from_insert_single_column() {
use schema::users::dsl::*;
let query = insert_into(users).values(name.eq("Sean"));
let sql = "INSERT INTO \"users\" (\"name\") VALUES ($1) \
-- binds: [\"Sean\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_multiple_columns(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users)
.values((name.eq("Tess"), hair_color.eq("Brown")))
.execute(conn)
}
#[test]
fn examine_sql_from_insert_multiple_columns() {
use schema::users::dsl::*;
let query = insert_into(users).values((name.eq("Tess"), hair_color.eq("Brown")));
let sql = "INSERT INTO \"users\" (\"name\", \"hair_color\") VALUES ($1, $2) \
-- binds: [\"Tess\", \"Brown\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_insertable_struct(conn: &PgConnection) -> Result<(), Box<Error>> {
use schema::users::dsl::*;
let json = r#"{ "name": "Sean", "hair_color": "Black" }"#;
let user_form = serde_json::from_str::<UserForm>(json)?;
insert_into(users).values(&user_form).execute(conn)?;
Ok(())
}
#[test]
fn examine_sql_from_insertable_struct() {
use schema::users::dsl::*;
let json = r#"{ "name": "Sean", "hair_color": "Black" }"#;
let user_form = serde_json::from_str::<UserForm>(json).unwrap();
let query = insert_into(users).values(&user_form);
let sql = "INSERT INTO \"users\" (\"name\", \"hair_color\") VALUES ($1, $2) \
-- binds: [\"Sean\", \"Black\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_insertable_struct_option(conn: &PgConnection) -> Result<(), Box<Error>> {
use schema::users::dsl::*;
let json = r#"{ "name": "Ruby", "hair_color": null }"#;
let user_form = serde_json::from_str::<UserForm>(json)?;
insert_into(users).values(&user_form).execute(conn)?;
Ok(())
}
#[test]
fn examine_sql_from_insertable_struct_option() {
use schema::users::dsl::*;
let json = r#"{ "name": "Ruby", "hair_color": null }"#;
let user_form = serde_json::from_str::<UserForm>(json).unwrap();
let query = insert_into(users).values(&user_form);
let sql = "INSERT INTO \"users\" (\"name\", \"hair_color\") VALUES ($1, DEFAULT) \
-- binds: [\"Ruby\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_single_column_batch(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users)
.values(&vec![name.eq("Sean"), name.eq("Tess")])
.execute(conn)
}
#[test]
fn examine_sql_from_insert_single_column_batch() {
use schema::users::dsl::*;
let values = vec![name.eq("Sean"), name.eq("Tess")];
let query = insert_into(users).values(&values);
let sql = "INSERT INTO \"users\" (\"name\") VALUES ($1), ($2) \
-- binds: [\"Sean\", \"Tess\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_single_column_batch_with_default(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users)
.values(&vec![Some(name.eq("Sean")), None])
.execute(conn)
}
#[test]
fn examine_sql_from_insert_single_column_batch_with_default() |
pub fn insert_tuple_batch(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users)
.values(&vec![
(name.eq("Sean"), hair_color.eq("Black")),
(name.eq("Tess"), hair_color.eq("Brown")),
])
.execute(conn)
}
#[test]
fn examine_sql_from_insert_tuple_batch() {
use schema::users::dsl::*;
let values = vec![
(name.eq("Sean"), hair_color.eq("Black")),
(name.eq("Tess"), hair_color.eq("Brown")),
];
let query = insert_into(users).values(&values);
let sql = "INSERT INTO \"users\" (\"name\", \"hair_color\") \
VALUES ($1, $2), ($3, $4) \
-- binds: [\"Sean\", \"Black\", \"Tess\", \"Brown\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_tuple_batch_with_default(conn: &PgConnection) -> QueryResult<usize> {
use schema::users::dsl::*;
insert_into(users)
.values(&vec![
(name.eq("Sean"), Some(hair_color.eq("Black"))),
(name.eq("Ruby"), None),
])
.execute(conn)
}
#[test]
fn examine_sql_from_insert_tuple_batch_with_default() {
use schema::users::dsl::*;
let values = vec![
(name.eq("Sean"), Some(hair_color.eq("Black"))),
(name.eq("Ruby"), None),
];
let query = insert_into(users).values(&values);
let sql = "INSERT INTO \"users\" (\"name\", \"hair_color\") \
VALUES ($1, $2), ($3, DEFAULT) \
-- binds: [\"Sean\", \"Black\", \"Ruby\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn insert_insertable_struct_batch(conn: &PgConnection) -> Result<(), Box<Error>> {
use schema::users::dsl::*;
let json = r#"[
{ "name": "Sean", "hair_color": "Black" },
{ "name": "Tess", "hair_color": "Brown" }
]"#;
let user_form = serde_json::from_str::<Vec<UserForm>>(json)?;
insert_into(users).values(&user_form).execute(conn)?;
Ok(())
}
#[test]
fn examine_sql_from_insertable_struct_batch() {
use schema::users::dsl::*;
let json = r#"[
{ "name": "Sean", "hair_color": "Black" },
{ "name": "Tess", "hair_color": "Brown" }
]"#;
let user_form = serde_json::from_str::<Vec<UserForm>>(json).unwrap();
let query = insert_into(users).values(&user_form);
let sql = "INSERT INTO \"users\" (\"name\", \"hair_color\") \
VALUES ($1, $2), ($3, $4) \
-- binds: [\"Sean\", \"Black\", \"Tess\", \"Brown\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
#[test]
fn insert_get_results_batch() {
let conn = establish_connection();
conn.test_transaction::<_, diesel::result::Error, _>(|| {
use diesel::select;
use schema::users::dsl::*;
let now = select(diesel::dsl::now).get_result::<SystemTime>(&conn)?;
let inserted_users = insert_into(users)
.values(&vec![
(id.eq(1), name.eq("Sean")),
(id.eq(2), name.eq("Tess")),
])
.get_results(&conn)?;
let expected_users = vec![
User {
id: 1,
name: "Sean".into(),
hair_color: None,
created_at: now,
updated_at: now,
},
User {
id: 2,
name: "Tess".into(),
hair_color: None,
created_at: now,
updated_at: now,
},
];
assert_eq!(expected_users, inserted_users);
Ok(())
});
}
#[test]
fn examine_sql_from_insert_get_results_batch() {
use diesel::query_builder::AsQuery;
use schema::users::dsl::*;
let values = vec![(id.eq(1), name.eq("Sean")), (id.eq(2), name.eq("Tess"))];
let query = insert_into(users).values(&values).as_query();
let sql = "INSERT INTO \"users\" (\"id\", \"name\") VALUES ($1, $2), ($3, $4) \
RETURNING \"users\".\"id\", \"users\".\"name\", \
\"users\".\"hair_color\", \"users\".\"created_at\", \
\"users\".\"updated_at\" -- binds: [1, \"Sean\", 2, \"Tess\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
#[test]
fn insert_get_result() {
let conn = establish_connection();
conn.test_transaction::<_, diesel::result::Error, _>(|| {
use diesel::select;
use schema::users::dsl::*;
let now = select(diesel::dsl::now).get_result::<SystemTime>(&conn)?;
let inserted_user = insert_into(users)
.values((id.eq(3), name.eq("Ruby")))
.get_result(&conn)?;
let expected_user = User {
id: 3,
name: "Ruby".into(),
hair_color: None,
created_at: now,
updated_at: now,
};
assert_eq!(expected_user, inserted_user);
Ok(())
});
}
#[test]
fn examine_sql_from_insert_get_result() {
use diesel::query_builder::AsQuery;
use schema::users::dsl::*;
let query = insert_into(users)
.values((id.eq(3), name.eq("Ruby")))
.as_query();
let sql = "INSERT INTO \"users\" (\"id\", \"name\") VALUES ($1, $2) \
RETURNING \"users\".\"id\", \"users\".\"name\", \
\"users\".\"hair_color\", \"users\".\"created_at\", \
\"users\".\"updated_at\" -- binds: [3, \"Ruby\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
pub fn explicit_returning(conn: &PgConnection) -> QueryResult<i32> {
use schema::users::dsl::*;
insert_into(users)
.values(name.eq("Ruby"))
.returning(id)
.get_result(conn)
}
#[test]
fn examine_sql_from_explicit_returning() {
use schema::users::dsl::*;
let query = insert_into(users).values(name.eq("Ruby")).returning(id);
let sql = "INSERT INTO \"users\" (\"name\") VALUES ($1) \
RETURNING \"users\".\"id\" \
-- binds: [\"Ruby\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
}
#[cfg(test)]
fn establish_connection() -> PgConnection {
let url = ::std::env::var("DATABASE_URL").unwrap();
PgConnection::establish(&url).unwrap()
}
| {
use schema::users::dsl::*;
let values = vec![Some(name.eq("Sean")), None];
let query = insert_into(users).values(&values);
let sql = "INSERT INTO \"users\" (\"name\") VALUES ($1), (DEFAULT) \
-- binds: [\"Sean\"]";
assert_eq!(sql, debug_query::<Pg, _>(&query).to_string());
} |
PhysicsSystem.ts | import { pipe } from 'bitecs'
import { Box3, Mesh, Quaternion, Vector3 } from 'three'
import matches from 'ts-matches'
import { AvatarComponent } from '../../avatar/components/AvatarComponent'
import { Engine } from '../../ecs/classes/Engine'
import { Entity } from '../../ecs/classes/Entity'
import { World } from '../../ecs/classes/World'
import { defineQuery, getComponent, hasComponent } from '../../ecs/functions/ComponentFunctions'
import { useWorld } from '../../ecs/functions/SystemHooks'
import { BoundingBoxComponent } from '../../interaction/components/BoundingBoxComponent'
import { NetworkObjectComponent } from '../../networking/components/NetworkObjectComponent'
import { NetworkWorldAction } from '../../networking/functions/NetworkWorldAction'
import { NameComponent } from '../../scene/components/NameComponent'
import { Object3DComponent } from '../../scene/components/Object3DComponent'
import { TransformComponent } from '../../transform/components/TransformComponent'
import { isDynamicBody, isStaticBody } from '../classes/Physics'
import { ColliderComponent } from '../components/ColliderComponent'
import { CollisionComponent } from '../components/CollisionComponent'
import { RaycastComponent } from '../components/RaycastComponent'
import { VelocityComponent } from '../components/VelocityComponent'
import { teleportRigidbody } from '../functions/teleportRigidbody'
// Receptor
function physicsActionReceptor(action: unknown) {
const world = useWorld()
matches(action).when(NetworkWorldAction.teleportObject.matches, (a) => { | const entity = world.getNetworkObject(a.object.ownerId, a.object.networkId)
const colliderComponent = getComponent(entity, ColliderComponent)
if (colliderComponent) {
teleportRigidbody(colliderComponent.body, new Vector3(x, y, z), new Quaternion(qX, qY, qZ, qW))
}
})
}
// Queries
const boxQuery = defineQuery([BoundingBoxComponent, Object3DComponent])
const networkColliderQuery = defineQuery([NetworkObjectComponent, ColliderComponent])
const raycastQuery = defineQuery([RaycastComponent])
const colliderQuery = defineQuery([ColliderComponent])
const collisionComponent = defineQuery([CollisionComponent])
// Simulation Handlers
/**
* @author HydraFire <github.com/HydraFire>
* @author Josh Field <github.com/HexaField>
*/
const scratchBox = new Box3()
const processBoundingBox = (entity: Entity, force = false) => {
const boundingBox = getComponent(entity, BoundingBoxComponent)
if (boundingBox.dynamic || force) {
const object3D = getComponent(entity, Object3DComponent)
let object3DAABB = boundingBox.box.makeEmpty()
object3D.value.traverse((mesh: Mesh) => {
if (mesh instanceof Mesh) {
if (!mesh.geometry.boundingBox) mesh.geometry.computeBoundingBox() // only here for edge cases, this would already be calculated
const meshAABB = scratchBox.copy(mesh.geometry.boundingBox!)
meshAABB.applyMatrix4(mesh.matrixWorld)
object3DAABB.union(meshAABB)
}
})
}
}
const processRaycasts = (world: World) => {
for (const entity of raycastQuery()) {
world.physics.doRaycast(getComponent(entity, RaycastComponent))
}
return world
}
const processNetworkBodies = (world: World) => {
// Set network state to physics body pose for objects not owned by this user.
for (const entity of networkColliderQuery()) {
const network = getComponent(entity, NetworkObjectComponent)
const nameComponent = getComponent(entity, NameComponent)
// Ignore if we own this object or no new network state has been received for this object
// (i.e. packet loss and/or state not sent out from server because no change in state since last frame)
if (network.ownerId === Engine.userId || network.lastTick < world.fixedTick) {
// console.log('ignoring state for:', nameComponent)
continue
}
const collider = getComponent(entity, ColliderComponent)
const transform = getComponent(entity, TransformComponent)
const body = collider.body as PhysX.PxRigidDynamic
teleportRigidbody(body, transform.position, transform.rotation)
const linearVelocity = getComponent(entity, VelocityComponent).linear
const angularVelocity = getComponent(entity, VelocityComponent).angular
body.setLinearVelocity(linearVelocity, true)
body.setAngularVelocity(angularVelocity, true)
// console.log(
// 'physics velocity of network object:',
// nameComponent.name,
// network.lastTick,
// world.fixedTick,
// angularVelocity.x,
// angularVelocity.y,
// angularVelocity.z
// )
}
return world
}
const processBodies = (world: World) => {
for (const entity of colliderQuery()) {
const velocity = getComponent(entity, VelocityComponent)
const collider = getComponent(entity, ColliderComponent)
const transform = getComponent(entity, TransformComponent)
if (hasComponent(entity, AvatarComponent)) continue
if (Engine.isEditor || isStaticBody(collider.body)) {
const body = collider.body as PhysX.PxRigidDynamic
const currentPose = body.getGlobalPose()
if (velocity) {
velocity.linear.subVectors(currentPose.translation as Vector3, transform.position)
velocity.angular.setScalar(0) // TODO: Assuming zero velocity for static objects for now.
} else {
// console.warn("Physics entity found with no velocity component!")
}
teleportRigidbody(body, transform.position, transform.rotation)
} else if (isDynamicBody(collider.body)) {
const body = collider.body as PhysX.PxRigidDynamic
const linearVelocity = body.getLinearVelocity()
const angularVelocity = body.getAngularVelocity()
if (velocity) {
velocity.linear.copy(linearVelocity as Vector3)
velocity.angular.copy(angularVelocity as Vector3)
// const nameComponent = getComponent(entity, NameComponent)
// console.log("setting velocity component:", nameComponent.name, angularVelocity.x, angularVelocity.y, angularVelocity.z)
} else {
// console.warn("Physics entity found with no velocity component!")
}
const currentPose = body.getGlobalPose()
transform.position.copy(currentPose.translation as Vector3)
transform.rotation.copy(currentPose.rotation as Quaternion)
}
}
return world
}
const processCollisions = (world: World) => {
// clear collision components
for (const entity of collisionComponent()) {
getComponent(entity, CollisionComponent).collisions = []
}
// populate collision components with events over last simulation
for (const collisionEvent of world.physics.collisionEventQueue) {
if (collisionEvent.controllerID) {
const controller = world.physics.controllers.get(collisionEvent.controllerID)
const entity = (controller as any).userData
getComponent(entity, CollisionComponent).collisions.push(collisionEvent)
}
if (collisionEvent.shapeA) {
const bodyAID = world.physics.bodyIDByShapeID.get((collisionEvent.shapeA as any)._id)!
const bodyA = world.physics.bodies.get(bodyAID)
const bodyBID = world.physics.bodyIDByShapeID.get((collisionEvent.shapeB as any)._id)!
const bodyB = world.physics.bodies.get(bodyBID)
if (!bodyA || !bodyB) continue
const entityA = (bodyA as any).userData?.entity
const entityB = (bodyB as any).userData?.entity
getComponent(entityA, CollisionComponent).collisions.push({
type: collisionEvent.type,
bodySelf: bodyA,
bodyOther: bodyB,
shapeSelf: collisionEvent.shapeA,
shapeOther: collisionEvent.shapeB,
contacts: collisionEvent.contacts
})
getComponent(entityB, CollisionComponent).collisions.push({
type: collisionEvent.type,
bodySelf: bodyB,
bodyOther: bodyA,
shapeSelf: collisionEvent.shapeB,
shapeOther: collisionEvent.shapeA,
contacts: collisionEvent.contacts
})
}
}
// clear collision queue
world.physics.collisionEventQueue = []
return world
}
const simulationPipeline = pipe(processRaycasts, processNetworkBodies, processBodies, processCollisions)
export default async function PhysicsSystem(world: World) {
world.receptors.push(physicsActionReceptor)
await world.physics.createScene()
return () => {
for (const entity of boxQuery.enter()) {
processBoundingBox(entity, true)
}
for (const entity of colliderQuery.exit()) {
const colliderComponent = getComponent(entity, ColliderComponent, true)
if (colliderComponent?.body) {
world.physics.removeBody(colliderComponent.body)
}
}
if (Engine.isEditor) return
simulationPipeline(world)
// step physics world
for (let i = 0; i < world.physics.substeps; i++) {
world.physics.scene.simulate((world.physics.timeScale * world.fixedDelta) / world.physics.substeps, true)
world.physics.scene.fetchResults(true)
}
}
} | const [x, y, z, qX, qY, qZ, qW] = a.pose |
methodSignature.py | from PLC.Parameter import Parameter, Mixed
from PLC.Method import Method, xmlrpc_type
from functools import reduce
class methodSignature(Method):
"""
Returns an array of known signatures (an array of arrays) for the
method name passed. If no signatures are known, returns a
none-array (test for type != array to detect missing signature).
"""
roles = []
accepts = [Parameter(str, "Method name")]
returns = [Parameter([str], "Method signature")]
def __init__(self, api):
Method.__init__(self, api)
self.name = "system.methodSignature"
def possible_signatures(self, signature, arg):
|
def signatures(self, returns, args):
"""
Returns a list of possible signatures given a return value and
a set of arguments.
"""
signatures = [[xmlrpc_type(returns)]]
for arg in args:
# Create lists of possible new signatures for each current
# signature. Reduce the list of lists back down to a
# single list.
signatures = reduce(lambda a, b: a + b,
[self.possible_signatures(signature, arg) \
for signature in signatures])
return signatures
def call(self, method):
function = self.api.callable(method)
(min_args, max_args, defaults) = function.args()
signatures = []
assert len(max_args) >= len(min_args)
for num_args in range(len(min_args), len(max_args) + 1):
signatures += self.signatures(function.returns, function.accepts[:num_args])
return signatures
| """
Return a list of the possible new signatures given a current
signature and the next argument.
"""
if isinstance(arg, Mixed):
arg_types = [xmlrpc_type(mixed_arg) for mixed_arg in arg]
else:
arg_types = [xmlrpc_type(arg)]
return [signature + [arg_type] for arg_type in arg_types] |
ept.py | """
defines readers for BDF objects in the OP2 EPT/EPTS table
"""
#pylint: disable=C0103,R0914
from __future__ import annotations
from struct import unpack, Struct
from functools import partial
from typing import Tuple, List, TYPE_CHECKING
import numpy as np
#from pyNastran import is_release
from pyNastran.bdf.cards.properties.mass import PMASS, NSM, NSML
from pyNastran.bdf.cards.properties.bars import PBAR, PBARL, PBEND, PBEAM3
from pyNastran.bdf.cards.properties.beam import PBEAM, PBEAML, PBCOMP
from pyNastran.bdf.cards.properties.bush import PBUSH, PBUSHT
from pyNastran.bdf.cards.properties.damper import PDAMP, PVISC
from pyNastran.bdf.cards.properties.properties import PFAST, PGAP
from pyNastran.bdf.cards.properties.rods import PROD, PTUBE
from pyNastran.bdf.cards.properties.shell import PSHEAR, PSHELL, PCOMP
from pyNastran.bdf.cards.properties.solid import PSOLID
from pyNastran.bdf.cards.properties.springs import PELAS, PELAST
from pyNastran.bdf.cards.thermal.thermal import PCONV, PHBDY, PCONVM
# PCOMPG, PBUSH1D, PBEAML, PBEAM3
from pyNastran.op2.op2_interface.op2_reader import (
mapfmt, reshape_bytes_block_size) # reshape_bytes_block,
from .utils import get_minus1_start_end
from .geom2 import DoubleCardError
if TYPE_CHECKING: # pragma: no cover
from pyNastran.op2.op2_geom import OP2Geom
class EPT:
"""defines methods for reading op2 properties"""
@property
def size(self) -> int:
return self.op2.size
@property
def factor(self) -> int:
return self.op2.factor
def _read_fake(self, data: bytes, n: int) -> int:
return self.op2._read_fake(data, n)
def read_ept_4(self, data: bytes, ndata: int):
return self.op2._read_geom_4(self.ept_map, data, ndata)
def __init__(self, op2: OP2Geom):
self.op2 = op2
self.ept_map = {
(3201, 32, 55): ['NSM', self._read_nsm], # record 2
(52, 20, 181): ['PBAR', self._read_pbar], # record 11 - buggy
(9102, 91, 52): ['PBARL', self._read_pbarl], # record 12 - almost there...
(2706, 27, 287): ['PCOMP', self._read_pcomp], # record 22 - buggy
(302, 3, 46): ['PELAS', self._read_pelas], # record 39
(2102, 21, 121): ['PGAP', self._read_pgap], # record 42
(902, 9, 29): ['PROD', self._read_prod], # record 49
(1002, 10, 42): ['PSHEAR', self._read_pshear], # record 50
(2402, 24, 281): ['PSOLID', self._read_psolid], # record 51
(2302, 23, 283): ['PSHELL', self._read_pshell], # record 52
(1602, 16, 30): ['PTUBE', self._read_ptube], # record 56
(5402, 54, 262): ['PBEAM', self._read_pbeam], # record 14 - not done
(9202, 92, 53): ['PBEAML', self._read_pbeaml], # record 15
(2502, 25, 248): ['PBEND', self._read_pbend], # record 16 - not done
(1402, 14, 37): ['PBUSH', self._read_pbush], # record 19 - not done
(3101, 31, 219): ['PBUSH1D', self._read_pbush1d], # record 20 - not done
(152, 19, 147): ['PCONEAX', self._read_pconeax], # record 24 - not done
(11001, 110, 411): ['PCONV', self._read_pconv], # record 25 - not done
# record 26
(202, 2, 45): ['PDAMP', self._read_pdamp], # record 27 - not done
(2802, 28, 236): ['PHBDY', self._read_phbdy], # record 43 - not done
(402, 4, 44): ['PMASS', self._read_pmass], # record 48
(1802, 18, 31): ['PVISC', self._read_pvisc], # record 59
(10201, 102, 400): ['PVAL', self._read_pval], # record 58 - not done
(2606, 26, 289): ['VIEW', self._read_view], # record 62 - not done
(3201, 32, 991) : ['NSM', self._read_nsm_2], # record
(3301, 33, 992) : ['NSM1', self._read_nsm1], # record
(3701, 37, 995) : ['NSML1', self._read_nsml1_nx], # record
(3601, 36, 62): ['NSML1', self._read_nsml1_msc], # record 7
(15006, 150, 604): ['PCOMPG', self._read_pcompg], # record
(702, 7, 38): ['PBUSHT', self._read_pbusht], # record 1
(3301, 33, 56): ['NSM1', self._read_fake], # record 3
(3401, 34, 57) : ['NSMADD', self._read_fake], # record 5
(3501, 35, 58): ['NSML', self._read_fake], # record 6
(3501, 35, 994) : ['NSML', self._read_nsml],
(1502, 15, 36): ['PAABSF', self._read_fake], # record 8
(8300, 83, 382): ['PACABS', self._read_fake], # record 9
(8500, 85, 384): ['PACBAR', self._read_fake], # record 10
(5403, 55, 349): ['PBCOMP', self._read_pbcomp], # record 13
(13301, 133, 509): ['PBMSECT', self._read_fake], # record 17
(2902, 29, 420): ['PCONVM', self._read_pconvm], # record 26
(1202, 12, 33): ['PDAMPT', self._read_pdampt], # record 28
(8702, 87, 412): ['PDAMP5', self._read_pdamp5], # record 29
(6802, 68, 164): ['PDUM8', self._read_fake], # record 37
(6902, 69, 165): ['PDUM9', self._read_fake], # record 38
(1302, 13, 34): ['PELAST', self._read_pelast], # record 41
(12001, 120, 480): ['PINTC', self._read_fake], # record 44
(12101, 121, 484): ['PINTS', self._read_fake], # record 45
(4606, 46, 375): ['PLPLANE', self._read_plplane], # record 46
(4706, 47, 376): ['PLSOLID', self._read_plsolid], # record 47
(10301, 103, 399): ['PSET', self._read_pset], # record 57
(3002, 30, 415): ['VIEW3D', self._read_fake], # record 63
(13501, 135, 510) : ['PFAST', self._read_pfast_msc], # MSC-specific
(3601, 36, 55) : ['PFAST', self._read_pfast_nx], # NX-specific
(3801, 38, 979) : ['PPLANE', self._read_pplane],
(11801, 118, 560) : ['PWELD', self._read_fake],
(3401, 34, 993) : ['NSMADD', self._read_nsmadd],
(9300, 93, 684) : ['ELAR', self._read_fake],
(9400, 94, 685) : ['ELAR2', self._read_fake],
(16006, 160, 903) : ['PCOMPS', self._read_fake],
# MSC-specific
(14602, 146, 692): ['PSLDN1', self._read_fake],
(16502, 165, 916): ['PAXSYMH', self._read_fake],
(13201, 132, 513): ['PBRSECT', self._read_fake],
(13701, 137, 638): ['PWSEAM', self._read_fake],
(7001, 70, 632): ['???', self._read_fake],
(15106, 151, 953): ['PCOMPG1', self._read_fake],
(3901, 39, 969): ['PSHL3D', self._read_fake],
(17006, 170, 901): ['MATCID', self._read_fake],
(9601, 96, 691): ['PJOINT', self._read_fake],
(16502, 165, 916): ['???', self._read_fake],
(9701, 97, 692): ['PJOINT2', self._read_fake],
(13401, 134, 611): ['PBEAM3', self._read_pbeam3],
(8901, 89, 905): ['PSOLCZ', self._read_fake],
(9801, 98, 698): ['DESC', self._read_desc],
#(9701, 97, 692): ['???', self._read_fake],
#(9701, 97, 692): ['???', self._read_fake],
#(9701, 97, 692): ['???', self._read_fake],
}
def _add_op2_property(self, prop):
"""helper method for op2"""
op2 = self.op2
#if prop.pid > 100000000:
#raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))
#print(str(prop)[:-1])
ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')
pid = prop.pid
allow_overwrites = (
ntables > 1 and
pid in op2.properties and
op2.properties[pid].type == prop.type)
op2._add_methods._add_property_object(prop, allow_overwrites=allow_overwrites)
def _add_op2_property_mass(self, prop):
"""helper method for op2"""
op2 = self.op2
#if prop.pid > 100000000:
#raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))
#print(str(prop)[:-1])
ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')
pid = prop.pid
allow_overwrites = (
ntables > 1 and
pid in op2.properties_mass and
op2.properties_mass[pid].type == prop.type)
op2._add_methods._add_property_mass_object(prop, allow_overwrites=allow_overwrites)
def _add_pconv(self, prop: PCONV) -> None:
if prop.pconid > 100000000:
raise RuntimeError('bad parsing pconid > 100000000...%s' % str(prop))
self.op2._add_methods._add_convection_property_object(prop)
# HGSUPPR
def _read_desc(self, data: bytes, n: int) -> int:
"""
RECORD – DESC(9801,98,698)
Word Name Type Description
1 DID I Description identification number
2 NWORDS I Number of words for the description string
3 DESC CHAR4 Description
Words 3 repeats NWORDS times
data = (1, 14, 'FACE CONTACT(1) ')
"""
op2 = self.op2
assert self.size == 4, 'DESC size={self.size} is not supported'
#op2.show_data(data[n:], types='ifs')
struct_2i = Struct(op2._endian + b'2i')
while n < len(data):
datai = data[n:n+8]
desc_id, nwords = struct_2i.unpack(datai)
#print(desc_id, nwords)
ndatai = 8 + nwords * 4
word_bytes = data[n+8:n+ndatai]
word = word_bytes.decode('ascii').rstrip()
assert len(word_bytes) == nwords * 4
#print('word_bytes =', word_bytes)
op2.log.warning(f'geom skipping DESC={desc_id}: {word!r}')
n += ndatai
assert n == len(data), n
return n
    def _read_nsml(self, data: bytes, n: int) -> int:
        """
        Reads NSML records (lumped nonstructural mass by ID).

        NX 2019.2
        RECORD - NSML(3501, 35, 994)

        Defines a set of lumped nonstructural mass by ID.

        Word Name    Type  Description
        1    SID     I     Set identification number
        2    PROP(2) CHAR4 Set of properties or elements
        4    ID      I     Property of element identification number
        5    VALUE   RS    Lumped nonstructural mass value
        Words 4 and 5 repeat until -1 occurs

        ints    = (3, ELEMENT, 0, 200, 0.7, -1, 4, PSHELL, 0, 6401, 4.2, -1)
        floats  = (3, ELEMENT, 0.0, 200, 0.7, -1, 4, PSHELL, 0.0, 6401, 4.2, -1)
        """
        op2 = self.op2
        n0 = n  # base byte offset; used below to slice the CHAR4 fields
        #op2.show_data(data[n:])
        # view the same buffer as both ints and floats; each card ends at -1
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        ncards = 0
        size = self.size
        for (i0, i1) in zip(istart, iend):
            #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
            assert ints[i1] == -1, ints[i1]
            sid = ints[i0]
            # words 2-3: the 8-char property/element type (e.g. 'ELEMENT', 'PSHELL')
            prop_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]
            #print(sid, prop_bytes)
            # word i0+3 looks like a pad/origin flag (0 in the samples), so the
            # alternating (ID, VALUE) pairs start at i0+4 -- TODO confirm
            ids = ints[i0+4:i1:2].tolist()
            values = floats[i0+5:i1:2].tolist()
            #print(ids, values)
            assert len(ids) == len(values)
            nsm_type = prop_bytes.decode('latin1').rstrip()
            nsml = op2.add_nsml(sid, nsm_type, ids, values)
            #print(nsml)
            str(nsml)  # exercises the card's repr as a sanity check
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSML'] = ncards
        return n
    def _read_nsmadd(self, data: bytes, n: int) -> int:
        """
        Reads NSMADD records (combination of nonstructural mass sets).

        NX 2019.2
        RECORD - NSMADD(3401,34,993)

        Combines the nonstructural mass inputs.

        Word Name Type Description
        1    SID  I    Set identification number
        2    ID   I    Set of properties or elements
        Word 2 repeats until End of Record

        (1, 2, 3, 4, -1)
        """
        op2 = self.op2
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        ncards = 0
        # NOTE(review): istart from get_minus1_start_end is immediately
        # rebuilt from iend; presumably equivalent here -- confirm
        istart = [0] + list(iend + 1)
        size = self.size
        for (i0, i1) in zip(istart, iend):
            assert ints[i1] == -1, ints[i1]
            # first word is the SID; the rest (up to the -1) are the set IDs
            sid, *nsms = ints[i0:i1]
            nsmadd = op2.add_nsmadd(sid, nsms)
            #print(nsmadd)
            str(nsmadd)  # exercises the card's repr as a sanity check
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSMADD'] = ncards
        return n
    def _read_nsml1_nx(self, data: bytes, n: int) -> int:
        """
        Reads NSML1 (NX form): lumped nonstructural mass by VALUE + ID list.

        NSML1(3701, 37, 995)
        Alternate form of NSML entry. Defines lumped nonstructural mass
        entries by VALUE, ID list.

        Word Name    Type  Description
        1    SID     I     Set identification number
        2    PROP    CHAR4 Set of properties
        3    TYPE    CHAR4 Set of elements
        4    VALUE   RS    Lumped nonstructural mass value
        5    SPECOPT I     Specification option
        SPECOPT=1 By IDs
          6 ID I Property of element identification number
          Word 6 repeats until -1 occurs
        SPECOPT=2 All
          6 ALL(2) CHAR4 Keyword ALL
          Words 6 and 7 repeat until -1 occurs
        SPECOPT=3 Thru range
          6 ID1 I Starting identification number
          7 THRU(2) CHAR4 Keyword THRU
          9 ID2 I Ending identification number
          Words 6 through 9 repeat until -1 occurs
        SPECOPT=4 Thru range with by
          6 ID1 I Starting identification number
          7 THRU(2) CHAR4 Keyword THRU
          9 ID2 I Ending identification number
          10 BY(2) CHAR4 Keyword BY
          12 N I Increment
          Words 6 through 12 repeat until -1 occurs
        """
        # layout: each card is terminated by -2; each ID-list segment inside
        # a card is terminated by -1, e.g.:
        #ints = (1, ELEMENT, 466.2,
        #        3, 249311, THRU, 250189, -1,
        #        3, 250656, THRU, 251905, -1,
        #        -2,
        #        2, ELEMENT, 77.7,
        #        3, 225740, THRU, 227065, -1,
        #        3, 338071, THRU, 341134, -1, -2)
        op2 = self.op2
        n0 = n  # base byte offset; used below to slice the CHAR4 fields
        #op2.show_data(data[n:])
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        # split the buffer into cards at the -2 terminators
        iminus2 = np.where(ints == -2)[0]
        istart = [0] + list(iminus2[:-1] + 1)
        iend = iminus2
        #print(istart, iend)
        assert len(data[n:]) > 12, data[n:]
        #op2.show_data(data[n:], types='ifs')
        ncards = 0
        # NOTE(review): istart is rebuilt here from iend; presumably
        # equivalent to the assignment above -- confirm
        istart = [0] + list(iend + 1)
        size = self.size
        for (i0, i1) in zip(istart, iend):
            #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
            assert ints[i1] == -2, ints[i1]
            sid = ints[i0]
            # word 2: CHAR4 property/element type name
            nsm_type = data[n0+(i0+1)*size:n0+(i0+2)*size].decode('latin1').rstrip()
            value = float(floats[i0+3])
            #print(f'sid={sid} nsm_type={nsm_type} value={value}')
            # split the card body into -1 terminated ID segments
            iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]
            #print('-1', iminus1)
            #print('-2', iminus2)
            istart2 = [i0 + 4] + list(iminus1[:-1] + 1)
            iend2 = iminus1
            #print(istart2, iend2)
            for istarti, iendi in zip(istart2, iend2):
                #print(istarti, iendi)
                spec_opt = ints[istarti]  # 4
                #print(f'ints[{istarti}] = spec_opt = {spec_opt}')
                if spec_opt == 1:
                    # explicit ID list
                    # NOTE(review): the range starts at istarti, which is the
                    # SPECOPT word itself -- confirm the IDs are not off by one
                    ivalues = list(range(istarti, iendi))
                    #print('ivalues =', ivalues)
                    pid_eids = ints[ivalues].tolist()
                    #print('pid_eids =', pid_eids)
                elif spec_opt == 3:
                    # THRU range; datai = (3, 249311, 'THRU    ', 250189)
                    #datai = data[n0+(i0+6)*size:n0+i1*size]
                    #op2.show_data(datai)
                    ids = ints[istarti:iendi]
                    istart = ids[1]
                    iend = ids[-1]
                    pid_eids = list(range(istart, iend+1))
                else:
                    raise NotImplementedError(spec_opt)
                if nsm_type == 'ELEM':
                    nsm_type = 'ELEMENT'
                #for pid_eid in pid_eids:
                    #nsml = op2.add_nsml1(sid, nsm_type, pid_eids, [value])
                assert len(pid_eids) > 0, pid_eids
                nsml1 = op2.add_nsml1(sid, nsm_type, value, pid_eids)
                #print(nsml1)
                str(nsml1)  # exercises the card's repr as a sanity check
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSML'] = ncards
        return n
def _read_nsml1_msc(self, data: bytes, n: int) -> int:
r"""
NSML1(3601, 36, 62)
Word Name Type Description
1 SID I Set identification number
2 PROP CHAR4 Set of property or elements
3 VALUE RS Lumped nonstructural mass value
4 SPECOPT I Specification option
SPECOPT=1 By IDs
5 IDs , =FLG1LIST in ixidlst.prm
6 ID I Property or element ID
Word 6 repeats until End of Record
SPECOPT=2 means ALL, =FLG1ALL in ixidlst.prm
5 ALL(2) CHAR4 Keyword ALL
Words 5 through 6 repeat until End of Record
SPECOPT=3 means THRU range, =FLG1THRU in ixidlst.prm
5 ID1 I Starting ID
6 THRU(2) CHAR4 Keyword THRU
8 ID2 I Ending ID
Words 5 through 8 repeat until End of Record
SPECOPT=4 means THRU range with BY, =FLG1THBY in ixidlst.prm
5 ID1 I Starting ID
6 THRU(2) CHAR4 Keyword THRU
8 ID2 I Ending ID
9 BY(2) CHAR4 Keyword BY
11 N I Increment
Words 5 through 11 repeat until End of Record
End SPECOPT
Words 4 through max repeat until End of Record
C:\MSC.Software\simcenter_nastran_2019.2\tpl_post2\elsum15.op2
data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
"""
op2 = self.op2
op2.log.info(f'geom skipping NSML1 in {op2.table_name}; ndata={len(data)-12}')
#op2.show_data(data[n:], types='ifs')
#bbb
return len(data)
    def _read_nsm1(self, data: bytes, n: int) -> int:
        """
        Reads NSM1 records: nonstructural mass by VALUE + ID list.

        NSM1(3301, 33, 992)
        Defines the properties of a nonstructural mass.

        Word Name    Type  Description
        1    SID     I     Set identification number
        2    PROP    CHAR4 Set of properties
        3    TYPE    CHAR4 Set of elements
        4    ORIGIN  I     Entry origin
        5    VALUE   RS    Nonstructural mass value
        6    SPECOPT I     Specification option
        SPECOPT=1 By IDs
          7 ID I
          Word 7 repeats until -1 occurs
        SPECOPT=2 All
          7 ALL(2) CHAR4
          Words 7 and 8 repeat until -1 occurs
        SPECOPT=3 Thru range
          7 ID I
          8 THRU(2) CHAR4
          10 ID I
          Words 7 through 10 repeat until -1 occurs
        SPECOPT=4 Thru range with by
          7 ID I
          8 THRU(2) CHAR4
          10 ID I
          11 BY(2) CHAR4
          13 N I
          Words 7 through 13 repeat until -1 occurs

        data = (3, PCOMP, 0, 0.37, 2, ALL, -1,
                4, ELEMENT, 2, 2.1, 1, 3301, -1)
        """
        op2 = self.op2
        #op2.show_data(data[n:], types='ifs')
        n0 = n  # base byte offset; used below to slice the CHAR4 fields
        #op2.show_data(data[n:])
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        ncards = 0
        size = self.size
        for (i0, i1) in zip(istart, iend):
            assert ints[i1] == -1, ints[i1]
            # 1 SID I Set identification number
            sid = ints[i0]
            # words 2-3: CHAR4 property/element type name
            # word 4: ORIGIN flag; word 5: VALUE; word 6: SPECOPT
            nsm_type = data[n0+(i0+1)*size:n0+(i0+3)*size].decode('latin1').rstrip()
            zero_two = ints[i0+3]
            value = float(floats[i0+4])
            spec_opt = ints[i0+5]
            assert zero_two in [0, 2], zero_two
            #print(sid, nsm_type, value, spec_opt)
            iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]
            # istart2/iend2 are computed but currently unused (kept for debugging)
            istart2 = [i0 + 5] + list(iminus1[:-1] + 1)
            iend2 = iminus1
            #print(istart2, iend2)
            if spec_opt == 1:
                # 7 ID ... explicit ID list
                ids = ints[i0+6:i1]
            elif spec_opt == 2:
                # keyword ALL; kept as the raw CHAR4 bytes
                word = data[n0+(i0+6)*size:n0+i1*size]
                ids = word
            elif spec_opt == 3:  # thru
                # (ID1, 'THRU    ', ID2) -> expand to the full range
                #datai = data[n0+(i0+6)*size:n0+i1*size]
                ids = ints[i0+6:i1]
                istart = ids[0]
                iend = ids[-1]
                ids = list(range(istart, iend+1))
            else:
                raise NotImplementedError(spec_opt)
            #print(sid, nsm_type, zero_two, value, ids)
            #if nsm_type == 'ELEM':
                #nsm_type = 'ELEMENT'
            #for pid_eid in pid_eids:
                #nsml = self.add_nsml1(sid, nsm_type, pid_eids, [value])
            nsm1 = op2.add_nsm1(sid, nsm_type, value, ids)
            #print(nsm1)
            str(nsm1)  # exercises the card's repr as a sanity check
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSM1'] = ncards
        return n
def _read_nsm(self, data: bytes, n: int) -> int:
"""NSM"""
op2 = self.op2
n = op2.reader_geom2._read_dual_card(
data, n,
self._read_nsm_nx, self._read_nsm_msc,
'NSM', op2._add_methods._add_nsm_object)
return n
    def _read_nsm_2(self, data: bytes, n: int) -> int:
        """
        Reads NSM (NX form): nonstructural mass by (ID, VALUE) pairs.

        NX 2019.2
        RECORD - NSM(3201,32,991)
        Defines the properties of a nonstructural mass.

        Word Name  Type  Description
        1    SID   I     Set identification number
        2    PROP  CHAR4 Set of properties
        3    TYPE  CHAR4 Set of elements <---- not right...it's an integer and not used...
        4    ID    I     Property or element identification number
        5    VALUE RS    Nonstructural mass value
        Words 5 through 6 repeat until End of Record

        NSM,2,conrod,1007,0.3
        data = (2, CONROD, 0, 1007, 0.3, -1,
                2, ELEMENT, 0, 200, 0.20, -1,
                3, PSHELL, 0, 3301, 0.20, -1,
                3, ELEMENT, 2, 200, 1.0, -1,
                4, PSHELL, 2, 6401, 4.2, -1)
        """
        op2 = self.op2
        n0 = n  # base byte offset; used below to slice the CHAR4 fields
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        ncards = 0
        size = self.size
        for (i0, i1) in zip(istart, iend):
            #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
            assert ints[i1] == -1, ints[i1]
            sid = ints[i0]
            # words 2-3: CHAR4 property/element type; word 4: unused int flag
            prop_type = data[n0+(i0+1)*size:n0+(i0+3)*size]
            elem_type = data[n0+(i0+3)*size:n0+(i0+4)*size]
            nsm_type = prop_type.decode('latin1').rstrip()
            dunno_int = ints[i0+3]
            # alternating (ID, VALUE) pairs up to the -1 terminator
            ids = ints[i0+4:i1:2].tolist()
            values = floats[i0+5:i1:2].tolist()
            assert len(ids) == len(values)
            assert dunno_int in [0, 2], (sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)
            #print(sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)
            nsm = op2.add_nsm(sid, nsm_type, ids, values)
            #print(nsm[0])
            str(nsm)  # exercises the card's repr as a sanity check
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSM'] = ncards
        return n
    def _read_nsm_msc(self, data: bytes, n: int) -> Tuple[int, List[NSM]]:
        """
        Reads the MSC form of the NSM card; returns ``(n, properties)`` for
        the dual-card reader (the caller adds the cards to the model).

        NSM(3201,32,55) - the marker for Record 2

        MSC
        1 SID I Set identification number
        2 PROP CHAR4 Set of property or elements
        3 ID I Property or element identification number
        4 VALUE RS Nonstructural mass value
        ORIGIN=0 NSM Bulk Data entry
          5 ID I Property or element ID
          6 VALUE RS Nonstructural mass value
          Words 5 through 6 repeat until End of Record
        ORIGIN=2 NSML Bulk Data entry
          5 ID I Property or element ID
          6 VALUE RS Nonstructural mass value
          Words 5 through 6 repeat until End of Record
        Words 3 through 4 repeat until End of Record
        """
        op2 = self.op2
        properties = []
        struct1 = Struct(op2._endian + b'i 4s if')
        ndelta = 16  # 4 words: sid, prop_set, pid, value
        i = 0
        ints = np.frombuffer(data[n:], op2.idtype).copy()
        floats = np.frombuffer(data[n:], op2.fdtype).copy()
        while n < len(data):
            edata = data[n:n+ndelta]
            out = struct1.unpack(edata)
            (sid, prop_set, pid, value) = out
            # 538976312
            assert pid < 100000000
            i += 4
            n += ndelta
            prop_set = prop_set.decode('utf8').rstrip(' ')  # \x00
            values = [value]
            #print('ints[i:]=', ints[i:])
            # accumulate additional VALUE words until the -1 terminator
            while ints[i] != -1:
                value2 = floats[i]
                values.append(value2)
                n += 4
                i += 1
            op2.log.info("MSC: NSM-sid=%s prop_set=%s pid=%s values=%s" % (
                sid, prop_set, pid, values))
            # NOTE(review): only the first `value` is passed on; the extra
            # `values` collected above are dropped -- confirm this is intended
            prop = NSM.add_op2_data([sid, prop_set, pid, value])
            #op2._add_methods._add_nsm_object(prop)
            properties.append(prop)
            # handle the trailing -1
            i += 1
            n += 4
        return n, properties
    def _read_nsm_nx(self, data: bytes, n: int) -> Tuple[int, List]:
        """
        Reads the NX form of the NSM card; returns ``(n, properties)`` for
        the dual-card reader (the caller adds the cards to the model).

        NSM(3201,32,55) - the marker for Record 2

        1 SID     I     Set identification number
        2 PROP(2) CHAR4 Set of properties or elements
        4 ORIGIN  I     Entry origin
        5 ID      I     Property or element identification number
        6 VALUE   RS    Nonstructural mass value
        Words 5 through 6 repeat until End of Record
        """
        op2 = self.op2
        properties = []
        #NX: C:\Users\sdoyle\Dropbox\move_tpl\nsmlcr2s.op2
        struct1 = Struct(op2._endian + b'i 8s ii f')
        ndelta = 24  # 6 words: sid, prop_set(2), origin, pid, value
        #op2.show_data(data[12:], 'ifs')
        i = 0
        ints = np.frombuffer(data[n:], op2.idtype).copy()
        floats = np.frombuffer(data[n:], op2.fdtype).copy()
        # NOTE(review): break_by_minus1 is not among this module's visible
        # imports and the result is unused -- confirm it exists at module scope
        unused_packs = break_by_minus1(ints)
        #for pack in packs:
            #print(pack)
        #ipack = 0
        while n < len(data):
            #print('ints[i:]=', ints[i:].tolist())
            edata = data[n:n+ndelta]
            out = struct1.unpack(edata)
            (sid, prop_set, origin, pid, value) = out
            # 538976312
            assert pid < 100000000
            i += 6
            n += ndelta
            prop_set = prop_set.decode('utf8').rstrip(' ')  # \x00
            pids = [pid]
            values = [value]
            #print('ints[i:]=', ints[i:].tolist())
            # accumulate additional (ID, VALUE) pairs until the -1 terminator
            while ints[i] != -1:
                pid = ints[i]
                value2 = floats[i+1]
                assert pid != -1
                pids.append(pid)
                values.append(value2)
                n += 8
                i += 2
            for pid, value in zip(pids, values):
                # ORIGIN selects the bulk-data entry type: 0 -> NSM, 2 -> NSML
                if origin == 0:
                    prop = NSM.add_op2_data([sid, prop_set, pid, value])
                elif origin == 2:
                    prop = NSML.add_op2_data([sid, prop_set, pid, value])
                #print(prop.rstrip(), pid, value)
                #op2._add_methods._add_nsm_object(prop)
                properties.append(prop)
            #print('----')
            # handle the trailing -1
            i += 1
            n += 4
            #ipack += 1
        return n, properties
# NSM1
# NSML1
# NSMADD
# NSML
# NSML1
# PAABSF
# PACABS
# PACBAR
def _read_pbar(self, data: bytes, n: int) -> int:
"""
PBAR(52,20,181) - the marker for Record 11
.. warning:: this makes a funny property...
MSC 2016/NX10
Word Name Type Description
1 PID I Property identification number
2 MID I Material identification number
3 A RS Area
4 I1 RS Area moment of inertia in plane 1
5 I2 RS Area moment of inertia in plane 2
6 J RS Torsional constant
7 NSM RS Nonstructural mass per unit length
8 FE RS
9 C1 RS Stress recovery location at point C in element y-axis
10 C2 RS Stress recovery location at point C in element z-axis
11 D1 RS Stress recovery location at point D in element y-axis
12 D2 RS Stress recovery location at point D in element z-axis
13 E1 RS Stress recovery location at point E in element y-axis
14 E2 RS Stress recovery location at point E in element z-axis
15 F1 RS Stress recovery location at point F in element y-axis
16 F2 RS Stress recovery location at point F in element z-axis
17 K1 RS Area factor for shear in plane 1
18 K2 RS Area factor for shear in plane 2
19 I12 RS Area product of inertia for plane 1 and 2
"""
op2 = self.op2
ntotal = 76 * self.factor # 19*4
struct1 = Struct(mapfmt(op2._endian + b'2i17f', self.size))
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
#(pid, mid, a, I1, I2, J, nsm, fe, c1, c2, d1, d2,
#e1, e2, f1, f2, k1, k2, I12) = out
prop = PBAR.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PBAR'] = nentries
return n
    def _read_pbarl(self, data: bytes, n: int) -> int:
        """
        Reads the PBARL card: PBARL(9102,91,52) - the marker for Record 12

        Each entry is variable length: a fixed (pid, mid, group, type)
        header, then ndim dimension values + NSM, then a -1 flag.

        TODO: buggy
        It's possible to have a PBARL and a PBAR at the same time.
        NSM is at the end of the element.
        """
        op2 = self.op2
        # number of (dimension + nsm) floats that follow the header per type
        valid_types = {
            'ROD': 1,
            'TUBE': 2,
            'TUBE2': 2,
            'I': 6,
            'CHAN': 4,
            'T': 4,
            'BOX': 4,
            'BAR': 2,
            'CROSS': 4,
            'H': 4,
            'T1': 4,
            'I1': 4,
            'CHAN1': 4,
            'Z': 4,
            'CHAN2': 4,
            "T2": 4,
            'BOX1': 6,
            'HEXA': 3,
            'HAT': 4,
            'HAT1': 5,
            'DBOX': 10,  # was 12
            #'MLO TUBE' : 2,
        }  # for GROUP="MSCBML0"
        size = self.size
        ntotal = 28 * self.factor  # 7*4 - ROD - shortest entry...could be buggy... # TODO fix this
        if size == 4:
            struct1 = Struct(op2._endian + b'2i 8s 8s f')
        else:
            struct1 = Struct(op2._endian + b'2q 16s 16s d')
        #nentries = (len(data) - n) // ntotal
        #print(self.show_ndata(80))
        ndata = len(data)
        while ndata - n > ntotal:
            edata = data[n:n+ntotal]
            n += ntotal
            out = struct1.unpack(edata)
            (pid, mid, group, beam_type, value) = out
            if pid > 100000000 or pid < 1:
                op2.log.debug(" pid=%s mid=%s group=%r beam_type=%r value=%s" % (
                    pid, mid, group, beam_type, value))
                raise RuntimeError('bad parsing...')
            beam_type = reshape_bytes_block_size(beam_type, size=size)
            group = reshape_bytes_block_size(group, size=size)
            data_in = [pid, mid, group, beam_type, value]
            # read the type-dependent dimension/NSM floats
            expected_length = valid_types[beam_type]
            iformat = op2._endian + b'%if' % expected_length
            # NOTE(review): *4 (not *size) -- looks wrong for 64-bit files,
            # but the entry layout here is flagged buggy anyway; confirm
            ndelta = expected_length * 4
            dims_nsm = list(unpack(iformat, data[n:n+ndelta]))
            data_in += dims_nsm
            #print("  pid=%s mid=%s group=%r beam_type=%r value=%s dims_nsm=%s" % (
                #pid, mid, group, beam_type, value, dims_nsm))
            # TODO why do i need the +4???
            # is that for the nsm?
            #min_len = expected_length * 4 + 4
            #if len(data)
                #data = data[n + expected_length * 4 + 4:]
            n += ndelta
            #prin( "len(out) = ",len(out)))
            #print("PBARL = %s" % data_in)
            prop = PBARL.add_op2_data(data_in)  # last value is nsm
            pid = prop.pid
            # a PBARL overrides an existing PBAR with the same pid
            if pid in op2.properties:
                #op2.log.debug("removing:\n%s" % op2.properties[pid])
                op2._type_to_id_map['PBAR'].remove(pid)
                del op2.properties[pid]
            self._add_op2_property(prop)
            #op2.properties[pid] = prop
            #print(prop.get_stats())
            #print(op2.show_data(data[n-8:-100]))
            # the PBARL ends with a -1 flag
            #value, = unpack(op2._endian + b'i', data[n:n+4])
            n += 4 * self.factor
            if len(op2._type_to_id_map['PBAR']) == 0 and 'PBAR' in op2.card_count:
                del op2._type_to_id_map['PBAR']
                del op2.card_count['PBAR']
            op2.increase_card_count('PBARL')
        #assert len(data) == n
        if self.size == 8:
            n += 16
            #n += 8  # same for 32/64 bit - not 100% that it's always active
        return n
    def _read_pbcomp(self, data: bytes, n: int) -> int:
        """
        Reads the PBCOMP card: PBCOMP(5403, 55, 349)

        Each entry is a 15-word header (pid, mid, 12 floats, NSECT) followed
        by NSECT lumped-area blocks of (Y, Z, C, MID, undef).

        example::
                    pid mid   A       I1      I2      I12  J       NSM
            PBCOMP  3   2   2.00E-4 6.67E-9 1.67E-9 0.0  4.58E-9 0.0  +
        """
        op2 = self.op2
        struct1 = Struct(mapfmt(op2._endian + b'2i 12f i', self.size))
        struct2 = Struct(mapfmt(op2._endian + b'3f 2i', self.size))
        nproperties = 0
        ntotal1 = 60 * self.factor  # 4*15
        ntotal2 = 20 * self.factor  # 4*5 per section block
        ndata = len(data)
        #print(ntotal1, ntotal2)
        if self.factor == 2:
            op2.show_data(data[12*self.factor:], types='qd')
            #print(len(data[12*self.factor:]))
        while n < ndata:
            #op2.log.debug(f"n={n} ndata={ndata}")
            edata = data[n:n+ntotal1]
            data1 = struct1.unpack(edata)
            nsections = data1[-1]
            if op2.is_debug_file:
                (pid, mid, a, i1, i2, i12, j, nsm, k1, k2,
                 m1, m2, n1, n2, unused_nsections) = data1
                op2.log.info(f'PBCOMP pid={pid} mid={mid} nsections={nsections} '
                             f'k1={k1} k2={k2} m=({m1},{m2}) n=({n1},{n2})\n')
            data2 = []
            n += ntotal1
            if nsections in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]:
                # 16 Y RS Lumped area location along element's y-axis
                # 17 Z RS Lumped area location along element's z-axis
                # 18 C RS Fraction of the total area for the lumped area
                # 19 MID I Material identification number
                # 20 UNDEF None
                # Words 16 through 20 repeat NSECT times
                for unused_i in range(nsections):
                    datai = data[n:n+ntotal2]
                    xi, yi, ci, mid, unused_null = struct2.unpack(datai)
                    data2.append((xi, yi, ci, mid))
                    n += ntotal2
            else:
                op2.log.error(f'PBCOMP={data1[0]} has no sections; check your bdf')
                return n
                #raise NotImplementedError('PBCOMP nsections=%r' % nsections)
            if op2.is_debug_file:
                op2.binary_debug.write(' PBCOMP: %s\n' % str([data1, data2]))
                # NOTE(review): this summary uses a/i1/i2/... unpacked in the
                # debug-only block above, so it must stay under is_debug_file
                msg = (
                    ' i=%-2s so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                    'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (
                        nsections, None, -9999., a, i1, i2, i12, j, nsm,
                        None, None, None, None, None, None, None, None,)
                )
                op2.log.debug(msg)
            #op2.log.debug(data1)
            #op2.log.debug(data2)
            data_in = [data1, data2]
            prop = PBCOMP.add_op2_data(data_in)
            pid = data1[0]
            # a PBCOMP overrides the PBEAM with the same pid
            if pid in op2.properties:
                op2._type_to_id_map['PBEAM'].remove(pid)
                del op2.properties[pid]
            self._add_op2_property(prop)
            nproperties += 1
            #print(f"n={n} ndata={ndata}")
        assert nproperties > 0, 'PBCOMP nproperties=%s' % (nproperties)
        if len(op2._type_to_id_map['PBEAM']) == 0 and 'PBEAM' in op2.card_count:
            del op2._type_to_id_map['PBEAM']
            del op2.card_count['PBEAM']
        op2.card_count['PBCOMP'] = nproperties
        return n
    def _read_pbeam(self, data: bytes, n: int) -> int:
        """
        Reads the PBEAM card: PBEAM(5402,54,262) - the marker for Record 14

        Each entry is 5 header words, 11 station blocks of 16 floats, and a
        trailing block of 16 floats.  Entries whose stations look like a
        PBCOMP (non 0/1 SO flags or x/xb outside [0, 1]) are dropped; the
        PBCOMP reader handles those pids.

        .. todo:: add object
        """
        op2 = self.op2
        cross_section_type_map = {
            0 : 'variable',
            1 : 'constant',
            2 : '???',
        }
        struct1 = Struct(mapfmt(op2._endian + b'4if', self.size))
        struct2 = Struct(mapfmt(op2._endian + b'16f', self.size))
        struct3 = Struct(mapfmt(op2._endian + b'16f', self.size))
        unused_ntotal = 768  # 4*(5+16*12)
        #nproperties = (len(data) - n) // ntotal
        #assert nproperties > 0, 'ndata-n=%s n=%s datai\n%s' % (len(data)-n, n, op2.show_data(data[n:100+n]))
        ndata = len(data)
        #op2.show_data(data[12:], 'if')
        #assert ndata % ntotal == 0, 'ndata-n=%s n=%s ndata%%ntotal=%s' % (len(data)-n, n, ndata % ntotal)
        nproperties = 0
        ntotal1 = 20 * self.factor  # 5 header words
        ntotal2 = 64 * self.factor  # 16 floats per station block
        while n < ndata:
            edata = data[n:n+ntotal1]
            n += ntotal1
            data_in = list(struct1.unpack(edata))
            #if op2.is_debug_file:
                #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s\n' % tuple(data_in))
            (pid, unused_mid, unused_nsegments, ccf, unused_x) = data_in
            # Constant cross-section flag: 1=yes and 0=no
            # what is 2?
            if ccf not in [0, 1, 2]:
                msg = (' PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s; '
                       'ccf must be in [0, 1, 2]\n' % tuple(data_in))
                raise ValueError(msg)
            cross_section_type = cross_section_type_map[ccf]
            #print('cross_section_type = %s' % cross_section_type)
            is_pbcomp = False
            is_bad_so = False
            so = []
            xxb = []
            # 11 station blocks of 16 floats each
            for i in range(11):
                edata = data[n:n+ntotal2]
                if len(edata) != ntotal2:
                    endpack = []
                    raise RuntimeError(f'PBEAM unexpected length i={i:d}...')
                n += ntotal2
                pack = struct2.unpack(edata)
                (soi, xxbi, a, i1, i2, i12, j, nsm, c1, c2,
                 d1, d2, e1, e2, f1, f2) = pack
                xxb.append(xxbi)
                so.append(soi)
                # SO is stored as a float: 0.0 -> 'NO', 1.0 -> 'YES';
                # anything else marks the entry as a suspected PBCOMP
                if soi == 0.0:
                    so_str = 'NO'
                elif soi == 1.0:
                    so_str = 'YES'
                else:
                    so_str = str(soi)
                    is_bad_so = True
                    #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; soi not in 0.0 or 1.0' % (
                        #pid, i, xxb, soi)
                    #raise NotImplementedError(msg)
                #if xxb != 0.0:
                    #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; xxb not in 0.0 or 1.0' % (
                        #pid, i, xxb, soi)
                    #raise NotImplementedError(msg)
                pack2 = (so_str, xxbi, a, i1, i2, i12, j, nsm, c1, c2,
                         d1, d2, e1, e2, f1, f2)
                data_in.append(pack2)
                if op2.is_debug_file:
                    op2.binary_debug.write(f' {pack}\n')
                    msg = (
                        ' i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                        'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))
                    )
                    op2.binary_debug.write(msg)
            # trailing block: k/s/nsi/cw/m/n terms
            edata = data[n:n+ntotal2]
            if len(edata) != ntotal2:
                endpack = []
                raise RuntimeError('PBEAM unexpected length 2...')
            endpack = struct3.unpack(edata)
            n += ntotal2
            assert len(endpack) == 16, endpack
            #(k1, k2, s1, s2, nsia, nsib, cwa, cwb, # 8
            #m1a, m2a, m1b, m2b, n1a, n2a, n1b, n2b) = endpack # 8 -> 16
            if op2.is_debug_file:
                op2.binary_debug.write(' k=[%s,%s] s=[%s,%s] nsi=[%s,%s] cw=[%s,%s] '
                                       'ma=[%s,%s] mb=[%s,%s] na=[%s,%s] nb=[%s,%s]' % (
                                           tuple(endpack)))
            data_in.append(endpack)
            if is_bad_so:
                #if soi < 0.:
                xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])
                so_str = ', '.join(['%g' % soi for soi in so])
                msg = (f'PBEAM pid={pid} i={i} soi=[{so_str}]; '
                       'soi not 0.0 or 1.0; assuming PBCOMP & dropping')
                op2.log.error(msg)
                is_pbcomp = True
            if min(xxb) < 0.0 or max(xxb) > 1.0:
                xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])
                msg = (f'PBEAM pid={pid} i={i} x/xb=[{xxb_str}]; '
                       'x/xb must be between 0.0 and 1.0; assuming PBCOMP & dropping')
                op2.log.error(msg)
                is_pbcomp = True
            if is_pbcomp:
                continue
            if pid in op2.properties:
                if op2.properties[pid].type == 'PBCOMP':
                    continue
            prop = PBEAM.add_op2_data(data_in)
            nproperties += 1
            self._add_op2_property(prop)
        if nproperties:
            op2.card_count['PBEAM'] = nproperties
        return n
    def _read_pbeaml(self, data: bytes, n: int) -> int:
        """
        Reads the PBEAML card.

        PBEAML(9202,92,53)

        Word Name     Type  Description
        1    PID      I     Property identification number
        2    MID      I     Material identification number
        3    GROUP(2) CHAR4 Cross-section group name
        5    TYPE(2)  CHAR4 Cross section type
        7    VALUE    RS    Cross section values for XXB, SO, NSM, and dimensions
        Word 7 repeats until (-1) occurs
        """
        op2 = self.op2
        #strs = numpy.core.defchararray.reshapesplit(data, sep=",")
        # view the same buffer as ints and floats; entries end at -1
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        size = self.size
        nproperties = len(istart)
        if size == 4:
            struct1 = Struct(op2._endian + b'2i 8s 8s')
        else:
            struct1 = Struct(op2._endian + b'2q 16s 16s')
        for unused_i, (istarti, iendi) in enumerate(zip(istart, iend)):
            # fixed 6-word header: pid, mid, group (2 words), beam_type (2 words)
            idata = data[n+istarti*size : n+(istarti+6)*size]
            pid, mid, group, beam_type = struct1.unpack(idata)
            group = group.decode('latin1').strip()
            beam_type = beam_type.decode('latin1').strip()
            # everything after the header is the float payload (xxb/so/dims/nsm)
            fvalues = floats[istarti+6: iendi]
            if op2.is_debug_file:
                op2.binary_debug.write(' %s\n' % str(fvalues))
                op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')
                op2.log.debug(fvalues)
            data_in = [pid, mid, group, beam_type, fvalues]
            prop = PBEAML.add_op2_data(data_in)
            if pid in op2.properties:
                # this is a fake PSHELL
                propi = op2.properties[pid]
                assert propi.type in ['PBEAM'], propi.get_stats()
                nproperties -= 1
                continue
            self._add_op2_property(prop)
        if nproperties:
            op2.card_count['PBEAML'] = nproperties
        # the whole record was consumed (entries were walked via istart/iend)
        return len(data)
def _read_pbend(self, data: bytes, n: int) -> int:
    """Reads PBEND records, trying the NX layout first and MSC second."""
    op2 = self.op2
    add_property = op2._add_methods._add_property_object
    return op2.reader_geom2._read_dual_card(
        data, n,
        self._read_pbend_nx, self._read_pbend_msc,
        'PBEND', add_property)
def _read_pbend_msc(self, data: bytes, n: int) -> Tuple[int, List[PBEND]]:
    """
    PBEND (MSC form; 26 words / 104 bytes per property)

    1 PID I Property identification number
    2 MID I Material identification number
    3 A RS Area
    4 I1 RS Area moment of inertia in plane 1
    5 I2 RS Area moment of inertia in plane 2
    6 J RS Torsional constant
    7 FSI I flexibility and stress intensification factors
    8 RM RS Mean cross-sectional radius of the curved pipe
    9 T RS Wall thickness of the curved pipe
    10 P RS Internal pressure
    11 RB RS Bend radius of the line of centroids
    12 THETAB RS Arc angle of element
    13 C1 RS Stress recovery location at point C in element y-axis
    14 C2 RS Stress recovery location at point C in element z-axis
    15 D1 RS Stress recovery location at point D in element y-axis
    16 D2 RS Stress recovery location at point D in element z-axis
    17 E1 RS Stress recovery location at point E in element y-axis
    18 E2 RS Stress recovery location at point E in element z-axis
    19 F1 RS Stress recovery location at point F in element y-axis
    20 F2 RS Stress recovery location at point F in element z-axis
    21 K1 RS Area factor for shear in plane 1
    22 K2 RS Area factor for shear in plane 2
    23 NSM RS Nonstructural mass per unit length
    24 RC RS Radial offset of the geometric centroid
    25 ZC RS Offset of the geometric centroid
    26 DELTAN I Radial offset of the neutral axis from the geometric
    centroid

    Returns ``(n, properties)`` for consumption by ``_read_dual_card``.
    """
    op2 = self.op2
    ntotal = 104  # 26*4
    struct1 = Struct(op2._endian + b'2i 4f i 18f f') # delta_n is a float, not an integer
    nproperties = (len(data) - n) // ntotal
    assert (len(data) - n) % ntotal == 0
    assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
    properties = []
    for unused_i in range(nproperties):
        edata = data[n:n+104]
        out = struct1.unpack(edata)
        (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,
         c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,
         delta_n) = out
        beam_type = fsi

        # all-zero pipe fields are treated as "not a pipe section";
        # the fields are nulled and beam_type forced to 2
        if (area, rm, t, p) == (0., 0., 0., 0.):
            area = None
            rm = None
            t = None
            p = None
            delta_n = None
            beam_type = 2
        if delta_n == 0:
            #: Radial offset of the neutral axis from the geometric
            #: centroid, positive is toward the center of curvature
            delta_n = None
        pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,
                      c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,
                      nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)
        #print(pbend)
        pbend.validate()
        properties.append(pbend)
        n += ntotal
    return n, properties
def _read_pbend_nx(self, data: bytes, n: int) -> Tuple[int, List[PBEND]]:
    """
    PBEND (NX form; 33 words / 132 bytes per property)

    1 PID I Property identification number
    2 MID I Material identification number
    3 A RS Area
    4 I1 RS Area moment of inertia in plane 1
    5 I2 RS Area moment of inertia in plane 2
    6 J RS Torsional constant
    7 FSI I Flexibility and stress intensification factors
    8 RM RS Mean cross-sectional radius of the curved pipe
    9 T RS Wall thickness of the curved pipe
    10 P RS Internal pressure
    11 RB RS Bend radius of the line of centroids
    12 THETAB RS Arc angle of element
    13 C1 RS Stress recovery location at point C in element y-axis
    14 C2 RS Stress recovery location at point C in element z-axis
    15 D1 RS Stress recovery location at point D in element y-axis
    16 D2 RS Stress recovery location at point D in element z-axis
    17 E1 RS Stress recovery location at point E in element y-axis
    18 E2 RS Stress recovery location at point E in element z-axis
    19 F1 RS Stress recovery location at point F in element y-axis
    20 F2 RS Stress recovery location at point F in element z-axis
    21 K1 RS Area factor for shear in plane 1
    22 K2 RS Area factor for shear in plane 2
    23 NSM RS Nonstructural mass per unit length
    24 RC RS Radial offset of the geometric centroid
    25 ZC RS Offset of the geometric centroid
    26 DELTAN RS Radial offset of the neutral axis from the geometric
    centroid
    27 SACL RS Miter spacing at center line.
    28 ALPHA RS One-half angle between the adjacent miter axis
    (Degrees).
    29 FLANGE I For FSI=5, defines the number of flanges attached.
    30 KX RS For FSI=6, the user defined flexibility factor for the
    torsional moment.
    31 KY RS For FSI=6, the user defined flexibility factor for the
    out-of-plane bending moment.
    32 KZ RS For FSI=6, the user defined flexbility factor for the
    in-plane bending moment.
    33 Not used

    Words 27-33 are read but discarded; returns ``(n, properties)``
    for consumption by ``_read_dual_card``.
    """
    op2 = self.op2
    #op2.log.info('geom skipping PBEND in EPT')
    #return len(data)
    ntotal = 132  # 33*4
    struct1 = Struct(op2._endian + b'2i 4f i 21f i 4f')
    nproperties = (len(data) - n) // ntotal
    assert (len(data) - n) % ntotal == 0
    assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
    properties = []
    for unused_i in range(nproperties):
        edata = data[n:n+132]
        out = struct1.unpack(edata)
        (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,
         c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,
         delta_n, unused_sacl, unused_alpha, unused_flange,
         unused_kx, unused_ky, unused_kz, unused_junk,) = out
        beam_type = fsi
        pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,
                      c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,
                      nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)
        pbend.validate()
        properties.append(pbend)
        n += ntotal
    return n, properties
# PBMSECT
# PBRSECT
def _read_pbush(self, data: bytes, n: int) -> int:
    """
    Reads the PBUSH card, whose record layout differs by solver/version:

    - 72 bytes / 18 fields : pre-2001 MSC and NX format
    - 92 bytes / 23 fields : NX 11-NX 2019.2 and MSC 2005
    - 96 bytes / 24 fields : MSC 2016.1
    - 108 bytes / 27 fields : MSC 2021

    The record length is used to select the parser; if that fails, the
    legacy NX and MSC-2005 forms are retried directly (the newer MSC
    forms are not supported by the fallback path).
    """
    op2 = self.op2
    card_name = 'PBUSH'
    card_obj = PBUSH
    methods = {
        72 : self._read_pbush_nx_72,    # 72=4*18
        92 : self._read_pbush_msc_92,   # 92=4*23
        96 : self._read_pbush_msc_96,   # 96=4*24
        108 : self._read_pbush_msc_108, # 108=4*27
    }
    try:
        n = op2.reader_geom2._read_double_card(
            card_name, card_obj, self._add_op2_property,
            methods, data, n)
    except DoubleCardError:
        # we're listing nx twice because NX/MSC used to be consistent;
        # the new form for MSC is not supported
        n = op2.reader_geom2._read_dual_card(
            data, n,
            partial(self._read_pbush_nx_72, card_obj),
            partial(self._read_pbush_msc_92, card_obj),
            card_name, self._add_op2_property)
    return n
def _read_pbush_nx_72(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    PBUSH(1402,14,37) - 18 fields
    legacy MSC/NX format

    Parameters
    ----------
    card_obj : PBUSH
        unused; required by the _read_double_card dispatch signature
    data : bytes
        the record payload
    n : int
        the current offset into data

    Returns
    -------
    n : int
        the updated offset
    props : List[PBUSH]
        the parsed PBUSH properties
    """
    op2 = self.op2
    ntotal = 72 * self.factor  # 18*4
    struct1 = Struct(mapfmt(op2._endian + b'i17f', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: these messages were plain strings (missing the f-prefix),
    # so the {placeholders} were never interpolated
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid,
         k1, k2, k3, k4, k5, k6,
         b1, b2, b3, b4, b5, b6,
         g1, sa, st, ea, et) = out
        #op2.log.debug(out)
        assert pid > 0, pid
        # the legacy format stores a single structural damping value;
        # replicate it across all 6 dofs for the modern PBUSH interface
        g2 = g3 = g4 = g5 = g6 = g1
        data_in = (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
                   g1, g2, g3, g4, g5, g6, sa, st, ea, et)
        prop = PBUSH.add_op2_data(data_in)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush_msc_92(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    PBUSH(1402,14,37) - 23 fields
    MSC 2005r2 to <MSC 2016

    Returns the updated offset and the parsed PBUSH properties;
    ``card_obj`` is unused but required by the dispatch signature.
    """
    op2 = self.op2
    ntotal = 92 * self.factor  # 23*4
    struct1 = Struct(mapfmt(op2._endian + b'i22f', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: added the missing f-prefix so the messages interpolate
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        # (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
        #  g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out
        pid = out[0]
        assert pid > 0, pid
        prop = PBUSH.add_op2_data(out)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush_msc_96(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    PBUSH(1402,14,37) - 24 fields
    MSC 2016.1? to 2020

    Same as the 23-field form plus a trailing mass value; returns the
    updated offset and the parsed PBUSH properties.
    """
    op2 = self.op2
    ntotal = 96 * self.factor  # 24*4
    struct1 = Struct(mapfmt(op2._endian + b'i22f f', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: added the missing f-prefix so the messages interpolate
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        # (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
        #  g1, g2, g3, g4, g5, g6, sa, st, ea, et, mass) = out
        pid = out[0]
        assert pid > 0, pid
        prop = PBUSH.add_op2_data(out)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush_msc_108(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    PBUSH(1402,14,37) - 27 fields
    MSC 2021 to current

    ints = (1402, 14, 37, 2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0,
    -1577048263, -1577048263, -1577048263, -1577048263, -1577048263, 1065353216, 1065353216, 1065353216, 1065353216, 0, 0, 0, 0)
    floats = (1402, 14, 37,
    2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0.0,
    -1.7367999061094683e-18, ..., 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0)
    """
    op2 = self.op2
    ntotal = 108 * self.factor  # 27*4
    struct1 = Struct(mapfmt(op2._endian + b'i22f 4f', self.size))
    #op2.show_data(data, types='ifs')
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: added the missing f-prefix so the messages interpolate
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        # (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
        #  g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out
        pid = out[0]
        assert pid > 0, pid
        prop = PBUSH.add_op2_data(out)
        # smoke-test the repr before accepting the property
        str(prop)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush1d(self, data: bytes, n: int) -> int:
    """
    Record 18 -- PBUSH1D(3101,31,219)

    1 PID I Property identification number
    2 K RS Stiffness
    3 C RS Viscous Damping
    4 M RS Mass
    5 ALPHA RS Temperature coefficient
    6 SA RS Stress recovery coefficient
    7 EA/SE RS Strain recovery coefficient
    8 TYPEA I Shock data type:0=Null, 1=Table, 2=Equation
    9 CVT RS Coefficient of translation velocity tension
    10 CVC RS Coefficient of translation velocity compression
    11 EXPVT RS Exponent of velocity tension
    12 EXPVC RS Exponent of velocity compression
    13 IDTSU I TABLEDi or DEQATN entry identification number for scale factor vs displacement
    14 IDTCU I DEQATN entry identification number for scale factor vs displacement
    15 IDTSUD I DEQATN entry identification number for derivative tension
    16 IDCSUD I DEQATN entry identification number for derivative compression
    17 TYPES I Spring data type: 0=Null, 1=Table, 2=Equation
    18 IDTS I TABLEDi or DEQATN entry identification number for tension compression
    19 IDCS I DEQATN entry identification number for compression
    20 IDTDU I DEQATN entry identification number for scale factor vs displacement
    21 IDCDU I DEQATN entry identification number for force vs displacement
    22 TYPED I Damper data type: 0=Null, 1=Table, 2=Equation
    23 IDTD I TABLEDi or DEQATN entry identification number for tension compression
    24 IDCD I DEQATN entry identification number for compression
    25 IDTDV I DEQATN entry identification number for scale factor versus velocity
    26 IDCDV I DEQATN entry identification number for force versus velocity
    27 TYPEG I General data type: 0=Null, 1=Table, 2=Equation
    28 IDTG I TABLEDi or DEQATN entry identification number for tension compression
    29 IDCG I DEQATN entry identification number for compression
    30 IDTDU I DEQATN entry identification number for scale factor versus displacement
    31 IDCDU I DEQATN entry identification number for force versus displacement
    32 IDTDV I DEQATN entry identification number for scale factor versus velocity
    33 IDCDV I DEQATN entry identification number for force vs velocity
    34 TYPEF I Fuse data type: 0=Null, 1=Table
    35 IDTF I TABLEDi entry identification number for tension
    36 IDCF I TABLEDi entry identification number for compression
    37 UT RS Ultimate tension
    38 UC RS Ultimate compression
    """
    op2 = self.op2
    # maps the TYPEA/TYPES/TYPED/TYPEG/TYPEF flags to their card keyword
    type_map = {
        0 : None,  # NULL
        1 : 'TABLE',
        2 : 'EQUAT',
    }
    ntotal = 152 * self.factor  # 38*4
    struct1 = Struct(mapfmt(op2._endian + b'i 6f i 4f 24i 2f', self.size))
    nentries = (len(data) - n) // ntotal
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid, k, c, m, unused_alpha, sa, se,
         typea, cvt, cvc, expvt, expvc, idtsu, idtcu, idtsud, idcsud,
         types, idts, idcs, idtdus, idcdus,
         typed, idtd, idcd, idtdvd, idcdvd,
         typeg, idtg, idcg, idtdug, idcdug, idtdvg, idcdvg,
         typef, idtf, idcf,
         unused_ut, unused_uc) = out
        # test_op2_other_05
        #pbush1d, 204, 1.e+5, 1000., , , , , , +pb1
        #+pb1, spring, table, 205, , , , , , +pb2
        #+pb2, damper, table, 206
        #pid=204 k=100000.0 c=1000.0 m=0.0 sa=nan se=nan
        msg = f'PBUSH1D pid={pid} k={k} c={c} m={m} sa={sa} se={se}'
        optional_vars = {}
        typea_str = type_map[typea]
        types_str = type_map[types]
        typed_str = type_map[typed]
        unused_typeg_str = type_map[typeg]
        unused_typef_str = type_map[typef]
        if min([typea, types, typed, typeg, typef]) < 0:
            raise RuntimeError(f'typea={typea} types={types} typed={typed} typeg={typeg} typef={typef}')

        if typea in [1, 2]: # SHOCKA?
            #pbush1d, 204, 1.e+5, 1000., , , , , , +pb4
            #+pb4, shocka, table, 1000., , 1., , 214, , +pb41
            #+pb41, spring, table, 205
            # idtsu is used for both the table and equation ID slots
            idts = idtsu # if typea_str == 'TABLE' else 0
            idets = idtsu # if typea_str == 'EQUAT' else 0
            optional_vars['SHOCKA'] = [typea_str, cvt, cvc, expvt, expvc,
                                       idts, idets, idtcu, idtsud, idcsud]
            #(shock_type, shock_cvt, shock_cvc, shock_exp_vt, shock_exp_vc,
            #shock_idts, shock_idets, shock_idecs, shock_idetsd, shock_idecsd
            #)
            #print('shock_idts, shock_idets', typea_str, idtsu, idtsu)
            msg += (
                f' SHOCKA type={typea} cvt={cvt} cvc={cvc} expvt={expvt} expvc={expvc}\n'
                f' idtsu={idtsu} (idts={idts} idets={idets}) idtcu={idtcu} idtsud={idtsud} idcsud={idcsud}')
        if types in [1, 2]: # SPRING: Spring data type: 0=Null, 1=Table, 2=Equation
            #(spring_type, spring_idt, spring_idc, spring_idtdu, spring_idcdu) = values
            # SPRING, TYPE IDT IDC IDTDU IDCDU
            optional_vars['SPRING'] = [types_str, idts, idcs, idtdus, idcdus]
            msg += f' SPRING type={types} idt={idts} idc={idcs} idtdu={idtdus} idcdu={idcdus}'
        if typed in [1, 2]: # Damper data type: 0=Null, 1=Table, 2=Equation
            optional_vars['DAMPER'] = [typed_str, idtd, idcd, idtdvd, idcdvd]
            # NOTE(review): 'idc={idtd}' below likely meant idcd — this only
            # affects the debug message, not the parsed data; verify
            msg += f' DAMPER type={typed} idt={idtd} idc={idtd} idtdv={idtdvd} idcdv={idcdvd}'
        if typeg in [1, 2]: # general, GENER?: 0=Null, 1=Table 2=Equation
            # C:\NASA\m4\formats\git\examples\move_tpl\ar29scbt.bdf
            #pbush1d, 206, 1.e+3, 10., , , , , , +pb6
            #+pb6, gener, equat, 315, , 3015, , 3016
            msg += f' GENER type={typeg} idt={idtg} idc={idcg} idtdu={idtdug} idcdu={idcdug} idtdv={idtdvg} idcdv={idcdvg}'
            optional_vars['GENER'] = [idtg, idcg, idtdug, idcdug, idtdvg, idcdvg]
        if typef in [1, 2]: # Fuse data type: 0=Null, 1=Table
            raise NotImplementedError(f'typef={typef} idtf={idtf} idcf={idcf}')
        if op2.is_debug_file:
            op2.binary_debug.write(msg)

        pbush1d = op2.add_pbush1d(pid, k=k, c=c, m=m, sa=sa, se=se,
                                  optional_vars=optional_vars,)
        # smoke-test the repr
        str(pbush1d)
        n += ntotal
    op2.card_count['PBUSH1D'] = nentries
    return n
#def _read_pbusht(self, data: bytes, n: int) -> int:
#"""reads the PBUSHT(702, 7, 38)"""
#n, props = self._read_pbusht_nx(data, n)
#for prop in props:
##print(prop)
#op2._add_pbusht_object(prop)
#return n
def _read_pbusht(self, data: bytes, n: int) -> int:
    """
    Reads the PBUSHT(702,7,38) card by dispatching on the per-property
    record length.

    NX 12 / MSC 2005 (100 bytes)
    Word Name Type Description
    1 PID I Property identification number
    2 TKID(6) I TABLEDi entry identification numbers for stiffness
    8 TBID(6) I TABLEDi entry identification numbers for viscous damping
    14 TGEID(6) I TABLEDi entry identification number for structural damping
    20 TKNID(6) I TABLEDi entry identification numbers for force versus deflection

    old style (80 bytes)
    Word Name Type Description
    1 PID I Property identification number
    2 TKID(6) I TABLEDi entry identification numbers for stiffness
    8 TBID(6) I TABLEDi entry identification numbers for viscous damping
    14 TGEID I TABLEDi entry identification number for structural damping
    15 TKNID(6) I TABLEDi entry IDs for force versus deflection
    """
    op2 = self.op2
    card_name = 'PBUSHT'
    card_obj = PBUSHT
    methods = {
        80 : self._read_pbusht_80,
        100 : self._read_pbusht_100,
        136 : self._read_pbusht_136,
    }
    try:
        n = op2.reader_geom2._read_double_card(
            card_name, card_obj, op2._add_methods._add_pbusht_object,
            methods, data, n)
    except DoubleCardError:
        # bug fix: this warning previously sat *after* the bare raise,
        # making it (and the code that followed) unreachable
        op2.log.warning(f'try-except {card_name}')
        raise
    return n
def _read_pbusht_nx_old(self, data: bytes, n: int) -> int:
    """
    Reads the old-style PBUSHT by guessing the record width from the
    payload length (100 bytes/property vs. 80 bytes/property).

    Returns ``(n, props)`` like the other PBUSHT helpers; if the width
    is ambiguous the whole table is skipped.
    """
    op2 = self.op2
    #op2.show_data(data[12:])
    ndata = (len(data) - n) // self.factor
    if ndata % 100 == 0 and ndata % 80 == 0:
        # both widths divide evenly -> we can't tell which format it is
        op2.log.warning(f"skipping PBUSHT in EPT because nfields={ndata//4}, which is "
                        'nproperties*25 or nproperties*20')
        return len(data), []
    if ndata % 100 == 0:
        # bug fix: the helpers take card_obj as their first argument;
        # it was previously omitted, which raised a TypeError when hit
        n, props = self._read_pbusht_100(PBUSHT, data, n)
    elif ndata % 80 == 0:
        n, props = self._read_pbusht_80(PBUSHT, data, n)
    else:
        # e.g., C:\MSC.Software\msc_nastran_runs\mbsh14.op2 pads records
        # with blanks (538976288 == b'    '), so the length matches
        # neither width
        #op2.show_data(data[n:], types='is')
        raise NotImplementedError('You have blank lines in your PBUSHT')
    return n, props
def _read_pbusht_80(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBUSHT]]:
    """
    Reads the 80-byte (20 word) PBUSHT form.

    Word Name Type Description
    1 PID I Property identification number
    2 TKID(6) I TABLEDi entry identification numbers for stiffness
    8 TBID(6) I TABLEDi entry identification numbers for viscous damping
    14 TGEID I TABLEDi entry identification number for structural damping
    15 TKNID(6) I TABLEDi entry identification numbers for force versus deflection

    ``card_obj`` is unused but required by the dispatch signature.
    """
    op2 = self.op2
    ntotal = 80 * self.factor
    # bug fix: the format was not run through mapfmt even though ntotal
    # scales with self.factor; the 64-bit path would unpack with the
    # wrong word size.  This mirrors _read_pbusht_100.
    struct1 = Struct(mapfmt(op2._endian + b'20i', self.size))
    nentries = (len(data) - n) // ntotal
    assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid,
         k1, k2, k3, k4, k5, k6,
         b1, b2, b3, b4, b5, b6,
         g1,
         n1, n2, n3, n4, n5, n6) = out
        # only one structural damping table is stored in this form;
        # replicate it across all 6 dofs
        g2 = g3 = g4 = g5 = g6 = g1
        k_tables = [k1, k2, k3, k4, k5, k6]
        b_tables = [b1, b2, b3, b4, b5, b6]
        ge_tables = [g1, g2, g3, g4, g5, g6]
        kn_tables = [n1, n2, n3, n4, n5, n6]
        prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbusht_100(self, card_obj, data: bytes, n: int) -> int:
    """
    Reads the 100-byte (25 word) PBUSHT form:
    pid + 6 K tables + 6 B tables + 6 GE tables + 6 KN tables.

    ``card_obj`` is unused but required by the dispatch signature.
    """
    op2 = self.op2
    ntotal = 100 * self.factor
    struct1 = Struct(mapfmt(op2._endian + b'25i', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    assert nentries > 0, 'table=%r len=%s' % (op2.table_name, ndata)
    props = []
    for unused_i in range(nentries):
        out = struct1.unpack(data[n:n+ntotal])
        pid = out[0]
        k_tables = list(out[1:7])
        b_tables = list(out[7:13])
        ge_tables = list(out[13:19])
        kn_tables = list(out[19:25])
        props.append(PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables))
        n += ntotal
    return n, props
def _read_pbusht_136(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBUSHT]]:
    r"""not 100%
    1 PID I Property identification number
    2 TKID(6) I TABLEDi entry identification numbers for stiffness
    8 TBID(6) I TABLEDi entry identification numbers for viscous damping
    14 TGEID(6) I TABLEDi entry identification number for structural damping
    20 TKNID(6) I TABLEDi entry IDs for force vs. deflection
    26 FDC(2) CHAR4 Force deflection curve rule
    28 FUSE I Failure level
    29 DIR I Fuse direction
    30 OPTION(2) CHAR4 Failure mode
    32 LOWER RS Lower failure bound
    33 UPPER RS Upper failure bound
    34 FRATE RS FACTOR of scales the stiffness
    35 LRGR I Controls large rotation
    36 UNDEF(4) none

    # C:\MSC.Software\msc_nastran_runs\mbsh14.op2
    PBUSHT 1 K 51 51
    B 61 61
    PBUSHT 7 K 51 51
    B 61 61

    538976288 = '    ' (four ASCII blanks read as an int)
    ints = (
    702, 7, 38,
    1, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0,
    7, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0)
    """
    op2 = self.op2
    props = []
    ntotal = 136 * self.factor # k b g n fdc
    struct1 = Struct(mapfmt(op2._endian + b'i 6i 6i 6i 6i 4s 2i i 5i', self.size))
    nentries = (len(data) - n) // ntotal
    assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid,
         k1, k2, k3, k4, k5, k6,
         b1, b2, b3, b4, b5, b6,
         g1, g2, g3, g4, g5, g6,
         n1, n2, n3, n4, n5, n6,
         word1, a, word2, c, *other) = out
        # 538976288 is b'    ' (blank padding) — treat it as "no table"
        k_tables = [ki if ki != 538976288 else 0
                    for ki in [k1, k2, k3, k4, k5, k6]]
        b_tables = [bi if bi != 538976288 else 0
                    for bi in [b1, b2, b3, b4, b5, b6]]
        ge_tables = [gei if gei != 538976288 else 0
                     for gei in [g1, g2, g3, g4, g5, g6]]
        kn_tables = [kni if kni != 538976288 else 0
                     for kni in [n1, n2, n3, n4, n5, n6]]
        # the FDC/FUSE/OPTION words (word1, a, word2, c) are decoded but
        # not yet mapped onto the PBUSHT object; warn so users can see
        # what was dropped
        op2.log.warning(
            f'PBUSHT: pid={pid} '
            f'k={k_tables} '
            f'b={b_tables} '
            f'ge={ge_tables} '
            f'n={kn_tables} ' +
            'words=' + str([word1, a, word2, c]) +
            f' other={other}')
        # the trailing UNDEF words are expected to be all zero
        assert sum(other) == 0, other
        prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pcomp(self, data: bytes, n: int) -> int:
    r"""
    PCOMP(2706,27,287) - the marker for Record 22

    For 32-bit files the standard parser is used directly; for 64-bit
    files the 32-bit and 64-bit layouts are tried via _read_dual_card
    (EPTS; 64-bit: C:\MSC.Software\simcenter_nastran_2019.2\tpl_post1\cqrdbxdra3lg.op2).
    """
    op2 = self.op2
    if self.size == 4:
        n2, props = self._read_pcomp_32_bit(data, n)
        for prop in props:
            self._add_op2_property(prop)
        op2.card_count['PCOMP'] = len(props)
    else:
        n2 = op2.reader_geom2._read_dual_card(
            data, n,
            self._read_pcomp_32_bit, self._read_pcomp_64_bit,
            'PCOMP', self._add_op2_property)
    return n2
def _read_pcomp_64_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:
    """
    PCOMP(2706,27,287) - the marker for Record 22

    1 PID I Property identification number
    2 N(C) I Number of plies
    3 Z0 RS Distance from the reference plane to the bottom surface
    4 NSM RS Nonstructural mass per unit area
    5 SB RS Allowable shear stress of the bonding material
    6 FT I Failure theory
    7 TREF RS Reference temperature
    8 GE RS Damping coefficient
    9 MID I Material identification number
    10 T RS Thicknesses of the ply
    11 THETA RS Orientation angle of the longitudinal direction of the ply
    12 SOUT I Stress or strain output request of the ply
    Words 9 through 12 repeat N times

    TODO:
    64-bit bug: why is the number of plies 0???
    In the observed 64-bit files nlayers is written as 0 and each
    property's ply list is instead terminated by a (-1, -1, -1, -1)
    sentinel, e.g.:

    doubles (float64) = (
    1, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,
    1, 0.11, 0, 1,
    1, 0.11, 0, 1,
    1, 0.11, 0, 1,
    -1, -1, -1, -1, ...)

    C:\MSC.Software\simcenter_nastran_2019.2\tpl_post2\dbxdr12lg.op2
    """
    op2 = self.op2
    op2.to_nx(' because PCOMP-64 was found')
    nproperties = 0
    # 8-word fixed header per property
    s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))
    ntotal1 = 32 * self.factor

    # 4-word ply record; four_minus1 probes for the sentinel
    s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))
    four_minus1 = Struct(mapfmt(op2._endian + b'4i', self.size))
    ndata = len(data)
    ntotal2 = 16 * self.factor
    props = []
    while n < (ndata - ntotal1):
        out = s1.unpack(data[n:n+ntotal1])
        (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out
        assert pid > 0
        if op2.binary_debug:
            op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                                   f'sb={sb} ft={ft} Tref={tref} ge={ge}')
        assert isinstance(nlayers, int), out
        #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
        #f'sb={sb} ft={ft} Tref={tref} ge={ge}')
        n += ntotal1

        # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'
        is_symmetrical = 'NO'
        #if nlayers < 0:
        #is_symmetrical = 'SYM'
        #nlayers = abs(nlayers)
        mids = []
        T = []
        thetas = []
        souts = []

        # read 4-word ply records until the (-1, -1, -1, -1) sentinel
        edata2 = data[n:n+ntotal2]
        idata = four_minus1.unpack(edata2)
        while idata != (-1, -1, -1, -1):
            (mid, t, theta, sout) = s2.unpack(edata2)
            mids.append(mid)
            T.append(t)
            thetas.append(theta)
            souts.append(sout)
            if op2.is_debug_file:
                op2.binary_debug.write(f' mid={mid} t={t} theta={theta} sout={sout}\n')
            n += ntotal2
            #print(f' mid={mid} t={t} theta={theta} sout={sout}')
            edata2 = data[n:n+ntotal2]
            if n == ndata:
                # ran off the end of the record without a sentinel
                op2.log.warning(' no (-1, -1, -1, -1) flag was found to close the PCOMPs')
                break
            idata = four_minus1.unpack(edata2)
        if self.size == 4:
            assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s Tref=%s ge=%s' % (
                pid, nlayers, z0, nsm, sb, ft, tref, ge)
        else:
            # 64-bit files store nlayers=0; recover the count from the
            # sentinel-terminated ply list instead
            assert nlayers == 0, nlayers
            nlayers = len(mids)

        data_in = [
            pid, z0, nsm, sb, ft, tref, ge,
            is_symmetrical, mids, T, thetas, souts]
        prop = PCOMP.add_op2_data(data_in)
        nproperties += 1
        # skip over the sentinel
        n += ntotal2
        props.append(prop)
    return n, props
def _read_pcomp_32_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:  # pragma: no cover
    """
    PCOMP(2706,27,287) - the marker for Record 22

    Reads the standard 32-bit PCOMP layout: an 8-word header per
    property (pid, nlayers, z0, nsm, sb, ft, tref, ge) followed by
    ``nlayers`` 4-word ply records (mid, t, theta, sout).  A negative
    nlayers marks a symmetric layup.
    """
    op2 = self.op2
    nproperties = 0
    s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))
    ntotal1 = 32 * self.factor
    s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))

    ndata = len(data)
    ntotal2 = 16 * self.factor
    props = []
    while n < (ndata - ntotal1):
        out = s1.unpack(data[n:n+ntotal1])
        (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out
        assert pid > 0
        if op2.binary_debug:
            op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                                   f'sb={sb} ft={ft} Tref={tref} ge={ge}')
        assert isinstance(nlayers, int), out
        #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
        #f'sb={sb} ft={ft} Tref={tref} ge={ge}')
        n += ntotal1

        mids = []
        T = []
        thetas = []
        souts = []

        # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'
        is_symmetrical = 'NO'
        if nlayers < 0:
            # negative ply count encodes a symmetric layup
            is_symmetrical = 'SYM'
            nlayers = abs(nlayers)
        assert nlayers > 0, out

        assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (
            pid, nlayers, z0, nsm, sb, ft, tref, ge)

        if op2.is_debug_file:
            op2.binary_debug.write(' pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s\n' % (
                pid, nlayers, z0, nsm, sb, ft, tref, ge))
        #if op2._nastran_format == 'optistruct':
        #print(' pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (
        #pid, nlayers, z0, nsm, sb, ft, tref, ge))
        for unused_ilayer in range(nlayers):
            (mid, t, theta, sout) = s2.unpack(data[n:n+ntotal2])
            if op2._nastran_format == 'optistruct':
                #print(f' mid={mid} t={t} theta={theta} sout={sout}')
                # OptiStruct writes sout values of 2/3; remap to YES
                if sout in [2, 3]:  # TODO: Why is this 2/3?
                    sout = 1  # YES
            mids.append(mid)
            assert mid > 0
            T.append(t)
            thetas.append(theta)
            souts.append(sout)
            if op2.is_debug_file:
                op2.binary_debug.write(f' mid={mid} t={t} theta={theta} sout={sout}\n')
            n += ntotal2

        data_in = [
            pid, z0, nsm, sb, ft, tref, ge,
            is_symmetrical, mids, T, thetas, souts]
        prop = PCOMP.add_op2_data(data_in)
        #print(prop)
        props.append(prop)
        nproperties += 1
    return n, props
def _read_pcompg(self, data: bytes, n: int) -> int:
    """
    PCOMP(2706,27,287)

    1 PID I Property identification number
    2 LAMOPT I Laminate option
    3 Z0 RS Distance from the reference plane to the bottom surface
    4 NSM RS Nonstructural mass per unit area
    5 SB RS Allowable shear stress of the bonding material
    6 FT I Failure theory
    7 TREF RS Reference temperature
    8 GE RS Damping coefficient

    9 GPLYIDi I Global ply IDs.
    10 MID I Material identification number
    11 T RS Thicknesses of the ply
    12 THETA RS Orientation angle of the longitudinal direction of the ply
    13 SOUT I Stress or strain output request of the ply
    Words 9 through 13 repeat N times (until -1, -1, -1, -1, -1 as Nplies doesn't exist...)

    float = (15006, 150, 604,
             5, 0.0, 1.7368e-18, 0.0, 0.0, 0.0, 20.0, 0.0,
             5e-324, 5e-324, 2.0, 0.0, 0.0,
             1e-323, 1e-323, 3.0, 0.0, 0.0,
             1.5e-323, 1e-323, 3.0, 0.0, 0.0,
             2e-323, 5e-324, 2.0, 0.0, 0.0,
             nan, nan, nan, nan, nan)
    int = (15006, 150, 604,
           5, 0, 1.7368e-18, 0, 0, 0, 20.0, 0,
           1, 1, 4611686018427387904, 0, 0,
           2, 2, 4613937818241073152, 0, 0,
           3, 2, 4613937818241073152, 0, 0,
           4, 1, 4611686018427387904, 0, 0,
           -1, -1, -1, -1, -1)
    """
    op2 = self.op2
    nproperties = 0
    # header: (pid, lam, z0, nsm, sb, ft, tref, ge) -> 8 words
    s1 = Struct(mapfmt(op2._endian + b'2i 3f i 2f', self.size))
    # ply: (global_ply, mid, t, theta, sout) -> 5 words
    s2 = Struct(mapfmt(op2._endian + b'2i 2f i', self.size))
    # used only to detect the (-1, -1, -1, -1, -1) terminator
    struct_i5 = Struct(mapfmt(op2._endian + b'5i', self.size))

    # lam - SYM, MEM, BEND, SMEAR, SMCORE, None
    # NOTE(review): only the 0 -> None mapping is implemented; any other
    # LAMOPT value is logged and the property is skipped below
    lam_map = {
        0 : None,
        # MEM
        # BEND
        # SMEAR
        # SMCORE
    }

    # ft - HILL, HOFF, TSAI, STRN, None
    # NOTE(review): partial map; unmapped failure theories are skipped below
    ft_map = {
        0 : None,
        # HILL
        # HOFF
        3 : 'TSAI',
        # STRN
    }
    # sout - YES, NO
    sout_map = {
        0 : 'NO',
        1 : 'YES',
    }
    ndata = len(data)
    #op2.show_data(data, types='qd')
    ntotal1 = 32 * self.factor
    ntotal2 = 20 * self.factor
    while n < (ndata - ntotal1):
        out = s1.unpack(data[n:n+ntotal1])
        (pid, lam_int, z0, nsm, sb, ft_int, tref, ge) = out
        if op2.binary_debug:
            op2.binary_debug.write(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} '
                                   f'sb={sb} ft_int={ft_int} tref={tref} ge={ge}')
        #print(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} sb={sb} '
              #f'ft_int={ft_int} tref={tref} ge={ge}')
        assert isinstance(lam_int, int), out
        assert pid > -1, out
        n += ntotal1

        mids = []
        thicknesses = []
        thetas = []
        souts = []
        global_ply_ids = []

        # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'
        #is_symmetrical = 'NO'
        #if nlayers < 0:
            #is_symmetrical = 'SYM'
            #nlayers = abs(nlayers)
        #assert nlayers > 0, out

        #assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s' % (
            #pid, nlayers, z0, nsm, sb, ft, tref, ge)

        #if op2.is_debug_file:
            #op2.binary_debug.write('  pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s\n' % (
                #pid, nlayers, z0, nsm, sb, ft, tref, ge))
        # the ply count is not stored; read plies until the -1 sentinel
        # (1000 is just a sanity bound against runaway parsing)
        ilayer = 0
        while ilayer < 1000:
            ints5 = struct_i5.unpack(data[n:n+ntotal2])
            if ints5 == (-1, -1, -1, -1, -1):
                if op2.is_debug_file:
                    # NOTE(review): '%-1' in this literal looks like a typo
                    # for '=-1'; it is written verbatim (no %-formatting)
                    op2.binary_debug.write('      global_ply=%-1 mid=%-1 t=%-1 theta=%-1 sout=-1\n')
                break
            (global_ply, mid, t, theta, sout_int) = s2.unpack(data[n:n+ntotal2])
            #print('  ', (global_ply, mid, t, theta, sout_int))
            try:
                sout = sout_map[sout_int]
            except KeyError:
                op2.log.error('cant parse global_ply=%s sout=%s; assuming 0=NO' % (
                    global_ply, sout_int))
                sout = 'NO'
            global_ply_ids.append(global_ply)
            mids.append(mid)
            thicknesses.append(t)
            thetas.append(theta)
            souts.append(sout)
            if op2.is_debug_file:
                op2.binary_debug.write('      global_ply=%s mid=%s t=%s theta=%s sout_int=%s sout=%r\n' % (
                    global_ply, mid, t, theta, sout_int, sout))
            n += ntotal2
            ilayer += 1
        # consume the (-1, -1, -1, -1, -1) terminator record
        n += ntotal2

        # the plies were already consumed, so 'continue' is safe here
        try:
            ft = ft_map[ft_int]
        except KeyError:
            op2.log.error('pid=%s cant parse ft=%s; should be HILL, HOFF, TSAI, STRN'
                          '...skipping' % (pid, ft_int))
            continue

        try:
            lam = lam_map[lam_int]
        except KeyError:
            op2.log.error('pid=%s cant parse lam=%s; should be HILL, HOFF, TSAI, STRN'
                          '...skipping' % (pid, lam_int))
            continue

        # apparently Nastran makes duplicate property ids...
        if pid in op2.properties and op2.properties[pid].type == 'PCOMP':
            del op2.properties[pid]

        op2.add_pcompg(pid, global_ply_ids, mids, thicknesses, thetas=thetas, souts=souts,
                       nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam, z0=z0, comment='')
        nproperties += 1
    op2.card_count['PCOMPG'] = nproperties
    return n
# PCOMPA
def _read_pconeax(self, data: bytes, n: int) -> int:
    """(152,19,147) - Record 24; PCONEAX parsing is not implemented, so the record is skipped."""
    op2 = self.op2
    op2.log.info('geom skipping PCONEAX in EPT')
    return len(data)
def _read_pconv(self, data: bytes, n: int) -> int:
    """common method for reading PCONVs

    PCONV is written in two layouts (NX: 16 bytes, MSC: 56 bytes); the
    record size is used to dispatch to the right reader, falling back to
    a dual-format trial parse when the size alone is ambiguous.
    Duplicate pconid entries are reconciled before being added.
    """
    op2 = self.op2
    #n = self._read_dual_card(data, n, self._read_pconv_nx, self._read_pconv_msc,
                             #'PCONV', self._add_pconv)
    card_name = 'PCONV'
    card_obj = PCONV
    methods = {
        16 : self._read_pconv_nx_16,   # 16=4*4
        56 : self._read_pconv_msc_56,  # 56=4*14
    }
    try:
        n, elements = op2.reader_geom2._read_double_card_load(
            card_name, card_obj,
            methods, data, n)
    except DoubleCardError:
        # size-based dispatch failed; try both formats explicitly
        nx_method = partial(self._read_pconv_nx_16, card_obj)
        msc_method = partial(self._read_pconv_msc_56, card_obj)
        n, elements = op2._read_dual_card_load(
            data, n,
            nx_method, msc_method,
            card_name, self._add_op2_property)

    nelements = len(elements)
    for prop in elements:
        key = prop.pconid
        if key in op2.convection_properties:
            prop_old = op2.convection_properties[key]
            if prop != prop_old:
                op2.log.warning(prop.raw_fields())
                op2.log.warning(prop_old.raw_fields())
                op2.log.warning(f'PCONV pconid={key}; old, new\n{prop_old}{prop}')
                # this will fail due to a duplicate id
                self._add_pconv(prop)
            #else:
                # already exists
        else:
            self._add_pconv(prop)
    op2.card_count['PCONV'] = nelements
    return n
def _read_pconv_nx_16(self, card_obj: PCONV, data: bytes, n: int) -> Tuple[int, List[PCONV]]:
    """
    (11001,110,411)- NX version

    4-word records: (pconid, mid, form, expf); the MSC-only fields are
    filled with None.  Returns (n, props) without adding to the model.
    """
    op2 = self.op2
    ntotal = 16  # 4*4
    struct_3if = Struct(op2._endian + b'3if')
    nentries = (len(data) - n) // ntotal
    assert (len(data) - n) % ntotal == 0
    props = []
    for unused_i in range(nentries):
        out = struct_3if.unpack(data[n:n+ntotal])
        (pconid, mid, form, expf) = out
        # fields that only exist in the MSC layout
        ftype = tid = chlen = gidin = ce = e1 = e2 = e3 = None
        data_in = (pconid, mid, form, expf, ftype, tid, chlen,
                   gidin, ce, e1, e2, e3)

        prop = PCONV.add_op2_data(data_in)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pconv_msc_56(self, card_obj: PCONV, data: bytes, n: int) -> Tuple[int, List[PCONV]]:
    """
    (11001,110,411)- MSC version - Record 25

    14-word records; two undefined filler words are discarded.
    Returns (n, props) without adding to the model.
    """
    op2 = self.op2
    ntotal = 56  # 14*4
    s = Struct(op2._endian + b'3if 4i fii 3f')
    nentries = (len(data) - n) // ntotal
    assert (len(data) - n) % ntotal == 0
    props = []
    for unused_i in range(nentries):
        out = s.unpack(data[n:n+ntotal])
        (pconid, mid, form, expf, ftype, tid, unused_undef1, unused_undef2, chlen,
         gidin, ce, e1, e2, e3) = out
        data_in = (pconid, mid, form, expf, ftype, tid, chlen,
                   gidin, ce, e1, e2, e3)

        prop = PCONV.add_op2_data(data_in)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pconvm(self, data: bytes, n: int) -> int:
    """Record 24 -- PCONVM(2902,29,420)

    1 PID I Property identification number
    2 MID I Material identification number
    3 FORM I Type of formula used for free convection
    4 FLAG I Flag for mass flow convection
    5 COEF RS Constant coefficient used for forced convection
    6 EXPR RS Reynolds number convection exponent
    7 EXPPI RS Prandtl number convection exponent into the working fluid
    8 EXPPO RS Prandtl number convection exponent out of the working fluid
    """
    op2 = self.op2
    ntotal = 32  # 8*4
    structi = Struct(op2._endian + b'4i 4f')
    nentries = (len(data) - n) // ntotal
    for unused_i in range(nentries):
        out = structi.unpack(data[n:n+ntotal])
        # all-zero records are padding and are skipped
        if out != (0, 0, 0, 0, 0., 0., 0., 0.):
            (pconid, mid, form, flag, coeff, expr, expri, exppo) = out
            #print(out)
            prop = PCONVM(pconid, mid, coeff, form=form, flag=flag,
                          expr=expr, exppi=expri, exppo=exppo, comment='')
            op2._add_methods._add_convection_property_object(prop)
        n += ntotal
    # NOTE(review): nentries includes skipped all-zero records
    op2.card_count['PCONVM'] = nentries
    return n
def _read_pdamp(self, data: bytes, n: int) -> int:
    """PDAMP(202,2,45) - the marker for Record ???

    Fixed-size 2-word records: (pid, b).
    """
    op2 = self.op2
    ntotal = 8 * self.factor  # 2 words
    fmt = mapfmt(op2._endian + b'if', self.size)
    struct_if = Struct(fmt)
    nentries = (len(data) - n) // ntotal
    for _ in range(nentries):
        out = struct_if.unpack(data[n:n + ntotal])
        #(pid, b) = out
        self._add_op2_property(PDAMP.add_op2_data(out))
        n += ntotal
    op2.card_count['PDAMP'] = nentries
    return n
def _read_pdampt(self, data: bytes, n: int) -> int:  # 26
    """PDAMPT parsing is not implemented; log and skip the record."""
    op2 = self.op2
    op2.log.info('geom skipping PDAMPT in EPT')
    return len(data)
def _read_pdamp5(self, data: bytes, n: int) -> int:  # 26
    """PDAMP5 parsing is not implemented; log and skip the record."""
    op2 = self.op2
    op2.log.info('geom skipping PDAMP5 in EPT')
    return len(data)
# PDUM1
# PDUM2
# PDUM3
# PDUM4
# PDUM5
# PDUM6
# PDUM7
# PDUM8
# PDUM9
def _read_pelas(self, data: bytes, n: int) -> int:
    """PELAS(302,3,46) - the marker for Record 39

    Fixed-size 4-word records: (pid, k, ge, s).
    """
    op2 = self.op2
    struct_i3f = Struct(mapfmt(op2._endian + b'i3f', self.size))
    ntotal = 16 * self.factor  # 4*4
    nproperties = (len(data) - n) // ntotal
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct_i3f.unpack(edata)
        #(pid, k, ge, s) = out
        if op2.is_debug_file:
            op2.binary_debug.write('  PELAS=%s\n' % str(out))
        prop = PELAS.add_op2_data(out)
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PELAS'] = nproperties
    return n
def _read_pfast_msc(self, data: bytes, n: int) -> int:
    r"""
    Word Name Type Description
    1 PID       I Property identification number
    2 MID       I Material property identification number
    3 D        RS Diameter of the fastener
    4 CONNBEH   I Connection behavior (0=FF/F, 1=FR, 10=RF/R, 11=RR)
    5 CONNTYPE  I Connection type (0=clamp, 1=hinge, 2=bolt)
    6 EXTCON    I External constraint flag (0=off, 1=on)
    7 CONDTYPE  I Condition type (0=rigid, 1=equivalent)
    8 WELDTYPE  I Weld type (0=spot weld, 1=but seam, 2=T-seam)
    9 MINLEN   RS Minimum length of spot weld
    10 MAXLEN  RS Maximum length of spot weld
    11 GMCHK    I Perform geometry check
    12 SPCGS    I SPC the master grid GS
    13 CMASS   RS Concentrated mass
    14 GE      RS Structureal Damping
    15 UNDEF(3) none Not used
    18 MCID     I Element stiffness coordinate system
    19 MFLAG    I Defined the coordinate system type
    20 KT(3)   RS Stiffness values in direction 1
    23 KR(3)   RS Rotation stiffness values in direction 1

    C:\MSC.Software\msc_nastran_runs\cfmass.op2
            pid mid D    con con ext cond weld min  max  chk spc cmass ge  und und und mcid mfag kt1       kt2      kt3       kr1     kr2     kr3
    ints   = (99, 0, 0.1, 0, 0, 0, 0, -1, 0.2, 5.0, 0, 0, 7.9, 0, 0, 0, 0, -1, 0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)
    floats = (99, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, -1, 0.2, 5.0, 0.0, 0.0, 7.9, 0.0, 0.0, 0.0, 0.0, -1, 0.0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)
    """
    op2 = self.op2
    #op2.show_data(data[n:], types='ifs')
    #ntotal = 92 * self.factor  # 26*4
    #struct1 = Struct(op2._endian + b'ifii 3f')
    ntotal = 100 * self.factor  # 25*4
    struct1 = Struct(op2._endian + b'2if 5i 2f2i2f 3i 2i 6f')
    ndatai = len(data) - n
    nproperties = ndatai // ntotal
    delta = ndatai % ntotal
    assert delta == 0, 'len(data)-n=%s n=%s' % (ndatai, ndatai / 100.)
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        if op2.is_debug_file:
            op2.binary_debug.write('  PFAST=%s\n' % str(out))
        # NOTE(review): the names below do not line up with the word table
        # above -- word 2 (MID) lands in 'd' and word 3 (D, a float) lands in
        # the first 'mcid', which is then shadowed by the rebinding at word 18.
        # Whether PFAST.add_op2_data expects this layout should be verified
        # against the cfmass.op2 regression case.
        (pid, d, mcid, unused_connbeh, unused_conntype, unused_extcon,
         unused_condtype, unused_weldtype, unused_minlen, unused_maxlen,
         unused_gmcheck, unused_spcgs, mass, ge,
         unused_aa, unused_bb, unused_cc, mcid, mflag,
         kt1, kt2, kt3, kr1, kr2, kr3) = out

        data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,
                   kr1, kr2, kr3, mass, ge)
        prop = PFAST.add_op2_data(data_in)
        str(prop)
        #print(prop)
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PFAST'] = nproperties
    return n
def _read_pfast_nx(self, data: bytes, n: int) -> int:
    """
    PFAST(3601,36,55)
    NX only

    12-word records: (pid, d, mcid, mflag, kt1-kt3, kr1-kr3, mass, ge).
    Finding this record flags the deck as NX-flavored.
    """
    op2 = self.op2
    ntotal = 48
    struct1 = Struct(op2._endian + b'ifii 8f')
    nproperties = (len(data) - n) // ntotal
    delta = (len(data) - n) % ntotal
    assert delta == 0, 'len(data)-n=%s n=%s' % (len(data) - n, (len(data) - n) / 48.)
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        if op2.is_debug_file:
            op2.binary_debug.write('  PFAST=%s\n' % str(out))
        (pid, d, mcid, mflag, kt1, kt2, kt3, kr1, kr2, kr3, mass, ge) = out

        data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,
                   kr1, kr2, kr3, mass, ge)
        prop = PFAST.add_op2_data(data_in)
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PFAST'] = nproperties
    op2.to_nx(' because PFAST-NX was found')
    return n
def _read_pelast(self, data: bytes, n: int) -> int:
    """
    Record 41 -- PELAST(1302,13,34)

    1 PID I Property identification number
    2 TKID I TABLEDi entry identification number for stiffness
    3 TGEID I TABLEDi entry identification number for structural
              damping
    4 TKNID I TABLEDi entry
    """
    op2 = self.op2
    ntotal = 16 * self.factor  # 4 words
    struct_4i = Struct(mapfmt(op2._endian + b'4i', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct_4i.unpack(edata)
        if op2.is_debug_file:
            op2.binary_debug.write('  PELAST=%s\n' % str(out))
        #(pid, tkid, tgeid, tknid) = out
        prop = PELAST.add_op2_data(out)
        op2._add_methods._add_pelast_object(prop)
        n += ntotal
    op2.card_count['PELAST'] = nproperties
    return n
def _read_pgap(self, data: bytes, n: int) -> int:
    """
    PGAP(2102,21,121) - the marker for Record 42

    Fixed-size 11-word records:
    (pid, u0, f0, ka, kb, kt, mu1, mu2, tmax, mar, trmin).
    """
    op2 = self.op2
    ntotal = 44 * self.factor
    struct_i10f = Struct(mapfmt(op2._endian + b'i10f', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct_i10f.unpack(edata)
        if op2.is_debug_file:
            op2.binary_debug.write('  PGAP=%s\n' % str(out))
        #(pid,u0,f0,ka,kb,kt,mu1,mu2,tmax,mar,trmin) = out
        prop = PGAP.add_op2_data(out)
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PGAP'] = nproperties
    return n
def _read_phbdy(self, data: bytes, n: int) -> int:
    """
    PHBDY(2802,28,236) - the marker for Record 43

    Fixed-size 4-word records: (pid, af, d1, d2).
    """
    op2 = self.op2
    struct_i3f = Struct(op2._endian + b'ifff')
    nproperties = (len(data) - n) // 16
    for unused_i in range(nproperties):
        edata = data[n:n+16]
        out = struct_i3f.unpack(edata)
        if op2.is_debug_file:
            op2.binary_debug.write('  PHBDY=%s\n' % str(out))
        #(pid, af, d1, d2) = out
        prop = PHBDY.add_op2_data(out)
        op2._add_methods._add_phbdy_object(prop)
        n += 16
    op2.card_count['PHBDY'] = nproperties
    return n
def _read_pintc(self, data: bytes, n: int) -> int:
    """PINTC parsing is not implemented; log and skip the record."""
    op2 = self.op2
    op2.log.info('geom skipping PINTC in EPT')
    return len(data)
def _read_pints(self, data: bytes, n: int) -> int:
    """PINTS parsing is not implemented; log and skip the record."""
    op2 = self.op2
    op2.log.info('geom skipping PINTS in EPT')
    return len(data)
def _read_pbeam3(self, data: bytes, n: int) -> int:
    """Reads PBEAM3 records, which are written in two sizes
    (264 bytes or 456 bytes per property); the record length
    selects the concrete reader.
    """
    op2 = self.op2
    # bug fix: this block previously labeled itself 'PBUSHT'/PBUSHT
    # (copy/paste error), which produced misleading log/error messages
    card_name = 'PBEAM3'
    card_obj = PBEAM3
    methods = {
        264 : self._read_pbeam3_264,
        456 : self._read_pbeam3_456,
    }
    # the original `except DoubleCardError: raise` plus an unreachable
    # warning after the raise was dead code; letting the exception
    # propagate naturally is equivalent
    n = op2.reader_geom2._read_double_card(
        card_name, card_obj, self._add_op2_property,
        methods, data, n)
    return n
def _read_pbeam3_456(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBEAM3]]:
    r"""
    Reads the 456-byte (114-word) PBEAM3 layout.  Only (pid, mid, A, iz, iy)
    are currently used to build the PBEAM3; the remaining words are unpacked
    but discarded (see the NOTE below).

    # per C:\MSC.Software\msc_nastran_runs\b3plod3.op2
    ints = (2201, 1, 1.0, 0.1833, 0.0833, 0, -1.0, 0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,
    2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2901, 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5,
    2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

    floats = (2201, 1, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,
    2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    2901, 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5,
    2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
    1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    """
    op2 = self.op2
    #op2.show_data(data[n:])
    ntotal = 456 * self.factor  # 114*4
    #
    struct1 = Struct(mapfmt(op2._endian +
                            b'2i'        # pid, mid
                            b'3f'        # A, Iy, Iz
                            b'5f'        # a, b, c, d, e
                            b'5f fi 14f i'  # fj ki 14f i
                            b'2i3f'      # aa-ee - good
                            b'5f'        # ff-jj
                            b'5f'        # kk-oo
                            b'5f'        # pp-tt
                            b'6f'        # uu-zz
                            b'5f'        # aaa-eee
                            b'4i'        # fff-iii
                            # jjj-ooo
                            b'2f iii f'
                            # ppp-ttt
                            b'5f'
                            # uuu-zzz
                            b'6f'
                            b'30f', self.size))
    ndatai = len(data) - n
    nentries = ndatai // ntotal
    assert ndatai % ntotal == 0
    props = []
    for unused_i in range(nentries):
        #print(n, ntotal)
        datai = data[n:n+ntotal]
        #op2.show_data(datai, types='ifqd')
        n += ntotal
        (pid, mid, A, iz, iy,
         a, b, c, d, e,
         f, g, h, i, j,
         k, inta, l, m, ni, o, p, q, r, s, t, u, v, w, x, y, z,
         aa, bb, cc, dd, ee,
         ff, gg, hh, ii, jj,
         kk, ll, mm, nn, oo,
         pp, qq, rr, ss, tt,
         uu, vv, ww, xx, yy, zz,
         aaa, bbb, ccc, ddd, eee,
         fff, ggg, hhh, iii,
         jjj, kkk, lll, mmm, nnn, ooo,
         ppp, qqq, rrr, sss, ttt,
         uuu, vvv, www, xxx, yyy, zzz,
         *other) = struct1.unpack(datai)
        #print(pid, mid, A, iz, iy)
        #print('a-e', (a, b, c, d, e))
        #print('f-j', (f, g, h, i, j))
        #print(k, inta, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z)
        #print('aa-ee', (aa, bb, cc, dd, ee))
        #print('ff-jj', (ff, gg, hh, ii, jj))
        #print('kk-oo', (kk, ll, mm, nn, oo))
        #print('pp-tt', (pp, qq, rr, ss, tt))
        #print('uu-zz', (uu, vv, ww, xx, yy, zz))
        #print('aaa-eee', (aaa, bbb, ccc, ddd, eee))
        #print('fff-jjj', (fff, ggg, hhh, iii))
        #print('jjj-ooo', (jjj, kkk, lll, mmm, nnn, ooo))
        #print('ppp-ttt', (ppp, qqq, rrr, sss, ttt))
        #print('uuu-zzz', (uuu, vvv, www, xxx, yyy, zzz))
        # mid=0 entries are the second/third station blocks; skip them
        if mid == 0:
            continue
        #assert sum(other) < 100, other
        # NOTE(review): most of the unpacked words above are discarded;
        # only pid/mid/A/iz/iy feed the PBEAM3 (partial translation)
        prop = PBEAM3(
            pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,
            so=None,
            cy=None, cz=None,
            dy=None, dz=None,
            ey=None, ez=None,
            fy=None, fz=None,
            ky=1., kz=1.,
            ny=None, nz=None, my=None, mz=None,
            nsiy=None, nsiz=None, nsiyz=None,
            cw=None, stress='GRID',
            w=None, wy=None, wz=None, comment='')
        assert pid > 0, prop.get_stats()
        assert mid > 0, prop.get_stats()
        str(prop)
        props.append(prop)
        #self._add_op2_property(prop)
    #op2.card_count['PBEAM3'] = nentries
    return n, props
def _read_pbeam3_264(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBEAM3]]:
    """
    TODO: partial

    Reads the 264-byte (66-word) PBEAM3 layout; like the 456-byte reader,
    only (pid, mid, A, iz, iy) are kept.

    # per test_cbeam_cbeam3???
    ints = (2901, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0)
    floats = (2901, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 0.1, 0.1, 0.1, 0.0, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0)
    """
    op2 = self.op2
    ntotal = 264 * self.factor  # 66*4
    #                                          p/m ayz ae fj ki 14f i
    struct1 = Struct(mapfmt(op2._endian + b'2i 3f 5f 5f fi 14f i 30f 4i', self.size))
    ndatai = len(data) - n
    nentries = ndatai // ntotal
    assert ndatai % ntotal == 0
    props = []
    for unused_i in range(nentries):
        pid, mid, A, iz, iy, a, b, c, d, e, f, g, h, i, j, k, inta, *other = struct1.unpack(data[n:n+ntotal])
        #print(pid, mid, A, iz, iy)
        #print((a, b, c, d, e))
        #print((f, g, h, i, j))
        #print(k, inta)
        # sanity check that the trailing words are (near) empty
        assert sum(other) < 100, other
        prop = PBEAM3(
            pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,
            so=None,
            cy=None, cz=None,
            dy=None, dz=None,
            ey=None, ez=None,
            fy=None, fz=None,
            ky=1., kz=1.,
            ny=None, nz=None, my=None, mz=None,
            nsiy=None, nsiz=None, nsiyz=None,
            cw=None, stress='GRID',
            w=None, wy=None, wz=None, comment='')
        assert pid > 0, prop.get_stats()
        assert mid > 0, prop.get_stats()
        str(prop)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pplane(self, data: bytes, n: int) -> int:
    """
    RECORD - PPLANE(3801,38,979)

    Word Name Type Description
    1 PID I Property identification number
    2 MID I Material identification number
    3 T RS Default membrane thickness for Ti on the connection entry
    4 NSM RS Nonstructural mass per unit area
    5 FOROPT I Formulation option number
    6 CSOPT I Reserved for coordinate system definition of plane
    7 UNDEF(2) None

    ints    = (1, 1, 1.0, 0, 0, 0, 0, 0, 2, 2, 1.0, 0, 0, 0, 0, 0)
    floats  = (1, 1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 2, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    """
    op2 = self.op2
    ntotal = 32 * self.factor  # 8*4
    struct1 = Struct(mapfmt(op2._endian + b'2i 2f 4i', self.size))
    ndatai = len(data) - n
    nentries = ndatai // ntotal
    assert ndatai % ntotal == 0
    for unused_i in range(nentries):
        out = struct1.unpack(data[n:n+ntotal])
        pid, mid, t, nsm, foropt, csopt = out[:6]
        #print(out)
        # CSOPT is reserved and must be 0
        assert csopt == 0, csopt
        pplane = op2.add_pplane(pid, mid, t=t, nsm=nsm,
                                formulation_option=foropt)
        pplane.validate()
        #print(pplane)
        str(pplane)
        n += ntotal
    # bug fix: this reader previously recorded its count under 'PLPLANE',
    # clobbering/mislabeling the count of a different card type
    op2.card_count['PPLANE'] = nentries
    return n
def _read_plplane(self, data: bytes, n: int) -> int:
    """
    PLPLANE(4606,46,375)

    NX 10
    1 PID I Property identification number
    2 MID I Material identification number
    3 CID I Coordinate system identification number
    4 STR CHAR4 Location of stress and strain output
    5 T RS Default membrane thickness for Ti on the connection entry
    6 CSOPT I Reserved for coordinate system definition of plane
    7 UNDEF(5) None

    MSC 2016
    PID I Property identification number
    2 MID I Material identification number
    3 CID I Coordinate system identification number
    4 STR CHAR4 Location of stress and strain output
    5 UNDEF(7 ) none Not used

    .. warning:: CSOPT ad T are not supported
    """
    op2 = self.op2
    ntotal = 44 * self.factor  # 4*11
    # 64-bit decks double the word size and the CHAR4 field
    if self.size == 4:
        s = Struct(op2._endian + b'3i 4s f 6i')
    else:
        s = Struct(op2._endian + b'3q 8s d 6q')
    nentries = (len(data) - n) // ntotal
    for unused_i in range(nentries):
        out = s.unpack(data[n:n+ntotal])
        pid, mid, cid, location, unused_t, unused_csopt = out[:6]
        location = location.decode('latin1')
        #op2.show_data(data[n:n+ntotal], 'ifs')
        op2.add_plplane(pid, mid, cid=cid, stress_strain_output_location=location)
        n += ntotal
    op2.card_count['PLPLANE'] = nentries
    return n
def _read_plsolid(self, data: bytes, n: int) -> int:
    """
    MSC 2016
    1 PID I Property identification number
    2 MID I Material identification number
    3 STR CHAR4 Location of stress and strain output
    4 UNDEF(4 ) none Not used

    NX 10
    1 PID I Property identification number
    2 MID I Material identification number
    3 STR CHAR4 Location of stress and strain output
    4 CSOPT I Reserved for coordinate system definition of plane
    5 UNDEF(3) None

    .. warning:: CSOPT is not supported
    """
    op2 = self.op2
    ntotal = 28 * self.factor  # 4*7
    # 64-bit decks double the word size and the CHAR4 field
    if self.size == 4:
        struct1 = Struct(op2._endian + b'2i 4s 4i')
    else:
        struct1 = Struct(op2._endian + b'2q 8s 4q')
    nentries = (len(data) - n) // ntotal
    for unused_i in range(nentries):
        out = struct1.unpack(data[n:n+ntotal])
        pid, mid, location, unused_csopt, unused_null_a, unused_null_b, unused_null_c = out
        location = location.decode('latin1')
        #op2.show_data(data[n:n+ntotal], 'ifs')
        op2.add_plsolid(pid, mid, stress_strain=location, ge=0.)
        n += ntotal
    op2.card_count['PLSOLID'] = nentries
    return n
def _read_pmass(self, data: bytes, n: int) -> int:
    """
    PMASS(402,4,44) - the marker for Record 48

    Fixed-size 2-word records: (pid, mass).
    """
    op2 = self.op2
    ntotal = 8 * self.factor  # 2*4
    struct_if = Struct(mapfmt(op2._endian + b'if', self.size))
    nentries = (len(data) - n) // ntotal
    for unused_i in range(nentries):
        edata = data[n:n + ntotal]
        out = struct_if.unpack(edata)
        #out = (pid, mass)
        if op2.is_debug_file:
            op2.binary_debug.write('  PMASS=%s\n' % str(out))
        prop = PMASS.add_op2_data(out)
        self._add_op2_property_mass(prop)
        n += ntotal
    # consistency fix: every other property reader records its card count;
    # this one previously did not
    op2.card_count['PMASS'] = nentries
    return n
def _read_prod(self, data: bytes, n: int) -> int:
    """
    PROD(902,9,29) - the marker for Record 49

    Fixed-size 6-word records: (pid, mid, a, j, c, nsm).
    Note: the signature line was corrupted in this file and has been
    restored to match the sibling readers (``self, data, n``).
    """
    op2 = self.op2
    ntotal = 24 * self.factor  # 6*4
    struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct_2i4f.unpack(edata)
        #(pid, mid, a, j, c, nsm) = out
        prop = PROD.add_op2_data(out)
        if op2.is_debug_file:
            op2.binary_debug.write('  PROD=%s\n' % str(out))
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PROD'] = nproperties
    return n
def _read_pshear(self, data: bytes, n: int) -> int:
    """PSHEAR(1002,10,42) - the marker for Record 50

    Fixed-size 6-word records: (pid, mid, t, nsm, f1, f2).
    """
    op2 = self.op2
    ntotal = 24 * self.factor
    struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))
    nproperties = (len(data) - n) // ntotal
    for _ in range(nproperties):
        out = struct_2i4f.unpack(data[n:n+ntotal])
        #(pid, mid, t, nsm, f1, f2) = out
        if op2.is_debug_file:
            op2.binary_debug.write('  PSHEAR=%s\n' % str(out))
        self._add_op2_property(PSHEAR.add_op2_data(out))
        n += ntotal
    op2.card_count['PSHEAR'] = nproperties
    return n
def _read_pshell(self, data: bytes, n: int) -> int:
    """
    PSHELL(2302,23,283) - the marker for Record 51

    Fixed-size 11-word records:
    (pid, mid1, t, mid2, bk, mid3, ts, nsm, z1, z2, mid4).
    Fake/duplicate PSHELLs (written by Nastran alongside PCOMP/PCOMPG)
    are detected and skipped.
    """
    op2 = self.op2
    ntotal = 44 * self.factor  # 11*4
    s = Struct(mapfmt(op2._endian + b'iififi4fi', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = s.unpack(edata)
        (pid, mid1, unused_t, mid2, unused_bk, mid3, unused_ts,
         unused_nsm, unused_z1, unused_z2, mid4) = out
        if op2.is_debug_file:
            op2.binary_debug.write('  PSHELL=%s\n' % str(out))
        prop = PSHELL.add_op2_data(out)
        n += ntotal

        if pid in op2.properties:
            # this is a fake PSHELL
            propi = op2.properties[pid]
            if prop == propi:
                op2.log.warning(f'Fake PSHELL {pid:d} (skipping):\n{propi}')
                nproperties -= 1
                continue
            #assert propi.type in ['PCOMP', 'PCOMPG'], propi.get_stats()
            op2.log.error(f'PSHELL {pid:d} is also {propi.type} (skipping PSHELL):\n{propi}{prop}')
            nproperties -= 1
            continue
            #continue
        #if max(pid, mid1, mid2, mid3, mid4) > 1e8:
            #self.big_properties[pid] = prop
        #else:
        self._add_op2_property(prop)
    if nproperties:
        op2.card_count['PSHELL'] = nproperties
    return n
def _read_psolid(self, data: bytes, n: int) -> int:
    """
    PSOLID(2402,24,281) - the marker for Record 52

    Fixed-size 7-word records: (pid, mid, cid, inp, stress, isop, fctn);
    fctn is a CHAR4.  Records with fctn == b'FAKE' (placeholders,
    possibly from PCOMPLS) are skipped.
    """
    op2 = self.op2
    #print("reading PSOLID")
    # 64-bit decks double the word size and the CHAR4 field
    if self.size == 4:
        ntotal = 28  # 7*4
        struct_6i4s = Struct(op2._endian + b'6i4s')
    else:
        ntotal = 28 * 2
        struct_6i4s = Struct(op2._endian + b'6q8s')

    nproperties = (len(data) - n) // ntotal
    nproperties_found = 0
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = struct_6i4s.unpack(edata)
        #(pid, mid, cid, inp, stress, isop, fctn) = out
        #data_in = [pid, mid, cid, inp, stress, isop, fctn]
        if op2.is_debug_file:
            op2.binary_debug.write('  PSOLID=%s\n' % str(out))
        n += ntotal
        fctn = out[-1]
        if fctn == b'FAKE':
            op2.log.warning('  PSOLID=%s; is this a PCOMPLS?' % str(out))
            continue
        prop = PSOLID.add_op2_data(out)
        self._add_op2_property(prop)
        nproperties_found += 1
    op2.card_count['PSOLID'] = nproperties_found
    return n
# PSOLIDL
# PTRIA6
# PTSHELL
def _read_ptube(self, data: bytes, n: int) -> int:
    """
    PTUBE(1602,16,30) - the marker for Record 56

    5-word records: (pid, mid, OD, t, nsm).

    .. todo:: OD2 only exists for heat transfer...
              how do i know if there's heat transfer at this point?
              I could store all the tubes and add them later,
              but what about themal/non-thermal subcases?
    .. warning:: assuming OD2 is not written (only done for thermal)
    """
    op2 = self.op2
    struct_2i3f = Struct(op2._endian + b'2i3f')
    nproperties = (len(data) - n) // 20
    for unused_i in range(nproperties):
        edata = data[n:n+20]  # or 24???
        out = struct_2i3f.unpack(edata)
        (pid, mid, OD, t, nsm) = out
        data_in = [pid, mid, OD, t, nsm]
        if op2.is_debug_file:
            op2.binary_debug.write('  PTUBE=%s\n' % str(out))
        prop = PTUBE.add_op2_data(data_in)
        self._add_op2_property(prop)
        n += 20
    op2.card_count['PTUBE'] = nproperties
    return n
def _read_pset(self, data: bytes, n: int) -> int:
    """Reads PSET records: a 7-word header
    (id, poly1, poly2, poly3, cid, type, typeid) followed by additional
    typeids until a -1 terminator.

    Bug fix: ``nentries`` was initialized to 0 but never incremented, so
    ``op2.card_count['PSET']`` was always reported as 0.
    """
    op2 = self.op2
    struct_5i4si = Struct(op2._endian + b'5i4si')
    nentries = 0
    while n < len(data):
        edata = data[n:n+28]
        out = struct_5i4si.unpack(edata)
        #print(out)
        idi, poly1, poly2, poly3, cid, typei, typeid = out
        typei = typei.rstrip().decode('latin1')
        assert typei in ['SET', 'ELID'], (idi, poly1, poly2, poly3, cid, typei, typeid)
        if op2.is_debug_file:
            op2.binary_debug.write('  PVAL=%s\n' % str(out))
        #print(idi, poly1, poly2, poly3, cid, typei, typeid)
        typeids = []
        n += 28
        # collect typeids until the -1 sentinel
        while typeid != -1:
            typeids.append(typeid)
            typeid, = op2.struct_i.unpack(data[n:n+4])
            n += 4
            #print(val)
        #print(typeids)
        # PSET ID POLY1 POLY2 POLY3 CID SETTYP ID
        if len(typeids) == 1:
            typeids = typeids[0]
        op2.add_pset(idi, poly1, poly2, poly3, cid, typei, typeids)
        nentries += 1  # fix: count each PSET
    op2.card_count['PSET'] = nentries
    return n
def _read_pval(self, data: bytes, n: int) -> int:
    """
    PVAL(10201,102,400)

    Word Name Type Description
    1 ID I p-value set identification number
    2 POLY1 I Polynomial order in 1 direction of the CID system
    3 POLY2 I Polynomial order in 2 direction of the CID system
    4 POLY3 I Polynomial order in 2 direction of the CID system
    5 CID I Coordinate system identification number
    6 TYPE CHAR4 Type of set provided: "SET" or "ELID"
    7 TYPEID I SET identification number or element identification
              number with this p-value specification.
    Words 1 through 7 repeat until End of Record

    Bug fix: ``nentries`` was initialized to 0 but never incremented, so
    ``op2.card_count['PVAL']`` was always reported as 0.
    """
    op2 = self.op2
    #op2.show_data(data[n:])
    # 64-bit decks double the word size and the CHAR4 field
    if self.size == 4:
        struct_5i4si = Struct(op2._endian + b'5i 4s i')
        struct_i = op2.struct_i
    else:
        struct_5i4si = Struct(op2._endian + b'5q 8s q')
        struct_i = op2.struct_q

    nentries = 0
    ntotal = 28 * self.factor
    size = self.size
    while n < len(data):
        edata = data[n:n+ntotal]
        out = struct_5i4si.unpack(edata)
        #print(out)
        idi, poly1, poly2, poly3, cid, typei, typeid = out
        typei = typei.rstrip().decode('latin1')
        assert typei in ['SET', 'ELID'], f'idi={idi} poly1={poly1} poly2={poly2} poly3={poly3} cid={cid} typei={typei} typeid={typeid}'
        if op2.is_debug_file:
            op2.binary_debug.write('  PVAL=%s\n' % str(out))
        #print(idi, poly1, poly2, poly3, cid, typei, typeid)
        typeids = []
        n += ntotal
        # collect typeids until the -1 sentinel
        while typeid != -1:
            typeids.append(typeid)
            typeid, = struct_i.unpack(data[n:n+size])
            n += size
            #print(val)
        #print(typeids)
        # PVAL ID POLY1 POLY2 POLY3 CID SETTYP ID
        op2.add_pval(idi, poly1, poly2, poly3, cid, typei, typeids)
        nentries += 1  # fix: count each PVAL
    op2.card_count['PVAL'] = nentries
    return n
def _read_pvisc(self, data: bytes, n: int) -> int:
    """PVISC(1802,18,31) - the marker for Record 39

    Fixed-size 3-word records: (pid, ce, cr).
    """
    op2 = self.op2
    ntotal = 12
    struct_i2f = Struct(op2._endian + b'i2f')
    nproperties = (len(data) - n) // ntotal
    for _ in range(nproperties):
        out = struct_i2f.unpack(data[n:n+ntotal])
        if op2.is_debug_file:
            op2.binary_debug.write('  PVISC=%s\n' % str(out))
        #(pid, ce, cr) = out
        self._add_op2_property(PVISC.add_op2_data(out))
        n += ntotal
    op2.card_count['PVISC'] = nproperties
    return n
# PWELD
# PWSEAM
def _read_view(self, data: bytes, n: int) -> int:
    """VIEW parsing is not implemented; log and skip the record."""
    op2 = self.op2
    op2.log.info('geom skipping VIEW in EPT')
    return len(data)
def _read_view3d(self, data: bytes, n: int) -> int:
    """VIEW3D parsing is not implemented; log and skip the record."""
    op2 = self.op2
    op2.log.info('geom skipping VIEW3D in EPT')
    return len(data)
def break_by_minus1(idata):
    """helper for ``read_nsm_nx``

    Splits ``idata`` at every -1 sentinel, returning a list of
    (start, stop) index pairs; ``stop`` is the index of the -1 itself.
    Trailing values with no terminating -1 are not reported.
    """
    packs = []
    start = 0
    for position, value in enumerate(idata):
        if value == -1:
            packs.append((start, position))
            start = position + 1
    return packs
| (self, dat |
finetune-vggvox-v2.py | import os
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../gcs/mesolitica-storage.json'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
import malaya_speech.train as train
import malaya_speech.train.model.vggvox_v2 as vggvox_v2
import malaya_speech
from glob import glob
import librosa
import numpy as np
def lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):
    """Computes a linear (time x freq) magnitude/phase STFT spectrogram.

    The function name was missing in this file; it is restored from the
    call site in ``load_data``.
    """
    linear = librosa.stft(
        wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length
    )  # linear spectrogram, shape (1 + n_fft/2, frames); transposed below
    return linear.T
def load_data(
    wav,
    win_length=400,
    sr=16000,
    hop_length=50,
    n_fft=512,
    spec_len=250,
    mode='train',
):
    """Turn a waveform into a normalised magnitude spectrogram.

    In 'train' mode the (freq, time) spectrogram is randomly cropped to
    ``spec_len`` frames (or zero-padded up to it); any other mode keeps
    the full length.  ``sr`` is accepted for API symmetry but unused here.
    """
    linear = lin_spectogram_from_wav(wav, hop_length, win_length, n_fft)
    magnitude, _ = librosa.magphase(linear)  # discard phase, keep magnitude
    spec = magnitude.T  # (freq, time)
    _, n_frames = spec.shape
    if mode == 'train':
        if n_frames > spec_len:
            offset = np.random.randint(0, n_frames - spec_len)
            spec = spec[:, offset: offset + spec_len]
        else:
            spec = np.pad(spec, ((0, 0), (0, spec_len - n_frames)), 'constant')
    # preprocessing: subtract mean, divide by time-wise std (eps for stability)
    mu = np.mean(spec, 0, keepdims=True)
    std = np.std(spec, 0, keepdims=True)
    return (spec - mu) / (std + 1e-5)
DIMENSION = 257
def calc(v):
    """numpy_function wrapper: eval-mode spectrogram for one waveform."""
    r = load_data(v, mode='eval')
    return r
def preprocess_inputs(example):
    """Replace the raw waveform under 'inputs' with a (DIMENSION, time, 1) spectrogram."""
    spectrogram = tf.compat.v1.numpy_function(calc, [example['inputs']], tf.float32)
    example['inputs'] = tf.reshape(spectrogram, (DIMENSION, -1, 1))
    return example
def parse(serialized_example):
    """Decode one serialized TFRecord example into dense 'inputs'/'targets'."""
    data_fields = {
        'inputs': tf.VarLenFeature(tf.float32),
        'targets': tf.VarLenFeature(tf.int64),
    }
    features = tf.parse_single_example(
        serialized_example, features=data_fields
    )
    # densify the VarLen (sparse) features
    for k in features.keys():
        features[k] = features[k].values
    features = preprocess_inputs(features)
    # drop any extra keys; snapshot the key list first since we mutate the dict
    keys = list(features.keys())
    for k in keys:
        if k not in ['inputs', 'targets']:
            features.pop(k, None)
    return features
def get_dataset(files, batch_size=32, shuffle_size=1024, thread_count=24):
    """Return a zero-arg closure that builds the shuffled, padded, repeating dataset."""
    def get():
        dataset = tf.data.TFRecordDataset(files)
        dataset = dataset.map(parse, num_parallel_calls=thread_count)
        dataset = dataset.shuffle(shuffle_size)
        # pad 'inputs' along the time axis and 'targets' to the batch max length
        dataset = dataset.padded_batch(
            batch_size,
            padded_shapes={
                'inputs': tf.TensorShape([DIMENSION, None, 1]),
                'targets': tf.TensorShape([None]),
            },
            padding_values={
                'inputs': tf.constant(0, dtype=tf.float32),
                'targets': tf.constant(0, dtype=tf.int64),
            },
        )
        dataset = dataset.repeat()  # loop forever; max_steps bounds training
        return dataset
    return get
learning_rate = 1e-5
init_checkpoint = '../vggvox-speaker-identification/v2/vggvox.ckpt'
def model_fn(features, labels, mode, params):
    """Estimator model_fn: VGGVox-v2 two-class speaker-change classifier.

    Warm-starts all variables except the 'prediction' head from
    ``init_checkpoint``; supports TRAIN and EVAL modes.
    """
    # targets are padded per batch; the first element carries the class label
    Y = tf.cast(features['targets'][:, 0], tf.int32)
    model = vggvox_v2.Model(features['inputs'], num_class=2, mode='train')
    logits = model.logits
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=Y
        )
    )
    tf.identity(loss, 'train_loss')
    accuracy = tf.metrics.accuracy(
        labels=Y, predictions=tf.argmax(logits, axis=1)
    )
    tf.identity(accuracy[1], name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy[1])
    # warm-start everything except the classification head
    variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    variables = [v for v in variables if 'prediction' not in v.name]
    assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
        variables, init_checkpoint
    )
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step)
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, train_op=train_op
        )
    elif mode == tf.estimator.ModeKeys.EVAL:
        estimator_spec = tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={'accuracy': accuracy},
        )
    # NOTE(review): estimator_spec is unbound for PREDICT mode — only
    # TRAIN/EVAL are handled; confirm PREDICT is never requested.
    return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
files = tf.io.gfile.glob(
'gs://mesolitica-general/speaker-change/data/*.tfrecords'
)
train_dataset = get_dataset(files)
save_directory = 'output-vggvox-v2-speaker-change'
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir=save_directory,
num_gpus=1,
log_step=1,
save_checkpoint_step=25000,
max_steps=300000,
train_hooks=train_hooks,
)
| lin_spectogram_from_wav |
buggy_split_at_mut.rs | mod safe {
use std::slice::from_raw_parts_mut;
pub fn split_at_mut<T>(self_: &mut [T], mid: usize) -> (&mut [T], &mut [T]) |
}
fn main() {
    let mut array = [1,2,3,4];
    // With mid == 0, the buggy `len - mid` first half fully overlaps the
    // second half — the two &mut slices alias, which Miri's borrow-stack
    // checking is expected to reject (this is an intentional-bug fixture).
    let (a, b) = safe::split_at_mut(&mut array, 0);
    //~^ ERROR borrow stack
    a[1] = 5;
    b[1] = 6;
}
| {
let len = self_.len();
let ptr = self_.as_mut_ptr();
unsafe {
assert!(mid <= len);
(from_raw_parts_mut(ptr, len - mid), // BUG: should be "mid" instead of "len - mid"
from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
}
} |
const_ctrl.rs | // Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// | // limitations under the License.
//! A simple module that just sets a constant control parameter.
use module::{Module, Buffer};
/// A module that emits a single, fixed control value.
pub struct ConstCtrl {
    // The constant written to the control output on every process() call.
    value: f32,
}
impl ConstCtrl {
    /// Creates a `ConstCtrl` that always outputs `value`.
    pub fn new(value: f32) -> ConstCtrl {
        // Field-init shorthand replaces the redundant `value: value`.
        ConstCtrl { value }
    }
}
impl Module for ConstCtrl {
    // Exactly one control output; no audio buffers are consumed or produced.
    fn n_ctrl_out(&self) -> usize { 1 }
    /// Writes the configured constant into the single control output slot.
    fn process(&mut self, _control_in: &[f32], control_out: &mut [f32],
        _buf_in: &[&Buffer], _buf_out: &mut [Buffer])
    {
        control_out[0] = self.value;
    }
}
} | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and |
stream.go | package cctvapi
import (
"context"
"github.com/labstack/echo/v4"
"github.com/tierklinik-dobersberg/cis/internal/app"
"github.com/tierklinik-dobersberg/cis/internal/permission"
)
// StreamCameraEndpoint streams the video of the camera
// to the caller.
func | (router *app.Router) {
router.GET(
"v1/camera/:camera/stream",
permission.OneOf{
WatchCameraStream,
},
func(ctx context.Context, app *app.App, c echo.Context) error {
return app.CCTV.AttachToStream(ctx, c.Param("camera"), c)
},
)
}
| StreamCameraEndpoint |
data.py | ##
## For importing data from the database | import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
######## Set up DB ########
engine = create_engine("sqlite:///static/agnesShows.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
def getData():
    """Stub — database query not implemented yet; always returns 0."""
    return 0
# Python SQL toolkit and Object Relational Mapper |
SetupIntents.d.ts | // File generated from our OpenAPI spec
declare module 'stripe' {
namespace Stripe {
/**
* The SetupIntent object.
*/
interface SetupIntent {
/**
* Unique identifier for the object.
*/
id: string;
/**
* String representing the object's type. Objects of the same type share the same value.
*/
object: 'setup_intent';
/**
* ID of the Connect application that created the SetupIntent.
*/
application: string | Stripe.Application | null;
/**
* Reason for cancellation of this SetupIntent, one of `abandoned`, `requested_by_customer`, or `duplicate`.
*/
cancellation_reason: SetupIntent.CancellationReason | null;
/**
* The client secret of this SetupIntent. Used for client-side retrieval using a publishable key.
*
* The client secret can be used to complete payment setup from your frontend. It should not be stored, logged, embedded in URLs, or exposed to anyone other than the customer. Make sure that you have TLS enabled on any page that includes the client secret.
*/
client_secret: string | null;
/**
* Time at which the object was created. Measured in seconds since the Unix epoch.
*/
created: number;
/**
* ID of the Customer this SetupIntent belongs to, if one exists.
*
* If present, the SetupIntent's payment method will be attached to the Customer on successful setup. Payment methods attached to other Customers cannot be used with this SetupIntent.
*/
customer: string | Stripe.Customer | Stripe.DeletedCustomer | null;
/**
* An arbitrary string attached to the object. Often useful for displaying to users.
*/
description: string | null;
/**
* The error encountered in the previous SetupIntent confirmation.
*/
last_setup_error: SetupIntent.LastSetupError | null;
/**
* Has the value `true` if the object exists in live mode or the value `false` if the object exists in test mode.
*/
livemode: boolean;
/**
* ID of the multi use Mandate generated by the SetupIntent.
*/
mandate: string | Stripe.Mandate | null;
/**
* Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format.
*/
metadata: Metadata;
/**
* If present, this property tells you what actions you need to take in order for your customer to continue payment setup.
*/
next_action: SetupIntent.NextAction | null;
/**
* The account (if any) for which the setup is intended.
*/
on_behalf_of: string | Stripe.Account | null;
/**
* ID of the payment method used with this SetupIntent.
*/
payment_method: string | Stripe.PaymentMethod | null;
/**
* Payment-method-specific configuration for this SetupIntent.
*/
payment_method_options: SetupIntent.PaymentMethodOptions | null;
/**
* The list of payment method types (e.g. card) that this SetupIntent is allowed to set up.
*/
payment_method_types: Array<string>;
/**
* ID of the single_use Mandate generated by the SetupIntent.
*/
single_use_mandate: string | Stripe.Mandate | null;
/**
* [Status](https://stripe.com/docs/payments/intents#intent-statuses) of this SetupIntent, one of `requires_payment_method`, `requires_confirmation`, `requires_action`, `processing`, `canceled`, or `succeeded`.
*/
status: SetupIntent.Status;
/**
* Indicates how the payment method is intended to be used in the future.
*
* Use `on_session` if you intend to only reuse the payment method when the customer is in your checkout flow. Use `off_session` if your customer may or may not be in your checkout flow. If not provided, this value defaults to `off_session`.
*/
usage: string;
}
namespace SetupIntent {
type CancellationReason =
| 'abandoned'
| 'duplicate'
| 'requested_by_customer';
interface LastSetupError {
/**
* For card errors, the ID of the failed charge.
*/
charge?: string;
/**
* For some errors that could be handled programmatically, a short string indicating the [error code](https://stripe.com/docs/error-codes) reported.
*/
code?: string;
/**
* For card errors resulting from a card issuer decline, a short string indicating the [card issuer's reason for the decline](https://stripe.com/docs/declines#issuer-declines) if they provide one.
*/
decline_code?: string;
/**
* A URL to more information about the [error code](https://stripe.com/docs/error-codes) reported.
*/
doc_url?: string;
/**
* A human-readable message providing more details about the error. For card errors, these messages can be shown to your users.
*/
message?: string;
/**
* If the error is parameter-specific, the parameter related to the error. For example, you can use this to display a message near the correct form field.
*/
param?: string;
/**
* A PaymentIntent guides you through the process of collecting a payment from your customer.
* We recommend that you create exactly one PaymentIntent for each order or
* customer session in your system. You can reference the PaymentIntent later to
* see the history of payment attempts for a particular session.
*
* A PaymentIntent transitions through
* [multiple statuses](https://stripe.com/docs/payments/intents#intent-statuses)
* throughout its lifetime as it interfaces with Stripe.js to perform
* authentication flows and ultimately creates at most one successful charge.
*
* Related guide: [Payment Intents API](https://stripe.com/docs/payments/payment-intents).
*/
payment_intent?: Stripe.PaymentIntent;
/**
* PaymentMethod objects represent your customer's payment instruments.
* They can be used with [PaymentIntents](https://stripe.com/docs/payments/payment-intents) to collect payments or saved to
* Customer objects to store instrument details for future payments.
*
* Related guides: [Payment Methods](https://stripe.com/docs/payments/payment-methods) and [More Payment Scenarios](https://stripe.com/docs/payments/more-payment-scenarios).
*/
payment_method?: Stripe.PaymentMethod;
/**
* A SetupIntent guides you through the process of setting up and saving a customer's payment credentials for future payments.
* For example, you could use a SetupIntent to set up and save your customer's card without immediately collecting a payment.
* Later, you can use [PaymentIntents](https://stripe.com/docs/api#payment_intents) to drive the payment flow.
*
* Create a SetupIntent as soon as you're ready to collect your customer's payment credentials.
* Do not maintain long-lived, unconfirmed SetupIntents as they may no longer be valid.
* The SetupIntent then transitions through multiple [statuses](https://stripe.com/docs/payments/intents#intent-statuses) as it guides
* you through the setup process.
*
* Successful SetupIntents result in payment credentials that are optimized for future payments.
* For example, cardholders in [certain regions](https://stripe.com/guides/strong-customer-authentication) may need to be run through
* [Strong Customer Authentication](https://stripe.com/docs/strong-customer-authentication) at the time of payment method collection
* in order to streamline later [off-session payments](https://stripe.com/docs/payments/setup-intents).
* If the SetupIntent is used with a [Customer](https://stripe.com/docs/api#setup_intent_object-customer), upon success,
* it will automatically attach the resulting payment method to that Customer.
* We recommend using SetupIntents or [setup_future_usage](https://stripe.com/docs/api#payment_intent_object-setup_future_usage) on
* PaymentIntents to save payment methods in order to prevent saving invalid or unoptimized payment methods.
*
* By using SetupIntents, you ensure that your customers experience the minimum set of required friction,
* even as regulations change over time.
*
* Related guide: [Setup Intents API](https://stripe.com/docs/payments/setup-intents).
*/
setup_intent?: Stripe.SetupIntent;
source?: CustomerSource;
/**
* The type of error returned. One of `api_connection_error`, `api_error`, `authentication_error`, `card_error`, `idempotency_error`, `invalid_request_error`, or `rate_limit_error`
*/
type: LastSetupError.Type;
}
namespace LastSetupError {
type Type =
| 'api_connection_error'
| 'api_error'
| 'authentication_error'
| 'card_error'
| 'idempotency_error'
| 'invalid_request_error'
| 'rate_limit_error';
}
interface NextAction {
redirect_to_url?: NextAction.RedirectToUrl;
/**
* Type of the next action to perform, one of `redirect_to_url` or `use_stripe_sdk`.
*/
type: string;
/**
* When confirming a SetupIntent with Stripe.js, Stripe.js depends on the contents of this dictionary to invoke authentication flows. The shape of the contents is subject to change and is only intended to be used by Stripe.js.
*/
use_stripe_sdk?: NextAction.UseStripeSdk;
}
namespace NextAction {
interface RedirectToUrl {
/**
* If the customer does not exit their browser while authenticating, they will be redirected to this specified URL after completion.
*/
return_url: string | null;
/**
* The URL you must redirect your customer to in order to authenticate.
*/
url: string | null;
}
interface UseStripeSdk {}
}
interface PaymentMethodOptions {
card?: PaymentMethodOptions.Card;
}
namespace PaymentMethodOptions {
interface Card {
/**
* We strongly recommend that you rely on our SCA Engine to automatically prompt your customers for authentication based on risk level and [other requirements](https://stripe.com/docs/strong-customer-authentication). However, if you wish to request 3D Secure based on logic from your own fraud engine, provide this option. Permitted values include: `automatic` or `any`. If not provided, defaults to `automatic`. Read our guide on [manually requesting 3D Secure](https://stripe.com/docs/payments/3d-secure#manual-three-ds) for more information on how this configuration interacts with Radar and our SCA Engine.
*/
request_three_d_secure: Card.RequestThreeDSecure | null;
}
namespace Card {
type RequestThreeDSecure = 'any' | 'automatic' | 'challenge_only';
}
}
type Status =
| 'canceled'
| 'processing'
| 'requires_action'
| 'requires_confirmation'
| 'requires_payment_method'
| 'succeeded';
}
interface SetupIntentCreateParams {
/**
* Set to `true` to attempt to confirm this SetupIntent immediately. This parameter defaults to `false`. If the payment method attached is a card, a return_url may be provided in case additional authentication is required.
*/
confirm?: boolean;
/**
* ID of the Customer this SetupIntent belongs to, if one exists.
*
* If present, the SetupIntent's payment method will be attached to the Customer on successful setup. Payment methods attached to other Customers cannot be used with this SetupIntent.
*/
customer?: string;
/**
* An arbitrary string attached to the object. Often useful for displaying to users.
*/
description?: string;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
/**
* This hash contains details about the Mandate to create. This parameter can only be used with [`confirm=true`](https://stripe.com/docs/api/setup_intents/create#create_setup_intent-confirm).
*/
mandate_data?: SetupIntentCreateParams.MandateData;
/**
* Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format. Individual keys can be unset by posting an empty value to them. All keys can be unset by posting an empty value to `metadata`.
*/
metadata?: MetadataParam;
/**
* The Stripe account ID for which this SetupIntent is created.
*/
on_behalf_of?: string;
/**
* ID of the payment method (a PaymentMethod, Card, or saved Source object) to attach to this SetupIntent.
*/
payment_method?: string;
/**
* Payment-method-specific configuration for this SetupIntent.
*/
payment_method_options?: SetupIntentCreateParams.PaymentMethodOptions;
/**
* The list of payment method types (e.g. card) that this SetupIntent is allowed to use. If this is not provided, defaults to ["card"].
*/
payment_method_types?: Array<string>;
/**
* The URL to redirect your customer back to after they authenticate or cancel their payment on the payment method's app or site. If you'd prefer to redirect to a mobile application, you can alternatively supply an application URI scheme. This parameter can only be used with [`confirm=true`](https://stripe.com/docs/api/setup_intents/create#create_setup_intent-confirm).
*/
return_url?: string;
/**
* If this hash is populated, this SetupIntent will generate a single_use Mandate on success.
*/
single_use?: SetupIntentCreateParams.SingleUse;
/**
* Indicates how the payment method is intended to be used in the future. If not provided, this value defaults to `off_session`.
*/
usage?: SetupIntentCreateParams.Usage;
}
namespace SetupIntentCreateParams {
interface MandateData {
/**
* This hash contains details about the customer acceptance of the Mandate.
*/
customer_acceptance: MandateData.CustomerAcceptance;
}
namespace MandateData {
interface CustomerAcceptance {
/**
* The time at which the customer accepted the Mandate.
*/
accepted_at?: number;
/**
* If this is a Mandate accepted offline, this hash contains details about the offline acceptance.
*/
offline?: CustomerAcceptance.Offline;
/**
* If this is a Mandate accepted online, this hash contains details about the online acceptance.
*/
online?: CustomerAcceptance.Online;
/**
* The type of customer acceptance information included with the Mandate. One of `online` or `offline`.
*/
type: CustomerAcceptance.Type;
}
namespace CustomerAcceptance {
interface Offline {}
interface Online {
/**
* The IP address from which the Mandate was accepted by the customer.
*/
ip_address: string;
/**
* The user agent of the browser from which the Mandate was accepted by the customer.
*/
user_agent: string;
}
type Type = 'offline' | 'online';
}
}
interface PaymentMethodOptions {
/**
* Configuration for any card setup attempted on this SetupIntent.
*/
card?: PaymentMethodOptions.Card;
}
namespace PaymentMethodOptions {
interface Card {
/**
* When specified, this parameter signals that a card has been collected
* as MOTO (Mail Order Telephone Order) and thus out of scope for SCA. This
* parameter can only be provided during confirmation.
*/
moto?: boolean;
/**
* We strongly recommend that you rely on our SCA Engine to automatically prompt your customers for authentication based on risk level and [other requirements](https://stripe.com/docs/strong-customer-authentication). However, if you wish to request 3D Secure based on logic from your own fraud engine, provide this option. Permitted values include: `automatic` or `any`. If not provided, defaults to `automatic`. Read our guide on [manually requesting 3D Secure](https://stripe.com/docs/payments/3d-secure#manual-three-ds) for more information on how this configuration interacts with Radar and our SCA Engine.
*/
request_three_d_secure?: Card.RequestThreeDSecure;
}
namespace Card {
type RequestThreeDSecure = 'any' | 'automatic';
}
}
interface SingleUse {
/**
* Amount the customer is granting permission to collect later. A positive integer representing how much to charge in the [smallest currency unit](https://stripe.com/docs/currencies#zero-decimal) (e.g., 100 cents to charge $1.00 or 100 to charge ¥100, a zero-decimal currency). The minimum amount is $0.50 US or [equivalent in charge currency](https://stripe.com/docs/currencies#minimum-and-maximum-charge-amounts). The amount value supports up to eight digits (e.g., a value of 99999999 for a USD charge of $999,999.99).
*/
amount: number;
/**
* Three-letter [ISO currency code](https://www.iso.org/iso-4217-currency-codes.html), in lowercase. Must be a [supported currency](https://stripe.com/docs/currencies).
*/
currency: string;
}
type Usage = 'off_session' | 'on_session';
}
interface SetupIntentRetrieveParams {
/**
* The client secret of the SetupIntent. Required if a publishable key is used to retrieve the SetupIntent.
*/
client_secret?: string;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
}
interface SetupIntentUpdateParams {
/**
* ID of the Customer this SetupIntent belongs to, if one exists.
*
* If present, the SetupIntent's payment method will be attached to the Customer on successful setup. Payment methods attached to other Customers cannot be used with this SetupIntent.
*/
customer?: string;
/**
* An arbitrary string attached to the object. Often useful for displaying to users.
*/
description?: string;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
/**
* Set of [key-value pairs](https://stripe.com/docs/api/metadata) that you can attach to an object. This can be useful for storing additional information about the object in a structured format. Individual keys can be unset by posting an empty value to them. All keys can be unset by posting an empty value to `metadata`.
*/
metadata?: MetadataParam | null;
/**
* ID of the payment method (a PaymentMethod, Card, or saved Source object) to attach to this SetupIntent.
*/
payment_method?: string;
/**
* Payment-method-specific configuration for this SetupIntent.
*/
payment_method_options?: SetupIntentUpdateParams.PaymentMethodOptions;
/**
* The list of payment method types (e.g. card) that this SetupIntent is allowed to set up. If this is not provided, defaults to ["card"].
*/
payment_method_types?: Array<string>;
}
namespace SetupIntentUpdateParams {
interface PaymentMethodOptions {
/**
* Configuration for any card setup attempted on this SetupIntent.
*/
card?: PaymentMethodOptions.Card;
}
namespace PaymentMethodOptions {
interface Card {
/**
* When specified, this parameter signals that a card has been collected
* as MOTO (Mail Order Telephone Order) and thus out of scope for SCA. This
* parameter can only be provided during confirmation.
*/
moto?: boolean;
/**
* We strongly recommend that you rely on our SCA Engine to automatically prompt your customers for authentication based on risk level and [other requirements](https://stripe.com/docs/strong-customer-authentication). However, if you wish to request 3D Secure based on logic from your own fraud engine, provide this option. Permitted values include: `automatic` or `any`. If not provided, defaults to `automatic`. Read our guide on [manually requesting 3D Secure](https://stripe.com/docs/payments/3d-secure#manual-three-ds) for more information on how this configuration interacts with Radar and our SCA Engine.
*/
request_three_d_secure?: Card.RequestThreeDSecure;
}
namespace Card {
type RequestThreeDSecure = 'any' | 'automatic';
}
}
}
interface SetupIntentListParams extends PaginationParams {
/**
* A filter on the list, based on the object `created` field. The value can be a string with an integer Unix timestamp, or it can be a dictionary with a number of different query options.
*/
created?: RangeQueryParam | number;
/**
* Only return SetupIntents for the customer specified by this customer ID.
*/
customer?: string;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
/**
* Only return SetupIntents associated with the specified payment method.
*/
payment_method?: string;
}
interface SetupIntentCancelParams {
/**
* Reason for canceling this SetupIntent. Possible values are `abandoned`, `requested_by_customer`, or `duplicate`
*/
cancellation_reason?: SetupIntentCancelParams.CancellationReason;
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
}
namespace SetupIntentCancelParams {
type CancellationReason =
| 'abandoned'
| 'duplicate'
| 'requested_by_customer';
}
interface SetupIntentConfirmParams {
/**
* Specifies which fields in the response should be expanded.
*/
expand?: Array<string>;
/**
* This hash contains details about the Mandate to create
*/
mandate_data?:
| SetupIntentConfirmParams.MandateData1
| SetupIntentConfirmParams.MandateData2;
/**
* ID of the payment method (a PaymentMethod, Card, or saved Source object) to attach to this SetupIntent. | payment_method?: string;
/**
* Payment-method-specific configuration for this SetupIntent.
*/
payment_method_options?: SetupIntentConfirmParams.PaymentMethodOptions;
/**
* The URL to redirect your customer back to after they authenticate on the payment method's app or site.
* If you'd prefer to redirect to a mobile application, you can alternatively supply an application URI scheme.
* This parameter is only used for cards and other redirect-based payment methods.
*/
return_url?: string;
}
namespace SetupIntentConfirmParams {
interface MandateData1 {
/**
* This hash contains details about the customer acceptance of the Mandate.
*/
customer_acceptance: MandateData1.CustomerAcceptance;
}
namespace MandateData1 {
interface CustomerAcceptance {
/**
* The time at which the customer accepted the Mandate.
*/
accepted_at?: number;
/**
* If this is a Mandate accepted offline, this hash contains details about the offline acceptance.
*/
offline?: CustomerAcceptance.Offline;
/**
* If this is a Mandate accepted online, this hash contains details about the online acceptance.
*/
online?: CustomerAcceptance.Online;
/**
* The type of customer acceptance information included with the Mandate. One of `online` or `offline`.
*/
type: CustomerAcceptance.Type;
}
namespace CustomerAcceptance {
interface Offline {}
interface Online {
/**
* The IP address from which the Mandate was accepted by the customer.
*/
ip_address: string;
/**
* The user agent of the browser from which the Mandate was accepted by the customer.
*/
user_agent: string;
}
type Type = 'offline' | 'online';
}
}
interface MandateData2 {
/**
* This hash contains details about the customer acceptance of the Mandate.
*/
customer_acceptance: MandateData2.CustomerAcceptance;
}
namespace MandateData2 {
interface CustomerAcceptance {
/**
* If this is a Mandate accepted online, this hash contains details about the online acceptance.
*/
online: CustomerAcceptance.Online;
/**
* The type of customer acceptance information included with the Mandate.
*/
type: 'online';
}
namespace CustomerAcceptance {
interface Online {
/**
* The IP address from which the Mandate was accepted by the customer.
*/
ip_address?: string;
/**
* The user agent of the browser from which the Mandate was accepted by the customer.
*/
user_agent?: string;
}
}
}
interface PaymentMethodOptions {
/**
* Configuration for any card setup attempted on this SetupIntent.
*/
card?: PaymentMethodOptions.Card;
}
namespace PaymentMethodOptions {
interface Card {
/**
* When specified, this parameter signals that a card has been collected
* as MOTO (Mail Order Telephone Order) and thus out of scope for SCA. This
* parameter can only be provided during confirmation.
*/
moto?: boolean;
/**
* We strongly recommend that you rely on our SCA Engine to automatically prompt your customers for authentication based on risk level and [other requirements](https://stripe.com/docs/strong-customer-authentication). However, if you wish to request 3D Secure based on logic from your own fraud engine, provide this option. Permitted values include: `automatic` or `any`. If not provided, defaults to `automatic`. Read our guide on [manually requesting 3D Secure](https://stripe.com/docs/payments/3d-secure#manual-three-ds) for more information on how this configuration interacts with Radar and our SCA Engine.
*/
request_three_d_secure?: Card.RequestThreeDSecure;
}
namespace Card {
type RequestThreeDSecure = 'any' | 'automatic';
}
}
}
class SetupIntentsResource {
/**
* Creates a SetupIntent object.
*
* After the SetupIntent is created, attach a payment method and [confirm](https://stripe.com/docs/api/setup_intents/confirm)
* to collect any required permissions to charge the payment method later.
*/
create(
params?: SetupIntentCreateParams,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
create(options?: RequestOptions): Promise<Stripe.SetupIntent>;
/**
* Retrieves the details of a SetupIntent that has previously been created.
*
* Client-side retrieval using a publishable key is allowed when the client_secret is provided in the query string.
*
* When retrieved with a publishable key, only a subset of properties will be returned. Please refer to the [SetupIntent](https://stripe.com/docs/api#setup_intent_object) object reference for more details.
*/
retrieve(
id: string,
params?: SetupIntentRetrieveParams,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
retrieve(
id: string,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
/**
* Updates a SetupIntent object.
*/
update(
id: string,
params?: SetupIntentUpdateParams,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
/**
* Returns a list of SetupIntents.
*/
list(
params?: SetupIntentListParams,
options?: RequestOptions
): ApiListPromise<Stripe.SetupIntent>;
list(options?: RequestOptions): ApiListPromise<Stripe.SetupIntent>;
/**
* A SetupIntent object can be canceled when it is in one of these statuses: requires_payment_method, requires_confirmation, or requires_action.
*
* Once canceled, setup is abandoned and any operations on the SetupIntent will fail with an error.
*/
cancel(
id: string,
params?: SetupIntentCancelParams,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
cancel(id: string, options?: RequestOptions): Promise<Stripe.SetupIntent>;
/**
* Confirm that your customer intends to set up the current or
* provided payment method. For example, you would confirm a SetupIntent
* when a customer hits the “Save” button on a payment method management
* page on your website.
*
* If the selected payment method does not require any additional
* steps from the customer, the SetupIntent will transition to the
* succeeded status.
*
* Otherwise, it will transition to the requires_action status and
* suggest additional actions via next_action. If setup fails,
* the SetupIntent will transition to the
* requires_payment_method status.
*/
confirm(
id: string,
params?: SetupIntentConfirmParams,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
confirm(
id: string,
options?: RequestOptions
): Promise<Stripe.SetupIntent>;
}
}
} | */ |
script.js | /*------------------------------------------------------------------
* Bootstrap Simple Admin Template
* Version: 1.2
* Author: Alexis Luna
* Copyright 2020 Alexis Luna
* Website: https://github.com/alexis-luna/bootstrap-simple-admin-template
-------------------------------------------------------------------*/
// Toggle sidebar on Menu button click
$('#sidebarCollapse').on('click', function () {
$('#sidebar').toggleClass('active');
$('#body').toggleClass('active');
});
// Auto-hide sidebar on window resize if window size is small
// $(window).on('resize', function () {
// if ($(window).width() <= 768) {
// $('#sidebar, #body').addClass('active');
// }
// }); | // Initiate time picker
//$('.timepicker').mdtimepicker(); | |
test_data_utils_filetree.py | import json
from uuid import uuid1
import pytest
from click.testing import CliRunner
from flask_jsondash.data_utils import filetree
def test_path_hierarchy(tmpdir):
uid = uuid1()
tmp = tmpdir.mkdir('{}'.format(uid))
data = filetree.path_hierarchy(tmp.strpath)
assert json.dumps(data)
for key in ['type', 'name', 'path']:
assert key in data
def test_path_hierarchy_invalid_path(tmpdir):
with pytest.raises(OSError):
filetree.path_hierarchy('invalid-path')
def test_path_hierarchy_invalid_path_none(tmpdir):
with pytest.raises(AssertionError):
filetree.path_hierarchy(None)
def | (tmpdir):
with pytest.raises(OSError):
filetree.path_hierarchy('')
def test_get_tree_invalid_path(tmpdir):
runner = CliRunner()
result = runner.invoke(filetree.get_tree, ['-p', '/{}'.format(uuid1())])
assert result.exit_code == -1
assert isinstance(result.exception, OSError)
assert 'No such file or directory' in str(result.exception)
def test_get_tree_valid_path(tmpdir):
uid = str(uuid1())
tmp = tmpdir.mkdir(uid)
runner = CliRunner()
result = runner.invoke(filetree.get_tree, ['-p', tmp.strpath])
assert result.exit_code == 0
assert 'path' in result.output
assert 'name' in result.output
assert 'type' in result.output
assert 'children' in result.output
def test_get_tree_valid_path_jsonfile(tmpdir):
uid = str(uuid1())
tmp = tmpdir.mkdir(uid)
jsonfile = tmp.join('foo.json')
jsonpath = str(jsonfile.realpath()).encode('utf-8')
jsonfile.write('')
assert str(jsonfile.read()) == ''
runner = CliRunner()
result = runner.invoke(
filetree.get_tree, ['-p', tmp.strpath, '-j', jsonpath])
assert result.exit_code == 0
data = str(jsonfile.read())
assert 'path' in data
assert 'name' in data
assert 'type' in data
assert 'children' in data
def test_get_tree_valid_path_prettyprint(tmpdir):
uid = str(uuid1())
tmp = tmpdir.mkdir(uid)
runner = CliRunner()
result = runner.invoke(
filetree.get_tree, ['-p', tmp.strpath, '--ppr'])
assert result.exit_code == 0
assert 'path' in result.output
assert 'name' in result.output
assert 'type' in result.output
assert 'children' in result.output
| test_path_hierarchy_invalid_path_empty_path |
update_references.py | import functools
import itertools
import os
from uitools.qt import Q
from maya import cmds
from sgfs import SGFS
import mayatools.shelf
from mayatools.tickets import ticket_ui_context
from mayatools.geocache import utils as geocache_utils
from sgpublish import uiutils as ui_utils
from sgpublish import check
from sgpublish.check import maya as maya_check
from sgpublish.mayatools import create_reference
class VersionedItem(Q.TreeWidgetItem):
default_type = '-'
def __init__(self, sgfs, status):
self.sgfs = sgfs
self.status = status
self._setupData()
fields = self._viewFields()
super(VersionedItem, self).__init__(fields)
self._setupGui()
def _setupData(self):
self.path = path = self.status.path
self.name = os.path.basename(self.path)
self.publish = publish = self.status.used
if publish:
task = self.task = publish.parent()
else:
tasks = self.sgfs.entities_from_path(path, 'Task')
task = self.task = tasks[0] if tasks else None
if task:
task.fetch(('step.Step.code', 'content'))
self.entity = task.parent()
else:
entities = self.sgfs.entities_from_path(path, set(('Asset', 'Shot')))
self.entity = entities[0] if entities else None
def _viewFields(self):
if self.publish:
return [
self.name,
self.entity['code'],
self.task['step.Step.code'],
self.task['content'],
self.publish['sg_type'],
self.publish['code'],
('v%04d' % self.publish['sg_version']) if self.is_latest else
('v%04d (of %d)' % (self.publish['sg_version'], self.status.latest['sg_version'])),
]
else:
return [
self.name,
self.entity['code'] if self.entity else '-',
self.task['step.Step.code'] if self.task else '-',
self.task['content'] if self.task else '-',
self.default_type,
'-',
'-',
]
def _setupGui(self):
self._updateIcon()
@property
def is_latest(self):
return self.publish is self.status.latest
def _updateIcon(self):
if self.publish:
if self.is_latest:
self.setIcon(0, ui_utils.icon('silk/tick', size=12, as_icon=True))
else:
self.setIcon(0, ui_utils.icon('silk/cross', size=12, as_icon=True))
else:
self.setIcon(0, ui_utils.icon('silk/error', size=12, as_icon=True))
def attach_to_tree(self, tree=None):
if tree:
self.tree = tree
class ReferenceItem(VersionedItem):
default_type = 'bare reference'
def _setupData(self):
super(ReferenceItem, self)._setupData()
self.name = self.namespace = cmds.file(self.path, q=True, namespace=True)
self.node = cmds.referenceQuery(self.path, referenceNode=True)
def _setupGui(self):
super(ReferenceItem, self)._setupGui()
if self.publish:
self.combo = combo = Q.ComboBox()
for i, sibling in enumerate(self.status.all):
combo.addItem('v%04d' % sibling['sg_version'], sibling)
if sibling['sg_version'] == self.publish['sg_version']:
combo.setCurrentIndex(i)
combo.currentIndexChanged.connect(self._combo_changed)
else:
self.button = button = Q.PushButton("Pick a Publish")
button.clicked.connect(self._pick_publish)
def attach_to_tree(self, *args, **kwargs):
super(ReferenceItem, self).attach_to_tree(*args, **kwargs)
if self.publish:
self.tree.setItemWidget(self, 6, self.combo)
else:
self.tree.setItemWidget(self, 6, self.button)
def _combo_changed(self, index):
with ticket_ui_context():
new_publish = self.status.all[index]
new_path = new_publish['sg_path']
print '#', self.node, 'to', new_path
cmds.file(
new_path,
loadReference=self.node,
type='mayaAscii' if new_path.endswith('.ma') else 'mayaBinary',
options='v=0',
)
self.publish = new_publish
self.path = new_path
self._updateIcon()
def _pick_publish(self):
self._picker = create_reference.Dialog(path=self.path, custom_namespace=False)
self._picker._button.setText('Pick a Publish')
self._picker._do_reference = self._do_picker_reference
self._picker.show()
def _do_picker_reference(self, path, namespace):
|
class GeocacheItem(VersionedItem):
default_type = 'bare geocache'
def _setupData(self):
super(GeocacheItem, self)._setupData()
self.name = os.path.basename(os.path.dirname(self.path)) + '/' + os.path.splitext(os.path.basename(self.path))[0]
class Dialog(Q.Widgets.Dialog):
def __init__(self):
super(Dialog, self).__init__()
self._setupGui()
self._populate_references()
self._did_check = False
self.setMinimumWidth(self._tree.viewport().width() + 120) # 120 for combos
def _setupGui(self):
self.setWindowTitle("Update References")
self.setLayout(Q.VBoxLayout())
self._tree = Q.TreeWidget()
self._tree.setIndentation(0)
self._tree.setItemsExpandable(False)
self._tree.setHeaderLabels(["Name", "Entity", "Step", "Task", "Type", "Publish Name", "Version"])
self.layout().addWidget(self._tree)
button_layout = Q.HBoxLayout()
button_layout.addStretch()
#TO-DO: Finish implementing Update all
self._update_button = Q.PushButton('Update All')
#button_layout.addWidget(self._update_button)
self._close_button = Q.PushButton('Close')
self._close_button.clicked.connect(self._on_close_button)
button_layout.addWidget(self._close_button)
self.layout().addLayout(button_layout)
def _populate_references(self):
sgfs = SGFS()
reference_statuses = check.check_paths(cmds.file(q=True, reference=True), only_published=False)
for reference in reference_statuses:
item = ReferenceItem(sgfs, reference)
self._tree.addTopLevelItem(item)
item.attach_to_tree(self._tree)
geocaches = geocache_utils.get_existing_cache_mappings().keys()
geocache_statuses = check.check_paths(geocaches, only_published=True)
for geocache in geocache_statuses:
item = GeocacheItem(sgfs, geocache)
self._tree.addTopLevelItem(item)
item.attach_to_tree(self._tree)
for i in range(7):
self._tree.resizeColumnToContents(i)
self._tree.setColumnWidth(i, self._tree.columnWidth(i) + 10)
def sizeHint(self):
total = 0
for i in range(7):
total += self._tree.columnWidth(i)
hint = super(Dialog, self).sizeHint()
hint.setWidth(total + 50)
return hint
def closeEvent(self, e):
super(Dialog, self).closeEvent(e)
if not self._did_check:
self._did_check = True
maya_check.start_background_check()
def _on_close_button(self):
self.close()
def __before_reload__():
if dialog:
dialog.close()
dialog = None
def run():
global dialog
if dialog:
dialog.close()
dialog = Dialog()
dialog.show()
dialog.raise_()
| with ticket_ui_context():
print '#', self.node, 'to', path
cmds.file(
path,
loadReference=self.node,
type='mayaAscii' if path.endswith('.ma') else 'mayaBinary',
options='v=0',
)
self.status = check.check_paths([path])[0]
self.publish = self.status.used
new_data = self._viewFields()
for i, v in enumerate(new_data):
self.setData(i, Q.DisplayRole, v)
# This is dangerous to call a second time for some reason.
# On Mike's home machine, I used to not set self.publish above,
# which would overwrite the button, and for some reason
# segfault when the dialog was closed.
self._setupGui()
self.attach_to_tree() |
types.ts | import { NgModuleRef, Type, NgZone } from '@angular/core';
import { AppProps } from 'single-spa';
import { BaseSingleSpaAngularOptions } from 'single-spa-angular/internals';
export interface SingleSpaAngularOptions extends BaseSingleSpaAngularOptions { | // This might be `noop` if the root module is bootstrapped
// with `{ ngZone: 'noop' }` options.
NgZone: typeof NgZone | 'noop';
updateFunction?(props: AppProps): Promise<any>;
Router?: Type<any>;
AnimationEngine?: Type<any>;
}
export interface BootstrappedSingleSpaAngularOptions extends SingleSpaAngularOptions {
bootstrappedModule: NgModuleRef<any>;
// All below properties can be optional in case of
// `SingleSpaAngularOpts.NgZone` is a `noop` string and not an `NgZone` class.
bootstrappedNgZone?: NgZone;
routingEventListener?: () => void;
zoneIdentifier?: string;
} | |
supervised_model.py | class SupervisedModel:
def train(self, x, y):
raise NotImplementedError
def | (self, x):
raise NotImplementedError
def predict_classes(self, x):
raise NotImplementedError
def save(self, path):
raise NotImplementedError
def load(self, path):
raise NotImplementedError | predict |
interfaces.go | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// InterfacesClient is the network Client
type InterfacesClient struct {
BaseClient
}
// NewInterfacesClient creates an instance of the InterfacesClient client.
func NewInterfacesClient(subscriptionID string) InterfacesClient {
return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient client.
func | (baseURI string, subscriptionID string) InterfacesClient {
return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates a network interface.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
// parameters is parameters supplied to the create or update network interface operation.
func (client InterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters Interface) (result InterfacesCreateOrUpdateFuture, err error) {
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, networkInterfaceName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
result, err = client.CreateOrUpdateSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client InterfacesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters Interface) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-06-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (future InterfacesCreateOrUpdateFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
return
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Delete deletes the specified network interface.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
func (client InterfacesClient) Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result InterfacesDeleteFuture, err error) {
req, err := client.DeletePreparer(ctx, resourceGroupName, networkInterfaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request")
return
}
result, err = client.DeleteSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", result.Response(), "Failure sending request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client InterfacesClient) DeletePreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-06-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) DeleteSender(req *http.Request) (future InterfacesDeleteFuture, err error) {
sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
future.Future = azure.NewFuture(req)
future.req = req
_, err = future.Done(sender)
if err != nil {
return
}
err = autorest.Respond(future.Response(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
return
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets information about the specified network interface.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
// expand is expands referenced resources.
func (client InterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) {
req, err := client.GetPreparer(ctx, resourceGroupName, networkInterfaceName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client InterfacesClient) GetPreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-06-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// GetVirtualMachineScaleSetNetworkInterface get the specified network interface in a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine
// scale set. virtualmachineIndex is the virtual machine index. networkInterfaceName is the name of the network
// interface. expand is expands referenced resources.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) {
req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request")
return
}
resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request")
return
}
result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request")
}
return
}
// GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualmachineIndex": autorest.Encode("path", virtualmachineIndex),
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
const APIVersion = "2015-06-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always
// closes the http.Response Body.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List gets all network interfaces in a resource group.
//
// resourceGroupName is the name of the resource group.
func (client InterfacesClient) List(ctx context.Context, resourceGroupName string) (result InterfaceListResultPage, err error) {
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.ilr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request")
return
}
result.ilr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client InterfacesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-06-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client InterfacesClient) listNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.interfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client InterfacesClient) ListComplete(ctx context.Context, resourceGroupName string) (result InterfaceListResultIterator, err error) {
result.page, err = client.List(ctx, resourceGroupName)
return
}
// ListAll gets all network interfaces in a subscription.
func (client InterfacesClient) ListAll(ctx context.Context) (result InterfaceListResultPage, err error) {
result.fn = client.listAllNextResults
req, err := client.ListAllPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request")
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.ilr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request")
return
}
result.ilr, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request")
}
return
}
// ListAllPreparer prepares the ListAll request.
func (client InterfacesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2015-06-15"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListAllSender sends the ListAll request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
}
// ListAllResponder handles the response to the ListAll request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listAllNextResults retrieves the next set of results, if any.
func (client InterfacesClient) listAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.interfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", resp, "Failure sending next results request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", resp, "Failure responding to next results request")
}
return
}
// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
func (client InterfacesClient) ListAllComplete(ctx context.Context) (result InterfaceListResultIterator, err error) {
result.page, err = client.ListAll(ctx)
return
}
// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine
// scale set.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResultPage, err error) {
result.fn = client.listVirtualMachineScaleSetNetworkInterfacesNextResults
req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(ctx, resourceGroupName, virtualMachineScaleSetName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request")
return
}
resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
if err != nil {
result.ilr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request")
return
}
result.ilr, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request")
}
return
}
// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) {
	// URL-escape the caller-supplied segments before substituting them into the path.
	pathParameters := map[string]interface{}{
		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
	}
	const APIVersion = "2015-06-15"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	// Attach ctx so the outgoing request can be cancelled by the caller.
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
	// Retry with resource-provider registration handling, per SDK convention.
	retryDecorator := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retryDecorator)
}
// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	// Record the raw response on the result even when decoding failed.
	result.Response = autorest.Response{Response: resp}
	return
}
// listVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any.
func (client InterfacesClient) listVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
	req, err := lastResults.interfaceListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there is no next page link; return the zero result.
	if req == nil {
		return
	}
	resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListVirtualMachineScaleSetNetworkInterfacesComplete enumerates all values, automatically crossing page boundaries as required.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesComplete(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResultIterator, err error) {
	firstPage, pageErr := client.ListVirtualMachineScaleSetNetworkInterfaces(ctx, resourceGroupName, virtualMachineScaleSetName)
	result.page = firstPage
	return result, pageErr
}
// ListVirtualMachineScaleSetVMNetworkInterfaces gets information about all network interfaces in a virtual machine in
// a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine
// scale set. virtualmachineIndex is the virtual machine index.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResultPage, err error) {
	// Wire up the paging function so Next() on the returned page can fetch subsequent pages.
	result.fn = client.listVirtualMachineScaleSetVMNetworkInterfacesNextResults
	req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request")
		return
	}
	resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
	if err != nil {
		// Surface the raw HTTP response to the caller even on send failure.
		result.ilr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request")
		return
	}
	result.ilr, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request")
	}
	return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) {
	// URL-escape the caller-supplied segments before substituting them into the path.
	pathParameters := map[string]interface{}{
		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
		"virtualmachineIndex":        autorest.Encode("path", virtualmachineIndex),
		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
	}
	const APIVersion = "2015-06-15"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	// Attach ctx so the outgoing request can be cancelled by the caller.
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
	// Retry with resource-provider registration handling, per SDK convention.
	retryDecorator := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retryDecorator)
}
// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	// Record the raw response on the result even when decoding failed.
	result.Response = autorest.Response{Response: resp}
	return
}
// listVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any.
func (client InterfacesClient) listVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
	req, err := lastResults.interfaceListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means there is no next page link; return the zero result.
	if req == nil {
		return
	}
	resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", resp, "Failure responding to next results request")
	}
	return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesComplete enumerates all values, automatically crossing page boundaries as required.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesComplete(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResultIterator, err error) {
	firstPage, pageErr := client.ListVirtualMachineScaleSetVMNetworkInterfaces(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex)
	result.page = firstPage
	return result, pageErr
}
| NewInterfacesClientWithBaseURI |
slpwk_dr0.rs | #[doc = "Register `SLPWK_DR0` writer"]
pub struct W(crate::W<SLPWK_DR0_SPEC>);
// Deref/DerefMut forward to the generic writer so its raw `bits` API is
// available on this register-specific wrapper.
impl core::ops::Deref for W {
    type Target = crate::W<SLPWK_DR0_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// Lets the generic writer produced by the register API be converted into
// this register's writer type.
impl From<crate::W<SLPWK_DR0_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<SLPWK_DR0_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `PID7` writer - Peripheral 7 SleepWalking Disable"]
pub struct PID7_W<'a> {
    w: &'a mut W,
}
impl<'a> PID7_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 7, then OR in the new value; only the in-memory shadow
        // (`self.w.bits`) is modified here.
        self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7);
        self.w
    }
}
#[doc = "Field `PID8` writer - Peripheral 8 SleepWalking Disable"]
pub struct PID8_W<'a> {
w: &'a mut W,
}
impl<'a> PID8_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8);
self.w
}
}
#[doc = "Field `PID10` writer - Peripheral 10 SleepWalking Disable"]
pub struct PID10_W<'a> {
w: &'a mut W,
}
impl<'a> PID10_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10);
self.w
}
}
#[doc = "Field `PID11` writer - Peripheral 11 SleepWalking Disable"]
pub struct PID11_W<'a> {
w: &'a mut W,
}
impl<'a> PID11_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11);
self.w
}
}
#[doc = "Field `PID13` writer - Peripheral 13 SleepWalking Disable"]
pub struct PID13_W<'a> {
w: &'a mut W,
}
impl<'a> PID13_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13);
self.w
}
}
#[doc = "Field `PID14` writer - Peripheral 14 SleepWalking Disable"]
pub struct PID14_W<'a> {
w: &'a mut W,
}
impl<'a> PID14_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14);
self.w
}
}
#[doc = "Field `PID15` writer - Peripheral 15 SleepWalking Disable"]
pub struct PID15_W<'a> {
w: &'a mut W,
}
impl<'a> PID15_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15);
self.w
}
}
#[doc = "Field `PID16` writer - Peripheral 16 SleepWalking Disable"]
pub struct PID16_W<'a> {
w: &'a mut W,
}
impl<'a> PID16_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | ((value as u32 & 0x01) << 16);
self.w
}
}
#[doc = "Field `PID18` writer - Peripheral 18 SleepWalking Disable"]
pub struct PID18_W<'a> {
w: &'a mut W,
}
impl<'a> PID18_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | ((value as u32 & 0x01) << 18);
self.w
}
}
#[doc = "Field `PID19` writer - Peripheral 19 SleepWalking Disable"]
pub struct PID19_W<'a> {
w: &'a mut W,
}
impl<'a> PID19_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19);
self.w
}
}
#[doc = "Field `PID20` writer - Peripheral 20 SleepWalking Disable"]
pub struct PID20_W<'a> {
w: &'a mut W,
}
impl<'a> PID20_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20);
self.w
}
}
#[doc = "Field `PID21` writer - Peripheral 21 SleepWalking Disable"]
pub struct PID21_W<'a> {
w: &'a mut W,
}
impl<'a> PID21_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 21);
self.w
}
}
#[doc = "Field `PID22` writer - Peripheral 22 SleepWalking Disable"]
pub struct PID22_W<'a> {
w: &'a mut W,
}
impl<'a> PID22_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 22)) | ((value as u32 & 0x01) << 22);
self.w
}
}
#[doc = "Field `PID23` writer - Peripheral 23 SleepWalking Disable"]
pub struct PID23_W<'a> {
w: &'a mut W,
}
impl<'a> PID23_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 23)) | ((value as u32 & 0x01) << 23);
self.w
}
}
#[doc = "Field `PID24` writer - Peripheral 24 SleepWalking Disable"]
pub struct PID24_W<'a> {
w: &'a mut W,
}
impl<'a> PID24_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | ((value as u32 & 0x01) << 24);
self.w
}
}
#[doc = "Field `PID25` writer - Peripheral 25 SleepWalking Disable"]
pub struct PID25_W<'a> {
w: &'a mut W,
}
impl<'a> PID25_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | ((value as u32 & 0x01) << 25);
self.w
}
}
#[doc = "Field `PID26` writer - Peripheral 26 SleepWalking Disable"]
pub struct PID26_W<'a> {
w: &'a mut W,
}
impl<'a> PID26_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | ((value as u32 & 0x01) << 26);
self.w
}
}
#[doc = "Field `PID27` writer - Peripheral 27 SleepWalking Disable"]
pub struct PID27_W<'a> {
w: &'a mut W,
}
impl<'a> PID27_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 27)) | ((value as u32 & 0x01) << 27);
self.w
}
}
#[doc = "Field `PID28` writer - Peripheral 28 SleepWalking Disable"]
pub struct PID28_W<'a> {
w: &'a mut W,
}
impl<'a> PID28_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 28)) | ((value as u32 & 0x01) << 28);
self.w
}
}
#[doc = "Field `PID29` writer - Peripheral 29 SleepWalking Disable"]
pub struct PID29_W<'a> {
w: &'a mut W,
}
impl<'a> PID29_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 29)) | ((value as u32 & 0x01) << 29);
self.w
}
}
#[doc = "Field `PID30` writer - Peripheral 30 SleepWalking Disable"]
pub struct PID30_W<'a> {
w: &'a mut W,
}
impl<'a> PID30_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | ((value as u32 & 0x01) << 30);
self.w
}
}
#[doc = "Field `PID31` writer - Peripheral 31 SleepWalking Disable"]
pub struct PID31_W<'a> {
w: &'a mut W,
}
impl<'a> PID31_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
self.w
}
}
impl W {
#[doc = "Bit 7 - Peripheral 7 SleepWalking Disable"]
#[inline(always)]
pub fn pid7(&mut self) -> PID7_W {
PID7_W { w: self }
}
#[doc = "Bit 8 - Peripheral 8 SleepWalking Disable"]
#[inline(always)]
pub fn pid8(&mut self) -> PID8_W {
PID8_W { w: self }
}
#[doc = "Bit 10 - Peripheral 10 SleepWalking Disable"]
#[inline(always)]
pub fn pid10(&mut self) -> PID10_W {
PID10_W { w: self }
}
#[doc = "Bit 11 - Peripheral 11 SleepWalking Disable"]
#[inline(always)]
pub fn pid11(&mut self) -> PID11_W {
PID11_W { w: self }
}
#[doc = "Bit 13 - Peripheral 13 SleepWalking Disable"]
#[inline(always)]
pub fn pid13(&mut self) -> PID13_W {
PID13_W { w: self }
}
#[doc = "Bit 14 - Peripheral 14 SleepWalking Disable"]
#[inline(always)]
pub fn pid14(&mut self) -> PID14_W {
PID14_W { w: self }
}
#[doc = "Bit 15 - Peripheral 15 SleepWalking Disable"]
#[inline(always)]
pub fn pid15(&mut self) -> PID15_W {
PID15_W { w: self }
}
#[doc = "Bit 16 - Peripheral 16 SleepWalking Disable"]
#[inline(always)]
pub fn pid16(&mut self) -> PID16_W {
PID16_W { w: self }
}
#[doc = "Bit 18 - Peripheral 18 SleepWalking Disable"]
#[inline(always)]
pub fn pid18(&mut self) -> PID18_W {
PID18_W { w: self }
}
#[doc = "Bit 19 - Peripheral 19 SleepWalking Disable"]
#[inline(always)]
pub fn pid19(&mut self) -> PID19_W {
PID19_W { w: self }
}
#[doc = "Bit 20 - Peripheral 20 SleepWalking Disable"]
#[inline(always)]
pub fn pid20(&mut self) -> PID20_W {
PID20_W { w: self }
}
#[doc = "Bit 21 - Peripheral 21 SleepWalking Disable"]
#[inline(always)]
pub fn pid21(&mut self) -> PID21_W {
PID21_W { w: self }
}
#[doc = "Bit 22 - Peripheral 22 SleepWalking Disable"]
#[inline(always)]
pub fn pid22(&mut self) -> PID22_W {
PID22_W { w: self }
}
#[doc = "Bit 23 - Peripheral 23 SleepWalking Disable"]
#[inline(always)]
pub fn pid23(&mut self) -> PID23_W {
PID23_W { w: self }
}
#[doc = "Bit 24 - Peripheral 24 SleepWalking Disable"]
#[inline(always)]
pub fn pid24(&mut self) -> PID24_W {
PID24_W { w: self }
}
#[doc = "Bit 25 - Peripheral 25 SleepWalking Disable"]
#[inline(always)]
pub fn pid25(&mut self) -> PID25_W {
PID25_W { w: self }
}
#[doc = "Bit 26 - Peripheral 26 SleepWalking Disable"]
#[inline(always)]
pub fn pid26(&mut self) -> PID26_W {
PID26_W { w: self }
}
#[doc = "Bit 27 - Peripheral 27 SleepWalking Disable"]
#[inline(always)]
pub fn pid27(&mut self) -> PID27_W {
PID27_W { w: self }
}
#[doc = "Bit 28 - Peripheral 28 SleepWalking Disable"]
#[inline(always)]
pub fn pid28(&mut self) -> PID28_W {
PID28_W { w: self }
}
#[doc = "Bit 29 - Peripheral 29 SleepWalking Disable"]
#[inline(always)]
pub fn pid29(&mut self) -> PID29_W {
PID29_W { w: self }
}
#[doc = "Bit 30 - Peripheral 30 SleepWalking Disable"]
#[inline(always)]
pub fn pid30(&mut self) -> PID30_W {
PID30_W { w: self }
}
#[doc = "Bit 31 - Peripheral 31 SleepWalking Disable"]
#[inline(always)]
pub fn | (&mut self) -> PID31_W {
PID31_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "SleepWalking Disable Register 0\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [slpwk_dr0](index.html) module"]
pub struct SLPWK_DR0_SPEC;
impl crate::RegisterSpec for SLPWK_DR0_SPEC {
    // The register is accessed as a 32-bit word.
    type Ux = u32;
}
#[doc = "`write(|w| ..)` method takes [slpwk_dr0::W](W) writer structure"]
impl crate::Writable for SLPWK_DR0_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets SLPWK_DR0 to value 0"]
impl crate::Resettable for SLPWK_DR0_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        // All disable bits are clear after reset.
        0
    }
}
| pid31 |
routes.py | import os
from functools import wraps
from flask import flash, redirect, render_template, url_for, current_app, Markup, request
from flask_login import login_user, login_required, logout_user, current_user
from app.auth import bp
from app.auth.forms import SignUpForm, RegistrationForm, LoginForm, ResetPasswordForm, NewPasswordForm, UserForm
from app.auth.email import send_email
from itsdangerous import URLSafeTimedSerializer
from app.models import User
from app import db
def offer_to_log_in(email: str):
    """Flash a danger message inviting the user to log in with an already-used email."""
    login_link = f"""<a href="{url_for('auth.login', email=email)}" class="danger-link">Log In</a>"""
    flash(Markup(f"The email: {email} is used. Please {login_link}."), 'danger')
def get_email_from_token(token):
    """Decode and return the email address embedded in a signed token.

    NOTE(review): no ``max_age`` is passed to ``loads()``, so tokens never
    expire — confirm this is intentional.
    """
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
    return email
def redirect_authenticated(f):
    """Route decorator: redirect to main.index instead of running the view when
    the current user is already logged in with the email encoded in the token.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Relies on the decorated route exposing a <token> URL parameter.
        if current_user.is_authenticated and current_user.email == get_email_from_token(kwargs.get('token')):
            return redirect(url_for('main.index'))
        return f(*args, **kwargs)
    return decorated_function
@bp.route('/signup', methods=['GET', 'POST'])
async def signup():
    """Start registration: email a confirmation link to the submitted address.

    If the address is already registered, flash a link to the login page
    instead of sending the email.
    """
    form = SignUpForm()
    is_busy = bool(User.query.filter_by(email=form.email.data).first())
    if form.validate_on_submit() and not is_busy:
        # Debug print of the send result removed; failures should be handled
        # by send_email itself.
        await send_email(form.email.data, goal='registration')
        flash('To continue registration, follow the link in the letter.', 'info')
        return redirect(url_for('main.index'))
    elif is_busy:
        offer_to_log_in(form.email.data)
    return render_template('auth/signup.html', form=form)
@bp.route('/register/<token>', methods=['GET', 'POST'])
@redirect_authenticated
def register(token):
    """Complete registration via the emailed token: create the user account.

    The email comes from the signed token, never from user input. Admin status
    is granted when the email matches ADMIN_EMAIL from the app config.
    """
    form = RegistrationForm()
    email = get_email_from_token(token)
    if User.query.filter_by(email=email).first():
        # Account already exists for this email — offer login instead.
        offer_to_log_in(email)
        return redirect(url_for('main.index'))
    form.email.data = email
    if form.validate_on_submit():
        new_user = User(
            email=email,  # noqa
            first_name=form.first_name.data,  # noqa
            last_name=form.last_name.data,  # noqa
            is_admin=(email == current_app.config['ADMIN_EMAIL'])  # noqa
        )
        new_user.set_password(form.password.data)
        db.session.add(new_user)
        db.session.commit()
        # Create the user's upload directory; exist_ok avoids the
        # check-then-create race of isdir()+mkdir().
        os.makedirs(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id)), exist_ok=True)
        flash('You can log in', 'success')
        return redirect(url_for('main.index'))
    return render_template('auth/register.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in; supports pre-filling the email via the ?email= query arg."""
    form = LoginForm()
    if email := request.args.get('email'):
        form.email.data = email
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if not user:
            flash(f'User with email {form.email.data} not registered', 'danger')
            return redirect(url_for('auth.signup'))
        elif not user.check_password(form.password.data):
            flash('Wrong password', 'danger')
            # NOTE(review): redirects to the index rather than back to the
            # login form — confirm this is the intended UX.
            return redirect(url_for('main.index'))
        else:
            login_user(user, remember=form.remember_me.data)
            flash(f'Hi, {user.first_name}!', 'success')
            return redirect(url_for('main.index'))
    return render_template('auth/login.html', form=form)
@bp.route('/log_out', methods=['GET', 'POST'])
@login_required
def log_out():
    """End the current user's session and redirect to the index page."""
    logout_user()
    flash('You are logged out', 'info')
    return redirect(url_for('main.index'))
@bp.route('/reset_password', methods=['GET', 'POST'])
async def reset_password():
    """Email a password-reset link to the submitted address.

    For authenticated users the email field is pre-filled and locked.
    """
    form = ResetPasswordForm()
    if current_user.is_authenticated:
        form.email.data = current_user.email
        form.email.render_kw = {'disabled': True}
    is_present = bool(User.query.filter_by(email=form.email.data).first())
    if form.validate_on_submit():
        if is_present:
            # send_email is a coroutine (awaited in signup); without await the
            # mail was never actually sent. The view is async def for this.
            await send_email(form.email.data, goal='reset')
            flash('To continue reset password, follow the link in the letter.', 'info')
            return redirect(url_for('main.index'))
        else:
            href = f"""<a href="{url_for('auth.signup', email=form.email.data)}" class="danger-link">Sign up</a>"""
            message = f"The email: {form.email.data} not found. Please {href} or use correct email."
            flash(Markup(message), 'danger')
    # NOTE(review): renders the signup template — confirm a dedicated reset
    # template is not intended here.
    return render_template('auth/signup.html', form=form)
@bp.route('/new_password/<token>', methods=['GET', 'POST'])
def new_password(token):
    """Set a new password for the account identified by the emailed token."""
    form = NewPasswordForm()
    # Reuse the shared helper instead of duplicating the serializer setup.
    email = get_email_from_token(token)
    form.email.data = email
    user = User.query.filter_by(email=email).first()
    if form.validate_on_submit():
        if user is None:
            # Token refers to an email that is not registered (e.g. the
            # account was removed); avoid an AttributeError / 500.
            flash(f'User with email {email} not registered', 'danger')
            return redirect(url_for('auth.signup'))
        user.set_password(form.password.data)
        db.session.commit()
        flash('Password was changed. You can log in', 'success')
        return redirect(url_for('main.index'))
    elif form.is_submitted():
        # Re-render with 422 so submitted-but-invalid forms are distinguishable.
        return render_template('auth/new_password.html', form=form), 422
    return render_template('auth/new_password.html', form=form)
@bp.route('/user_page', methods=['GET', 'POST'])
@login_required
def user_page():
    """Show and update the current user's profile (email, first/last name)."""
    form = UserForm(obj=current_user)
    if form.validate_on_submit():
        is_changed = False
        for field in ('email', 'first_name', 'last_name'):
            new_value = getattr(form, field).data
            # Compare by value: the original `is not` checks identity, and two
            # equal strings are often distinct objects, causing spurious
            # (or, worse, skipped) updates.
            if new_value != getattr(current_user, field):
                setattr(current_user, field, new_value)
                is_changed = True
        if is_changed:
            db.session.commit()
    return render_template('auth/user_page.html', form=form)
| form = RegistrationForm()
email = get_email_from_token(token)
if bool(User.query.filter_by(email=email).first()):
offer_to_log_in(email)
return redirect(url_for('main.index'))
form.email.data = email
if form.validate_on_submit():
new_user = User(
email=email, # noqa
first_name=form.first_name.data, # noqa
last_name=form.last_name.data, # noqa
is_admin=True if email == current_app.config['ADMIN_EMAIL'] else False # noqa
)
new_user.set_password(form.password.data)
print(f'{new_user.is_admin:=}')
db.session.add(new_user)
db.session.commit()
if not os.path.isdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id))):
os.mkdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id)))
flash('You can log in', 'success')
return redirect(url_for('main.index'))
return render_template('auth/register.html', form=form) |
cgroup_controller_linux.go | //go:build linux
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package control
import (
"fmt"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/taskresource/cgroup/control/factory"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/cihub/seelog"
"github.com/containerd/cgroups"
"github.com/pkg/errors"
)
// control is used to implement the cgroup Control interface
type control struct {
factory.CgroupFactory
}
// New is used to obtain a new cgroup control object
func New() Control {
if config.CgroupV2 { | }
return newControl(&factory.GlobalCgroupFactory{})
}
// newControl helps setup the cgroup controller
func newControl(cgroupFact factory.CgroupFactory) Control {
	ctl := control{CgroupFactory: cgroupFact}
	return &ctl
}
// Create creates a new cgroup based off the spec post validation
func (c *control) Create(cgroupSpec *Spec) error {
	// Validate incoming spec
	err := validateCgroupSpec(cgroupSpec)
	if err != nil {
		return fmt.Errorf("cgroup create: failed to validate spec: %w", err)
	}
	seelog.Debugf("Creating cgroup cgroupPath=%s", cgroupSpec.Root)
	_, err = c.New(cgroups.V1, cgroups.StaticPath(cgroupSpec.Root), cgroupSpec.Specs)
	if err != nil {
		// Wrap with %w (consistent with Remove) so callers can inspect the
		// cause via errors.Is/errors.As; %s flattened the error chain.
		return fmt.Errorf("cgroup create: unable to create controller: v1: %w", err)
	}
	return nil
}
// Remove deletes the cgroup rooted at cgroupPath.
func (c *control) Remove(cgroupPath string) error {
	seelog.Debugf("Removing cgroup cgroupPath=%s", cgroupPath)
	controller, loadErr := c.Load(cgroups.V1, cgroups.StaticPath(cgroupPath))
	if loadErr != nil {
		// use the %w verb to wrap the error to be unwrapped by errors.Is()
		return fmt.Errorf("cgroup remove: unable to obtain controller: %w", loadErr)
	}
	// Delete cgroup
	if deleteErr := controller.Delete(); deleteErr != nil {
		return fmt.Errorf("cgroup remove: unable to delete cgroup: %w", deleteErr)
	}
	return nil
}
// Exists reports whether a cgroup is present at cgroupPath.
func (c *control) Exists(cgroupPath string) bool {
	seelog.Debugf("Checking existence of cgroup cgroupPath=%s", cgroupPath)
	controller, err := c.Load(cgroups.V1, cgroups.StaticPath(cgroupPath))
	// Present only when the controller loads cleanly and is non-nil.
	return err == nil && controller != nil
}
// Init bootstraps the root ECS cgroup that task cgroups nest under.
func (c *control) Init() error {
	seelog.Debugf("Creating root ecs cgroup cgroupPath=%s", config.DefaultTaskCgroupV1Prefix)
	// An empty LinuxResources spec creates the node without any resource limits.
	return c.Create(&Spec{
		Root:  config.DefaultTaskCgroupV1Prefix,
		Specs: &specs.LinuxResources{},
	})
}
// validateCgroupSpec checks the cgroup spec for valid path and specifications
func validateCgroupSpec(cgroupSpec *Spec) error {
	// A nil spec cannot be inspected at all; reject it outright.
	if cgroupSpec == nil {
		return errors.New("cgroup spec validator: empty cgroup spec")
	}
	// Root names the cgroup to create; it is mandatory.
	if cgroupSpec.Root == "" {
		return errors.New("cgroup spec validator: invalid cgroup root")
	}
	// Validate the linux resource specs
	if cgroupSpec.Specs == nil {
		return errors.New("cgroup spec validator: empty linux resource spec")
	}
	return nil
} | return &controlv2{} |
democracy.rs |
//! Autogenerated weights for `democracy`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2022-01-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dali-dev"), DB CACHE: 128
// Executed Command:
// ./target/release/composable
// benchmark
// --chain=dali-dev
// --execution=wasm
// --wasm-execution=compiled
// --pallet=democracy
// --extrinsic=*
// --steps=50
// --repeat=20
// --raw
// --output=runtime/dali/src/weights
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;
/// Weight functions for `democracy`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> democracy::WeightInfo for WeightInfo<T> {
// Storage: Democracy PublicPropCount (r:1 w:1)
// Storage: Democracy PublicProps (r:1 w:1)
// Storage: Democracy Blacklist (r:1 w:0)
// Storage: Democracy DepositOf (r:0 w:1)
fn propose() -> Weight {
(78_147_000 as Weight)
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
// Storage: Democracy DepositOf (r:1 w:1)
fn second(s: u32, ) -> Weight {
(40_901_000 as Weight)
// Standard Error: 1_000
.saturating_add((247_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Democracy VotingOf (r:1 w:1)
// Storage: Balances Locks (r:1 w:1)
fn vote_new(r: u32, ) -> Weight {
(44_497_000 as Weight)
// Standard Error: 1_000
.saturating_add((237_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Democracy VotingOf (r:1 w:1)
// Storage: Balances Locks (r:1 w:1)
fn vote_existing(r: u32, ) -> Weight {
(44_328_000 as Weight)
// Standard Error: 1_000
.saturating_add((224_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Democracy Cancellations (r:1 w:1)
fn emergency_cancel() -> Weight {
(27_961_000 as Weight)
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
// Storage: Democracy PublicProps (r:1 w:1)
// Storage: Democracy NextExternal (r:1 w:1)
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Democracy Blacklist (r:0 w:1)
// Storage: Democracy DepositOf (r:1 w:1)
// Storage: System Account (r:2 w:2)
fn blacklist(p: u32, ) -> Weight {
(95_296_000 as Weight)
// Standard Error: 6_000
.saturating_add((566_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(6 as Weight))
.saturating_add(T::DbWeight::get().writes(7 as Weight))
}
// Storage: Democracy NextExternal (r:1 w:1)
// Storage: Democracy Blacklist (r:1 w:0)
fn external_propose(v: u32, ) -> Weight {
(13_838_000 as Weight)
// Standard Error: 0
.saturating_add((88_000 as Weight).saturating_mul(v as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy NextExternal (r:0 w:1)
fn external_propose_majority() -> Weight {
(2_993_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy NextExternal (r:0 w:1)
fn external_propose_default() -> Weight {
(2_876_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy NextExternal (r:1 w:1)
// Storage: Democracy ReferendumCount (r:1 w:1)
// Storage: Democracy ReferendumInfoOf (r:0 w:1)
fn fast_track() -> Weight {
(29_139_000 as Weight)
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
// Storage: Democracy NextExternal (r:1 w:1)
// Storage: Democracy Blacklist (r:1 w:1)
fn veto_external(v: u32, ) -> Weight {
(30_300_000 as Weight)
// Standard Error: 0
.saturating_add((115_000 as Weight).saturating_mul(v as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
// Storage: Democracy PublicProps (r:1 w:1)
// Storage: Democracy DepositOf (r:1 w:1)
// Storage: System Account (r:2 w:2)
fn cancel_proposal(p: u32, ) -> Weight {
(70_582_000 as Weight)
// Standard Error: 2_000
.saturating_add((499_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(4 as Weight))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
}
// Storage: Democracy ReferendumInfoOf (r:0 w:1)
fn cancel_referendum() -> Weight {
(18_194_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Scheduler Lookup (r:1 w:1)
// Storage: Scheduler Agenda (r:1 w:1)
fn cancel_queued(r: u32, ) -> Weight {
(31_439_000 as Weight)
// Standard Error: 3_000
.saturating_add((1_990_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
// Storage: Democracy LowestUnbaked (r:1 w:1)
// Storage: Democracy ReferendumCount (r:1 w:0)
// Storage: Democracy ReferendumInfoOf (r:1 w:0)
fn on_initialize_base(r: u32, ) -> Weight {
(6_091_000 as Weight)
// Standard Error: 4_000
.saturating_add((5_373_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy LowestUnbaked (r:1 w:1)
// Storage: Democracy ReferendumCount (r:1 w:0)
// Storage: Democracy LastTabledWasExternal (r:1 w:0)
// Storage: Democracy NextExternal (r:1 w:0)
// Storage: Democracy PublicProps (r:1 w:0)
// Storage: Democracy ReferendumInfoOf (r:1 w:0)
fn on_initialize_base_with_launch_period(r: u32, ) -> Weight {
(14_522_000 as Weight)
// Standard Error: 4_000
.saturating_add((5_383_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy VotingOf (r:3 w:3)
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Balances Locks (r:1 w:1)
fn delegate(r: u32, ) -> Weight {
(60_787_000 as Weight)
// Standard Error: 5_000
.saturating_add((7_105_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(4 as Weight))
.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
}
// Storage: Democracy VotingOf (r:2 w:2)
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
fn undelegate(r: u32, ) -> Weight {
(28_718_000 as Weight)
// Standard Error: 5_000
.saturating_add((7_101_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
}
// Storage: Democracy PublicProps (r:0 w:1)
fn clear_public_proposals() -> Weight |
// Storage: Democracy Preimages (r:1 w:1)
fn note_preimage(b: u32, ) -> Weight {
(44_503_000 as Weight)
// Standard Error: 0
.saturating_add((3_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy Preimages (r:1 w:1)
fn note_imminent_preimage(b: u32, ) -> Weight {
(30_171_000 as Weight)
// Standard Error: 0
.saturating_add((2_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy Preimages (r:1 w:1)
// Storage: System Account (r:1 w:0)
fn reap_preimage(b: u32, ) -> Weight {
(41_252_000 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
// Storage: Democracy VotingOf (r:1 w:1)
// Storage: Balances Locks (r:1 w:1)
// Storage: System Account (r:1 w:1)
fn unlock_remove(r: u32, ) -> Weight {
(38_156_000 as Weight)
// Standard Error: 1_000
.saturating_add((119_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
// Storage: Democracy VotingOf (r:1 w:1)
// Storage: Balances Locks (r:1 w:1)
// Storage: System Account (r:1 w:1)
fn unlock_set(r: u32, ) -> Weight {
(36_213_000 as Weight)
// Standard Error: 1_000
.saturating_add((209_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Democracy VotingOf (r:1 w:1)
fn remove_vote(r: u32, ) -> Weight {
(20_612_000 as Weight)
// Standard Error: 1_000
.saturating_add((201_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
// Storage: Democracy ReferendumInfoOf (r:1 w:1)
// Storage: Democracy VotingOf (r:1 w:1)
fn remove_other_vote(r: u32, ) -> Weight {
(20_752_000 as Weight)
// Standard Error: 1_000
.saturating_add((203_000 as Weight).saturating_mul(r as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
}
| {
(3_294_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
} |
accounts.component.ts | import { Component, OnInit } from '@angular/core';
import { Observable } from 'rxjs';
import { AlertQuery } from '../../../shared/alert/state/alert.query';
import { AlertService } from '../../../shared/alert/state/alert.service';
import { TokenSource } from '../../../shared/enum/token-source.enum';
import { TokenQuery } from '../../../shared/state/token.query';
import { Profile, User } from '../../../shared/swagger';
import { UsersService } from '../../../shared/swagger/api/users.service';
import { UserQuery } from '../../../shared/user/user.query';
import { UserService } from '../../../shared/user/user.service';
@Component({
selector: 'app-accounts-internal',
templateUrl: './accounts.component.html',
styleUrls: ['./accounts.component.scss']
})
export class | implements OnInit {
// Currently logged-in user, kept in sync via userQuery.user$ in getUser().
user: User;
// Expose the TokenSource enum to the template for comparisons.
TokenSource = TokenSource;
// OAuth profile entries extracted from user.userProfiles (may be undefined).
googleProfile: Profile;
gitHubProfile: Profile;
hasGitHubToken$: Observable<boolean>;
hasGoogleToken$: Observable<boolean>;
public isRefreshing$: Observable<boolean>;
constructor(
  private userService: UserService,
  private usersService: UsersService,
  private tokenQuery: TokenQuery,
  private userQuery: UserQuery,
  private alertQuery: AlertQuery,
  private alertService: AlertService
) {
  this.hasGitHubToken$ = this.tokenQuery.hasGitHubToken$;
  this.hasGoogleToken$ = this.tokenQuery.hasGoogleToken$;
  // "Refreshing" mirrors the global info-alert state from AlertQuery.
  this.isRefreshing$ = this.alertQuery.showInfo$;
}
/**
 * Refresh the logged-in user's metadata from the given token provider and
 * push the result into global user state.
 *
 * @param {TokenSource} service TokenSource.GITHUB or TokenSource.GOOGLE
 * @memberof AccountsInternalComponent
 */
sync(service: TokenSource.GOOGLE | TokenSource.GITHUB) {
  this.alertService.start('Updating user metadata');
  this.usersService.updateLoggedInUserMetadata(service).subscribe(
    (updatedUser: User) => {
      this.userService.updateUser(updatedUser);
      this.alertService.simpleSuccess();
    },
    () => this.alertService.simpleError()
  );
}
// Subscribe to the user store and derive the Google/GitHub profile views.
// Note: mutates the profile objects in place to backfill missing avatars.
private getUser() {
  this.userQuery.user$.subscribe((user: User) => {
    this.user = user;
    if (user) {
      const userProfiles = user.userProfiles;
      if (userProfiles) {
        this.googleProfile = userProfiles[TokenSource.GOOGLE];
        // Using gravatar for Google also, may result in two identical pictures if both accounts use the same email address
        // NOTE(review): the second argument is always falsy here (guarded by
        // !avatarURL) — presumably gravatarUrl falls back to a default; confirm in UserService.
        if (this.googleProfile && !this.googleProfile.avatarURL) {
          this.googleProfile.avatarURL = this.userService.gravatarUrl(this.googleProfile.email, this.googleProfile.avatarURL);
        }
        this.gitHubProfile = userProfiles[TokenSource.GITHUB];
        if (this.gitHubProfile && !this.gitHubProfile.avatarURL) {
          this.gitHubProfile.avatarURL = this.userService.gravatarUrl(this.gitHubProfile.email, this.gitHubProfile.avatarURL);
        }
      }
    }
  });
}
ngOnInit() {
  // Begin tracking the logged-in user and derived OAuth profiles.
  this.getUser();
}
}
| AccountsInternalComponent |
pausescreen.ts | import { CANVAS_HEIGHT, CANVAS_WIDTH } from "./config";
import { game } from "./game";
import { InputManager, Key } from "./input";
import { OptionsScreen } from "./optionsscreen";
import { IScreen } from "./screen";
import { TitleScreen } from "./titlescreen";
import { AudioManager } from "./audiomanager";
export class PauseScreen implements IScreen {
    // Screen that was active when the game paused; restored on "Resume".
    private prevScreen: IScreen;
    // Index of the currently highlighted menu entry.
    private selected: number;
    // Ordered menu labels rendered by draw().
    private menuItems: string[];
// Build the pause menu over an optional previously-active screen.
constructor(prevScreen?: IScreen) {
    this.prevScreen = prevScreen;
    this.selected = 0;
    this.menuItems = ["Resume", "Options", "Quit"];
}
// Handle one frame of menu input: move the selection (clamped to the list)
// and activate the highlighted entry on Enter/Space.
public update(deltaTime: number): void {
    // Move selection up, clamped at the first item.
    if (InputManager.pressed(Key.UP_ARROW)
        || InputManager.pressed(Key.W)) {
        if (this.selected !== 0) {
            this.selected -= 1;
        }
    }
    // Move selection down, clamped at the last item.
    if (InputManager.pressed(Key.DOWN_ARROW)
        || InputManager.pressed(Key.S)) {
        if (this.selected !== this.menuItems.length - 1) {
            this.selected += 1;
        }
    }
    // Activate the highlighted entry.
    if (InputManager.pressed(Key.ENTER)
        || InputManager.pressed(Key.SPACE)) {
        if (this.selected === 0) {
            // Resume: hand control back to the paused screen.
            game.currentScreen = this.prevScreen;
        } else if (this.selected === 1) {
            game.SetUpScreen(OptionsScreen);
        } else if (this.selected === 2) {
            // Quit: stop music before returning to the title screen.
            AudioManager.stopMusic();
            game.SetUpScreen(TitleScreen);
        }
} | }
// Render the "PAUSED" title and the menu, highlighting the selected entry.
public draw(ctx: CanvasRenderingContext2D, deltaTime: number): void {
    ctx.fillStyle = "red";
    ctx.font = "32pt 'Press Start 2P'";
    const title = "PAUSED";
    ctx.fillText(title, (CANVAS_WIDTH - ctx.measureText(title).width) / 2, CANVAS_HEIGHT / 2);
    ctx.font = "32px 'Press Start 2P'";
    this.menuItems.forEach((item, index) => {
        ctx.fillStyle = index === this.selected ? "yellow" : "white";
        const x = (CANVAS_WIDTH - ctx.measureText(item).width) / 2;
        ctx.fillText(item, x, 400 + index * 50);
    });
}
} | |
5b2f27493d7e_.py | """empty message
Revision ID: 5b2f27493d7e
Revises: 1d17bfa8fe08
Create Date: 2018-06-14 14:54:29.224338
"""
from alembic import op
import sqlalchemy as sa
| depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Create movie_plan: one row per (movie, hall, time) combination,
    # with foreign keys into the movies and hall tables.
    op.create_table('movie_plan',
    sa.Column('id', sa.String(length=128), nullable=False),
    sa.Column('mp_movie', sa.Integer(), nullable=True),
    sa.Column('mp_hall', sa.Integer(), nullable=True),
    sa.Column('mp_time', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['mp_hall'], ['hall.id'], ),
    sa.ForeignKeyConstraint(['mp_movie'], ['movies.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the movie_plan table.
    op.drop_table('movie_plan')
# ### end Alembic commands ### | # revision identifiers, used by Alembic.
revision = '5b2f27493d7e'
down_revision = '1d17bfa8fe08'
branch_labels = None |
delete_image_request_response.go | // Copyright (c) 2016, 2018, 2022, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
package core
import (
"fmt"
"github.com/oracle/oci-go-sdk/v63/common"
"net/http"
"strings"
)
// DeleteImageRequest wrapper for the DeleteImage operation
//
// See also
//
// Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/core/DeleteImage.go.html to see an example of how to use DeleteImageRequest.
//
// Generated type: struct tags drive the HTTP path/header marshaling.
type DeleteImageRequest struct {

	// The OCID (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the image.
	ImageId *string `mandatory:"true" contributesTo:"path" name:"imageId"`

	// For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
	// parameter to the value of the etag from a previous GET or POST response for that resource. The resource
	// will be updated or deleted only if the etag you provide matches the resource's current etag value.
	IfMatch *string `mandatory:"false" contributesTo:"header" name:"if-match"`

	// Unique Oracle-assigned identifier for the request.
	// If you need to contact Oracle about a particular request, please provide the request ID.
	OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`

	// Metadata about the request. This information will not be transmitted to the service, but
	// represents information that the SDK will consume to drive retry behavior.
	RequestMetadata common.RequestMetadata
}
// String implements fmt.Stringer; pointer fields are dereferenced so the
// request logs with values rather than addresses.
func (request DeleteImageRequest) String() string {
	return common.PointerString(request)
}
// HTTPRequest implements the OCIRequest interface
// Enum values are validated first so malformed requests fail fast client-side.
func (request DeleteImageRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) {

	_, err := request.ValidateEnumValue()
	if err != nil {
		// Return a zero-value request on validation failure.
		return http.Request{}, err
	}
	return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders)
}
// BinaryRequestBody implements the OCIRequest interface
// DeleteImage carries no binary payload, so this always reports (nil, false).
func (request DeleteImageRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {
	return nil, false
}
// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
// The policy comes from the per-request metadata (may be nil = no retries).
func (request DeleteImageRequest) RetryPolicy() *common.RetryPolicy {
	return request.RequestMetadata.RetryPolicy
}
// ValidateEnumValue returns an error when providing an unsupported enum value
// This function is being called during constructing API request process
// Not recommended for calling this function directly
func (request DeleteImageRequest) ValidateEnumValue() (bool, error) {
	// This request declares no enum-typed fields, so errMessage stays empty and
	// the function always reports (false, nil); the scaffold is kept for
	// uniformity with other generated request types.
	errMessage := []string{}

	if len(errMessage) > 0 {
		return true, fmt.Errorf(strings.Join(errMessage, "\n"))
	}
	return false, nil
}
// DeleteImageResponse wrapper for the DeleteImage operation
// (DeleteImage returns no body; only the request ID header is captured.)
type DeleteImageResponse struct {

	// The underlying http response
	RawResponse *http.Response

	// Unique Oracle-assigned identifier for the request. If you need to contact
	// Oracle about a particular request, please provide the request ID.
	OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
}
// String implements fmt.Stringer for logging/debugging of the response.
func (response DeleteImageResponse) String() string {
	return common.PointerString(response)
} | return response.RawResponse
} |
// HTTPResponse implements the OCIResponse interface
func (response DeleteImageResponse) HTTPResponse() *http.Response { |
utils.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package syncers
import (
"fmt"
"strconv"
"strings"
"time"
"google.golang.org/api/compute/v1"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
negtypes "k8s.io/ingress-gce/pkg/neg/types"
"k8s.io/ingress-gce/pkg/utils"
"k8s.io/klog"
)
const (
	// Upper bound on endpoints attached/detached in a single NEG API call.
	MAX_NETWORK_ENDPOINTS_PER_BATCH = 500
	// For each NEG, only retries 15 times to process it.
	// This is a convention in kube-controller-manager.
	maxRetries    = 15
	minRetryDelay = 5 * time.Second
	maxRetryDelay = 600 * time.Second
	// separator joins ip/instance/port in encoded endpoint keys.
	separator = "||"
	// negIPPortNetworkEndpointType is the GCE NEG endpoint type in use.
	negIPPortNetworkEndpointType = "GCE_VM_IP_PORT"
)
// encodeEndpoint encodes ip and instance into a single string
func encodeEndpoint(ip, instance, port string) string {
return strings.Join([]string{ip, instance, port}, separator)
}
// decodeEndpoint splits an encoded key produced by encodeEndpoint back into
// its ip, instance and port components (assumes a well-formed input).
func decodeEndpoint(str string) (string, string, string) {
	fields := strings.Split(str, separator)
	return fields[0], fields[1], fields[2]
}
// calculateDifference determines what endpoints needs to be added and removed in order to move current state to target state.
func calculateDifference(targetMap, currentMap map[string]sets.String) (map[string]sets.String, map[string]sets.String) {
	toAdd := map[string]sets.String{}
	toRemove := map[string]sets.String{}
	// Anything wanted but not present must be added.
	for zone, wanted := range targetMap {
		if missing := wanted.Difference(currentMap[zone]); len(missing) > 0 {
			toAdd[zone] = missing
		}
	}
	// Anything present but no longer wanted must be removed.
	for zone, existing := range currentMap {
		if stale := existing.Difference(targetMap[zone]); len(stale) > 0 {
			toRemove[zone] = stale
		}
	}
	return toAdd, toRemove
}
// calculateNetworkEndpointDifference determines what endpoints needs to be added and removed in order to move current state to target state.
func calculateNetworkEndpointDifference(targetMap, currentMap map[string]negtypes.NetworkEndpointSet) (map[string]negtypes.NetworkEndpointSet, map[string]negtypes.NetworkEndpointSet) {
	toAttach := map[string]negtypes.NetworkEndpointSet{}
	toDetach := map[string]negtypes.NetworkEndpointSet{}
	// Endpoints in target but not current need to be attached.
	for zone, wanted := range targetMap {
		if missing := wanted.Difference(currentMap[zone]); len(missing) > 0 {
			toAttach[zone] = missing
		}
	}
	// Endpoints in current but not target need to be detached.
	for zone, existing := range currentMap {
		if stale := existing.Difference(targetMap[zone]); len(stale) > 0 {
			toDetach[zone] = stale
		}
	}
	return toAttach, toDetach
}
// getService retrieves service object from serviceLister based on the input Namespace and Name
func getService(serviceLister cache.Indexer, namespace, name string) *apiv1.Service {
	if serviceLister == nil {
		return nil
	}
	obj, exists, err := serviceLister.GetByKey(utils.ServiceKeyFunc(namespace, name))
	if err != nil {
		klog.Errorf("Failed to retrieve service %s/%s from store: %v", namespace, name, err)
		return nil
	}
	if !exists {
		return nil
	}
	return obj.(*apiv1.Service)
}
// ensureNetworkEndpointGroup ensures corresponding NEG is configured correctly in the specified zone.
// If an existing NEG no longer matches the cluster's network/subnetwork it is
// deleted and recreated; Kubernetes events are emitted against the owning
// service when a recorder and lister are available.
func ensureNetworkEndpointGroup(svcNamespace, svcName, negName, zone, negServicePortName string, cloud negtypes.NetworkEndpointGroupCloud, serviceLister cache.Indexer, recorder record.EventRecorder) error {
	neg, err := cloud.GetNetworkEndpointGroup(negName, zone)
	if err != nil {
		// Most likely to be caused by non-existed NEG
		klog.V(4).Infof("Error while retrieving %q in zone %q: %v", negName, zone, err)
	}

	needToCreate := false
	if neg == nil {
		needToCreate = true
	} else if !utils.EqualResourceIDs(neg.Network, cloud.NetworkURL()) ||
		!utils.EqualResourceIDs(neg.Subnetwork, cloud.SubnetworkURL()) {
		// The NEG points at a different network/subnetwork; recreate it.
		needToCreate = true
		klog.V(2).Infof("NEG %q in %q does not match network and subnetwork of the cluster. Deleting NEG.", negName, zone)
		if err := cloud.DeleteNetworkEndpointGroup(negName, zone); err != nil {
			return err
		}
		if recorder != nil && serviceLister != nil {
			if svc := getService(serviceLister, svcNamespace, svcName); svc != nil {
				recorder.Eventf(svc, apiv1.EventTypeNormal, "Delete", "Deleted NEG %q for %s in %q.", negName, negServicePortName, zone)
			}
		}
	}

	if needToCreate {
		klog.V(2).Infof("Creating NEG %q for %s in %q.", negName, negServicePortName, zone)
		err = cloud.CreateNetworkEndpointGroup(&compute.NetworkEndpointGroup{
			Name:                negName,
			NetworkEndpointType: negIPPortNetworkEndpointType,
			Network:             cloud.NetworkURL(),
			Subnetwork:          cloud.SubnetworkURL(),
		}, zone)
		if err != nil {
			return err
		}
		if recorder != nil && serviceLister != nil {
			if svc := getService(serviceLister, svcNamespace, svcName); svc != nil {
				recorder.Eventf(svc, apiv1.EventTypeNormal, "Create", "Created NEG %q for %s in %q.", negName, negServicePortName, zone)
			}
		}
	}
	return nil
}
// toZoneNetworkEndpointMap translates addresses in endpoints object and Istio:DestinationRule subset into zone and endpoints map
// Returns (zone -> endpoint set, endpoint -> pod name map, error). NotReady
// addresses are included only when their pod passes shouldPodBeInNeg.
func toZoneNetworkEndpointMap(endpoints *apiv1.Endpoints, zoneGetter negtypes.ZoneGetter, targetPort string, podLister cache.Indexer, subsetLables string) (map[string]negtypes.NetworkEndpointSet, negtypes.EndpointPodMap, error) {
	zoneNetworkEndpointMap := map[string]negtypes.NetworkEndpointSet{}
	networkEndpointPodMap := negtypes.EndpointPodMap{}
	if endpoints == nil {
		klog.Errorf("Endpoint object is nil")
		return zoneNetworkEndpointMap, networkEndpointPodMap, nil
	}
	// Atoi error ignored deliberately: 0 signals "targetPort is a named port".
	targetPortNum, _ := strconv.Atoi(targetPort)
	for _, subset := range endpoints.Subsets {
		matchPort := ""
		// service spec allows target Port to be a named Port.
		// support both explicit Port and named Port.
		for _, port := range subset.Ports {
			if targetPortNum != 0 {
				// TargetPort is int
				if int(port.Port) == targetPortNum {
					matchPort = targetPort
				}
			} else {
				// TargetPort is string
				if port.Name == targetPort {
					matchPort = strconv.Itoa(int(port.Port))
				}
			}
			if len(matchPort) > 0 {
				break
			}
		}
		// subset does not contain target Port
		if len(matchPort) == 0 {
			continue
		}
		// processAddressFunc adds the qualified endpoints from the input list into the endpointSet group by zone
		// It closes over matchPort and the result maps; endpoints failing the
		// subset-label, node or pod checks are skipped with a V(2) log.
		processAddressFunc := func(addresses []v1.EndpointAddress, includeAllEndpoints bool) error {
			for _, address := range addresses {
				// Apply the selector if Istio:DestinationRule subset labels provided.
				if subsetLables != "" {
					if address.TargetRef == nil || address.TargetRef.Kind != "Pod" {
						klog.V(2).Infof("Endpoint %q in Endpoints %s/%s does not have a Pod as the TargetRef object. Skipping", address.IP, endpoints.Namespace, endpoints.Name)
						continue
					}
					// Skip if the endpoint's pod not matching the subset lables.
					if !shouldPodBeInDestinationRuleSubset(podLister, address.TargetRef.Namespace, address.TargetRef.Name, subsetLables) {
						continue
					}
				}
				if address.NodeName == nil {
					klog.V(2).Infof("Endpoint %q in Endpoints %s/%s does not have an associated node. Skipping", address.IP, endpoints.Namespace, endpoints.Name)
					continue
				}
				if address.TargetRef == nil {
					klog.V(2).Infof("Endpoint %q in Endpoints %s/%s does not have an associated pod. Skipping", address.IP, endpoints.Namespace, endpoints.Name)
					continue
				}
				zone, err := zoneGetter.GetZoneForNode(*address.NodeName)
				if err != nil {
					return fmt.Errorf("failed to retrieve associated zone of node %q: %v", *address.NodeName, err)
				}
				if zoneNetworkEndpointMap[zone] == nil {
					zoneNetworkEndpointMap[zone] = negtypes.NewNetworkEndpointSet()
				}
				// Ready addresses are always included; not-ready ones only if
				// the pod is not gracefully terminating.
				if includeAllEndpoints || shouldPodBeInNeg(podLister, address.TargetRef.Namespace, address.TargetRef.Name) {
					networkEndpoint := negtypes.NetworkEndpoint{IP: address.IP, Port: matchPort, Node: *address.NodeName}
					zoneNetworkEndpointMap[zone].Insert(networkEndpoint)
					networkEndpointPodMap[networkEndpoint] = types.NamespacedName{Namespace: address.TargetRef.Namespace, Name: address.TargetRef.Name}
				}
			}
			return nil
		}
		if err := processAddressFunc(subset.Addresses, true); err != nil {
			return nil, nil, err
		}
		if err := processAddressFunc(subset.NotReadyAddresses, false); err != nil {
			return nil, nil, err
		}
	}
	return zoneNetworkEndpointMap, networkEndpointPodMap, nil
}
// retrieveExistingZoneNetworkEndpointMap lists existing network endpoints in the neg and return the zone and endpoints map
func retrieveExistingZoneNetworkEndpointMap(negName string, zoneGetter negtypes.ZoneGetter, cloud negtypes.NetworkEndpointGroupCloud) (map[string]negtypes.NetworkEndpointSet, error) {
	zones, err := zoneGetter.ListZones()
	if err != nil {
		return nil, err
	}
	result := map[string]negtypes.NetworkEndpointSet{}
	for _, zone := range zones {
		// Every listed zone gets an entry, even when the NEG has no endpoints there.
		result[zone] = negtypes.NewNetworkEndpointSet()
		listed, err := cloud.ListNetworkEndpoints(negName, zone, false)
		if err != nil {
			return nil, err
		}
		for _, ne := range listed {
			result[zone].Insert(negtypes.NetworkEndpoint{
				IP:   ne.NetworkEndpoint.IpAddress,
				Node: ne.NetworkEndpoint.Instance,
				Port: strconv.FormatInt(ne.NetworkEndpoint.Port, 10),
			})
		}
	}
	return result, nil
}
// makeEndpointBatch return a batch of endpoint from the input and remove the endpoints from input set
// The return map has the encoded endpoint as key and GCE network endpoint object as value
func makeEndpointBatch(endpoints negtypes.NetworkEndpointSet) (map[negtypes.NetworkEndpoint]*compute.NetworkEndpoint, error) {
	batch := map[negtypes.NetworkEndpoint]*compute.NetworkEndpoint{}
	// Drain up to the per-call limit; PopAny removes each endpoint from the set.
	for len(batch) < MAX_NETWORK_ENDPOINTS_PER_BATCH {
		ne, ok := endpoints.PopAny()
		if !ok {
			break
		}
		portNum, err := strconv.Atoi(ne.Port)
		if err != nil {
			return nil, fmt.Errorf("failed to decode endpoint port %v: %v", ne, err)
		}
		batch[ne] = &compute.NetworkEndpoint{
			Instance:  ne.Node,
			IpAddress: ne.IP,
			Port:      int64(portNum),
		}
	}
	return batch, nil
}
// keyFunc builds the "namespace/name" cache key used by the listers.
// Plain concatenation avoids the fmt.Sprintf boxing/reflection cost on this
// frequently-called key-building path.
func keyFunc(namespace, name string) string {
	return namespace + "/" + name
}
// shouldPodBeInNeg returns true if pod is not in graceful termination state
// (i.e. the pod exists in the lister and has no DeletionTimestamp).
func shouldPodBeInNeg(podLister cache.Indexer, namespace, name string) bool {
	// Without a lister we cannot verify the pod; err on the side of exclusion.
	if podLister == nil {
		return false
	}
	key := keyFunc(namespace, name)
	obj, exists, err := podLister.GetByKey(key)
if err != nil |
if !exists {
return false
}
pod, ok := obj.(*v1.Pod)
if !ok {
klog.Errorf("Failed to convert obj %s to v1.Pod. The object type is %T", key, obj)
return false
}
// if pod has DeletionTimestamp, that means pod is in graceful termination state.
if pod.DeletionTimestamp != nil {
return false
}
return true
}
// shouldPodBeInDestinationRuleSubset return ture if pod match the DestinationRule subset lables.
func shouldPodBeInDestinationRuleSubset(podLister cache.Indexer, namespace, name string, subsetLables string) bool {
if podLister == nil {
return false
}
key := keyFunc(namespace, name)
obj, exists, err := podLister.GetByKey(key)
if err != nil {
klog.Errorf("Failed to retrieve pod %s from pod lister: %v", key, err)
return false
}
if !exists {
return false
}
pod, ok := obj.(*v1.Pod)
if !ok {
klog.Errorf("Failed to convert obj %s to v1.Pod. The object type is %T", key, obj)
return false
}
selector, err := labels.Parse(subsetLables)
if err != nil {
klog.Errorf("Failed to parse the subset selectors.")
return false
}
return selector.Matches(labels.Set(pod.Labels))
}
| {
klog.Errorf("Failed to retrieve pod %s from pod lister: %v", key, err)
return false
} |
spid.rs | #[doc = "Reader of register SPID"]
pub type R = crate::R<u32, super::SPID>;
#[doc = "Writer for register SPID"]
pub type W = crate::W<u32, super::SPID>;
#[doc = "Register SPID `reset()`'s with value 0x1700"]
impl crate::ResetValue for super::SPID {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x1700
}
}
#[doc = "Reader of field `MCU_SEL`"]
pub type MCU_SEL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MCU_SEL`"]
pub struct MCU_SEL_W<'a> {
w: &'a mut W,
}
impl<'a> MCU_SEL_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 12)) | (((value as u32) & 0x03) << 12);
self.w
}
}
#[doc = "Reader of field `FUN_DRV`"]
pub type FUN_DRV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `FUN_DRV`"]
pub struct FUN_DRV_W<'a> {
w: &'a mut W,
}
impl<'a> FUN_DRV_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 10)) | (((value as u32) & 0x03) << 10);
self.w
}
}
#[doc = "Reader of field `FUN_IE`"]
pub type FUN_IE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FUN_IE`"]
pub struct FUN_IE_W<'a> {
w: &'a mut W,
}
impl<'a> FUN_IE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Reader of field `FUN_PU`"]
pub type FUN_PU_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FUN_PU`"]
pub struct FUN_PU_W<'a> {
w: &'a mut W,
}
impl<'a> FUN_PU_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `FUN_PD`"]
pub type FUN_PD_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FUN_PD`"]
pub struct | <'a> {
w: &'a mut W,
}
impl<'a> FUN_PD_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Reader of field `SLP_DRV`"]
pub type SLP_DRV_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `SLP_DRV`"]
pub struct SLP_DRV_W<'a> {
w: &'a mut W,
}
impl<'a> SLP_DRV_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x03 << 5)) | (((value as u32) & 0x03) << 5);
self.w
}
}
#[doc = "Reader of field `SLP_IE`"]
pub type SLP_IE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SLP_IE`"]
pub struct SLP_IE_W<'a> {
w: &'a mut W,
}
impl<'a> SLP_IE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `SLP_PU`"]
pub type SLP_PU_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SLP_PU`"]
pub struct SLP_PU_W<'a> {
w: &'a mut W,
}
impl<'a> SLP_PU_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `SLP_PD`"]
pub type SLP_PD_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SLP_PD`"]
pub struct SLP_PD_W<'a> {
w: &'a mut W,
}
impl<'a> SLP_PD_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `SLP_SEL`"]
pub type SLP_SEL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SLP_SEL`"]
pub struct SLP_SEL_W<'a> {
w: &'a mut W,
}
impl<'a> SLP_SEL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `SLP_OE`"]
pub type SLP_OE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SLP_OE`"]
pub struct SLP_OE_W<'a> {
w: &'a mut W,
}
impl<'a> SLP_OE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bits 12:13 - configures IO_MUX function"]
#[inline(always)]
pub fn mcu_sel(&self) -> MCU_SEL_R {
MCU_SEL_R::new(((self.bits >> 12) & 0x03) as u8)
}
#[doc = "Bits 10:11 - configures drive strength"]
#[inline(always)]
pub fn fun_drv(&self) -> FUN_DRV_R {
FUN_DRV_R::new(((self.bits >> 10) & 0x03) as u8)
}
#[doc = "Bit 9 - configures input enable"]
#[inline(always)]
pub fn fun_ie(&self) -> FUN_IE_R {
FUN_IE_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 8 - configures pull up"]
#[inline(always)]
pub fn fun_pu(&self) -> FUN_PU_R {
FUN_PU_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 7 - configures pull down"]
#[inline(always)]
pub fn fun_pd(&self) -> FUN_PD_R {
FUN_PD_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bits 5:6 - configures drive strength during sleep mode"]
#[inline(always)]
pub fn slp_drv(&self) -> SLP_DRV_R {
SLP_DRV_R::new(((self.bits >> 5) & 0x03) as u8)
}
#[doc = "Bit 4 - configures input enable during sleep mode"]
#[inline(always)]
pub fn slp_ie(&self) -> SLP_IE_R {
SLP_IE_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 3 - configures pull up during sleep mode"]
#[inline(always)]
pub fn slp_pu(&self) -> SLP_PU_R {
SLP_PU_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 2 - configures pull down during sleep mode"]
#[inline(always)]
pub fn slp_pd(&self) -> SLP_PD_R {
SLP_PD_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 1 - configures sleep mode selection"]
#[inline(always)]
pub fn slp_sel(&self) -> SLP_SEL_R {
SLP_SEL_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 0 - configures output enable during sleep mode"]
#[inline(always)]
pub fn slp_oe(&self) -> SLP_OE_R {
SLP_OE_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 12:13 - configures IO_MUX function"]
#[inline(always)]
pub fn mcu_sel(&mut self) -> MCU_SEL_W {
MCU_SEL_W { w: self }
}
#[doc = "Bits 10:11 - configures drive strength"]
#[inline(always)]
pub fn fun_drv(&mut self) -> FUN_DRV_W {
FUN_DRV_W { w: self }
}
#[doc = "Bit 9 - configures input enable"]
#[inline(always)]
pub fn fun_ie(&mut self) -> FUN_IE_W {
FUN_IE_W { w: self }
}
#[doc = "Bit 8 - configures pull up"]
#[inline(always)]
pub fn fun_pu(&mut self) -> FUN_PU_W {
FUN_PU_W { w: self }
}
#[doc = "Bit 7 - configures pull down"]
#[inline(always)]
pub fn fun_pd(&mut self) -> FUN_PD_W {
FUN_PD_W { w: self }
}
#[doc = "Bits 5:6 - configures drive strength during sleep mode"]
#[inline(always)]
pub fn slp_drv(&mut self) -> SLP_DRV_W {
SLP_DRV_W { w: self }
}
#[doc = "Bit 4 - configures input enable during sleep mode"]
#[inline(always)]
pub fn slp_ie(&mut self) -> SLP_IE_W {
SLP_IE_W { w: self }
}
#[doc = "Bit 3 - configures pull up during sleep mode"]
#[inline(always)]
pub fn slp_pu(&mut self) -> SLP_PU_W {
SLP_PU_W { w: self }
}
#[doc = "Bit 2 - configures pull down during sleep mode"]
#[inline(always)]
pub fn slp_pd(&mut self) -> SLP_PD_W {
SLP_PD_W { w: self }
}
#[doc = "Bit 1 - configures sleep mode selection"]
#[inline(always)]
pub fn slp_sel(&mut self) -> SLP_SEL_W {
SLP_SEL_W { w: self }
}
#[doc = "Bit 0 - configures output enable during sleep mode"]
#[inline(always)]
pub fn slp_oe(&mut self) -> SLP_OE_W {
SLP_OE_W { w: self }
}
}
// stray "FUN_PD_W" token left by file corruption; commented out so the module stays well-formed
ws2812_spi.rs | #![deny(warnings)]
#![deny(unsafe_code)]
#![no_main]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_rt as rt;
extern crate cortex_m_semihosting as sh;
extern crate nb;
extern crate panic_halt;
extern crate stm32g0xx_hal as hal;
use hal::prelude::*;
use hal::rcc::{self, PllConfig};
use hal::spi;
use hal::stm32;
use rt::entry;
use smart_leds::{SmartLedsWrite, RGB};
use ws2812_spi as ws2812;
#[entry]
fn | () -> ! {
let dp = stm32::Peripherals::take().expect("cannot take peripherals");
let cp = cortex_m::Peripherals::take().expect("cannot take core peripherals");
// Configure APB bus clock to 48MHz, cause ws2812 requires 3Mbps SPI
let pll_cfg = PllConfig::with_hsi(4, 24, 2);
let rcc_cfg = rcc::Config::pll().pll_cfg(pll_cfg);
let mut rcc = dp.RCC.freeze(rcc_cfg);
let mut delay = cp.SYST.delay(&mut rcc);
let gpioa = dp.GPIOA.split(&mut rcc);
let spi = dp.SPI2.spi(
(spi::NoSck, spi::NoMiso, gpioa.pa10),
ws2812::MODE,
3.mhz(),
&mut rcc,
);
let mut ws = ws2812::Ws2812::new(spi);
let mut cnt: usize = 0;
let mut data: [RGB<u8>; 8] = [RGB::default(); 8];
loop {
for (idx, color) in data.iter_mut().enumerate() {
*color = match (cnt + idx) % 3 {
0 => RGB { r: 255, g: 0, b: 0 },
1 => RGB { r: 0, g: 255, b: 0 },
_ => RGB { r: 0, g: 0, b: 255 },
};
}
ws.write(data.iter().cloned()).unwrap();
cnt += 1;
delay.delay(200.ms());
}
}
| main |
parser.rs | use super::lexitem::*;
use crate::error::*;
use crate::structs::*;
use crate::validate::*;
use crate::StrictnessLevel;
use std::fs::File;
use std::io::prelude::*;
/// Parse the given mmCIF file into a PDB struct.
/// Returns an PDBError when it found a BreakingError. Otherwise it returns the PDB with all errors/warnings found while parsing it.
pub fn open_mmcif(
filename: &str,
level: StrictnessLevel,
) -> Result<(PDB, Vec<PDBError>), Vec<PDBError>> {
let mut file = if let Ok(f) = File::open(filename) | else {
return Err(vec![PDBError::new(ErrorLevel::BreakingError, "Could not open file", "Could not open the specified file, make sure the path is correct, you have permission, and that it is not open in another program.", Context::show(filename))]);
};
let mut contents = String::new();
if let Err(e) = file.read_to_string(&mut contents) {
return Err(vec![PDBError::new(
ErrorLevel::BreakingError,
"Error while reading file",
&format!("Error: {}", e),
Context::show(filename),
)]);
}
match super::lexer::lex_cif(contents) {
Ok(data_block) => parse_mmcif(&data_block, level),
Err(e) => Err(vec![e]),
}
}
/// Parse a CIF intermediate structure into a PDB
fn parse_mmcif(
input: &DataBlock,
level: StrictnessLevel,
) -> Result<(PDB, Vec<PDBError>), Vec<PDBError>> {
let mut pdb = PDB::default();
let mut errors: Vec<PDBError> = Vec::new();
let mut unit_cell = UnitCell::default();
pdb.identifier = Some(input.name.clone());
for item in &input.items {
let result = match item {
Item::DataItem(di) => match di {
DataItem::Loop(multiple) => {
if multiple.header.contains(&"atom_site.group_PDB".to_string()) {
parse_atoms(multiple, &mut pdb)
} else {
None
}
}
DataItem::Single(single) => {
let context = Context::show(&single.name);
match &single.name[..] {
"cell.length_a" => get_f64(&single.content, &context)
.map(|n| unit_cell.set_a(n.expect("UnitCell length a should be provided")))
.err(),
"cell.length_b" => get_f64(&single.content, &context)
.map(|n| unit_cell.set_b(n.expect("UnitCell length b should be provided")))
.err(),
"cell.length_c" => get_f64(&single.content, &context)
.map(|n| unit_cell.set_c(n.expect("UnitCell length c should be provided")))
.err(),
"cell.angle_alpha" => get_f64(&single.content, &context)
.map(|n| unit_cell.set_alpha(n.expect("UnitCell angle alpha should be provided")))
.err(),
"cell.angle_beta" => get_f64(&single.content, &context)
.map(|n| unit_cell.set_beta(n.expect("UnitCell angle beta should be provided")))
.err(),
"cell.angle_gamma" => get_f64(&single.content, &context)
.map(|n| unit_cell.set_gamma(n.expect("UnitCell angle gamma should be provided")))
.err(),
"symmetry.Int_Tables_number" | "space_group.IT_number" => {
if pdb.symmetry.is_none() {
get_usize(&single.content, &context)
.map(|n| pdb.symmetry = Symmetry::from_index(n.expect("Symmetry international tables number should be provided")))
.err()
} else if let Ok(Some(value)) = get_usize(&single.content, &context) {
if pdb.symmetry != Symmetry::from_index(value) {
Some(PDBError::new(ErrorLevel::InvalidatingError, "Space group does not match", "The given space group does not match the space group earlier defined in this file.", context.clone()))
}
else {
None
}
} else {
None
}
}
"symmetry.space_group_name_H-M" | "symmetry.space_group_name_Hall" | "space_group.name_H-M_alt" | "space_group.name_Hall" => {
if pdb.symmetry.is_none() {
get_text(&single.content, &context)
.map(|t| pdb.symmetry = Symmetry::new(t.expect("Symmetry space group name should be provided")))
.err()
} else if let Ok(Some(value)) = get_text(&single.content, &context) {
if pdb.symmetry != Symmetry::new(value) {
Some(PDBError::new(ErrorLevel::InvalidatingError, "Space group does not match", "The given space group does not match the space group earlier defined in this file.", context.clone()))
}
else {
None
}
} else {
None
}
}
_ => None,
}
.map(|e| vec![e])
}
},
_ => None,
};
if let Some(e) = result {
errors.extend(e);
}
}
if unit_cell != UnitCell::default() {
pdb.unit_cell = Some(unit_cell);
}
reshuffle_conformers(&mut pdb);
errors.extend(validate(&pdb));
if errors.iter().any(|e| e.fails(level)) {
Err(errors)
} else {
Ok((pdb, errors))
}
}
/// Flatten a Result of a Result with the same error type (#70142 is still unstable)
fn flatten_result<T, E>(value: Result<Result<T, E>, E>) -> Result<T, E> {
    // Outer Err passes through; otherwise the inner Result is the answer.
    value.and_then(|inner| inner)
}
/// Parse a loop containing atomic data
fn parse_atoms(input: &Loop, pdb: &mut PDB) -> Option<Vec<PDBError>> {
/// These are the columns needed to fill out the PDB correctly
const COLUMNS: &[&str] = &[
"atom_site.group_PDB",
"atom_site.label_atom_id",
"atom_site.id",
"atom_site.type_symbol",
"atom_site.label_comp_id",
"atom_site.label_seq_id",
"atom_site.label_asym_id",
"atom_site.Cartn_x",
"atom_site.Cartn_y",
"atom_site.Cartn_z",
"atom_site.occupancy",
"atom_site.B_iso_or_equiv",
"atom_site.pdbx_formal_charge",
];
/// These are some optional columns with data that will be used but is not required to be present
const OPTIONAL_COLUMNS: &[&str] = &[
"atom_site.pdbx_PDB_model_num",
"atom_site.label_alt_id",
"atom_site.pdbx_PDB_ins_code",
"_atom_site.aniso_U[1][1]",
"_atom_site.aniso_U[1][2]",
"_atom_site.aniso_U[1][3]",
"_atom_site.aniso_U[2][1]",
"_atom_site.aniso_U[2][2]",
"_atom_site.aniso_U[2][3]",
"_atom_site.aniso_U[3][1]",
"_atom_site.aniso_U[3][2]",
"_atom_site.aniso_U[3][3]",
];
let positions_: Vec<Result<usize, PDBError>> = COLUMNS
.iter()
.map(|tag| (input.header.iter().position(|t| t == tag), tag))
.map(|(pos, tag)| match pos {
Some(p) => Ok(p),
None => Err(PDBError::new(
ErrorLevel::InvalidatingError,
"Missing column in coordinate atoms data loop",
"The above column is missing",
Context::show(tag),
)),
})
.collect();
let mut errors = positions_
.iter()
.filter_map(|i| i.clone().err())
.collect::<Vec<_>>();
if !errors.is_empty() {
return Some(errors);
}
#[allow(clippy::unwrap_used)]
let positions: Vec<usize> = positions_.iter().map(|i| *i.as_ref().unwrap()).collect();
let optional_positions: Vec<Option<usize>> = OPTIONAL_COLUMNS
.iter()
.map(|tag| input.header.iter().position(|t| t == tag))
.collect();
for (index, row) in input.data.iter().enumerate() {
let values: Vec<&Value> = positions.iter().map(|i| &row[*i]).collect();
let optional_values: Vec<Option<&Value>> = optional_positions
.iter()
.map(|i| i.map(|x| &row[x]))
.collect();
let context = Context::show(&format!("Main atomic data loop row: {}", index));
/// Parse a column given the function to use and the column index
macro_rules! parse_column {
($type:tt, $index:tt) => {
match $type(values[$index], &context) {
Ok(t) => t,
Err(e) => {
errors.push(e);
continue;
}
}
};
}
/// Parse a value from an optional column, if in place, with the same format as parse_column!
macro_rules! parse_optional {
($type:tt, $index:tt) => {
if let Some(value) = optional_values[$index] {
match $type(value, &context) {
Ok(t) => t,
Err(e) => {
errors.push(e);
None
}
}
} else {
None
}
};
}
let atom_type = parse_column!(get_text, 0).expect("Atom type should be defined");
let name = parse_column!(get_text, 1).expect("Atom name should be provided");
let serial_number =
parse_column!(get_usize, 2).expect("Atom serial number should be provided");
let element = parse_column!(get_text, 3).expect("Atom element should be provided");
let residue_name = parse_column!(get_text, 4).expect("Residue name should be provided");
#[allow(clippy::cast_possible_wrap)]
let residue_number =
parse_column!(get_isize, 5).unwrap_or_else(|| pdb.total_residue_count() as isize);
let chain_name = parse_column!(get_text, 6).expect("Chain name should be provided");
let pos_x = parse_column!(get_f64, 7).expect("Atom X position should be provided");
let pos_y = parse_column!(get_f64, 8).expect("Atom Y position should be provided");
let pos_z = parse_column!(get_f64, 9).expect("Atom Z position should be provided");
let occupancy = parse_column!(get_f64, 10).unwrap_or(1.0);
let b_factor = parse_column!(get_f64, 11).unwrap_or(1.0);
let charge = parse_column!(get_isize, 12).unwrap_or(0);
let model_number = parse_optional!(get_usize, 0).unwrap_or(1);
let alt_loc = parse_optional!(get_text, 1);
let insertion_code = parse_optional!(get_text, 2);
let aniso_temp = [
[
parse_optional!(get_f64, 3),
parse_optional!(get_f64, 4),
parse_optional!(get_f64, 5),
],
[
parse_optional!(get_f64, 6),
parse_optional!(get_f64, 7),
parse_optional!(get_f64, 8),
],
[
parse_optional!(get_f64, 9),
parse_optional!(get_f64, 10),
parse_optional!(get_f64, 11),
],
];
let aniso = if aniso_temp
.iter()
.flat_map(|l| l.iter())
.all(|v| v.is_some())
{
#[allow(clippy::unwrap_used)]
Some([
[
aniso_temp[0][0].unwrap(),
aniso_temp[0][1].unwrap(),
aniso_temp[0][2].unwrap(),
],
[
aniso_temp[1][0].unwrap(),
aniso_temp[1][1].unwrap(),
aniso_temp[1][2].unwrap(),
],
[
aniso_temp[2][0].unwrap(),
aniso_temp[2][1].unwrap(),
aniso_temp[2][2].unwrap(),
],
])
} else if aniso_temp
.iter()
.flat_map(|l| l.iter())
.any(|v| v.is_some())
{
errors.push(PDBError::new(
ErrorLevel::StrictWarning,
"Atom aniso U definition incomplete",
"For a valid anisotropic temperature factor definition all columns (1,1 up to and including 3,3) have to be defined.",
context.clone(),
));
None
} else {
None
};
let model = unsafe {
// I could not find a way to make the borrow checker happy, but if no item
// could be find the borrow should be ended and as such safe for mutating
// in the second branch.
let pdb_pointer: *mut PDB = pdb;
if let Some(m) = (*pdb_pointer)
.models_mut()
.find(|m| m.serial_number() == model_number)
{
m
} else {
(*pdb_pointer).add_model(Model::new(model_number));
#[allow(clippy::unwrap_used)]
(*pdb_pointer).models_mut().rev().next().unwrap()
}
};
let mut hetero = false;
if atom_type == "ATOM" {
hetero = false;
} else if atom_type == "HETATM" {
hetero = true;
} else {
errors.push(PDBError::new(
ErrorLevel::InvalidatingError,
"Atom type not correct",
"The atom type should be ATOM or HETATM",
context.clone(),
))
}
if let Some(mut atom) = Atom::new(
hetero,
serial_number,
name,
pos_x,
pos_y,
pos_z,
occupancy,
b_factor,
element,
charge,
) {
if let Some(matrix) = aniso {
atom.set_anisotropic_temperature_factors(matrix);
}
model.add_atom(
atom,
chain_name,
(residue_number, insertion_code),
(residue_name, alt_loc),
);
} else {
errors.push(PDBError::new(
ErrorLevel::InvalidatingError,
"Atom definition incorrect",
"The atom name and element should only contain valid characters.",
context.clone(),
))
}
}
if !errors.is_empty() {
Some(errors)
} else {
None
}
}
/// Get the Textual content of the value, if available
fn get_text<'a, 'b>(value: &'a Value, context: &'b Context) -> Result<Option<&'a str>, PDBError> {
    match value {
        Value::Text(t) => Ok(Some(t)),
        // Items that are present but carry no text map to None.
        Value::Inapplicable | Value::Unknown => Ok(None),
        _ => Err(PDBError::new(
            ErrorLevel::InvalidatingError,
            "Not text",
            "",
            context.clone(),
        )),
    }
}
/// Get the Numeric content of the value, if available, it also fails on NumericWithUncertainty
fn get_f64(value: &Value, context: &Context) -> Result<Option<f64>, PDBError> {
    match value {
        Value::Numeric(num) => Ok(Some(*num)),
        // Items that are present but carry no number map to None.
        Value::Inapplicable | Value::Unknown => Ok(None),
        _ => Err(PDBError::new(
            ErrorLevel::InvalidatingError,
            "Not a number",
            "",
            context.clone(),
        )),
    }
}
/// Get the Numeric content of the value, if available, as a usize.
/// Fails when the number is negative, fractional, or out of usize range.
fn get_usize(value: &Value, context: &Context) -> Result<Option<usize>, PDBError> {
    // `?` propagates the "not a number" error directly, removing the
    // flatten_result indirection of the original.
    let num = match get_f64(value, context)? {
        Some(num) => num,
        None => return Ok(None),
    };
    #[allow(
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss,
        clippy::float_cmp
    )]
    let in_range = (0.0..std::usize::MAX as f64).contains(&num) && num.trunc() == num;
    if in_range {
        Ok(Some(num as usize))
    } else {
        Err(PDBError::new(
            ErrorLevel::InvalidatingError,
            "Not an unsigned integer",
            "",
            context.clone(),
        ))
    }
}
/// Get the Numeric content of the value, if available, as an isize
fn get_isize(value: &Value, context: &Context) -> Result<Option<isize>, PDBError> {
flatten_result(get_f64(value, context).map(|result| {
if let Some(num) = result {
#[allow(
clippy::cast_precision_loss,
clippy::cast_possible_truncation,
clippy::float_cmp
)]
if (std::isize::MIN as f64..std::isize::MAX as f64).contains(&num) && num.trunc() == num
{
Ok(Some(num as isize))
} else {
Err(PDBError::new(
ErrorLevel::InvalidatingError,
"Not an integer",
"",
context.clone(),
))
}
} else {
Ok(None)
}
}))
}
| {
f
} |
afk.py |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Ported by @azrim
""" Userbot module which contains afk-related commands """
from datetime import datetime, timedelta
import time
from random import choice, randint
from asyncio import sleep

from telethon.events import StopPropagation

from userbot import (AFKREASON, COUNT_MSG, CMD_HELP, ISAFK, BOTLOG,
                     BOTLOG_CHATID, USERS, PM_AUTO_BAN)
from userbot.events import register
# ========================= CONSTANTS ============================
AFKSTR = [
"I'm busy right now. Please talk in a bag and when I come back you can just give me the bag!",
"I'm away right now. If you need anything, leave a message after the beep:\n`beeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeep`!",
"You missed me, next time aim better.",
"I'll be back in a few minutes and if I'm not...,\nwait longer.",
"I'm not here right now, so I'm probably somewhere else.",
"Roses are red,\nViolets are blue,\nLeave me a message,\nAnd I'll get back to you.",
"Sometimes the best things in life are worth waiting for…\nI'll be right back.",
"I'll be right back,\nbut if I'm not right back,\nI'll be back later.",
"If you haven't figured it out already,\nI'm not here.",
"Hello, welcome to my away message, how may I ignore you today?",
"I'm away over 7 seas and 7 countries,\n7 waters and 7 continents,\n7 mountains and 7 hills,\n7 plains and 7 mounds,\n7 pools and 7 lakes,\n7 springs and 7 meadows,\n7 cities and 7 neighborhoods,\n7 blocks and 7 houses...\n\nWhere not even your messages can reach me!",
"I'm away from the keyboard at the moment, but if you'll scream loud enough at your screen, I might just hear you.",
"I went that way\n---->",
"I went this way\n<----",
"Please leave a message and make me feel even more important than I already am.",
"I am not here so stop writing to me,\nor else you will find yourself with a screen full of your own messages.",
"If I were here,\nI'd tell you where I am.\n\nBut I'm not,\nso ask me when I return...",
"I am away!\nI don't know when I'll be back!\nHopefully a few minutes from now!",
"I'm not available right now so please leave your name, number, and address and I will stalk you later.",
"Sorry, I'm not here right now.\nFeel free to talk to my userbot as long as you like.\nI'll get back to you later.",
"I bet you were expecting an away message!",
"Life is so short, there are so many things to do...\nI'm away doing one of them..",
"I am not here right now...\nbut if I was...\n\nwouldn't that be awesome?",
]
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
afk_start = {}
# =================================================================
@register(outgoing=True, pattern="^.off(?: |$)(.*)", disable_errors=True)
async def set_afk(afk_e):
    """For the .off command: marks you as AFK, with an optional reason.

    Resets the per-user message counters, records the time AFK mode was
    enabled, edits the triggering message to confirm, and optionally logs
    to the bot-log chat.
    """
    message = afk_e.text
    # Optional free-text reason captured by the command pattern.
    string = afk_e.pattern_match.group(1)
    global ISAFK
    global AFKREASON
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global afk_start
    global afk_end
    global reason
    USER_AFK = {}
    afk_time = None
    afk_end = {}
    # Record when AFK mode started (second precision).
    start_1 = datetime.now()
    afk_start = start_1.replace(microsecond=0)
    if string:
        AFKREASON = string
        await afk_e.edit(f"**Into the Void!**\
\nReason: `{string}`")
    else:
        await afk_e.edit("**Into The Void!**")
    if BOTLOG:
        await afk_e.client.send_message(BOTLOG_CHATID, "#AFK\nYou went Away from Keyboard!")
    ISAFK = True
    afk_time = datetime.now()  # pylint:disable=E0602
    # Stop other handlers from also processing this outgoing message.
    raise StopPropagation
@register(outgoing=True, pattern="^.unoff(?: |$)(.*)", disable_errors=True)
async def type_afk_is_not_true(notafk):
    """For the .unoff command: clears AFK state.

    Reports (to the bot-log chat, if configured) how many messages arrived
    from how many chats while you were away, then resets the counters.
    """
    global ISAFK
    global COUNT_MSG
    global USERS
    global AFKREASON
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global afk_start
    global afk_end
    back_alive = datetime.now()
    afk_end = back_alive.replace(microsecond=0)
    if ISAFK:
        ISAFK = False
        msg = await notafk.edit("**I'm back BISH!**")
        # Fix: use asyncio's sleep (imported as `sleep`) instead of
        # time.sleep(3), which blocked the whole event loop for 3 seconds.
        await sleep(3)
        await msg.delete()
        if BOTLOG:
            await notafk.client.send_message(
                BOTLOG_CHATID,
                "You've recieved " + str(COUNT_MSG) + " messages from " +
                str(len(USERS)) + " chats while you were away",
            )
            for i in USERS:
                name = await notafk.client.get_entity(i)
                name0 = str(name.first_name)
                await notafk.client.send_message(
                    BOTLOG_CHATID,
                    "[" + name0 + "](tg://user?id=" + str(i) + ")" +
                    " sent you " + "`" + str(USERS[i]) + " messages`",
                )
        COUNT_MSG = 0
        USERS = {}
        AFKREASON = None
def _afk_since_string(now):
    """Return a human-readable description of how long AFK mode has been on.

    Fixes relative to the original inline computation:
    - uses total_seconds() instead of .seconds (which wraps at 24h and made
      every day-scale branch unreachable);
    - uses timedelta directly (the original called datetime.timedelta, an
      AttributeError since only the datetime class is imported);
    - corrects the date format: "%m" (month) where the day belongs and
      "%H:%I" (hour printed twice) -> "%d" and "%H:%M".
    """
    total = float((now - afk_time).total_seconds())  # pylint:disable=E0602
    days = total // (24 * 3600)
    total = total % (24 * 3600)
    hours = total // 3600
    total %= 3600
    minutes = total // 60
    seconds = total % 60
    if days == 1:
        return "**Yesterday**"
    if days > 6:
        date = now + timedelta(days=-days, hours=-hours, minutes=-minutes)
        return date.strftime("%A, %Y %B %d, %H:%M")
    if days > 1:
        wday = now + timedelta(days=-days)
        return wday.strftime('%A')
    if hours > 1:
        return f"`{int(hours)}h {int(minutes)}m` ago"
    if minutes > 0:
        return f"`{int(minutes)}m {int(seconds)}s` ago"
    return f"`{int(seconds)}s` ago"


@register(incoming=True, disable_edited=True)
async def mention_afk(mention):
    """This function takes care of notifying the people who mention you that you are AFK."""
    global COUNT_MSG
    global USERS
    global ISAFK
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global afk_start
    global afk_end
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "**a while ago**"
    # Only react to real users (not bots) that explicitly mention you.
    if mention.message.mentioned and not (await mention.get_sender()).bot:
        if ISAFK:
            now = datetime.now()
            afk_since = _afk_since_string(now)
            if mention.sender_id not in USERS:
                # First message from this user since going AFK: always reply.
                if AFKREASON:
                    await mention.reply(f"**I'm not available right now.** (Since **{afk_since}**).\
\nReason: `{AFKREASON}`")
                else:
                    await mention.reply(f"**I'm not available right now.** (Since **{afk_since}**).\
\nPlease come back later")
                USERS.update({mention.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            elif mention.sender_id in USERS:
                # Known user: only reply occasionally to avoid spamming.
                if USERS[mention.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await mention.reply(f"**I'm still not available right now.** (Since **{afk_since}**).\
\nReason: `{AFKREASON}`")
                    else:
                        await mention.reply(f"**I'm not available right now.** (Since **{afk_since}**).\
\nPlease come back later")
                    USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
                else:
                    USERS[mention.sender_id] = USERS[mention.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1


@register(incoming=True, disable_errors=True)
async def afk_on_pm(sender):
    """Function which informs people that you are AFK in PM."""
    global ISAFK
    global USERS
    global COUNT_MSG
    global USER_AFK  # pylint:disable=E0602
    global afk_time  # pylint:disable=E0602
    global afk_start
    global afk_end
    back_alivee = datetime.now()
    afk_end = back_alivee.replace(microsecond=0)
    afk_since = "**a while ago**"
    # Only react to private chats from real users (777000 is Telegram's
    # service-notification account).
    if sender.is_private and sender.sender_id != 777000 and not (
            await sender.get_sender()).bot:
        if PM_AUTO_BAN:
            try:
                from userbot.modules.sql_helper.pm_permit_sql import is_approved
                apprv = is_approved(sender.sender_id)
            except AttributeError:
                # pm_permit database not set up; treat everyone as approved.
                apprv = True
        else:
            apprv = True
        if apprv and ISAFK:
            now = datetime.now()
            afk_since = _afk_since_string(now)
            if sender.sender_id not in USERS:
                if AFKREASON:
                    await sender.reply(f"**I'm not available right now.** (Since **{afk_since}**).\
\nReason: `{AFKREASON}`")
                else:
                    await sender.reply(f"**I'm not available right now.** (Since **{afk_since}**).\
\nPlease come back later")
                USERS.update({sender.sender_id: 1})
                COUNT_MSG = COUNT_MSG + 1
            elif apprv and sender.sender_id in USERS:
                if USERS[sender.sender_id] % randint(2, 4) == 0:
                    if AFKREASON:
                        await sender.reply(f"**I'm still not available right now.** (Since **{afk_since}**).\
\nReason: `{AFKREASON}`")
                    else:
                        await sender.reply(f"**I'm not available right now.** (Since **{afk_since}**).\
\nPlease come back later")
                    USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1
                else:
                    USERS[sender.sender_id] = USERS[sender.sender_id] + 1
                    COUNT_MSG = COUNT_MSG + 1


CMD_HELP.update({
    "afk":
    ".off [Optional Reason]\
\nUsage: Sets you as afk.\nReplies to anyone who tags/PM's you telling them that you are AFK(reason).\
\n\n.unoff\
\nUsage: Back from afk state\
"
})
editor.rs | use std::path::PathBuf;
use std::sync::{Mutex, Arc};
use std::sync::mpsc::{Sender, Receiver};
use std::sync::mpsc::channel;
use std::env;
use std::rc::Rc;
use std::char;
use rustbox::{RustBox, Event};
use syntect::highlighting::ThemeSet;
use syntect::parsing::SyntaxSet;
use input::Input;
use keyboard::Key;
use view::View;
use modes::{Mode, ModeType, InsertMode, NormalMode};
use overlay::{Overlay, OverlayEvent};
use buffer::Buffer;
use command::Command;
use command::{Action, BuilderEvent, Operation, Instruction};
/// Runtime options controlling editor behaviour.
pub struct Options {
    /// Whether syntax highlighting is applied when drawing the view.
    pub syntax_enabled: bool,
}
impl Default for Options {
    /// Syntax highlighting is off unless explicitly requested.
    fn default() -> Options {
        Options { syntax_enabled: false }
    }
}
/// The main Editor structure
///
/// This is the top-most structure in Iota.
pub struct Editor<'e> {
    /// All open buffers, shared with views through Arc<Mutex<..>>.
    buffers: Vec<Arc<Mutex<Buffer>>>,
    /// The active view rendering one of the buffers.
    view: View,
    /// Main-loop flag; cleared by Instruction::ExitEditor.
    running: bool,
    /// Terminal frontend handle.
    rb: RustBox,
    /// Current input mode (normal / insert).
    mode: Box<Mode + 'e>,
    /// Editor-wide options (see `Options`).
    options: Options,
    /// Receiving end of the internal command queue, drained in `start`.
    command_queue: Receiver<Command>,
    /// Sending end used by event handling to enqueue commands.
    command_sender: Sender<Command>,
}
impl<'e> Editor<'e> {
    /// Create a new Editor instance from the given source
    ///
    /// Loads the initial buffer (from a file, stdin, or empty), sets up the
    /// default syntax set and colour theme, and wires up the internal command
    /// channel that the main loop drains.
    pub fn new(source: Input, mode: Box<Mode + 'e>, rb: RustBox, opts: Options) -> Editor<'e> {
        let height = rb.height();
        let width = rb.width();
        // Channel over which key handling submits commands to the main loop.
        let (snd, recv) = channel();
        let mut buffers = Vec::new();

        // TODO: load custom syntax files rather than using defaults
        // see below
        let mut ps = SyntaxSet::load_defaults_nonewlines();
        ps.link_syntaxes();

        let buffer = match source {
            Input::Filename(path) => {
                match path {
                    Some(path) => Buffer::new_with_syntax(PathBuf::from(path), &ps),
                    None => Buffer::new(),
                }
            },
            Input::Stdin(reader) => {
                Buffer::from(reader)
            }
        };
        buffers.push(Arc::new(Mutex::new(buffer)));

        // NOTE: this will only work on linux
        // TODO: make this more cross-platform friendly
        // NOTE(review): env::home_dir is deprecated upstream — confirm a
        // replacement (e.g. reading $HOME) when this is next touched.
        let mut subl_config = env::home_dir().unwrap();
        subl_config.push(".config/sublime-text-3/Packages/Base16/");

        // Prefer the user's local Base16 Sublime themes when present,
        // otherwise fall back to the theme set bundled with syntect.
        let (theme_name, ts) = if subl_config.exists() {
            (String::from("base16-default-dark"),
             Rc::new(ThemeSet::load_from_folder(subl_config).unwrap()))
        } else {
            (String::from("base16-eighties.dark"),
             Rc::new(ThemeSet::load_defaults()))
        };

        let view = View::new(buffers[0].clone(), ts.clone(), theme_name, width, height);

        Editor {
            buffers: buffers,
            view: view,
            running: true,
            rb: rb,
            mode: mode,
            options: opts,
            command_queue: recv,
            command_sender: snd,
        }
    }
/// Handle key events
///
/// Key events can be handled in an Overlay, OR in the current Mode.
///
/// If there is an active Overlay, the key event is sent there, which gives
/// back an OverlayEvent. We then parse this OverlayEvent and determine if
/// the Overlay is finished and can be cleared. The response from the
/// Overlay is then converted to a Command and sent off to be handled.
///
/// If there is no active Overlay, the key event is sent to the current
/// Mode, which returns a Command which we dispatch to handle_command.
    fn handle_key_event(&mut self, key: Option<Key>) {
        let key = match key {
            Some(k) => k,
            None => return
        };

        // An active overlay takes priority over the current mode.
        let mut remove_overlay = false;
        let command = match self.view.overlay {
            Overlay::None => self.mode.handle_key_event(key),
            _ => {
                let event = self.view.overlay.handle_key_event(key);
                match event {
                    OverlayEvent::Finished(response) => {
                        remove_overlay = true;
                        self.handle_overlay_response(response)
                    }
                    _ => { BuilderEvent::Incomplete }
                }
            }
        };

        // Tear down a finished overlay before dispatching its command.
        if remove_overlay {
            self.view.overlay = Overlay::None;
            self.view.clear(&mut self.rb);
        }

        // Only fully-built commands are queued; Incomplete/Invalid are dropped.
        if let BuilderEvent::Complete(c) = command {
            let _ = self.command_sender.send(c);
        }
    }
/// Translate the response from an Overlay to a Command wrapped in a BuilderEvent
///
/// In most cases, we will just want to convert the response directly to
/// a Command, however in some cases we will want to perform other actions
/// first, such as in the case of Overlay::SavePrompt.
    fn handle_overlay_response(&mut self, response: Option<String>) -> BuilderEvent {
        // FIXME: This entire method needs to be updated
        match response {
            Some(data) => {
                match self.view.overlay {

                    // FIXME: this is just a temporary fix
                    Overlay::Prompt { ref data, .. } => {
                        match &**data {
                            // FIXME: need to find a better system for these commands
                            //        They should be chainable (ie: wq - save & quit)
                            //        They should also take arguments
                            //        (ie: w file.txt - write buffer to file.txt)
                            "q" | "quit" => BuilderEvent::Complete(Command::exit_editor()),
                            "w" | "write" => BuilderEvent::Complete(Command::save_buffer()),
                            _ => BuilderEvent::Incomplete
                        }
                    }

                    Overlay::SavePrompt { .. } => {
                        if data.is_empty() {
                            BuilderEvent::Invalid
                        } else {
                            // Point the buffer at the entered path, then save.
                            let path = PathBuf::from(&*data);
                            self.view.buffer.lock().unwrap().file_path = Some(path);
                            BuilderEvent::Complete(Command::save_buffer())
                        }
                    }

                    Overlay::SelectFile { .. } => {
                        // Open the selected file in a fresh buffer and switch
                        // the view over to it.
                        let path = PathBuf::from(data);
                        let buffer = Arc::new(Mutex::new(Buffer::from(path)));
                        self.buffers.push(buffer.clone());
                        self.view.set_buffer(buffer.clone());
                        self.view.clear(&mut self.rb);
                        BuilderEvent::Complete(Command::noop())
                    }

                    _ => BuilderEvent::Incomplete,
                }
            }
            None => BuilderEvent::Incomplete
        }
    }
    /// Handle resize events
    ///
    /// `width` and `height` are the new dimensions of the terminal window;
    /// they are forwarded to the view so it can re-layout itself.
    fn handle_resize_event(&mut self, width: usize, height: usize) {
        self.view.resize(width, height);
    }
    /// Draw the current view to the frontend
    ///
    /// Syntax highlighting is applied only when enabled in the options.
    fn draw(&mut self) {
        self.view.draw(&mut self.rb, self.options.syntax_enabled);
    }
/// Handle the given command, performing the associated action
fn handle_command(&mut self, command: Command) |
    /// Perform the editor-level action described by `instruction`.
    ///
    /// `command` carries optional context, e.g. the text object for SetMark.
    fn handle_instruction(&mut self, instruction: Instruction, command: Command) {
        match instruction {
            Instruction::SaveBuffer => { self.view.try_save_buffer() }
            Instruction::ExitEditor => {
                // Refuse to exit while there are unsaved changes; queue a
                // message so the user can save first.
                if self.view.buffer_is_dirty() {
                    let _ = self.command_sender.send(Command::show_message("Unsaved changes"));
                } else {
                    self.running = false;
                }
            }
            Instruction::SetMark(mark) => {
                if let Some(object) = command.object {
                    self.view.move_mark(mark, object)
                }
            }
            Instruction::SetOverlay(overlay_type) => {
                self.view.set_overlay(overlay_type)
            }
            Instruction::SetMode(mode) => {
                match mode {
                    ModeType::Insert => { self.mode = Box::new(InsertMode::new()) }
                    ModeType::Normal => { self.mode = Box::new(NormalMode::new()) }
                }
            }
            Instruction::SwitchToLastBuffer => {
                self.view.switch_last_buffer();
                self.view.clear(&mut self.rb);
            }
            Instruction::ShowMessage(msg) => {
                self.view.show_message(msg)
            }
            // Remaining instructions are currently no-ops.
            _ => {}
        }
    }
fn handle_operation(&mut self, operation: Operation, command: Command) {
match operation {
Operation::Insert(c) => {
for _ in 0..command.number {
self.view.insert_char(c)
}
}
Operation::DeleteObject => {
if let Some(obj) = command.object {
self.view.delete_object(obj);
}
}
Operation::DeleteFromMark(m) => {
if command.object.is_some() {
self.view.delete_from_mark_to_object(m, command.object.unwrap())
}
}
Operation::Undo => { self.view.undo() }
Operation::Redo => { self.view.redo() }
}
}
    /// Start Iota!
    ///
    /// Main event loop: draw, present, poll one input event, translate it
    /// into commands, then drain and execute the queued commands. Runs until
    /// `running` is cleared (see Instruction::ExitEditor).
    pub fn start(&mut self) {
        while self.running {
            self.draw();
            self.rb.present();
            self.view.maybe_clear_message();
            match self.rb.poll_event(true) {
                Ok(Event::KeyEventRaw(_, key, ch)) => {
                    // A raw key code of 0 means `ch` carries a printable
                    // character; anything else is a special key code.
                    let k = match key {
                        0 => char::from_u32(ch).map(Key::Char),
                        a => Key::from_special_code(a),
                    };
                    self.handle_key_event(k)
                },
                Ok(Event::ResizeEvent(width, height)) => self.handle_resize_event(width as usize, height as usize),
                _ => {}
            }

            // Execute every command queued during event handling.
            while let Ok(message) = self.command_queue.try_recv() {
                self.handle_command(message)
            }
        }
    }
}
| {
let repeat = if command.number > 0 {
command.number
} else { 1 };
for _ in 0..repeat {
match command.action {
Action::Instruction(i) => self.handle_instruction(i, command),
Action::Operation(o) => self.handle_operation(o, command),
}
}
} |
from_yaml.rs | use crate::commands::WholeStreamCommand;
use crate::prelude::*;
use nu_errors::ShellError;
use nu_protocol::{Primitive, Signature, TaggedDictBuilder, UntaggedValue, Value};
pub struct FromYAML;
#[async_trait]
impl WholeStreamCommand for FromYAML {
fn name(&self) -> &str {
"from yaml"
}
fn signature(&self) -> Signature {
Signature::build("from yaml")
}
fn usage(&self) -> &str {
"Parse text as .yaml/.yml and create table."
}
async fn run( | &self,
args: CommandArgs,
registry: &CommandRegistry,
) -> Result<OutputStream, ShellError> {
from_yaml(args, registry).await
}
}
/// `from yml`: alias of `from yaml` for the `.yml` extension.
pub struct FromYML;

#[async_trait]
impl WholeStreamCommand for FromYML {
    fn name(&self) -> &str {
        "from yml"
    }

    fn signature(&self) -> Signature {
        Signature::build("from yml")
    }

    fn usage(&self) -> &str {
        "Parse text as .yaml/.yml and create table."
    }

    // Delegates to the shared from_yaml implementation.
    async fn run(
        &self,
        args: CommandArgs,
        registry: &CommandRegistry,
    ) -> Result<OutputStream, ShellError> {
        from_yaml(args, registry).await
    }
}
/// Recursively convert a parsed `serde_yaml::Value` into a nushell `Value`,
/// tagging every produced value with `tag`.
///
/// Sequences become tables, mappings become rows; a special case handles
/// unquoted `{{ ... }}` template strings (see the inline comment below).
fn convert_yaml_value_to_nu_value(
    v: &serde_yaml::Value,
    tag: impl Into<Tag>,
) -> Result<Value, ShellError> {
    let tag = tag.into();
    let span = tag.span;

    let err_not_compatible_number = ShellError::labeled_error(
        "Expected a compatible number",
        "expected a compatible number",
        &tag,
    );
    Ok(match v {
        serde_yaml::Value::Bool(b) => UntaggedValue::boolean(*b).into_value(tag),
        serde_yaml::Value::Number(n) if n.is_i64() => {
            UntaggedValue::int(n.as_i64().ok_or_else(|| err_not_compatible_number)?).into_value(tag)
        }
        serde_yaml::Value::Number(n) if n.is_f64() => UntaggedValue::decimal_from_float(
            n.as_f64().ok_or_else(|| err_not_compatible_number)?,
            span,
        )
        .into_value(tag),
        serde_yaml::Value::String(s) => UntaggedValue::string(s).into_value(tag),
        serde_yaml::Value::Sequence(a) => {
            // Convert each element; any failure aborts the whole sequence.
            let result: Result<Vec<Value>, ShellError> = a
                .iter()
                .map(|x| convert_yaml_value_to_nu_value(x, &tag))
                .collect();
            UntaggedValue::Table(result?).into_value(tag)
        }
        serde_yaml::Value::Mapping(t) => {
            let mut collected = TaggedDictBuilder::new(&tag);
            for (k, v) in t.iter() {
                // A ShellError that we re-use multiple times in the Mapping scenario
                let err_unexpected_map = ShellError::labeled_error(
                    format!("Unexpected YAML:\nKey: {:?}\nValue: {:?}", k, v),
                    "unexpected",
                    tag.clone(),
                );
                match (k, v) {
                    (serde_yaml::Value::String(k), _) => {
                        collected.insert_value(k.clone(), convert_yaml_value_to_nu_value(v, &tag)?);
                    }
                    // Hard-code fix for cases where "v" is a string without quotations with double curly braces
                    // e.g. k = value
                    //      value: {{ something }}
                    // Strangely, serde_yaml returns
                    // "value" -> Mapping(Mapping { map: {Mapping(Mapping { map: {String("something"): Null} }): Null} })
                    (serde_yaml::Value::Mapping(m), serde_yaml::Value::Null) => {
                        return m
                            .iter()
                            .take(1)
                            .collect_vec()
                            .first()
                            .and_then(|e| match e {
                                (serde_yaml::Value::String(s), serde_yaml::Value::Null) => Some(
                                    UntaggedValue::string("{{ ".to_owned() + &s + " }}")
                                        .into_value(tag),
                                ),
                                _ => None,
                            })
                            .ok_or(err_unexpected_map);
                    }
                    (_, _) => {
                        return Err(err_unexpected_map);
                    }
                }
            }
            collected.into_value()
        }
        serde_yaml::Value::Null => UntaggedValue::Primitive(Primitive::Nothing).into_value(tag),
        x => unimplemented!("Unsupported yaml case: {:?}", x),
    })
}
/// Parse an entire YAML document in `s` into a single nushell `Value`.
///
/// Returns a labeled `ShellError` when the text is not valid YAML.
pub fn from_yaml_string_to_value(s: String, tag: impl Into<Tag>) -> Result<Value, ShellError> {
    let tag = tag.into();
    let v: serde_yaml::Value = serde_yaml::from_str(&s).map_err(|x| {
        ShellError::labeled_error(
            format!("Could not load yaml: {}", x),
            "could not load yaml from text",
            &tag,
        )
    })?;
    // The conversion already yields a Result, so the previous `Ok(...?)`
    // wrapping was redundant — return it directly.
    convert_yaml_value_to_nu_value(&v, tag)
}
/// Implementation shared by `from yaml` and `from yml`: collect the input
/// stream into one string, parse it, and stream the result back out.
async fn from_yaml(
    args: CommandArgs,
    registry: &CommandRegistry,
) -> Result<OutputStream, ShellError> {
    let registry = registry.clone();
    let args = args.evaluate_once(&registry).await?;
    let tag = args.name_tag();
    let input = args.input;

    let concat_string = input.collect_string(tag.clone()).await?;

    match from_yaml_string_to_value(concat_string.item, tag.clone()) {
        Ok(x) => match x {
            // A top-level sequence becomes a stream of rows (a table).
            Value {
                value: UntaggedValue::Table(list),
                ..
            } => Ok(futures::stream::iter(list).to_output_stream()),
            // Anything else is emitted as a single value.
            x => Ok(OutputStream::one(x)),
        },
        Err(_) => Err(ShellError::labeled_error_with_secondary(
            "Could not parse as YAML",
            "input cannot be parsed as YAML",
            &tag,
            "value originates from here",
            &concat_string.tag,
        )),
    }
}
#[cfg(test)]
mod tests {
    use super::ShellError;
    use super::*;
    use nu_protocol::row;
    use nu_test_support::value::string;

    #[test]
    fn examples_work_as_expected() -> Result<(), ShellError> {
        use crate::examples::test as test_examples;

        Ok(test_examples(FromYAML {})?)
    }

    #[test]
    fn test_problematic_yaml() {
        // Table-driven cases covering the "{{ ... }}" template-value quirk
        // handled specially in convert_yaml_value_to_nu_value.
        struct TestCase {
            description: &'static str,
            input: &'static str,
            expected: Result<Value, ShellError>,
        }
        let tt: Vec<TestCase> = vec![
            TestCase {
                description: "Double Curly Braces With Quotes",
                input: r#"value: "{{ something }}""#,
                expected: Ok(row!["value".to_owned() => string("{{ something }}")]),
            },
            TestCase {
                description: "Double Curly Braces Without Quotes",
                input: r#"value: {{ something }}"#,
                expected: Ok(row!["value".to_owned() => string("{{ something }}")]),
            },
        ];
        for tc in tt.into_iter() {
            let actual = from_yaml_string_to_value(tc.input.to_owned(), Tag::default());
            if actual.is_err() {
                // Errors are not comparable for equality; only require that
                // an error was expected as well.
                assert!(
                    tc.expected.is_err(),
                    "actual is Err for test:\nTest Description {}\nErr: {:?}",
                    tc.description,
                    actual
                );
            } else {
                assert_eq!(actual, tc.expected, "{}", tc.description);
            }
        }
    }
}
plot_time_cost_bar.py | import matplotlib.pyplot as plt
import pandas as pd
from numpy import arange, array
import os
import logging
logging.basicConfig()
logger = logging.getLogger('PlotTimeCost')
logger.setLevel('INFO')
class PlotTimeCostBar:
    """Render per-strategy time costs as a bar chart and save it to disk."""

    def __init__(self, data, path, show=False):
        """Store the data and output path, creating the output directory.

        Args:
            data: table exposing a 'Time Cost' column (seconds) and an
                index of strategy labels — presumably a pandas DataFrame;
                TODO confirm against callers.
            path: output file path; its extension selects the image format.
            show: when True, display the figure interactively before saving.
        """
        self.data = data
        self.path = path
        self.show_flag = show

        # Create the output directory if needed. Guard against a bare
        # filename: os.makedirs('') raises, so only act when a directory
        # component is present.
        (filepath, tempfilename) = os.path.split(path)
        if filepath and not os.path.exists(filepath):
            os.makedirs(filepath)
        # Image format is taken from the extension, without the leading dot.
        (filename, extension) = os.path.splitext(tempfilename)
        self.format = extension[1:]

    def plot(self):
        """Draw the bar chart and save it to ``self.path``."""
        # A leading zero bar offsets the first real bar from the y-axis.
        # Previously this was a hard-coded 3-element *int* array, which both
        # truncated fractional seconds and broke for row counts other than 2;
        # size it from the data and keep float precision instead.
        data = array([0.0] * (len(self.data) + 1))
        data[1:] = self.data['Time Cost'].values

        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)

        width = 0.5
        xticks = self.data.index
        n = data.shape[0]
        ind = arange(n)
        data = data / 3600  # seconds -> hours

        # NOTE(review): only 6 colors are defined; more than 5 data rows
        # would exhaust the list — confirm expected data size.
        colors = ['black', 'tab:blue', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown']
        plt.bar(x=ind, height=data, width=width, color=colors)

        # Label only the real bars, skipping the leading dummy bar.
        ax.set_xticks(ind[1:])
        ax.set_xticklabels(xticks)
        # ax.set_xlabel('Multi-fidelity control strategy', fontsize=16)
        ax.tick_params(labelsize=12)
        ax.set_ylabel('Time Cost (h)', fontsize=16)

        if self.show_flag:
            plt.show()
        fig.savefig(self.path, format=self.format, dpi=80, bbox_inches='tight')
|
App.js | import React, { useMemo, useState, useEffect, createRef } from 'react'
import ReactQuill from 'react-quill'
import 'react-quill/dist/quill.snow.css'
import Dracula from './Dracula'
import { createEditor } from 'slate'
import { useFocused, Slate, Editable, withReact } from 'slate-react'
import { ChakraProvider, Flex, Box } from '@chakra-ui/react'
import { isNil } from 'ramda'
import { setImageHook, s2m, s2h, q2s, s2q } from 'asteroid-parser'
import ImageUploader from 'quill-image-uploader2'
import { sha256 } from 'js-sha256'
const entities = require('entities')
let Parchment = ReactQuill.Quill.import('parchment')
let Delta = ReactQuill.Quill.import('delta')
let Break = ReactQuill.Quill.import('blots/break')
let Embed = ReactQuill.Quill.import('blots/embed')
let Block = ReactQuill.Quill.import('blots/block')
// A <br>-based blot used for soft line breaks (Shift+Enter). Extends Quill's
// Break but reports length 1 and the value '\n', and inserts itself the way
// an inline Embed does rather than as a block break.
class SmartBreak extends Break {
  length() {
    return 1
  }
  value() {
    return '\n'
  }
  insertInto(parent, ref) {
    // Delegate to Embed's insertInto so the break participates in the
    // document model like an inline embed.
    Embed.prototype.insertInto.call(this, parent, ref)
  }
}
// Registered under 'inline-break', rendered as a BR tag.
SmartBreak.blotName = 'inline-break'
SmartBreak.tagName = 'BR'
// Re-tag strikethrough so Quill emits semantic <del> elements.
const Strike = ReactQuill.Quill.import('formats/strike')
Strike.tagName = 'DEL'
ReactQuill.Quill.register(Strike, true)
// Clipboard matcher: convert pasted <br> elements into inline-break embeds.
function lineBreakMatcher() {
  const delta = new Delta()
  delta.insert({ 'inline-break': '' })
  return delta
}
// Install the custom soft-break blot and the image-upload module into Quill.
ReactQuill.Quill.register(SmartBreak)
ImageUploader(ReactQuill.Quill)
const App = () => {
const [editor] = useState(() => withReact(createEditor()))
const [qvalue, setQValue] = useState('')
const [upV, setUpV] = useState(null)
const [isMarkdown, setIsMarkdown] = useState(true)
let quillRef = React.createRef()
const [value, setValue] = useState([
{
type: 'paragraph',
children: [{ text: '' }]
}
])
useEffect(() => {
setImageHook({
fromBase64: url => {
if (/^data\:image\/.+/.test(url)) {
const img = window.image_map[sha256(url)]
if (!isNil(img)) return `data:image/${img.ext};local,${img.id}`
}
return url
},
toBase64: url => {
if (/^data\:image\/.+;local,/.test(url)) {
const img = window.image_map[url.split(',')[1]]
if (!isNil(img)) return img.url
}
return url
}
})
}, [])
useEffect(() => {
if (isMarkdown) {
setQValue(s2q(value))
}
setUpV(false)
}, [value])
useEffect(() => {
if (!isMarkdown) {
setUpV(true)
setValue(q2s(qvalue))
}
}, [qvalue])
const options = {
toolbar: [
[{ header: [1, 2, 3, 4, 5, 6, false] }],
['bold', 'italic', 'underline', 'strike', 'blockquote', 'link', 'image'],
[{ color: [] }, { background: [] }],
[{ list: 'ordered' }, { list: 'bullet' }],
['clean']
],
imageUploader: {},
clipboard: {
matchers: [['BR', lineBreakMatcher]]
},
keyboard: {
bindings: {
handleEnter: {
key: 13,
handler: function (range, context) {
if (range.length > 0) {
this.quill.scroll.deleteAt(range.index, range.length)
}
let lineFormats = Object.keys(context.format).reduce(function (
lineFormats,
format
) {
if (
Parchment.query(format, Parchment.Scope.BLOCK) &&
!Array.isArray(context.format[format])
) {
lineFormats[format] = context.format[format]
}
return lineFormats
},
{})
var previousChar = this.quill.getText(range.index - 1, 1)
this.quill.insertText(
range.index,
'\n',
lineFormats,
ReactQuill.Quill.sources.USER
)
this.quill.setSelection(
range.index + 1,
ReactQuill.Quill.sources.SILENT
)
try {
this.quill.selection.scrollIntoView()
} catch (e) {}
Object.keys(context.format).forEach(name => {
if (lineFormats[name] != null) return
if (Array.isArray(context.format[name])) return
if (name === 'link') return
this.quill.format(
name,
context.format[name],
ReactQuill.Quill.sources.USER
)
})
}
},
linebreak: {
key: 13,
shiftKey: true,
handler: function (range, context) {
var nextChar = this.quill.getText(range.index + 1, 1)
var ee = this.quill.insertEmbed(
range.index,
'inline-break',
true,
'user'
)
if (nextChar.length == 0) {
var ee = this.quill.insertEmbed(
range.index,
'inline-break',
true,
'user'
)
}
this.quill.setSelection(
range.index + 1,
ReactQuill.Quill.sources.SILENT
)
}
}
}
}
}
const modules = useMemo(() => options, [])
return (
<ChakraProvider>
<Flex height='100vh'>
<style global jsx>{`
p,
.ql-editor p {
margin-bottom: 15px;
}
.quill {
display: flex;
flex-direction: column;
width: 100%;
height: 100%;
}
.ql-container {
flex: 1;
overflow-y: auto;
font-size: 17px;
}
.ql-editor {
border-left: 1px solid #ccc;
}
.ql-container.ql-snow {
border: 0px;
}
.ql-tooltip {
margin-left: 120px;
}
`}</style>
<Dracula />
<Flex bg='#eee' flex={1} direction='column' height='100%'>
<Flex
direction='column'
flex={1}
bg='white'
sx={{ overflow: 'auto' }}
height='50vh'
>
<Flex
color='#222'
justify='center'
align='center'
height='43px'
borderBottom='1px solid #ccc'
>
Slate Editor
</Flex>
<Box flex={1} p={3} sx={{ wordBreak: 'break-all' }}>
{upV ? null : (
<Slate
editor={editor}
value={value}
onChange={newValue => {
setIsMarkdown(true)
setValue(newValue)
}}
>
<Editable style={{ height: '100%' }} />
</Slate>
)}
</Box>
</Flex>
<Flex flex={1} height='50vh'>
<Flex direction='column' flex={1} bg='#eee'>
<Flex
color='#222'
justify='center'
align='center'
height='43px'
borderBottom='1px solid #aaa'
borderTop='1px solid #aaa'
>
Markdown
</Flex>
<Flex
p={3}
sx={{ overflow: 'auto', wordBreak: 'break-all' }}
flex={1}
height='100%'
dangerouslySetInnerHTML={{
__html: entities
.encodeHTML(s2m(value))
.replace(/\&NewLine\;/g, '<br />')
}}
></Flex>
</Flex> | color='#222'
justify='center'
align='center'
height='43px'
borderBottom='1px solid #aaa'
borderTop='1px solid #aaa'
>
HTML
</Flex>
<Flex
p={3}
sx={{ overflow: 'auto', wordBreak: 'break-all' }}
bg='#ddd'
flex={1}
height='100%'
dangerouslySetInnerHTML={{
__html: entities
.encodeHTML(s2h(value))
.replace(/\&NewLine\;/g, '<br />')
}}
/>
</Flex>
</Flex>
</Flex>
<Flex bg='#ddd' flex={1} direction='column'>
<Box flex={1} bg='white' sx={{ overflow: 'auto' }}>
<ReactQuill
ref={el => {
if (!isNil(el) && isNil(quillRef)) quillRef = el.getEditor()
}}
theme='snow'
value={qvalue}
onChange={(val, d, s, e) => {
setQValue(val)
}}
modules={modules}
onFocus={() => setIsMarkdown(false)}
onBlur={() => setIsMarkdown(true)}
/>
</Box>
<Flex bg='#ccc' direction='column' flex={1} sx={{ overflow: 'auto' }}>
<Flex
color='#222'
justify='center'
align='center'
height='43px'
borderBottom='1px solid #aaa'
borderTop='1px solid #aaa'
>
Preview
</Flex>
<Box
p={3}
flex={1}
height='100%'
dangerouslySetInnerHTML={{
__html: s2h(value)
}}
/>
</Flex>
</Flex>
</Flex>
</ChakraProvider>
)
}
export default App | <Flex direction='column' flex={1} bg='#ddd'>
<Flex |
secret_value.go | package provider
import (
"encoding/json"
"fmt"
"github.com/jmespath/go-jmespath"
)
// SecretValue contains the actual contents of the secret fetched from either
// Secrets Manager or SSM Parameter Store along with the original descriptor.
type SecretValue struct {
	Value      []byte           // raw secret payload
	Descriptor SecretDescriptor // descriptor the secret was fetched for
}
func (p *SecretValue) String() string { return "<REDACTED>" } // Do not log secrets
//parse out and return specified key value pairs from the secret
func (p *SecretValue) getJsonSecrets() (s []*SecretValue, e error) {
jsonValues := make([]*SecretValue, 0)
if len(p.Descriptor.JMESPath) == 0 {
return jsonValues, nil
}
var data interface{}
err := json.Unmarshal(p.Value, &data)
if err != nil {
return nil, fmt.Errorf("Invalid JSON used with jmesPath in secret: %s.", p.Descriptor.ObjectName)
}
//fetch all specified key value pairs`
for _, jmesPathEntry := range p.Descriptor.JMESPath {
jsonSecret, err := jmespath.Search(jmesPathEntry.Path, data)
if err != nil {
return nil, fmt.Errorf("Invalid JMES Path: %s.", jmesPathEntry.Path)
}
if jsonSecret == nil |
jsonSecretAsString, isString := jsonSecret.(string)
if !isString {
return nil, fmt.Errorf("Invalid JMES search result type for path:%s. Only string is allowed.", jmesPathEntry.Path)
}
descriptor := p.Descriptor.getJmesEntrySecretDescriptor(&jmesPathEntry)
secretValue := SecretValue{
Value: []byte(jsonSecretAsString),
Descriptor: descriptor,
}
jsonValues = append(jsonValues, &secretValue)
}
return jsonValues, nil
}
| {
return nil, fmt.Errorf("JMES Path - %s for object alias - %s does not point to a valid object.",
jmesPathEntry.Path, jmesPathEntry.ObjectAlias)
} |
criocli.go | package criocli
import (
"fmt"
"os"
"path/filepath"
"strings"
libconfig "github.com/cri-o/cri-o/pkg/config"
"github.com/cri-o/cri-o/server/metrics/collectors"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// DefaultsPath is the path to default configuration files set at build time
var DefaultsPath string

// DefaultCommands are the commands that can be added to every binary
var DefaultCommands = []*cli.Command{
	completion(),
	man(),
	markdown(),
}
// GetConfigFromContext retrieves the server configuration stored in the app
// metadata of the provided CLI context.
func GetConfigFromContext(c *cli.Context) (*libconfig.Config, error) {
	cfg, ok := c.App.Metadata["config"].(*libconfig.Config)
	if !ok {
		return nil, fmt.Errorf("type assertion error when accessing server config")
	}
	return cfg, nil
}
func | (c *cli.Context) (*libconfig.Config, error) {
config, err := GetConfigFromContext(c)
if err != nil {
return nil, err
}
if err := mergeConfig(config, c); err != nil {
return nil, err
}
return config, nil
}
// mergeConfig layers configuration sources onto config in increasing priority
// order: the main config file (or the build-time default), the drop-in config
// directory, and finally any flags explicitly set on the command line.
func mergeConfig(config *libconfig.Config, ctx *cli.Context) error {
	// Don't parse the config if the user explicitly set it to "".
	path := ctx.String("config")
	if path != "" {
		if err := config.UpdateFromFile(path); err != nil {
			// Only tolerate a missing file when the user did not ask for
			// this path explicitly.
			if ctx.IsSet("config") || !os.IsNotExist(err) {
				return err
			}

			// Use the build-time-defined defaults path
			if DefaultsPath != "" && os.IsNotExist(err) {
				path = filepath.Join(DefaultsPath, "/crio.conf")
				if err := config.UpdateFromFile(path); err != nil {
					if ctx.IsSet("config") || !os.IsNotExist(err) {
						return err
					}
				}
			}
		}
	}

	// Parse the drop-in configuration files for config override
	if err := config.UpdateFromPath(ctx.String("config-dir")); err != nil {
		return err
	}

	// If "config-dir" is specified, config.UpdateFromPath() will set
	// config.singleConfigPath as the last config file in "config-dir".
	// We need to correct it to the path specified by "config".
	if path != "" {
		config.SetSingleConfigPath(path)
	}

	// Override options set with the CLI.
	if ctx.IsSet("conmon") {
		config.Conmon = ctx.String("conmon")
	}
	if ctx.IsSet("pause-command") {
		config.PauseCommand = ctx.String("pause-command")
	}
	if ctx.IsSet("pause-image") {
		config.PauseImage = ctx.String("pause-image")
	}
	if ctx.IsSet("pause-image-auth-file") {
		config.PauseImageAuthFile = ctx.String("pause-image-auth-file")
	}
	if ctx.IsSet("global-auth-file") {
		config.GlobalAuthFile = ctx.String("global-auth-file")
	}
	if ctx.IsSet("signature-policy") {
		config.SignaturePolicyPath = ctx.String("signature-policy")
	}
	if ctx.IsSet("root") {
		config.Root = ctx.String("root")
	}
	if ctx.IsSet("runroot") {
		config.RunRoot = ctx.String("runroot")
	}
	if ctx.IsSet("storage-driver") {
		config.Storage = ctx.String("storage-driver")
	}
	if ctx.IsSet("storage-opt") {
		config.StorageOptions = StringSliceTrySplit(ctx, "storage-opt")
	}
	if ctx.IsSet("insecure-registry") {
		config.InsecureRegistries = StringSliceTrySplit(ctx, "insecure-registry")
	}
	if ctx.IsSet("registry") {
		config.Registries = StringSliceTrySplit(ctx, "registry")
	}
	if ctx.IsSet("default-transport") {
		config.DefaultTransport = ctx.String("default-transport")
	}
	if ctx.IsSet("listen") {
		config.Listen = ctx.String("listen")
	}
	if ctx.IsSet("stream-address") {
		config.StreamAddress = ctx.String("stream-address")
	}
	if ctx.IsSet("stream-port") {
		config.StreamPort = ctx.String("stream-port")
	}
	if ctx.IsSet("default-runtime") {
		config.DefaultRuntime = ctx.String("default-runtime")
	}
	if ctx.IsSet("decryption-keys-path") {
		config.DecryptionKeysPath = ctx.String("decryption-keys-path")
	}
	if ctx.IsSet("runtimes") {
		runtimes := StringSliceTrySplit(ctx, "runtimes")
		for _, r := range runtimes {
			// Each entry has the colon-separated form
			// name:path:root[:type[:privileged-without-host-devices[:config-path]]].
			fields := strings.Split(r, ":")

			runtimeType := libconfig.DefaultRuntimeType
			privilegedWithoutHostDevices := false
			runtimeConfigPath := ""

			switch len(fields) {
			case 6:
				runtimeConfigPath = fields[5]
				fallthrough
			case 5:
				if fields[4] == "true" {
					privilegedWithoutHostDevices = true
				}
				fallthrough
			case 4:
				runtimeType = fields[3]
				fallthrough
			case 3:
				config.Runtimes[fields[0]] = &libconfig.RuntimeHandler{
					RuntimePath:                  fields[1],
					RuntimeRoot:                  fields[2],
					RuntimeType:                  runtimeType,
					PrivilegedWithoutHostDevices: privilegedWithoutHostDevices,
					RuntimeConfigPath:            runtimeConfigPath,
				}
			default:
				return fmt.Errorf("wrong format for --runtimes: %q", r)
			}
		}
	}
	if ctx.IsSet("selinux") {
		config.SELinux = ctx.Bool("selinux")
	}
	if ctx.IsSet("seccomp-profile") {
		config.SeccompProfile = ctx.String("seccomp-profile")
	}
	if ctx.IsSet("seccomp-use-default-when-empty") {
		config.SeccompUseDefaultWhenEmpty = ctx.Bool("seccomp-use-default-when-empty")
	}
	if ctx.IsSet("apparmor-profile") {
		config.ApparmorProfile = ctx.String("apparmor-profile")
	}
	if ctx.IsSet("blockio-config-file") {
		config.BlockIOConfigFile = ctx.String("blockio-config-file")
	}
	if ctx.IsSet("irqbalance-config-file") {
		config.IrqBalanceConfigFile = ctx.String("irqbalance-config-file")
	}
	if ctx.IsSet("rdt-config-file") {
		config.RdtConfigFile = ctx.String("rdt-config-file")
	}
	if ctx.IsSet("cgroup-manager") {
		config.CgroupManagerName = ctx.String("cgroup-manager")
	}
	if ctx.IsSet("conmon-cgroup") {
		config.ConmonCgroup = ctx.String("conmon-cgroup")
	}
	if ctx.IsSet("hooks-dir") {
		config.HooksDir = StringSliceTrySplit(ctx, "hooks-dir")
	}
	if ctx.IsSet("default-mounts-file") {
		config.DefaultMountsFile = ctx.String("default-mounts-file")
	}
	if ctx.IsSet("default-capabilities") {
		config.DefaultCapabilities = StringSliceTrySplit(ctx, "default-capabilities")
	}
	if ctx.IsSet("default-sysctls") {
		config.DefaultSysctls = StringSliceTrySplit(ctx, "default-sysctls")
	}
	if ctx.IsSet("default-ulimits") {
		config.DefaultUlimits = StringSliceTrySplit(ctx, "default-ulimits")
	}
	if ctx.IsSet("pids-limit") {
		config.PidsLimit = ctx.Int64("pids-limit")
	}
	if ctx.IsSet("log-size-max") {
		config.LogSizeMax = ctx.Int64("log-size-max")
	}
	if ctx.IsSet("log-journald") {
		config.LogToJournald = ctx.Bool("log-journald")
	}
	if ctx.IsSet("cni-default-network") {
		config.CNIDefaultNetwork = ctx.String("cni-default-network")
	}
	if ctx.IsSet("cni-config-dir") {
		config.NetworkDir = ctx.String("cni-config-dir")
	}
	if ctx.IsSet("cni-plugin-dir") {
		config.PluginDirs = StringSliceTrySplit(ctx, "cni-plugin-dir")
	}
	if ctx.IsSet("image-volumes") {
		config.ImageVolumes = libconfig.ImageVolumesType(ctx.String("image-volumes"))
	}
	if ctx.IsSet("read-only") {
		config.ReadOnly = ctx.Bool("read-only")
	}
	if ctx.IsSet("bind-mount-prefix") {
		config.BindMountPrefix = ctx.String("bind-mount-prefix")
	}
	if ctx.IsSet("uid-mappings") {
		config.UIDMappings = ctx.String("uid-mappings")
	}
	if ctx.IsSet("gid-mappings") {
		config.GIDMappings = ctx.String("gid-mappings")
	}
	if ctx.IsSet("log-level") {
		config.LogLevel = ctx.String("log-level")
	}
	if ctx.IsSet("log-filter") {
		config.LogFilter = ctx.String("log-filter")
	}
	if ctx.IsSet("log-dir") {
		config.LogDir = ctx.String("log-dir")
	}
	if ctx.IsSet("additional-devices") {
		config.AdditionalDevices = StringSliceTrySplit(ctx, "additional-devices")
	}
	if ctx.IsSet("device-ownership-from-security-context") {
		config.DeviceOwnershipFromSecurityContext = ctx.Bool("device-ownership-from-security-context")
	}
	if ctx.IsSet("conmon-env") {
		config.ConmonEnv = StringSliceTrySplit(ctx, "conmon-env")
	}
	if ctx.IsSet("default-env") {
		config.DefaultEnv = StringSliceTrySplit(ctx, "default-env")
	}
	if ctx.IsSet("container-attach-socket-dir") {
		config.ContainerAttachSocketDir = ctx.String("container-attach-socket-dir")
	}
	if ctx.IsSet("container-exits-dir") {
		config.ContainerExitsDir = ctx.String("container-exits-dir")
	}
	if ctx.IsSet("ctr-stop-timeout") {
		config.CtrStopTimeout = ctx.Int64("ctr-stop-timeout")
	}
	if ctx.IsSet("grpc-max-recv-msg-size") {
		config.GRPCMaxRecvMsgSize = ctx.Int("grpc-max-recv-msg-size")
	}
	if ctx.IsSet("grpc-max-send-msg-size") {
		config.GRPCMaxSendMsgSize = ctx.Int("grpc-max-send-msg-size")
	}
	if ctx.IsSet("drop-infra-ctr") {
		config.DropInfraCtr = ctx.Bool("drop-infra-ctr")
	}
	if ctx.IsSet("namespaces-dir") {
		config.NamespacesDir = ctx.String("namespaces-dir")
	}
	if ctx.IsSet("pinns-path") {
		config.PinnsPath = ctx.String("pinns-path")
	}
	if ctx.IsSet("no-pivot") {
		config.NoPivot = ctx.Bool("no-pivot")
	}
	if ctx.IsSet("stream-enable-tls") {
		config.StreamEnableTLS = ctx.Bool("stream-enable-tls")
	}
	if ctx.IsSet("stream-tls-ca") {
		config.StreamTLSCA = ctx.String("stream-tls-ca")
	}
	if ctx.IsSet("stream-tls-cert") {
		config.StreamTLSCert = ctx.String("stream-tls-cert")
	}
	if ctx.IsSet("stream-tls-key") {
		config.StreamTLSKey = ctx.String("stream-tls-key")
	}
	if ctx.IsSet("stream-idle-timeout") {
		config.StreamIdleTimeout = ctx.String("stream-idle-timeout")
	}
	if ctx.IsSet("version-file") {
		config.VersionFile = ctx.String("version-file")
	}
	if ctx.IsSet("version-file-persist") {
		config.VersionFilePersist = ctx.String("version-file-persist")
	}
	if ctx.IsSet("clean-shutdown-file") {
		config.CleanShutdownFile = ctx.String("clean-shutdown-file")
	}
	if ctx.IsSet("absent-mount-sources-to-reject") {
		config.AbsentMountSourcesToReject = StringSliceTrySplit(ctx, "absent-mount-sources-to-reject")
	}
	if ctx.IsSet("internal-wipe") {
		config.InternalWipe = ctx.Bool("internal-wipe")
	}
	if ctx.IsSet("enable-metrics") {
		config.EnableMetrics = ctx.Bool("enable-metrics")
	}
	if ctx.IsSet("metrics-port") {
		config.MetricsPort = ctx.Int("metrics-port")
	}
	if ctx.IsSet("metrics-socket") {
		config.MetricsSocket = ctx.String("metrics-socket")
	}
	if ctx.IsSet("metrics-cert") {
		config.MetricsCert = ctx.String("metrics-cert")
	}
	if ctx.IsSet("metrics-key") {
		config.MetricsKey = ctx.String("metrics-key")
	}
	if ctx.IsSet("metrics-collectors") {
		config.MetricsCollectors = collectors.FromSlice(ctx.StringSlice("metrics-collectors"))
	}
	if ctx.IsSet("big-files-temporary-dir") {
		config.BigFilesTemporaryDir = ctx.String("big-files-temporary-dir")
	}
	if ctx.IsSet("separate-pull-cgroup") {
		config.SeparatePullCgroup = ctx.String("separate-pull-cgroup")
	}
	if ctx.IsSet("infra-ctr-cpuset") {
		config.InfraCtrCPUSet = ctx.String("infra-ctr-cpuset")
	}
	return nil
}
// GetFlagsAndMetadata returns the CLI flag set derived from the default
// CRI-O configuration, together with a metadata map exposing that default
// configuration under the "config" key.
func GetFlagsAndMetadata() ([]cli.Flag, map[string]interface{}, error) {
	config, err := libconfig.DefaultConfig()
	if err != nil {
		// Wrap instead of reformatting so the root cause remains
		// retrievable by callers (errors.Cause / Unwrap).
		return nil, nil, errors.Wrap(err, "loading server config")
	}

	// TODO FIXME should be crio wipe flags
	flags := getCrioFlags(config)

	metadata := map[string]interface{}{
		"config": config,
	}
	return flags, metadata, nil
}
// getCrioFlags returns the complete CLI flag set for CRI-O. Default values
// are sourced from defConf so that help output and generated metadata match
// the effective server configuration.
//
// Fixes applied in review:
//   - "version-file-persist" previously defaulted to defConf.VersionFile
//     (copy-paste bug); it now defaults to defConf.VersionFilePersist.
//   - "grpc-max-send-msg-size" usage text said "receive" (copy-paste bug).
func getCrioFlags(defConf *libconfig.Config) []cli.Flag {
	return []cli.Flag{
		&cli.StringFlag{
			Name:      "config",
			Aliases:   []string{"c"},
			Value:     libconfig.CrioConfigPath,
			Usage:     "Path to configuration file",
			EnvVars:   []string{"CONTAINER_CONFIG"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "config-dir",
			Aliases: []string{"d"},
			Value:   libconfig.CrioConfigDropInPath,
			Usage: fmt.Sprintf("Path to the configuration drop-in directory."+`
This directory will be recursively iterated and each file gets applied
to the configuration in their processing order. This means that a
configuration file named '00-default' has a lower priority than a file
named '01-my-overwrite'.
The global config file, provided via '--config,-c' or per default in
%s, always has a lower priority than the files in the directory specified
by '--config-dir,-d'.
Besides that, provided command line parameters have a higher priority
than any configuration file.`, libconfig.CrioConfigPath),
			EnvVars:   []string{"CONTAINER_CONFIG_DIR"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "conmon",
			Usage:     fmt.Sprintf("Path to the conmon binary, used for monitoring the OCI runtime. Will be searched for using $PATH if empty. (default: %q)", defConf.Conmon),
			EnvVars:   []string{"CONTAINER_CONMON"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "conmon-cgroup",
			Usage:   "cgroup to be used for conmon process",
			Value:   defConf.ConmonCgroup,
			EnvVars: []string{"CONTAINER_CONMON_CGROUP"},
		},
		&cli.StringFlag{
			Name:      "listen",
			Usage:     "Path to the CRI-O socket",
			Value:     defConf.Listen,
			EnvVars:   []string{"CONTAINER_LISTEN"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "stream-address",
			Usage:   "Bind address for streaming socket",
			Value:   defConf.StreamAddress,
			EnvVars: []string{"CONTAINER_STREAM_ADDRESS"},
		},
		&cli.StringFlag{
			Name:    "stream-port",
			Usage:   "Bind port for streaming socket. If the port is set to '0', then CRI-O will allocate a random free port number.",
			Value:   defConf.StreamPort,
			EnvVars: []string{"CONTAINER_STREAM_PORT"},
		},
		&cli.StringFlag{
			Name:      "log",
			Usage:     "Set the log file path where internal debug information is written",
			EnvVars:   []string{"CONTAINER_LOG"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "log-format",
			Value:   "text",
			Usage:   "Set the format used by logs: 'text' or 'json'",
			EnvVars: []string{"CONTAINER_LOG_FORMAT"},
		},
		&cli.StringFlag{
			Name:    "log-level",
			Aliases: []string{"l"},
			Value:   "info",
			Usage:   "Log messages above specified level: trace, debug, info, warn, error, fatal or panic",
			EnvVars: []string{"CONTAINER_LOG_LEVEL"},
		},
		&cli.StringFlag{
			Name:    "log-filter",
			Usage:   `Filter the log messages by the provided regular expression. For example 'request.\*' filters all gRPC requests.`,
			EnvVars: []string{"CONTAINER_LOG_FILTER"},
		},
		&cli.StringFlag{
			Name:      "log-dir",
			Usage:     "Default log directory where all logs will go unless directly specified by the kubelet",
			Value:     defConf.LogDir,
			EnvVars:   []string{"CONTAINER_LOG_DIR"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "pause-command",
			Usage:   "Path to the pause executable in the pause image",
			Value:   defConf.PauseCommand,
			EnvVars: []string{"CONTAINER_PAUSE_COMMAND"},
		},
		&cli.StringFlag{
			Name:    "pause-image",
			Usage:   "Image which contains the pause executable",
			Value:   defConf.PauseImage,
			EnvVars: []string{"CONTAINER_PAUSE_IMAGE"},
		},
		&cli.StringFlag{
			Name:      "pause-image-auth-file",
			Usage:     fmt.Sprintf("Path to a config file containing credentials for --pause-image (default: %q)", defConf.PauseImageAuthFile),
			EnvVars:   []string{"CONTAINER_PAUSE_IMAGE_AUTH_FILE"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "separate-pull-cgroup",
			Usage:   fmt.Sprintf("[EXPERIMENTAL] Pull in new cgroup (default: %q)", defConf.SeparatePullCgroup),
			EnvVars: []string{"PULL_IN_A_CGROUP"},
		},
		&cli.StringFlag{
			Name:      "global-auth-file",
			Usage:     fmt.Sprintf("Path to a file like /var/lib/kubelet/config.json holding credentials necessary for pulling images from secure registries (default: %q)", defConf.GlobalAuthFile),
			EnvVars:   []string{"CONTAINER_GLOBAL_AUTH_FILE"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "signature-policy",
			Usage:     fmt.Sprintf("Path to signature policy JSON file. (default: %q, to use the system-wide default)", defConf.SignaturePolicyPath),
			EnvVars:   []string{"CONTAINER_SIGNATURE_POLICY"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "root",
			Aliases:   []string{"r"},
			Usage:     "The CRI-O root directory",
			Value:     defConf.Root,
			EnvVars:   []string{"CONTAINER_ROOT"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "runroot",
			Usage:     "The CRI-O state directory",
			Value:     defConf.RunRoot,
			EnvVars:   []string{"CONTAINER_RUNROOT"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "storage-driver",
			Aliases: []string{"s"},
			Usage:   fmt.Sprintf("OCI storage driver (default: %q)", defConf.Storage),
			EnvVars: []string{"CONTAINER_STORAGE_DRIVER"},
		},
		&cli.StringSliceFlag{
			Name:    "storage-opt",
			Value:   cli.NewStringSlice(defConf.StorageOptions...),
			Usage:   "OCI storage driver option",
			EnvVars: []string{"CONTAINER_STORAGE_OPT"},
		},
		&cli.StringSliceFlag{
			Name:  "insecure-registry",
			Value: cli.NewStringSlice(defConf.InsecureRegistries...),
			Usage: "Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication." + `
1. List of insecure registries can contain an element with CIDR notation to
specify a whole subnet.
2. Insecure registries accept HTTP or accept HTTPS with certificates from
unknown CAs.
3. Enabling '--insecure-registry' is useful when running a local registry.
However, because its use creates security vulnerabilities, **it should ONLY
be enabled for testing purposes**. For increased security, users should add
their CA to their system's list of trusted CAs instead of using
'--insecure-registry'.`,
			EnvVars: []string{"CONTAINER_INSECURE_REGISTRY"},
		},
		&cli.StringSliceFlag{
			Name:    "registry",
			Value:   cli.NewStringSlice(defConf.Registries...),
			Usage:   "Registry to be prepended when pulling unqualified images, can be specified multiple times",
			EnvVars: []string{"CONTAINER_REGISTRY"},
		},
		&cli.StringFlag{
			Name:    "default-transport",
			Usage:   "A prefix to prepend to image names that cannot be pulled as-is",
			Value:   defConf.DefaultTransport,
			EnvVars: []string{"CONTAINER_DEFAULT_TRANSPORT"},
		},
		&cli.StringFlag{
			// NOTE(review): no EnvVars entry here, unlike sibling flags —
			// confirm whether CONTAINER_DECRYPTION_KEYS_PATH was
			// intentionally omitted.
			Name:  "decryption-keys-path",
			Usage: "Path to load keys for image decryption.",
			Value: defConf.DecryptionKeysPath,
		},
		&cli.StringFlag{
			Name:    "default-runtime",
			Usage:   "Default OCI runtime from the runtimes config",
			Value:   defConf.DefaultRuntime,
			EnvVars: []string{"CONTAINER_DEFAULT_RUNTIME"},
		},
		&cli.StringSliceFlag{
			Name:    "runtimes",
			Usage:   "OCI runtimes, format is runtime_name:runtime_path:runtime_root:runtime_type:privileged_without_host_devices:runtime_config_path",
			EnvVars: []string{"CONTAINER_RUNTIMES"},
		},
		&cli.StringFlag{
			Name:      "seccomp-profile",
			Usage:     fmt.Sprintf("Path to the seccomp.json profile to be used as the runtime's default. If not specified, then the internal default seccomp profile will be used. (default: %q)", defConf.SeccompProfile),
			EnvVars:   []string{"CONTAINER_SECCOMP_PROFILE"},
			TakesFile: true,
		},
		&cli.StringFlag{
			// NOTE(review): this wraps a boolean config value
			// (SeccompUseDefaultWhenEmpty, formatted with %t) in a
			// StringFlag — confirm whether a BoolFlag was intended.
			Name:    "seccomp-use-default-when-empty",
			Usage:   fmt.Sprintf("Use the default seccomp profile when an empty one is specified (default: %t)", defConf.SeccompUseDefaultWhenEmpty),
			EnvVars: []string{"CONTAINER_SECCOMP_USE_DEFAULT_WHEN_EMPTY"},
		},
		&cli.StringFlag{
			Name:    "apparmor-profile",
			Usage:   "Name of the apparmor profile to be used as the runtime's default. This only takes effect if the user does not specify a profile via the Kubernetes Pod's metadata annotation.",
			Value:   defConf.ApparmorProfile,
			EnvVars: []string{"CONTAINER_APPARMOR_PROFILE"},
		},
		&cli.StringFlag{
			Name:  "blockio-config-file",
			Usage: "Path to the blockio class configuration file for configuring the cgroup blockio controller.",
			Value: defConf.BlockIOConfigFile,
		},
		&cli.StringFlag{
			Name:  "irqbalance-config-file",
			Usage: "The irqbalance service config file which is used by CRI-O.",
			Value: defConf.IrqBalanceConfigFile,
		},
		&cli.StringFlag{
			Name:  "rdt-config-file",
			Usage: "Path to the RDT configuration file for configuring the resctrl pseudo-filesystem",
			Value: defConf.RdtConfigFile,
		},
		&cli.BoolFlag{
			Name:    "selinux",
			Usage:   fmt.Sprintf("Enable selinux support (default: %t)", defConf.SELinux),
			EnvVars: []string{"CONTAINER_SELINUX"},
		},
		&cli.StringFlag{
			Name:    "cgroup-manager",
			Usage:   "cgroup manager (cgroupfs or systemd)",
			Value:   defConf.CgroupManagerName,
			EnvVars: []string{"CONTAINER_CGROUP_MANAGER"},
		},
		&cli.Int64Flag{
			Name:    "pids-limit",
			Value:   libconfig.DefaultPidsLimit,
			Usage:   "Maximum number of processes allowed in a container",
			EnvVars: []string{"CONTAINER_PIDS_LIMIT"},
		},
		&cli.Int64Flag{
			Name:    "log-size-max",
			Value:   libconfig.DefaultLogSizeMax,
			Usage:   "Maximum log size in bytes for a container. If it is positive, it must be >= 8192 to match/exceed conmon read buffer",
			EnvVars: []string{"CONTAINER_LOG_SIZE_MAX"},
		},
		&cli.BoolFlag{
			Name:    "log-journald",
			Usage:   fmt.Sprintf("Log to systemd journal (journald) in addition to kubernetes log file (default: %t)", defConf.LogToJournald),
			EnvVars: []string{"CONTAINER_LOG_JOURNALD"},
		},
		&cli.StringFlag{
			Name:    "cni-default-network",
			Usage:   `Name of the default CNI network to select. If not set or "", then CRI-O will pick-up the first one found in --cni-config-dir.`,
			Value:   defConf.CNIDefaultNetwork,
			EnvVars: []string{"CONTAINER_CNI_DEFAULT_NETWORK"},
		},
		&cli.StringFlag{
			Name:      "cni-config-dir",
			Usage:     "CNI configuration files directory",
			Value:     defConf.NetworkDir,
			EnvVars:   []string{"CONTAINER_CNI_CONFIG_DIR"},
			TakesFile: true,
		},
		&cli.StringSliceFlag{
			Name:    "cni-plugin-dir",
			Value:   cli.NewStringSlice(defConf.PluginDir),
			Usage:   "CNI plugin binaries directory",
			EnvVars: []string{"CONTAINER_CNI_PLUGIN_DIR"},
		},
		&cli.StringFlag{
			Name:  "image-volumes",
			Value: string(libconfig.ImageVolumesMkdir),
			Usage: "Image volume handling ('mkdir', 'bind', or 'ignore')" + `
1. mkdir: A directory is created inside the container root filesystem for
the volumes.
2. bind: A directory is created inside container state directory and bind
mounted into the container for the volumes.
3. ignore: All volumes are just ignored and no action is taken.`,
			EnvVars: []string{"CONTAINER_IMAGE_VOLUMES"},
		},
		&cli.StringSliceFlag{
			Name: "hooks-dir",
			Usage: `Set the OCI hooks directory path (may be set multiple times)
If one of the directories does not exist, then CRI-O will automatically
skip them.
Each '\*.json' file in the path configures a hook for CRI-O
containers. For more details on the syntax of the JSON files and
the semantics of hook injection, see 'oci-hooks(5)'. CRI-O
currently support both the 1.0.0 and 0.1.0 hook schemas, although
the 0.1.0 schema is deprecated.
This option may be set multiple times; paths from later options
have higher precedence ('oci-hooks(5)' discusses directory
precedence).
For the annotation conditions, CRI-O uses the Kubernetes
annotations, which are a subset of the annotations passed to the
OCI runtime. For example, 'io.kubernetes.cri-o.Volumes' is part of
the OCI runtime configuration annotations, but it is not part of
the Kubernetes annotations being matched for hooks.
For the bind-mount conditions, only mounts explicitly requested by
Kubernetes configuration are considered. Bind mounts that CRI-O
inserts by default (e.g. '/dev/shm') are not considered.`,
			Value:   cli.NewStringSlice(defConf.HooksDir...),
			EnvVars: []string{"CONTAINER_HOOKS_DIR"},
		},
		&cli.StringFlag{
			Name:      "default-mounts-file",
			Usage:     fmt.Sprintf("Path to default mounts file (default: %q)", defConf.DefaultMountsFile),
			EnvVars:   []string{"CONTAINER_DEFAULT_MOUNTS_FILE"},
			TakesFile: true,
		},
		&cli.StringSliceFlag{
			Name:    "default-capabilities",
			Usage:   "Capabilities to add to the containers",
			EnvVars: []string{"CONTAINER_DEFAULT_CAPABILITIES"},
			Value:   cli.NewStringSlice(defConf.DefaultCapabilities...),
		},
		&cli.StringSliceFlag{
			Name:    "default-sysctls",
			Usage:   "Sysctls to add to the containers",
			EnvVars: []string{"CONTAINER_DEFAULT_SYSCTLS"},
			Value:   cli.NewStringSlice(defConf.DefaultSysctls...),
		},
		&cli.StringSliceFlag{
			Name:    "default-ulimits",
			Usage:   fmt.Sprintf("Ulimits to apply to containers by default (name=soft:hard) (default: %q)", defConf.DefaultUlimits),
			EnvVars: []string{"CONTAINER_DEFAULT_ULIMITS"},
		},
		&cli.BoolFlag{
			Name:    "profile",
			Usage:   "Enable pprof remote profiler on localhost:6060",
			EnvVars: []string{"CONTAINER_PROFILE"},
		},
		&cli.StringFlag{
			Name:    "profile-cpu",
			Usage:   "Write a pprof CPU profile to the provided path",
			EnvVars: []string{"CONTAINER_PROFILE_CPU"},
		},
		&cli.StringFlag{
			Name:    "profile-mem",
			Usage:   "Write a pprof memory profile to the provided path",
			EnvVars: []string{"CONTAINER_PROFILE_MEM"},
		},
		&cli.IntFlag{
			Name:    "profile-port",
			Value:   6060,
			Usage:   "Port for the pprof profiler",
			EnvVars: []string{"CONTAINER_PROFILE_PORT"},
		},
		&cli.BoolFlag{
			Name:    "enable-profile-unix-socket",
			Usage:   "Enable pprof profiler on crio unix domain socket",
			EnvVars: []string{"ENABLE_PROFILE_UNIX_SOCKET"},
		},
		&cli.BoolFlag{
			Name:    "enable-metrics",
			Usage:   "Enable metrics endpoint for the server on localhost:9090",
			EnvVars: []string{"CONTAINER_ENABLE_METRICS"},
		},
		&cli.IntFlag{
			Name:    "metrics-port",
			Value:   9090,
			Usage:   "Port for the metrics endpoint",
			EnvVars: []string{"CONTAINER_METRICS_PORT"},
		},
		&cli.StringFlag{
			Name:    "metrics-socket",
			Usage:   "Socket for the metrics endpoint",
			EnvVars: []string{"CONTAINER_METRICS_SOCKET"},
		},
		&cli.StringFlag{
			Name:    "metrics-cert",
			Usage:   "Certificate for the secure metrics endpoint",
			EnvVars: []string{"CONTAINER_METRICS_CERT"},
		},
		&cli.StringFlag{
			Name:    "metrics-key",
			Usage:   "Certificate key for the secure metrics endpoint",
			EnvVars: []string{"CONTAINER_METRICS_KEY"},
		},
		&cli.StringSliceFlag{
			Name:    "metrics-collectors",
			Usage:   "Enabled metrics collectors",
			Value:   cli.NewStringSlice(collectors.All().ToSlice()...),
			EnvVars: []string{"CONTAINER_METRICS_COLLECTORS"},
		},
		&cli.StringFlag{
			Name:    "big-files-temporary-dir",
			Usage:   `Path to the temporary directory to use for storing big files, used to store image blobs and data streams related to containers image management.`,
			EnvVars: []string{"CONTAINER_BIG_FILES_TEMPORARY_DIR"},
		},
		&cli.BoolFlag{
			Name:    "read-only",
			Usage:   fmt.Sprintf("Setup all unprivileged containers to run as read-only. Automatically mounts tmpfs on `/run`, `/tmp` and `/var/tmp`. (default: %t)", defConf.ReadOnly),
			EnvVars: []string{"CONTAINER_READ_ONLY"},
		},
		&cli.StringFlag{
			Name:    "bind-mount-prefix",
			Usage:   fmt.Sprintf("A prefix to use for the source of the bind mounts. This option would be useful if you were running CRI-O in a container. And had `/` mounted on `/host` in your container. Then if you ran CRI-O with the `--bind-mount-prefix=/host` option, CRI-O would add /host to any bind mounts it is handed over CRI. If Kubernetes asked to have `/var/lib/foobar` bind mounted into the container, then CRI-O would bind mount `/host/var/lib/foobar`. Since CRI-O itself is running in a container with `/` or the host mounted on `/host`, the container would end up with `/var/lib/foobar` from the host mounted in the container rather then `/var/lib/foobar` from the CRI-O container. (default: %q)", defConf.BindMountPrefix),
			EnvVars: []string{"CONTAINER_BIND_MOUNT_PREFIX"},
		},
		&cli.StringFlag{
			Name:    "uid-mappings",
			Usage:   fmt.Sprintf("Specify the UID mappings to use for the user namespace (default: %q)", defConf.UIDMappings),
			Value:   "",
			EnvVars: []string{"CONTAINER_UID_MAPPINGS"},
		},
		&cli.StringFlag{
			Name:    "gid-mappings",
			Usage:   fmt.Sprintf("Specify the GID mappings to use for the user namespace (default: %q)", defConf.GIDMappings),
			Value:   "",
			EnvVars: []string{"CONTAINER_GID_MAPPINGS"},
		},
		&cli.StringSliceFlag{
			Name:    "additional-devices",
			Usage:   "Devices to add to the containers ",
			Value:   cli.NewStringSlice(defConf.AdditionalDevices...),
			EnvVars: []string{"CONTAINER_ADDITIONAL_DEVICES"},
		},
		&cli.BoolFlag{
			Name:  "device-ownership-from-security-context",
			Usage: "Set devices' uid/gid ownership from runAsUser/runAsGroup",
		},
		&cli.StringSliceFlag{
			Name:    "conmon-env",
			Value:   cli.NewStringSlice(defConf.ConmonEnv...),
			Usage:   "Environment variable list for the conmon process, used for passing necessary environment variables to conmon or the runtime",
			EnvVars: []string{"CONTAINER_CONMON_ENV"},
		},
		&cli.StringSliceFlag{
			Name:    "default-env",
			Value:   cli.NewStringSlice(defConf.DefaultEnv...),
			Usage:   "Additional environment variables to set for all containers",
			EnvVars: []string{"CONTAINER_DEFAULT_ENV"},
		},
		&cli.StringFlag{
			Name:      "container-attach-socket-dir",
			Usage:     "Path to directory for container attach sockets",
			Value:     defConf.ContainerAttachSocketDir,
			EnvVars:   []string{"CONTAINER_ATTACH_SOCKET_DIR"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "container-exits-dir",
			Usage:     "Path to directory in which container exit files are written to by conmon",
			Value:     defConf.ContainerExitsDir,
			EnvVars:   []string{"CONTAINER_EXITS_DIR"},
			TakesFile: true,
		},
		&cli.Int64Flag{
			Name:    "ctr-stop-timeout",
			Usage:   "The minimal amount of time in seconds to wait before issuing a timeout regarding the proper termination of the container. The lowest possible value is 30s, whereas lower values are not considered by CRI-O",
			Value:   defConf.CtrStopTimeout,
			EnvVars: []string{"CONTAINER_STOP_TIMEOUT"},
		},
		&cli.IntFlag{
			Name:    "grpc-max-recv-msg-size",
			Usage:   "Maximum grpc receive message size in bytes",
			Value:   defConf.GRPCMaxRecvMsgSize,
			EnvVars: []string{"CONTAINER_GRPC_MAX_RECV_MSG_SIZE"},
		},
		&cli.IntFlag{
			Name: "grpc-max-send-msg-size",
			// Fixed copy-paste: this is the send size, not the receive size.
			Usage:   "Maximum grpc send message size in bytes",
			Value:   defConf.GRPCMaxSendMsgSize,
			EnvVars: []string{"CONTAINER_GRPC_MAX_SEND_MSG_SIZE"},
		},
		&cli.BoolFlag{
			Name:    "drop-infra-ctr",
			Usage:   fmt.Sprintf("Determines whether pods are created without an infra container, when the pod is not using a pod level PID namespace (default: %v)", defConf.DropInfraCtr),
			EnvVars: []string{"CONTAINER_DROP_INFRA_CTR"},
		},
		&cli.StringFlag{
			Name:    "pinns-path",
			Usage:   fmt.Sprintf("The path to find the pinns binary, which is needed to manage namespace lifecycle. Will be searched for in $PATH if empty (default: %q)", defConf.PinnsPath),
			EnvVars: []string{"CONTAINER_PINNS_PATH"},
		},
		&cli.StringFlag{
			Name:    "namespaces-dir",
			Usage:   "The directory where the state of the managed namespaces gets tracked. Only used when manage-ns-lifecycle is true",
			Value:   defConf.NamespacesDir,
			EnvVars: []string{"CONTAINER_NAMESPACES_DIR"},
		},
		&cli.BoolFlag{
			Name:    "no-pivot",
			Usage:   fmt.Sprintf("If true, the runtime will not use `pivot_root`, but instead use `MS_MOVE` (default: %v)", defConf.NoPivot),
			EnvVars: []string{"CONTAINER_NO_PIVOT"},
		},
		&cli.BoolFlag{
			Name:    "stream-enable-tls",
			Usage:   fmt.Sprintf("Enable encrypted TLS transport of the stream server (default: %v)", defConf.StreamEnableTLS),
			EnvVars: []string{"CONTAINER_ENABLE_TLS"},
		},
		&cli.StringFlag{
			Name:      "stream-tls-ca",
			Usage:     fmt.Sprintf("Path to the x509 CA(s) file used to verify and authenticate client communication with the encrypted stream. This file can change and CRI-O will automatically pick up the changes within 5 minutes (default: %q)", defConf.StreamTLSCA),
			EnvVars:   []string{"CONTAINER_TLS_CA"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "stream-tls-cert",
			Usage:     fmt.Sprintf("Path to the x509 certificate file used to serve the encrypted stream. This file can change and CRI-O will automatically pick up the changes within 5 minutes (default: %q)", defConf.StreamTLSCert),
			EnvVars:   []string{"CONTAINER_TLS_CERT"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:      "stream-tls-key",
			Usage:     fmt.Sprintf("Path to the key file used to serve the encrypted stream. This file can change and CRI-O will automatically pick up the changes within 5 minutes (default: %q)", defConf.StreamTLSKey),
			EnvVars:   []string{"CONTAINER_TLS_KEY"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:    "stream-idle-timeout",
			Usage:   "Length of time until open streams terminate due to lack of activity",
			EnvVars: []string{"STREAM_IDLE_TIMEOUT"},
		},
		&cli.StringFlag{
			Name:        "registries-conf",
			Usage:       "path to the registries.conf file",
			Destination: &defConf.SystemContext.SystemRegistriesConfPath,
			Hidden:      true,
			EnvVars:     []string{"CONTAINER_REGISTRIES_CONF"},
			TakesFile:   true,
		},
		&cli.StringFlag{
			Name:        "registries-conf-dir",
			Usage:       "path to the registries.conf.d directory",
			Destination: &defConf.SystemContext.SystemRegistriesConfDirPath,
			Hidden:      true,
			EnvVars:     []string{"CONTAINER_REGISTRIES_CONF_DIR"},
			TakesFile:   true,
		},
		&cli.StringFlag{
			Name:   "address",
			Usage:  "address used for the publish command",
			Hidden: true,
		},
		&cli.StringFlag{
			Name:      "version-file",
			Usage:     "Location for CRI-O to lay down the temporary version file. It is used to check if crio wipe should wipe containers, which should always happen on a node reboot",
			Value:     defConf.VersionFile,
			EnvVars:   []string{"CONTAINER_VERSION_FILE"},
			TakesFile: true,
		},
		&cli.StringFlag{
			Name:  "version-file-persist",
			Usage: "Location for CRI-O to lay down the persistent version file. It is used to check if crio wipe should wipe images, which should only happen when CRI-O has been upgraded",
			// Fixed copy-paste: default previously pointed at
			// defConf.VersionFile (the temporary file) instead of the
			// persistent one.
			Value:     defConf.VersionFilePersist,
			EnvVars:   []string{"CONTAINER_VERSION_FILE_PERSIST"},
			TakesFile: true,
		},
		&cli.BoolFlag{
			Name:    "internal-wipe",
			Usage:   "Whether CRI-O should wipe containers after a reboot and images after an upgrade when the server starts. If set to false, one must run `crio wipe` to wipe the containers and images in these situations. This option is deprecated, and will be removed in the future.",
			Value:   defConf.InternalWipe,
			EnvVars: []string{"CONTAINER_INTERNAL_WIPE"},
		},
		&cli.StringFlag{
			Name:    "infra-ctr-cpuset",
			Usage:   "CPU set to run infra containers, if not specified CRI-O will use all online CPUs to run infra containers (default: '').",
			EnvVars: []string{"CONTAINER_INFRA_CTR_CPUSET"},
		},
		&cli.StringFlag{
			Name:      "clean-shutdown-file",
			Usage:     "Location for CRI-O to lay down the clean shutdown file. It indicates whether we've had time to sync changes to disk before shutting down. If not found, crio wipe will clear the storage directory",
			Value:     defConf.CleanShutdownFile,
			EnvVars:   []string{"CONTAINER_CLEAN_SHUTDOWN_FILE"},
			TakesFile: true,
		},
		&cli.StringSliceFlag{
			Name:    "absent-mount-sources-to-reject",
			Value:   cli.NewStringSlice(defConf.AbsentMountSourcesToReject...),
			Usage:   "A list of paths that, when absent from the host, will cause a container creation to fail (as opposed to the current behavior of creating a directory).",
			EnvVars: []string{"CONTAINER_ABSENT_MOUNT_SOURCES_TO_REJECT"},
		},
	}
}
// StringSliceTrySplit parses the string slice from the CLI context.
// If the parsing returns just a single item, then we try to parse it by `,`
// to allow users to provide their flags comma separated. The returned slice
// is always safe to mutate by the caller.
func StringSliceTrySplit(ctx *cli.Context, name string) []string {
	values := ctx.StringSlice(name)
	const separator = ","

	// It looks like we only parsed one item; see if there are more.
	if len(values) == 1 && strings.Contains(values[0], separator) {
		values = strings.Split(values[0], separator)

		// Trim whitespace around every resulting value.
		for i := range values {
			values[i] = strings.TrimSpace(values[i])
		}

		// Fixed typo: "commma" -> "comma".
		logrus.Infof(
			"Parsed comma separated CLI flag %q into dedicated values %v",
			name, values,
		)
	} else {
		// Copy the slice to avoid the cli flags being overwritten.
		values = append(values[:0:0], values...)
	}
	return values
}
| GetAndMergeConfigFromContext |
styles.ts | import styled from "styled-components";
// Full-width 90px header bar that centers its content.
// Fixed: was `styled.head` — <head> is the document metadata element and
// must not be used as a visible layout container; render a semantic
// <header> element instead.
export const HeaderContainer = styled.header `
width: 100%;
height: 90px;
background: var(--BACKGROUND);
display: flex;
justify-content: center;
align-items: center;
`
// Inner wrapper constraining header content to 80% width, with items
// pushed to opposite ends.
export const HeaderWrapper = styled.div `
width: 80%;
height: 90px;
display: flex;
align-items: center;
justify-content: space-between;
`
export const UserInfo = styled.div `
display: flex;
align-items: center;
justify-content: center;
| color: var(--PRIMARY);
}
` | text-align: right;
span {
font-weight: bold;
|
customlayers.py | # --------------------------------------------------------
# Written by: Romuald FOTSO
# Licensed: MIT License
# Copyright (c) 2017
# Based on 'dandxy89' github repository:
# https://github.com/dandxy89/ImageModels/blob/master/KerasLayers/Custom_layers.py
# --------------------------------------------------------
from keras.engine import Layer
from keras import backend as K
class LRN2D(Layer):
    """Local Response Normalization layer over adjacent channels.

    Normalizes each activation by a scaled sum of squares of the ``n``
    neighboring channels (centered window), as popularized by AlexNet
    (Krizhevsky et al., 2012): ``x / (k + alpha * sum(x_j^2)) ** beta``.

    Note: the constructor name was garbled in the source (``def | (``);
    restored to ``__init__`` per the accompanying marker.
    """

    def __init__(self, alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
        # The window must be centered on each channel, so n has to be odd.
        if n % 2 == 0:
            raise NotImplementedError(
                "LRN2D only works with odd n. n provided: " + str(n))
        super(LRN2D, self).__init__(**kwargs)
        self.alpha = alpha  # scaling factor applied to the squared sum
        self.k = k          # additive bias inside the normalization term
        self.beta = beta    # exponent of the normalization term
        self.n = n          # number of adjacent channels in the window

    def get_output(self, train):
        """Apply LRN to the input tensor (assumes channels-first layout:
        (batch, channels, rows, cols) — TODO confirm against backend config).
        """
        X = self.get_input(train)
        b, ch, r, c = K.shape(X)
        half_n = self.n // 2
        input_sqr = K.square(X)
        # Zero-pad half_n channels on each side so the window is defined at
        # the channel boundaries.
        extra_channels = K.zeros((b, ch + 2 * half_n, r, c))
        input_sqr = K.concatenate([extra_channels[:, :half_n, :, :],
                                   input_sqr,
                                   extra_channels[:, half_n + ch:, :, :]],
                                  axis=1)
        # Accumulate the sliding-window sum of squares across channels.
        scale = self.k
        for i in range(self.n):
            scale += self.alpha * input_sqr[:, i:i + ch, :, :]
        scale = scale ** self.beta
        return X / scale

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {"name": self.__class__.__name__,
                  "alpha": self.alpha,
                  "k": self.k,
                  "beta": self.beta,
                  "n": self.n}
        base_config = super(LRN2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| __init__ |
server.rs | // Copyright (c) 2016 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cell::UnsafeCell;
use std::error;
use std::result;
use std::sync::{Arc, RwLock};
use fnv::FnvHasher;
use libc;
use protobuf::{self, parse_from_bytes};
use protobuf::core::Message as ProtoBufMessage;
use protocol::{self, Routable, RouteKey};
use time;
use zmq;
use config::{self, RouteAddrs, Shards, ToAddrString};
use error::{Error, Result};
// Interval between heartbeat pings to a router, in milliseconds.
const PING_INTERVAL: i64 = 2000;
// Time-to-live of a server registration, in milliseconds.
const SERVER_TTL: i64 = 6000;
// Maximum number of routing hops an envelope may accumulate (loop guard).
const MAX_HOPS: usize = 8;

lazy_static! {
    /// A threadsafe shared ZMQ context for consuming services.
    ///
    /// You probably want to use this context to create new ZMQ sockets unless you *do not* want to
    /// connect them together using an in-proc queue.
    pub static ref ZMQ_CONTEXT: Box<ServerContext> = {
        let ctx = ServerContext::new();
        Box::new(ctx)
    };
}
/// This is a wrapper to provide interior mutability of an underlying `zmq::Context` and allows
/// for sharing/sending of a `zmq::Context` between threads.
pub struct ServerContext(UnsafeCell<zmq::Context>);

impl ServerContext {
    /// Wraps a freshly created `zmq::Context`.
    pub fn new() -> Self {
        ServerContext(UnsafeCell::new(zmq::Context::new()))
    }

    /// Returns a mutable reference to the wrapped context.
    ///
    /// NOTE(review): this hands out `&mut` from `&` via `UnsafeCell` with no
    /// synchronization; soundness relies on callers never holding two
    /// mutable references concurrently — confirm this invariant at call
    /// sites before relying on the `Send`/`Sync` impls below.
    pub fn as_mut(&self) -> &mut zmq::Context {
        unsafe { &mut *self.0.get() }
    }
}

unsafe impl Send for ServerContext {}
unsafe impl Sync for ServerContext {}
/// A routed protobuf message together with the ZMQ routing frames ("hops")
/// it arrived with, so a reply can retrace the same path.
pub struct Envelope {
    /// The wrapped network message.
    pub msg: protocol::net::Msg,
    // Routing path frames, replayed in order when a reply header is sent.
    hops: Vec<zmq::Message>,
    // True once the reply header has been emitted for the current reply
    // sequence; reset() clears it.
    started: bool,
}
impl Envelope {
    /// Builds an envelope from a received routing path and message.
    pub fn new(hops: Vec<zmq::Message>, msg: protocol::net::Msg) -> Self {
        let mut env = Envelope::default();
        env.hops = hops;
        env.msg = msg;
        env
    }

    /// Appends a routing hop, failing with `Error::MaxHops` once the hop
    /// list is full.
    pub fn add_hop(&mut self, hop: zmq::Message) -> Result<()> {
        if self.max_hops() {
            return Err(Error::MaxHops);
        }
        self.hops.push(hop);
        Ok(())
    }

    /// Raw protobuf payload of the wrapped message.
    pub fn body(&self) -> &[u8] {
        self.msg.get_body()
    }

    /// Routing frames accumulated for this envelope.
    pub fn hops(&self) -> &Vec<zmq::Message> {
        &self.hops
    }

    /// True when the hop list has reached MAX_HOPS (routing loop guard).
    pub fn max_hops(&self) -> bool {
        self.hops.len() >= MAX_HOPS
    }

    /// Message identifier of the wrapped message.
    pub fn message_id(&self) -> &str {
        self.msg.get_message_id()
    }

    /// Routing metadata of the wrapped message.
    pub fn route_info(&self) -> &protocol::net::RouteInfo {
        self.msg.get_route_info()
    }

    /// Protocol recorded in the message's routing metadata.
    pub fn protocol(&self) -> protocol::net::Protocol {
        self.msg.get_route_info().get_protocol()
    }

    /// Sends a reply frame with SNDMORE set, keeping the reply sequence
    /// open for further frames; use `reply_complete` for the final frame.
    pub fn reply<M: ProtoBufMessage>(&mut self, sock: &mut zmq::Socket, msg: &M) -> Result<()> {
        try!(self.send_header(sock));
        let rep = protocol::Message::new(msg).build();
        try!(sock.send(&rep.write_to_bytes().unwrap(), zmq::SNDMORE));
        Ok(())
    }

    /// Sends the final reply frame (no SNDMORE), ending the reply sequence.
    pub fn reply_complete<M: ProtoBufMessage>(&mut self,
                                              sock: &mut zmq::Socket,
                                              msg: &M)
                                              -> Result<()> {
        try!(self.send_header(sock));
        let rep = protocol::Message::new(msg).build();
        let bytes = try!(rep.write_to_bytes());
        try!(sock.send(&bytes, 0));
        Ok(())
    }

    /// Decodes the payload into the concrete protobuf type `M`.
    pub fn parse_msg<M: protobuf::MessageStatic>(&self) -> Result<M> {
        let msg: M = try!(parse_from_bytes(&self.body()));
        Ok(msg)
    }

    /// Clears the hops, payload and header state so the envelope can be
    /// reused for the next request.
    pub fn reset(&mut self) {
        self.started = false;
        self.hops.clear();
        self.msg = protocol::net::Msg::new();
    }

    // Emits the routing header exactly once per reply sequence: every hop
    // frame, an empty delimiter frame, then the "RP" marker — all with
    // SNDMORE so the payload frame can follow.
    fn send_header(&mut self, sock: &mut zmq::Socket) -> Result<()> {
        if !self.started {
            for hop in self.hops.iter() {
                sock.send(hop, zmq::SNDMORE).unwrap();
            }
            sock.send(&[], zmq::SNDMORE).unwrap();
            sock.send_str("RP", zmq::SNDMORE).unwrap();
            self.started = true;
        }
        Ok(())
    }
}
impl Default for Envelope {
    /// An empty, unrouted envelope with hop storage preallocated so pushing
    /// up to MAX_HOPS frames never reallocates.
    fn default() -> Envelope {
        let empty_msg = protocol::net::Msg::new();
        let hop_storage = Vec::with_capacity(MAX_HOPS);
        Envelope {
            msg: empty_msg,
            hops: hop_storage,
            started: false,
        }
    }
}
/// A long-running application hosted by a service.
pub trait Application {
    type Error: error::Error;

    /// Runs the application until completion or error.
    fn run(&mut self) -> result::Result<(), Self::Error>;
}
/// Supplies a process-unique network identity string.
pub trait NetIdent {
    /// Optional component name included in the identity (default: none).
    fn component() -> Option<&'static str> {
        None
    }

    /// Identity of the form `component#pid@hostname`, or `pid@hostname`
    /// when no component name is provided.
    ///
    /// NOTE(review): unwraps hostname lookup — panics if it fails; confirm
    /// this is acceptable at startup.
    fn net_ident() -> String {
        let hostname = super::hostname().unwrap();
        let pid = unsafe { libc::getpid() };
        if let Some(component) = Self::component() {
            format!("{}#{}@{}", component, pid, hostname)
        } else {
            format!("{}@{}", pid, hostname)
        }
    }
}
/// A networked service that registers with and routes through routers.
pub trait Service: NetIdent {
    type Application: Application;
    type Config: config::RouteAddrs + config::Shards;
    type Error: error::Error + From<Error> + From<zmq::Error>;

    /// Protocol this service announces when registering.
    fn protocol() -> protocol::net::Protocol;

    /// Shared, lock-protected service configuration.
    fn config(&self) -> &Arc<RwLock<Self::Config>>;

    /// Router connection accessors.
    fn conn(&self) -> &RouteConn;
    fn conn_mut(&mut self) -> &mut RouteConn;

    /// Performs the registration handshake with every configured router
    /// over the heartbeat socket, then connects the main routing socket to
    /// each router address.
    fn connect(&mut self) -> result::Result<(), Self::Error> {
        let mut reg = protocol::routesrv::Registration::new();
        reg.set_protocol(Self::protocol());
        reg.set_endpoint(Self::net_ident());
        // Collect all addresses while holding the config read lock, then
        // release it before doing any blocking socket I/O.
        let (hb_addrs, addrs) = {
            let cfg = self.config().read().unwrap();
            reg.set_shards(cfg.shards().clone());
            let hb_addrs: Vec<String> = cfg.route_addrs()
                .iter()
                .map(|f| format!("tcp://{}:{}", f.ip(), cfg.heartbeat_port()))
                .collect();
            let addrs: Vec<String> = cfg.route_addrs()
                .iter()
                .map(|f| f.to_addr_string())
                .collect();
            (hb_addrs, addrs)
        };
        for addr in &hb_addrs {
            println!("Connecting to {:?}...", addr);
            try!(self.conn_mut().register(&addr));
        }
        // Handshake: for each router, receive its probe, answer with an "R"
        // frame followed by the registration payload, then await the ack.
        let mut ready = 0;
        let mut rt = try!(zmq::Message::new());
        let mut hb = try!(zmq::Message::new());
        while ready < hb_addrs.len() {
            try!(self.conn_mut().heartbeat.recv(&mut rt, 0));
            try!(self.conn_mut().heartbeat.recv(&mut hb, 0));
            debug!("received reg request, {:?}", hb.as_str());
            try!(self.conn_mut().heartbeat.send_str("R", zmq::SNDMORE));
            try!(self.conn_mut().heartbeat.send(&reg.write_to_bytes().unwrap(), 0));
            try!(self.conn_mut().heartbeat.recv(&mut hb, 0));
            ready += 1;
        }
        for addr in addrs {
            try!(self.conn_mut().connect(&addr));
        }
        println!("Connected");
        Ok(())
    }
}
/// Registration record for a connected server, tracked by the router.
#[derive(Eq)]
pub struct ServerReg {
    /// Server identifier
    pub endpoint: String,
    /// True if known to be alive
    pub alive: bool,
    /// Next ping at this time
    pub ping_at: i64,
    /// Connection expires at this time
    pub expires: i64,
}

// Hash must agree with equality. The manual `PartialEq` impl below compares
// only `endpoint`, but the previous `#[derive(Hash)]` hashed every field
// (including the mutable timing fields), violating the Hash/Eq contract for
// hash-based collections. Hash only the endpoint instead.
impl ::std::hash::Hash for ServerReg {
    fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
        self.endpoint.hash(state);
    }
}
impl ServerReg {
pub fn new(endpoint: String) -> Self {
let now_ms = Self::clock_time();
ServerReg {
endpoint: endpoint,
alive: false,
ping_at: now_ms + PING_INTERVAL,
expires: now_ms + SERVER_TTL,
}
}
pub fn | () -> i64 {
let timespec = time::get_time();
(timespec.sec as i64 * 1000) + (timespec.nsec as i64 / 1000 / 1000)
}
pub fn ping(&mut self, socket: &mut zmq::Socket) -> Result<()> {
let now_ms = Self::clock_time();
if now_ms >= self.ping_at {
let ping = protocol::net::Ping::new();
let req = protocol::Message::new(&ping).build();
let bytes = try!(req.write_to_bytes());
try!(socket.send(&bytes, 0));
self.ping_at = Self::clock_time() + PING_INTERVAL;
}
Ok(())
}
}
impl PartialEq for ServerReg {
    /// Two registrations are equal when they describe the same endpoint;
    /// the liveness and timer fields are deliberately ignored.
    fn eq(&self, other: &ServerReg) -> bool {
        self.endpoint == other.endpoint
    }
}
/// Socket pair used to talk to a RouteSrv: `socket` carries routed
/// requests, `heartbeat` carries registration/heartbeat traffic.
pub struct RouteConn {
    pub ident: String,
    pub socket: zmq::Socket,
    pub heartbeat: zmq::Socket,
    // Hashes a message's route key to derive a routing hash (see `route`).
    hasher: FnvHasher,
}
impl RouteConn {
    /// Creates the request and heartbeat DEALER sockets for `ident`.
    /// The heartbeat socket gets an `hb#`-prefixed identity and announces
    /// itself to routers via `set_probe_router`.
    pub fn new(ident: String, context: &mut zmq::Context) -> Result<Self> {
        let socket = try!(context.socket(zmq::DEALER));
        let heartbeat = try!(context.socket(zmq::DEALER));
        try!(socket.set_identity(ident.as_bytes()));
        try!(heartbeat.set_identity(format!("hb#{}", ident).as_bytes()));
        try!(heartbeat.set_probe_router(true));
        Ok(RouteConn {
            ident: ident,
            socket: socket,
            heartbeat: heartbeat,
            hasher: FnvHasher::default(),
        })
    }
    /// Connects the request socket to a router address.
    pub fn connect(&mut self, addr: &str) -> Result<()> {
        try!(self.socket.connect(addr));
        Ok(())
    }
    /// Connects the heartbeat socket to a router's heartbeat address.
    pub fn register(&mut self, addr: &str) -> Result<()> {
        try!(self.heartbeat.connect(addr));
        Ok(())
    }
    /// Receives and decodes the next protobuf message from the request
    /// socket.
    // NOTE(review): a decode failure panics via `unwrap` here rather than
    // surfacing through the Result — confirm this is intended.
    pub fn recv(&mut self, flags: i32) -> Result<protocol::net::Msg> {
        let envelope = try!(self.socket.recv_msg(flags));
        let msg: protocol::net::Msg = parse_from_bytes(&envelope).unwrap();
        Ok(msg)
    }
    /// Serializes `msg` and sends it, tagged with a routing hash derived
    /// from the message's route key (if it has one).
    pub fn route<M: Routable>(&mut self, msg: &M) -> Result<()> {
        let route_hash = msg.route_key().map(|key| key.hash(&mut self.hasher));
        let req = protocol::Message::new(msg).routing(route_hash).build();
        let bytes = try!(req.write_to_bytes());
        try!(self.socket.send(&bytes, 0));
        Ok(())
    }
}
// NOTE(review): a dataset separator row was fused into the text here; the
// displaced fragment "clock_time" is the method name missing from the
// ServerReg impl above.
// --- MockStorageApi.ts ---
/*
* Copyright 2020 The Backstage Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { StorageApi, StorageValueSnapshot } from '@backstage/core-plugin-api';
import { JsonValue, Observable } from '@backstage/types';
import ObservableImpl from 'zen-observable';
/**
 * Type for the map holding data in {@link MockStorageApi}.
 *
 * Keys are namespaced key names (see `getKeyName`); values are stored
 * as-is, by reference.
 * @public
 */
export type MockStorageBucket = { [key: string]: any };
/**
* Mock implementation of the {@link core-plugin-api#StorageApi} to be used in tests
* @public
*/
export class MockStorageApi implements StorageApi {
private readonly namespace: string;
private readonly data: MockStorageBucket;
private readonly bucketStorageApis: Map<string, MockStorageApi>;
private constructor(
namespace: string,
bucketStorageApis: Map<string, MockStorageApi>,
data?: MockStorageBucket,
) {
this.namespace = namespace;
this.bucketStorageApis = bucketStorageApis;
this.data = { ...data };
}
static create(data?: MockStorageBucket) {
return new MockStorageApi('', new Map(), data);
}
forBucket(name: string): StorageApi {
if (!this.bucketStorageApis.has(name)) {
this.bucketStorageApis.set(
name,
new MockStorageApi(
`${this.namespace}/${name}`,
this.bucketStorageApis,
this.data,
),
);
}
return this.bucketStorageApis.get(name)!;
}
get<T>(key: string): T | undefined {
return this.snapshot(key).value as T | undefined;
}
snapshot<T extends JsonValue>(key: string): StorageValueSnapshot<T> {
if (this.data.hasOwnProperty(this.getKeyName(key))) {
const data = this.data[this.getKeyName(key)];
return {
key,
presence: 'present',
value: data,
newValue: data,
};
}
return {
key,
presence: 'absent',
value: undefined,
newValue: undefined,
}; | }
async set<T>(key: string, data: T): Promise<void> {
this.data[this.getKeyName(key)] = data;
this.notifyChanges({
key,
presence: 'present',
value: data,
newValue: data,
});
}
async remove(key: string): Promise<void> {
delete this.data[this.getKeyName(key)];
this.notifyChanges({
key,
presence: 'absent',
value: undefined,
newValue: undefined,
});
}
observe$<T>(key: string): Observable<StorageValueSnapshot<T>> {
return this.observable.filter(({ key: messageKey }) => messageKey === key);
}
private getKeyName(key: string) {
return `${this.namespace}/${encodeURIComponent(key)}`;
}
private notifyChanges<T>(message: StorageValueSnapshot<T>) {
for (const subscription of this.subscribers) {
subscription.next(message);
}
}
private subscribers = new Set<
ZenObservable.SubscriptionObserver<StorageValueSnapshot<JsonValue>>
>();
private readonly observable = new ObservableImpl<
StorageValueSnapshot<JsonValue>
>(subscriber => {
this.subscribers.add(subscriber);
return () => {
this.subscribers.delete(subscriber);
};
});
} | |
// --- bytes.go ---
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bytes implements functions for the manipulation of byte slices.
// It is analogous to the facilities of the strings package.
package bytes
import (
"internal/bytealg"
"unicode"
"unicode/utf8"
)
// Equal reports whether a and b
// are the same length and contain the same bytes.
// A nil argument is equivalent to an empty slice.
func Equal(a, b []byte) bool {
	// Neither cmd/compile nor gccgo allocates for these string conversions.
	// The compiler recognizes this pattern and compares the bytes directly
	// without copying.
	return string(a) == string(b)
}
// Compare returns an integer comparing two byte slices lexicographically.
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
// A nil argument is equivalent to an empty slice.
func Compare(a, b []byte) int {
	// Delegates to the optimized (often assembly) implementation.
	return bytealg.Compare(a, b)
}
// explode splits s into a slice of UTF-8 sequences, one per Unicode code
// point (still slices of bytes), up to a maximum of n byte slices. Invalid
// UTF-8 sequences are chopped into individual bytes. When only one slot
// remains, it receives all of the remaining input.
func explode(s []byte, n int) [][]byte {
	if n <= 0 {
		n = len(s)
	}
	out := make([][]byte, 0, n)
	for len(s) > 0 {
		if len(out) == n-1 {
			// Last available slot: take the rest verbatim.
			out = append(out, s)
			break
		}
		_, w := utf8.DecodeRune(s)
		out = append(out, s[:w:w])
		s = s[w:]
	}
	return out
}
// Count counts the number of non-overlapping instances of sep in s.
// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
func Count(s, sep []byte) int {
	// special case
	if len(sep) == 0 {
		return utf8.RuneCount(s) + 1
	}
	if len(sep) == 1 {
		// Single-byte needle: use the optimized byte counter.
		return bytealg.Count(s, sep[0])
	}
	n := 0
	// Repeatedly find the next match and skip past it (non-overlapping).
	for {
		i := Index(s, sep)
		if i == -1 {
			return n
		}
		n++
		s = s[i+len(sep):]
	}
}
// Contains reports whether subslice is within b.
// An empty subslice is always contained (Index returns 0 for it).
func Contains(b, subslice []byte) bool {
	return Index(b, subslice) != -1
}
// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
// An empty chars yields false (IndexAny returns -1 for it).
func ContainsAny(b []byte, chars string) bool {
	return IndexAny(b, chars) >= 0
}
// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
// Invalid runes are reported as not contained (IndexRune returns -1 for them).
func ContainsRune(b []byte, r rune) bool {
	return IndexRune(b, r) >= 0
}
// IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
func IndexByte(b []byte, c byte) int {
	// Delegates to the optimized (often assembly) implementation.
	return bytealg.IndexByte(b, c)
}
// indexBytePortable is a pure-Go fallback for IndexByte: it returns the
// index of the first instance of c in s, or -1 if c is not present.
func indexBytePortable(s []byte, c byte) int {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return i
		}
	}
	return -1
}
// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
func LastIndex(s, sep []byte) int {
n := len(sep)
switch {
case n == 0:
return len(s)
case n == 1:
return LastIndexByte(s, sep[0])
case n == len(s):
if Equal(s, sep) {
return 0
}
return -1
case n > len(s):
return -1
}
// Rabin-Karp search from the end of the string
hashss, pow := bytealg.HashStrRevBytes(sep)
last := len(s) - n
var h uint32
for i := len(s) - 1; i >= last; i-- {
h = h*bytealg.PrimeRK + uint32(s[i])
}
if h == hashss && Equal(s[last:], sep) {
return last | h -= pow * uint32(s[i+n])
if h == hashss && Equal(s[i:i+n], sep) {
return i
}
}
return -1
}
// LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
func LastIndexByte(s []byte, c byte) int {
	i := len(s)
	for i > 0 {
		i--
		if s[i] == c {
			return i
		}
	}
	return -1
}
// IndexRune interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index of the first occurrence in s of the given rune.
// It returns -1 if rune is not present in s.
// If r is utf8.RuneError, it returns the first instance of any
// invalid UTF-8 byte sequence.
func IndexRune(s []byte, r rune) int {
	switch {
	case 0 <= r && r < utf8.RuneSelf:
		// ASCII fast path.
		return IndexByte(s, byte(r))
	case r == utf8.RuneError:
		// RuneError matches any invalid sequence, not just the literal
		// encoding of U+FFFD, so decode rune by rune.
		for i := 0; i < len(s); {
			r1, n := utf8.DecodeRune(s[i:])
			if r1 == utf8.RuneError {
				return i
			}
			i += n
		}
		return -1
	case !utf8.ValidRune(r):
		return -1
	default:
		// Encode r once and search for its byte sequence.
		var b [utf8.UTFMax]byte
		n := utf8.EncodeRune(b[:], r)
		return Index(s, b[:n])
	}
}
// IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
// It returns the byte index of the first occurrence in s of any of the Unicode
// code points in chars. It returns -1 if chars is empty or if there is no code
// point in common.
func IndexAny(s []byte, chars string) int {
	if chars == "" {
		// Avoid scanning all of s.
		return -1
	}
	if len(s) == 1 {
		// Single-byte haystack: compare directly, no decoding needed.
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			// search utf8.RuneError.
			for _, r = range chars {
				if r == utf8.RuneError {
					return 0
				}
			}
			return -1
		}
		if bytealg.IndexByteString(chars, s[0]) >= 0 {
			return 0
		}
		return -1
	}
	if len(chars) == 1 {
		// Single-rune charset: reduce to IndexRune.
		r := rune(chars[0])
		if r >= utf8.RuneSelf {
			r = utf8.RuneError
		}
		return IndexRune(s, r)
	}
	if len(s) > 8 {
		// ASCII-only charset: use a 128-bit membership set per byte.
		if as, isASCII := makeASCIISet(chars); isASCII {
			for i, c := range s {
				if as.contains(c) {
					return i
				}
			}
			return -1
		}
	}
	// General case: decode s rune by rune and test membership in chars.
	var width int
	for i := 0; i < len(s); i += width {
		r := rune(s[i])
		if r < utf8.RuneSelf {
			if bytealg.IndexByteString(chars, s[i]) >= 0 {
				return i
			}
			width = 1
			continue
		}
		r, width = utf8.DecodeRune(s[i:])
		if r != utf8.RuneError {
			// r is 2 to 4 bytes
			if len(chars) == width {
				if chars == string(r) {
					return i
				}
				continue
			}
			// Use bytealg.IndexString for performance if available.
			if bytealg.MaxLen >= width {
				if bytealg.IndexString(chars, string(r)) >= 0 {
					return i
				}
				continue
			}
		}
		for _, ch := range chars {
			if r == ch {
				return i
			}
		}
	}
	return -1
}
// LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
// points. It returns the byte index of the last occurrence in s of any of
// the Unicode code points in chars. It returns -1 if chars is empty or if
// there is no code point in common.
func LastIndexAny(s []byte, chars string) int {
	if chars == "" {
		// Avoid scanning all of s.
		return -1
	}
	if len(s) > 8 {
		// ASCII-only charset: scan backward with a 128-bit membership set.
		if as, isASCII := makeASCIISet(chars); isASCII {
			for i := len(s) - 1; i >= 0; i-- {
				if as.contains(s[i]) {
					return i
				}
			}
			return -1
		}
	}
	if len(s) == 1 {
		// Single-byte haystack: compare directly, no decoding needed.
		r := rune(s[0])
		if r >= utf8.RuneSelf {
			for _, r = range chars {
				if r == utf8.RuneError {
					return 0
				}
			}
			return -1
		}
		if bytealg.IndexByteString(chars, s[0]) >= 0 {
			return 0
		}
		return -1
	}
	if len(chars) == 1 {
		// Single-rune charset: decode s from the end looking for it.
		cr := rune(chars[0])
		if cr >= utf8.RuneSelf {
			cr = utf8.RuneError
		}
		for i := len(s); i > 0; {
			r, size := utf8.DecodeLastRune(s[:i])
			i -= size
			if r == cr {
				return i
			}
		}
		return -1
	}
	// General case: decode s backward rune by rune and test membership.
	for i := len(s); i > 0; {
		r := rune(s[i-1])
		if r < utf8.RuneSelf {
			if bytealg.IndexByteString(chars, s[i-1]) >= 0 {
				return i - 1
			}
			i--
			continue
		}
		r, size := utf8.DecodeLastRune(s[:i])
		i -= size
		if r != utf8.RuneError {
			// r is 2 to 4 bytes
			if len(chars) == size {
				if chars == string(r) {
					return i
				}
				continue
			}
			// Use bytealg.IndexString for performance if available.
			if bytealg.MaxLen >= size {
				if bytealg.IndexString(chars, string(r)) >= 0 {
					return i
				}
				continue
			}
		}
		for _, ch := range chars {
			if r == ch {
				return i
			}
		}
	}
	return -1
}
// Generic split: splits after each instance of sep,
// including sepSave bytes of sep in the subslices.
// n bounds the number of subslices exactly as documented on SplitN.
func genSplit(s, sep []byte, sepSave, n int) [][]byte {
	if n == 0 {
		return nil
	}
	if len(sep) == 0 {
		return explode(s, n)
	}
	if n < 0 {
		// Unlimited: pre-compute the exact result size.
		n = Count(s, sep) + 1
	}
	a := make([][]byte, n)
	n--
	i := 0
	for i < n {
		m := Index(s, sep)
		if m < 0 {
			break
		}
		// Three-index slice so callers appending to a subslice cannot
		// stomp the shared backing array.
		a[i] = s[: m+sepSave : m+sepSave]
		s = s[m+len(sep):]
		i++
	}
	// The final subslice is the unsplit remainder.
	a[i] = s
	return a[:i+1]
}
// SplitN slices s into subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, SplitN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
//   n > 0: at most n subslices; the last subslice will be the unsplit remainder.
//   n == 0: the result is nil (zero subslices)
//   n < 0: all subslices
func SplitN(s, sep []byte, n int) [][]byte { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
// The count determines the number of subslices to return:
//   n > 0: at most n subslices; the last subslice will be the unsplit remainder.
//   n == 0: the result is nil (zero subslices)
//   n < 0: all subslices
func SplitAfterN(s, sep []byte, n int) [][]byte {
	// sepSave = len(sep) keeps the separator attached to each subslice.
	return genSplit(s, sep, len(sep), n)
}
// Split slices s into all subslices separated by sep and returns a slice of
// the subslices between those separators.
// If sep is empty, Split splits after each UTF-8 sequence.
// It is equivalent to SplitN with a count of -1.
func Split(s, sep []byte) [][]byte { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all subslices after each instance of sep and
// returns a slice of those subslices.
// If sep is empty, SplitAfter splits after each UTF-8 sequence.
// It is equivalent to SplitAfterN with a count of -1.
func SplitAfter(s, sep []byte) [][]byte {
	return genSplit(s, sep, len(sep), -1)
}
// asciiSpace is a per-byte lookup table: 1 for each ASCII white space byte.
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
// Fields interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s around each instance of one or more consecutive white space
// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
// empty slice if s contains only white space.
func Fields(s []byte) [][]byte {
	// First count the fields.
	// This is an exact count if s is ASCII, otherwise it is an approximation.
	n := 0
	wasSpace := 1
	// setBits is used to track which bits are set in the bytes of s.
	setBits := uint8(0)
	for i := 0; i < len(s); i++ {
		r := s[i]
		setBits |= r
		isSpace := int(asciiSpace[r])
		// A field starts where the previous byte was space and this one is not.
		n += wasSpace & ^isSpace
		wasSpace = isSpace
	}
	if setBits >= utf8.RuneSelf {
		// Some runes in the input slice are not ASCII.
		return FieldsFunc(s, unicode.IsSpace)
	}
	// ASCII fast path
	a := make([][]byte, n)
	na := 0
	fieldStart := 0
	i := 0
	// Skip spaces in the front of the input.
	for i < len(s) && asciiSpace[s[i]] != 0 {
		i++
	}
	fieldStart = i
	for i < len(s) {
		if asciiSpace[s[i]] == 0 {
			i++
			continue
		}
		a[na] = s[fieldStart:i:i]
		na++
		i++
		// Skip spaces in between fields.
		for i < len(s) && asciiSpace[s[i]] != 0 {
			i++
		}
		fieldStart = i
	}
	if fieldStart < len(s) { // Last field might end at EOF.
		a[na] = s[fieldStart:len(s):len(s)]
	}
	return a
}
// FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
// It splits the slice s at each run of code points c satisfying f(c) and
// returns a slice of subslices of s. If all code points in s satisfy f(c), or
// len(s) == 0, an empty slice is returned.
//
// FieldsFunc makes no guarantees about the order in which it calls f(c)
// and assumes that f always returns the same value for a given c.
func FieldsFunc(s []byte, f func(rune) bool) [][]byte {
	// A span is used to record a slice of s of the form s[start:end].
	// The start index is inclusive and the end index is exclusive.
	type span struct {
		start int
		end   int
	}
	spans := make([]span, 0, 32)
	// Find the field start and end indices.
	// Doing this in a separate pass (rather than slicing the string s
	// and collecting the result substrings right away) is significantly
	// more efficient, possibly due to cache effects.
	start := -1 // valid span start if >= 0
	for i := 0; i < len(s); {
		size := 1
		r := rune(s[i])
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeRune(s[i:])
		}
		if f(r) {
			// Separator: close any open span.
			if start >= 0 {
				spans = append(spans, span{start, i})
				start = -1
			}
		} else {
			// Field byte: open a span if none is open.
			if start < 0 {
				start = i
			}
		}
		i += size
	}
	// Last field might end at EOF.
	if start >= 0 {
		spans = append(spans, span{start, len(s)})
	}
	// Create subslices from recorded field indices.
	a := make([][]byte, len(spans))
	for i, span := range spans {
		a[i] = s[span.start:span.end:span.end]
	}
	return a
}
// Join concatenates the elements of s to create a new byte slice. The
// separator sep is placed between elements in the resulting slice.
func Join(s [][]byte, sep []byte) []byte {
	switch len(s) {
	case 0:
		return []byte{}
	case 1:
		// Just return a copy.
		return append([]byte(nil), s[0]...)
	}
	// Total size is all pieces plus one separator between each pair.
	total := len(sep) * (len(s) - 1)
	for _, piece := range s {
		total += len(piece)
	}
	out := make([]byte, 0, total)
	out = append(out, s[0]...)
	for _, piece := range s[1:] {
		out = append(out, sep...)
		out = append(out, piece...)
	}
	return out
}
// HasPrefix tests whether the byte slice s begins with prefix.
// An empty prefix always matches.
func HasPrefix(s, prefix []byte) bool {
	return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
}
// HasSuffix tests whether the byte slice s ends with suffix.
// An empty suffix always matches.
func HasSuffix(s, suffix []byte) bool {
	return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
}
// Map returns a copy of the byte slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the byte slice with no replacement. The characters in s and the
// output are interpreted as UTF-8-encoded code points.
func Map(mapping func(r rune) rune, s []byte) []byte {
	// In the worst case, the slice can grow when mapped, making
	// things unpleasant. But it's so rare we barge in assuming it's
	// fine. It could also shrink but that falls out naturally.
	maxbytes := len(s) // length of b
	nbytes := 0        // number of bytes encoded in b
	b := make([]byte, maxbytes)
	for i := 0; i < len(s); {
		wid := 1
		r := rune(s[i])
		if r >= utf8.RuneSelf {
			r, wid = utf8.DecodeRune(s[i:])
		}
		r = mapping(r)
		if r >= 0 {
			rl := utf8.RuneLen(r)
			if rl < 0 {
				// Unencodable mapped rune: reserve room for U+FFFD,
				// which EncodeRune will emit.
				rl = len(string(utf8.RuneError))
			}
			if nbytes+rl > maxbytes {
				// Grow the buffer.
				maxbytes = maxbytes*2 + utf8.UTFMax
				nb := make([]byte, maxbytes)
				copy(nb, b[0:nbytes])
				b = nb
			}
			nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
		}
		i += wid
	}
	return b[0:nbytes]
}
// Repeat returns a new byte slice consisting of count copies of b.
//
// It panics if count is negative or if
// the result of (len(b) * count) overflows.
func Repeat(b []byte, count int) []byte {
	if count == 0 {
		return []byte{}
	}
	// Since we cannot return an error on overflow,
	// we should panic if the repeat will generate
	// an overflow.
	// See Issue golang.org/issue/16237.
	if count < 0 {
		panic("bytes: negative Repeat count")
	} else if len(b)*count/count != len(b) {
		panic("bytes: Repeat count causes overflow")
	}
	out := make([]byte, len(b)*count)
	// Fill by doubling: each copy duplicates everything written so far.
	n := copy(out, b)
	for n < len(out) {
		n += copy(out[n:], out[:n])
	}
	return out
}
// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
// their upper case.
func ToUpper(s []byte) []byte {
	// One scan to learn whether s is pure ASCII and whether any byte
	// actually needs changing.
	isASCII, hasLower := true, false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			isASCII = false
			break
		}
		hasLower = hasLower || ('a' <= c && c <= 'z')
	}
	if isASCII { // optimize for ASCII-only byte slices.
		if !hasLower {
			// Just return a copy.
			return append([]byte(""), s...)
		}
		b := make([]byte, len(s))
		for i := 0; i < len(s); i++ {
			c := s[i]
			if 'a' <= c && c <= 'z' {
				c -= 'a' - 'A'
			}
			b[i] = c
		}
		return b
	}
	return Map(unicode.ToUpper, s)
}
// ToLower returns a copy of the byte slice s with all Unicode letters mapped to
// their lower case.
func ToLower(s []byte) []byte {
	// One scan to learn whether s is pure ASCII and whether any byte
	// actually needs changing.
	isASCII, hasUpper := true, false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= utf8.RuneSelf {
			isASCII = false
			break
		}
		hasUpper = hasUpper || ('A' <= c && c <= 'Z')
	}
	if isASCII { // optimize for ASCII-only byte slices.
		if !hasUpper {
			return append([]byte(""), s...)
		}
		b := make([]byte, len(s))
		for i := 0; i < len(s); i++ {
			c := s[i]
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			}
			b[i] = c
		}
		return b
	}
	return Map(unicode.ToLower, s)
}
// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) }
// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// upper case, giving priority to the special casing rules.
func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToUpper, s)
}
// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// lower case, giving priority to the special casing rules.
func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToLower, s)
}
// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
// title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte {
	return Map(c.ToTitle, s)
}
// ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each
// run of bytes representing invalid UTF-8 replaced with the bytes in
// replacement, which may be empty.
func ToValidUTF8(s, replacement []byte) []byte {
	out := make([]byte, 0, len(s)+len(replacement))
	inBadRun := false // true while inside an already-replaced invalid run
	for i := 0; i < len(s); {
		if c := s[i]; c < utf8.RuneSelf {
			// ASCII byte: always valid, copy it through.
			out = append(out, c)
			i++
			inBadRun = false
			continue
		}
		_, size := utf8.DecodeRune(s[i:])
		if size > 1 {
			// Well-formed multi-byte rune: copy its bytes verbatim.
			out = append(out, s[i:i+size]...)
			i += size
			inBadRun = false
			continue
		}
		// size == 1 for a non-ASCII lead byte means an invalid sequence;
		// emit the replacement once per run of invalid bytes.
		if !inBadRun {
			out = append(out, replacement...)
			inBadRun = true
		}
		i++
	}
	return out
}
// isSeparator reports whether the rune could mark a word boundary.
// TODO: update when package unicode captures more of the properties.
func isSeparator(r rune) bool {
	if r <= 0x7F {
		// ASCII alphanumerics and underscore are not separators;
		// every other ASCII rune is.
		switch {
		case '0' <= r && r <= '9',
			'a' <= r && r <= 'z',
			'A' <= r && r <= 'Z',
			r == '_':
			return false
		}
		return true
	}
	// Letters and digits are not separators.
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return false
	}
	// Otherwise, all we can do for now is treat spaces as separators.
	return unicode.IsSpace(r)
}
// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
// words mapped to their title case.
//
// BUG(rsc): The rule Title uses for word boundaries does not handle Unicode punctuation properly.
func Title(s []byte) []byte {
	// Use a closure here to remember state.
	// Hackish but effective. Depends on Map scanning in order and calling
	// the closure once per rune.
	prev := ' '
	return Map(
		func(r rune) rune {
			// Title-case a rune only when the previous rune was a separator.
			if isSeparator(prev) {
				prev = r
				return unicode.ToTitle(r)
			}
			prev = r
			return r
		},
		s)
}
// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
// all leading UTF-8-encoded code points c that satisfy f(c).
func TrimLeftFunc(s []byte, f func(r rune) bool) []byte {
	// Find the first rune NOT satisfying f; everything before it is trimmed.
	i := indexFunc(s, f, false)
	if i == -1 {
		// All runes satisfied f: result is nil, not an empty subslice.
		return nil
	}
	return s[i:]
}
// TrimRightFunc returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points c that satisfy f(c).
func TrimRightFunc(s []byte, f func(r rune) bool) []byte {
	// Find the last rune NOT satisfying f, then keep through its full
	// (possibly multi-byte) encoding.
	i := lastIndexFunc(s, f, false)
	if i >= 0 && s[i] >= utf8.RuneSelf {
		_, wid := utf8.DecodeRune(s[i:])
		i += wid
	} else {
		i++
	}
	return s[0:i]
}
// TrimFunc returns a subslice of s by slicing off all leading and trailing
// UTF-8-encoded code points c that satisfy f(c).
func TrimFunc(s []byte, f func(r rune) bool) []byte {
	return TrimRightFunc(TrimLeftFunc(s, f), f)
}
// TrimPrefix returns s without the provided leading prefix string.
// If s doesn't start with prefix, s is returned unchanged.
func TrimPrefix(s, prefix []byte) []byte {
	if HasPrefix(s, prefix) {
		return s[len(prefix):]
	}
	return s
}
// TrimSuffix returns s without the provided trailing suffix string.
// If s doesn't end with suffix, s is returned unchanged.
func TrimSuffix(s, suffix []byte) []byte {
	if HasSuffix(s, suffix) {
		return s[:len(s)-len(suffix)]
	}
	return s
}
// IndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the first Unicode
// code point satisfying f(c), or -1 if none do.
func IndexFunc(s []byte, f func(r rune) bool) int {
	return indexFunc(s, f, true)
}
// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
// It returns the byte index in s of the last Unicode
// code point satisfying f(c), or -1 if none do.
func LastIndexFunc(s []byte, f func(r rune) bool) int {
	return lastIndexFunc(s, f, true)
}
// indexFunc is the same as IndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func indexFunc(s []byte, f func(r rune) bool, truth bool) int {
	for i := 0; i < len(s); {
		// Fast path: an ASCII byte is its own rune, width 1.
		r, w := rune(s[i]), 1
		if r >= utf8.RuneSelf {
			r, w = utf8.DecodeRune(s[i:])
		}
		if f(r) == truth {
			return i
		}
		i += w
	}
	return -1
}
// lastIndexFunc is the same as LastIndexFunc except that if
// truth==false, the sense of the predicate function is
// inverted.
func lastIndexFunc(s []byte, f func(r rune) bool, truth bool) int {
	for i := len(s); i > 0; {
		var r rune
		size := 1
		// Fast path: a trailing ASCII byte is its own rune.
		if b := s[i-1]; b < utf8.RuneSelf {
			r = rune(b)
		} else {
			r, size = utf8.DecodeLastRune(s[0:i])
		}
		i -= size
		if f(r) == truth {
			return i
		}
	}
	return -1
}
// asciiSet is a 32-byte value, where each bit represents the presence of a
// given ASCII character in the set. The 128-bits of the lower 16 bytes,
// starting with the least-significant bit of the lowest word to the
// most-significant bit of the highest word, map to the full range of all
// 128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
// ensuring that any non-ASCII character will be reported as not in the set.
type asciiSet [8]uint32

// makeASCIISet creates a set of ASCII characters and reports whether all
// characters in chars are ASCII.
func makeASCIISet(chars string) (as asciiSet, ok bool) {
	for i := 0; i < len(chars); i++ {
		c := chars[i]
		if c >= utf8.RuneSelf {
			return as, false
		}
		// Word c/32 holds the bit for c, at position c%32.
		as[c/32] |= 1 << (c % 32)
	}
	return as, true
}

// contains reports whether c is inside the set.
func (as *asciiSet) contains(c byte) bool {
	return as[c/32]&(1<<(c%32)) != 0
}
// makeCutsetFunc returns a predicate reporting whether a rune is in cutset,
// using an asciiSet bitmap when cutset is ASCII-only and a linear scan
// otherwise.
func makeCutsetFunc(cutset string) func(r rune) bool {
	if as, isASCII := makeASCIISet(cutset); isASCII {
		return func(r rune) bool {
			return r < utf8.RuneSelf && as.contains(byte(r))
		}
	}
	return func(r rune) bool {
		for _, c := range cutset {
			if c == r {
				return true
			}
		}
		return false
	}
}
// Trim returns a subslice of s by slicing off all leading and
// trailing UTF-8-encoded code points contained in cutset.
func Trim(s []byte, cutset string) []byte {
	// Fast path for the common single-ASCII-byte cutset.
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimLeftByte(trimRightByte(s, cutset[0]), cutset[0])
	}
	return TrimFunc(s, makeCutsetFunc(cutset))
}
// TrimLeft returns a subslice of s by slicing off all leading
// UTF-8-encoded code points contained in cutset.
func TrimLeft(s []byte, cutset string) []byte {
	// Fast path for the common single-ASCII-byte cutset.
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimLeftByte(s, cutset[0])
	}
	return TrimLeftFunc(s, makeCutsetFunc(cutset))
}
// trimLeftByte returns s without its leading run of c bytes.
func trimLeftByte(s []byte, c byte) []byte {
	i := 0
	for i < len(s) && s[i] == c {
		i++
	}
	return s[i:]
}
// TrimRight returns a subslice of s by slicing off all trailing
// UTF-8-encoded code points that are contained in cutset.
func TrimRight(s []byte, cutset string) []byte {
	// Fast path for the common single-ASCII-byte cutset.
	if len(cutset) == 1 && cutset[0] < utf8.RuneSelf {
		return trimRightByte(s, cutset[0])
	}
	return TrimRightFunc(s, makeCutsetFunc(cutset))
}
// trimRightByte returns s without its trailing run of c bytes.
func trimRightByte(s []byte, c byte) []byte {
	n := len(s)
	for n > 0 && s[n-1] == c {
		n--
	}
	return s[:n]
}
// TrimSpace returns a subslice of s by slicing off all leading and
// trailing white space, as defined by Unicode.
func TrimSpace(s []byte) []byte {
	// Fast path for ASCII: look for the first ASCII non-space byte
	start := 0
	for ; start < len(s); start++ {
		c := s[start]
		if c >= utf8.RuneSelf {
			// If we run into a non-ASCII byte, fall back to the
			// slower unicode-aware method on the remaining bytes
			return TrimFunc(s[start:], unicode.IsSpace)
		}
		if asciiSpace[c] == 0 {
			break
		}
	}
	// Now look for the first ASCII non-space byte from the end
	stop := len(s)
	for ; stop > start; stop-- {
		c := s[stop-1]
		if c >= utf8.RuneSelf {
			return TrimFunc(s[start:stop], unicode.IsSpace)
		}
		if asciiSpace[c] == 0 {
			break
		}
	}
	// At this point s[start:stop] starts and ends with an ASCII
	// non-space bytes, so we're done. Non-ASCII cases have already
	// been handled above.
	if start == stop {
		// Special case to preserve previous TrimLeftFunc behavior,
		// returning nil instead of empty slice if all spaces.
		return nil
	}
	return s[start:stop]
}
// Runes interprets s as a sequence of UTF-8-encoded code points.
// It returns a slice of runes (Unicode code points) equivalent to s.
func Runes(s []byte) []rune {
	out := make([]rune, 0, utf8.RuneCount(s))
	for len(s) > 0 {
		r, size := utf8.DecodeRune(s)
		out = append(out, r)
		s = s[size:]
	}
	return out
}
// Replace returns a copy of the slice s with the first n
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
// If n < 0, there is no limit on the number of replacements.
func Replace(s, old, new []byte, n int) []byte {
	m := 0
	if n != 0 {
		// Compute number of replacements.
		m = Count(s, old)
	}
	if m == 0 {
		// Just return a copy.
		return append([]byte(nil), s...)
	}
	if n < 0 || m < n {
		n = m
	}
	// Apply replacements to buffer.
	// The exact output length is known up front.
	t := make([]byte, len(s)+n*(len(new)-len(old)))
	w := 0
	start := 0
	for i := 0; i < n; i++ {
		j := start
		if len(old) == 0 {
			// Empty old: advance past one rune between replacements.
			if i > 0 {
				_, wid := utf8.DecodeRune(s[start:])
				j += wid
			}
		} else {
			j += Index(s[start:], old)
		}
		w += copy(t[w:], s[start:j])
		w += copy(t[w:], new)
		start = j + len(old)
	}
	// Copy the untouched tail.
	w += copy(t[w:], s[start:])
	return t[0:w]
}
// ReplaceAll returns a copy of the slice s with all
// non-overlapping instances of old replaced by new.
// If old is empty, it matches at the beginning of the slice
// and after each UTF-8 sequence, yielding up to k+1 replacements
// for a k-rune slice.
func ReplaceAll(s, old, new []byte) []byte {
	return Replace(s, old, new, -1)
}
// EqualFold reports whether s and t, interpreted as UTF-8 strings,
// are equal under Unicode case-folding, which is a more general
// form of case-insensitivity.
func EqualFold(s, t []byte) bool {
	for len(s) != 0 && len(t) != 0 {
		// Extract first rune from each.
		var sr, tr rune
		if s[0] < utf8.RuneSelf {
			sr, s = rune(s[0]), s[1:]
		} else {
			r, size := utf8.DecodeRune(s)
			sr, s = r, s[size:]
		}
		if t[0] < utf8.RuneSelf {
			tr, t = rune(t[0]), t[1:]
		} else {
			r, size := utf8.DecodeRune(t)
			tr, t = r, t[size:]
		}
		// If they match, keep going; if not, return false.
		// Easy case.
		if tr == sr {
			continue
		}
		// Make sr < tr to simplify what follows.
		if tr < sr {
			tr, sr = sr, tr
		}
		// Fast check for ASCII.
		if tr < utf8.RuneSelf {
			// ASCII only, sr/tr must be upper/lower case
			if 'A' <= sr && sr <= 'Z' && tr == sr+'a'-'A' {
				continue
			}
			return false
		}
		// General case. SimpleFold(x) returns the next equivalent rune > x
		// or wraps around to smaller values.
		// Walk sr's fold orbit upward until we pass tr or loop back.
		r := unicode.SimpleFold(sr)
		for r != sr && r < tr {
			r = unicode.SimpleFold(r)
		}
		if r == tr {
			continue
		}
		return false
	}
	// One string is empty. Are both?
	return len(s) == len(t)
}
// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
func Index(s, sep []byte) int {
n := len(sep)
switch {
case n == 0:
return 0
case n == 1:
return IndexByte(s, sep[0])
case n == len(s):
if Equal(sep, s) {
return 0
}
return -1
case n > len(s):
return -1
case n <= bytealg.MaxLen:
// Use brute force when s and sep both are small
if len(s) <= bytealg.MaxBruteForce {
return bytealg.Index(s, sep)
}
c0 := sep[0]
c1 := sep[1]
i := 0
t := len(s) - n + 1
fails := 0
for i < t {
if s[i] != c0 {
// IndexByte is faster than bytealg.Index, so use it as long as
// we're not getting lots of false positives.
o := IndexByte(s[i+1:t], c0)
if o < 0 {
return -1
}
i += o + 1
}
if s[i+1] == c1 && Equal(s[i:i+n], sep) {
return i
}
fails++
i++
// Switch to bytealg.Index when IndexByte produces too many false positives.
if fails > bytealg.Cutover(i) {
r := bytealg.Index(s[i:], sep)
if r >= 0 {
return r + i
}
return -1
}
}
return -1
}
c0 := sep[0]
c1 := sep[1]
i := 0
fails := 0
t := len(s) - n + 1
for i < t {
if s[i] != c0 {
o := IndexByte(s[i+1:t], c0)
if o < 0 {
break
}
i += o + 1
}
if s[i+1] == c1 && Equal(s[i:i+n], sep) {
return i
}
i++
fails++
if fails >= bytealg.Cutover(i) && i < t {
// Give up on IndexByte, it isn't skipping ahead
// far enough to be better than Rabin-Karp.
// Experiments (using IndexPeriodic) suggest
// the cutover is about 16 byte skips.
// TODO: if large prefixes of sep are matching
// we should cutover at even larger average skips,
// because Equal becomes that much more expensive.
// This code does not take that effect into account.
j := bytealg.IndexRabinKarpBytes(s[i:], sep)
if j < 0 {
return -1
}
return i + j
}
}
return -1
} | }
// NOTE(review): the lines below are a fragment that a bad extraction
// displaced out of LastIndex (now restored in place above), followed by
// scraped web-page boilerplate; preserved here as comments so no content
// is lost.
//	for i := last - 1; i >= 0; i-- {
//		h *= bytealg.PrimeRK
//		h += uint32(s[i])
// Subsets and Splits
// No community queries yet
// The top public SQL queries from the community will appear here once available.